language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/input.py | {
"start": 12512,
"end": 12828
} | class ____(NamedTuple("_InputPointer", [("node_name", str), ("input_name", str)])):
def __new__(cls, node_name: str, input_name: str):
return super().__new__(
cls,
check.str_param(node_name, "node_name"),
check.str_param(input_name, "input_name"),
)
| InputPointer |
python | walkccc__LeetCode | solutions/146. LRU Cache/146.py | {
"start": 142,
"end": 1219
} | class ____:
def __init__(self, capacity: int):
self.capacity = capacity
self.keyToNode = {}
self.head = Node(-1, -1)
self.tail = Node(-1, -1)
self.join(self.head, self.tail)
def get(self, key: int) -> int:
if key not in self.keyToNode:
return -1
node = self.keyToNode[key]
self.remove(node)
self.moveToHead(node)
return node.value
def put(self, key: int, value: int) -> None:
if key in self.keyToNode:
node = self.keyToNode[key]
node.value = value
self.remove(node)
self.moveToHead(node)
return
if len(self.keyToNode) == self.capacity:
lastNode = self.tail.prev
del self.keyToNode[lastNode.key]
self.remove(lastNode)
self.moveToHead(Node(key, value))
self.keyToNode[key] = self.head.next
def join(self, node1: Node, node2: Node):
node1.next = node2
node2.prev = node1
def moveToHead(self, node: Node):
self.join(node, self.head.next)
self.join(self.head, node)
def remove(self, node: Node):
self.join(node.prev, node.next)
| LRUCache |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/elements.py | {
"start": 178186,
"end": 182472
} | class ____(util.MemoizedSlots, str):
"""Represent a SQL identifier combined with quoting preferences.
:class:`.quoted_name` is a Python unicode/str subclass which
represents a particular identifier name along with a
``quote`` flag. This ``quote`` flag, when set to
``True`` or ``False``, overrides automatic quoting behavior
for this identifier in order to either unconditionally quote
or to not quote the name. If left at its default of ``None``,
quoting behavior is applied to the identifier on a per-backend basis
based on an examination of the token itself.
A :class:`.quoted_name` object with ``quote=True`` is also
prevented from being modified in the case of a so-called
"name normalize" option. Certain database backends, such as
Oracle Database, Firebird, and DB2 "normalize" case-insensitive names
as uppercase. The SQLAlchemy dialects for these backends
convert from SQLAlchemy's lower-case-means-insensitive convention
to the upper-case-means-insensitive conventions of those backends.
The ``quote=True`` flag here will prevent this conversion from occurring
to support an identifier that's quoted as all lower case against
such a backend.
The :class:`.quoted_name` object is normally created automatically
when specifying the name for key schema constructs such as
:class:`_schema.Table`, :class:`_schema.Column`, and others.
The class can also be
passed explicitly as the name to any function that receives a name which
can be quoted. Such as to use the :meth:`_engine.Engine.has_table`
method with
an unconditionally quoted name::
from sqlalchemy import create_engine
from sqlalchemy import inspect
from sqlalchemy.sql import quoted_name
engine = create_engine("oracle+oracledb://some_dsn")
print(inspect(engine).has_table(quoted_name("some_table", True)))
The above logic will run the "has table" logic against the Oracle Database
backend, passing the name exactly as ``"some_table"`` without converting to
upper case.
"""
__slots__ = "quote", "lower", "upper"
quote: Optional[bool]
@overload
@classmethod
def construct(cls, value: str, quote: Optional[bool]) -> quoted_name: ...
@overload
@classmethod
def construct(cls, value: None, quote: Optional[bool]) -> None: ...
@classmethod
def construct(
cls, value: Optional[str], quote: Optional[bool]
) -> Optional[quoted_name]:
if value is None:
return None
else:
return quoted_name(value, quote)
def __new__(cls, value: str, quote: Optional[bool]) -> quoted_name:
assert (
value is not None
), "use quoted_name.construct() for None passthrough"
if isinstance(value, cls) and (quote is None or value.quote == quote):
return value
self = super().__new__(cls, value)
self.quote = quote
return self
def __reduce__(self):
return quoted_name, (str(self), self.quote)
def _memoized_method_lower(self):
if self.quote:
return self
else:
return str(self).lower()
def _memoized_method_upper(self):
if self.quote:
return self
else:
return str(self).upper()
def _find_columns(clause: ClauseElement) -> Set[ColumnClause[Any]]:
"""locate Column objects within the given expression."""
cols: Set[ColumnClause[Any]] = set()
traverse(clause, {}, {"column": cols.add})
return cols
def _type_from_args(args: Sequence[ColumnElement[_T]]) -> TypeEngine[_T]:
for a in args:
if not a.type._isnull:
return a.type
else:
return type_api.NULLTYPE # type: ignore
def _corresponding_column_or_error(fromclause, column, require_embedded=False):
c = fromclause.corresponding_column(
column, require_embedded=require_embedded
)
if c is None:
raise exc.InvalidRequestError(
"Given column '%s', attached to table '%s', "
"failed to locate a corresponding column from table '%s'"
% (column, getattr(column, "table", None), fromclause.description)
)
return c
| quoted_name |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/svd_op_test.py | {
"start": 13914,
"end": 17625
} | class ____(test.Benchmark):
shapes = [
(4, 4),
(8, 8),
(16, 16),
(101, 101),
(256, 256),
(1024, 1024),
(2048, 2048),
(1, 8, 8),
(10, 8, 8),
(100, 8, 8),
(1000, 8, 8),
(1, 32, 32),
(10, 32, 32),
(100, 32, 32),
(1000, 32, 32),
(1, 256, 256),
(10, 256, 256),
(100, 256, 256),
]
def benchmarkSVDOp(self):
for shape_ in self.shapes:
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/cpu:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),
min_iters=25,
name="SVD_cpu_{shape}".format(shape=shape_))
if test.is_gpu_available(True):
with ops.Graph().as_default(), \
session.Session(config=benchmark.benchmark_config()) as sess, \
ops.device("/device:GPU:0"):
matrix_value = np.random.uniform(
low=-1.0, high=1.0, size=shape_).astype(np.float32)
matrix = variables.Variable(matrix_value)
u, s, v = linalg_ops.svd(matrix)
self.evaluate(variables.global_variables_initializer())
self.run_op_benchmark(
sess,
control_flow_ops.group(u, s, v),
min_iters=25,
name="SVD_gpu_{shape}".format(shape=shape_))
if __name__ == "__main__":
dtypes_to_test = [np.float32, np.float64, np.complex64, np.complex128]
for compute_uv in False, True:
for full_matrices in False, True:
for dtype in dtypes_to_test:
for rows in 0, 1, 2, 5, 10, 32, 100:
for cols in 0, 1, 2, 5, 10, 32, 100:
for batch_dims in [(), (3,)] + [(3, 2)] * (max(rows, cols) < 10):
full_shape = batch_dims + (rows, cols)
for use_static_shape in set([True, False]):
name = "%s_%s_static_shape_%s__compute_uv_%s_full_%s" % (
dtype.__name__, "_".join(map(str, full_shape)),
use_static_shape, compute_uv, full_matrices)
_AddTest(
SvdOpTest, "Svd", name,
_GetSvdOpTest(dtype, full_shape, use_static_shape,
compute_uv, full_matrices))
for compute_uv in False, True:
for full_matrices in False, True:
dtypes = ([np.float32, np.float64] + [np.complex64, np.complex128] *
(not compute_uv))
for dtype in dtypes:
mat_shapes = [(10, 11), (11, 10), (11, 11), (2, 2, 2, 3)]
if not full_matrices or not compute_uv:
mat_shapes += [(5, 11), (11, 5)]
for mat_shape in mat_shapes:
for batch_dims in [(), (3,)]:
full_shape = batch_dims + mat_shape
name = "%s_%s_compute_uv_%s_full_%s" % (dtype.__name__, "_".join(
map(str, full_shape)), compute_uv, full_matrices)
_AddTest(
SvdGradOpTest, "SvdGrad", name,
_GetSvdGradOpTest(dtype, full_shape, compute_uv, full_matrices))
# The results are too inaccurate for float32.
if dtype in (np.float64, np.complex128):
_AddTest(
SvdGradGradOpTest, "SvdGradGrad", name,
_GetSvdGradGradOpTest(dtype, full_shape, compute_uv,
full_matrices))
test.main()
| SVDBenchmark |
python | getsentry__sentry | src/sentry/notifications/platform/templates/sample.py | {
"start": 868,
"end": 4384
} | class ____(NotificationTemplate[ErrorAlertData]):
category = NotificationCategory.DEBUG
example_data = ErrorAlertData(
error_type="ValueError",
error_message="'NoneType' object has no attribute 'get'",
project_name="my-app",
issue_id="12345",
error_count=15,
first_seen="2024-01-15 14:30:22 UTC",
chart_url="https://example.com/chart",
issue_url="https://example.com/issues",
assign_url="https://example.com/assign",
)
def render(self, data: ErrorAlertData) -> NotificationRenderedTemplate:
return NotificationRenderedTemplate(
subject=f"{data.error_count} new {data.error_type} errors in {data.project_name}",
body=[
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text="A new",
),
CodeTextBlock(
type=NotificationBodyTextBlockType.CODE,
text=data.error_type,
),
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text=f"error has been detected in {data.project_name} with",
),
BoldTextBlock(
type=NotificationBodyTextBlockType.BOLD_TEXT,
text=f"{data.error_count} occurrences.",
),
],
),
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text="The error message is:",
)
],
),
CodeBlock(
type=NotificationBodyFormattingBlockType.CODE_BLOCK,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text=data.error_message,
),
],
),
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text=f"This error was first seen at {data.first_seen} and requires immediate attention.",
)
],
),
],
actions=[
NotificationRenderedAction(label="View Issue", link="https://example.com/issues"),
NotificationRenderedAction(label="Assign to Me", link="https://example.com/assign"),
],
chart=NotificationRenderedImage(
url="https://github.com/knobiknows/all-the-bufo/blob/main/all-the-bufo/all-the-bufo.png?raw=true",
alt_text="Error occurrence chart",
),
footer="This alert was triggered by your error monitoring rules.",
)
@dataclass(frozen=True)
| ErrorAlertNotificationTemplate |
python | doocs__leetcode | solution/1400-1499/1406.Stone Game III/Solution.py | {
"start": 0,
"end": 540
} | class ____:
def stoneGameIII(self, stoneValue: List[int]) -> str:
@cache
def dfs(i: int) -> int:
if i >= n:
return 0
ans, s = -inf, 0
for j in range(3):
if i + j >= n:
break
s += stoneValue[i + j]
ans = max(ans, s - dfs(i + j + 1))
return ans
n = len(stoneValue)
ans = dfs(0)
if ans == 0:
return 'Tie'
return 'Alice' if ans > 0 else 'Bob'
| Solution |
python | pypa__packaging | src/packaging/specifiers.py | {
"start": 26668,
"end": 39539
} | class ____(BaseSpecifier):
"""This class abstracts handling of a set of version specifiers.
It can be passed a single specifier (``>=3.0``), a comma-separated list of
specifiers (``>=3.0,!=3.1``), or no specifier at all.
"""
__slots__ = ("_prereleases", "_specs")
def __init__(
self,
specifiers: str | Iterable[Specifier] = "",
prereleases: bool | None = None,
) -> None:
"""Initialize a SpecifierSet instance.
:param specifiers:
The string representation of a specifier or a comma-separated list of
specifiers which will be parsed and normalized before use.
May also be an iterable of ``Specifier`` instances, which will be used
as is.
:param prereleases:
This tells the SpecifierSet if it should accept prerelease versions if
applicable or not. The default of ``None`` will autodetect it from the
given specifiers.
:raises InvalidSpecifier:
If the given ``specifiers`` are not parseable than this exception will be
raised.
"""
if isinstance(specifiers, str):
# Split on `,` to break each individual specifier into its own item, and
# strip each item to remove leading/trailing whitespace.
split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()]
# Make each individual specifier a Specifier and save in a frozen set
# for later.
self._specs = frozenset(map(Specifier, split_specifiers))
else:
# Save the supplied specifiers in a frozen set.
self._specs = frozenset(specifiers)
# Store our prereleases value so we can use it later to determine if
# we accept prereleases or not.
self._prereleases = prereleases
@property
def prereleases(self) -> bool | None:
# If we have been given an explicit prerelease modifier, then we'll
# pass that through here.
if self._prereleases is not None:
return self._prereleases
# If we don't have any specifiers, and we don't have a forced value,
# then we'll just return None since we don't know if this should have
# pre-releases or not.
if not self._specs:
return None
# Otherwise we'll see if any of the given specifiers accept
# prereleases, if any of them do we'll return True, otherwise False.
if any(s.prereleases for s in self._specs):
return True
return None
@prereleases.setter
def prereleases(self, value: bool | None) -> None:
self._prereleases = value
def __repr__(self) -> str:
"""A representation of the specifier set that shows all internal state.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> SpecifierSet('>=1.0.0,!=2.0.0')
<SpecifierSet('!=2.0.0,>=1.0.0')>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=False)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=False)>
>>> SpecifierSet('>=1.0.0,!=2.0.0', prereleases=True)
<SpecifierSet('!=2.0.0,>=1.0.0', prereleases=True)>
"""
pre = (
f", prereleases={self.prereleases!r}"
if self._prereleases is not None
else ""
)
return f"<SpecifierSet({str(self)!r}{pre})>"
def __str__(self) -> str:
"""A string representation of the specifier set that can be round-tripped.
Note that the ordering of the individual specifiers within the set may not
match the input string.
>>> str(SpecifierSet(">=1.0.0,!=1.0.1"))
'!=1.0.1,>=1.0.0'
>>> str(SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False))
'!=1.0.1,>=1.0.0'
"""
return ",".join(sorted(str(s) for s in self._specs))
def __hash__(self) -> int:
return hash(self._specs)
def __and__(self, other: SpecifierSet | str) -> SpecifierSet:
"""Return a SpecifierSet which is a combination of the two sets.
:param other: The other object to combine with.
>>> SpecifierSet(">=1.0.0,!=1.0.1") & '<=2.0.0,!=2.0.1'
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
>>> SpecifierSet(">=1.0.0,!=1.0.1") & SpecifierSet('<=2.0.0,!=2.0.1')
<SpecifierSet('!=1.0.1,!=2.0.1,<=2.0.0,>=1.0.0')>
"""
if isinstance(other, str):
other = SpecifierSet(other)
elif not isinstance(other, SpecifierSet):
return NotImplemented
specifier = SpecifierSet()
specifier._specs = frozenset(self._specs | other._specs)
if self._prereleases is None and other._prereleases is not None:
specifier._prereleases = other._prereleases
elif (
self._prereleases is not None and other._prereleases is None
) or self._prereleases == other._prereleases:
specifier._prereleases = self._prereleases
else:
raise ValueError(
"Cannot combine SpecifierSets with True and False prerelease overrides."
)
return specifier
def __eq__(self, other: object) -> bool:
"""Whether or not the two SpecifierSet-like objects are equal.
:param other: The other object to check against.
The value of :attr:`prereleases` is ignored.
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> (SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False) ==
... SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == ">=1.0.0,!=1.0.1"
True
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1") == SpecifierSet(">=1.0.0,!=1.0.2")
False
"""
if isinstance(other, (str, Specifier)):
other = SpecifierSet(str(other))
elif not isinstance(other, SpecifierSet):
return NotImplemented
return self._specs == other._specs
def __len__(self) -> int:
"""Returns the number of specifiers in this specifier set."""
return len(self._specs)
def __iter__(self) -> Iterator[Specifier]:
"""
Returns an iterator over all the underlying :class:`Specifier` instances
in this specifier set.
>>> sorted(SpecifierSet(">=1.0.0,!=1.0.1"), key=str)
[<Specifier('!=1.0.1')>, <Specifier('>=1.0.0')>]
"""
return iter(self._specs)
def __contains__(self, item: UnparsedVersion) -> bool:
"""Return whether or not the item is contained in this specifier.
:param item: The item to check for.
This is used for the ``in`` operator and behaves the same as
:meth:`contains` with no ``prereleases`` argument passed.
>>> "1.2.3" in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> Version("1.2.3") in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> "1.0.1" in SpecifierSet(">=1.0.0,!=1.0.1")
False
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1")
True
>>> "1.3.0a1" in SpecifierSet(">=1.0.0,!=1.0.1", prereleases=True)
True
"""
return self.contains(item)
def contains(
self,
item: UnparsedVersion,
prereleases: bool | None = None,
installed: bool | None = None,
) -> bool:
"""Return whether or not the item is contained in this SpecifierSet.
:param item:
The item to check for, which can be a version string or a
:class:`Version` instance.
:param prereleases:
Whether or not to match prereleases with this SpecifierSet. If set to
``None`` (the default), it will follow the recommendation from :pep:`440`
and match prereleases, as there are no other versions.
:param installed:
Whether or not the item is installed. If set to ``True``, it will
accept prerelease versions even if the specifier does not allow them.
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.2.3")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains(Version("1.2.3"))
True
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.0.1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1")
True
>>> SpecifierSet(">=1.0.0,!=1.0.1", prereleases=False).contains("1.3.0a1")
False
>>> SpecifierSet(">=1.0.0,!=1.0.1").contains("1.3.0a1", prereleases=True)
True
"""
version = _coerce_version(item)
if version is None:
return False
if installed and version.is_prerelease:
prereleases = True
return bool(list(self.filter([version], prereleases=prereleases)))
def filter(
self, iterable: Iterable[UnparsedVersionVar], prereleases: bool | None = None
) -> Iterator[UnparsedVersionVar]:
"""Filter items in the given iterable, that match the specifiers in this set.
:param iterable:
An iterable that can contain version strings and :class:`Version` instances.
The items in the iterable will be filtered according to the specifier.
:param prereleases:
Whether or not to allow prereleases in the returned iterator. If set to
``None`` (the default), it will follow the recommendation from :pep:`440`
and match prereleases if there are no other versions.
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.3", Version("1.4")]))
['1.3', <Version('1.4')>]
>>> list(SpecifierSet(">=1.2.3").filter(["1.2", "1.5a1"]))
['1.5a1']
>>> list(SpecifierSet(">=1.2.3").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
>>> list(SpecifierSet(">=1.2.3", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
An "empty" SpecifierSet will filter items based on the presence of prerelease
versions in the set.
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"]))
['1.3']
>>> list(SpecifierSet("").filter(["1.5a1"]))
['1.5a1']
>>> list(SpecifierSet("", prereleases=True).filter(["1.3", "1.5a1"]))
['1.3', '1.5a1']
>>> list(SpecifierSet("").filter(["1.3", "1.5a1"], prereleases=True))
['1.3', '1.5a1']
"""
# Determine if we're forcing a prerelease or not, if we're not forcing
# one for this particular filter call, then we'll use whatever the
# SpecifierSet thinks for whether or not we should support prereleases.
if prereleases is None and self.prereleases is not None:
prereleases = self.prereleases
# If we have any specifiers, then we want to wrap our iterable in the
# filter method for each one, this will act as a logical AND amongst
# each specifier.
if self._specs:
# When prereleases is None, we need to let all versions through
# the individual filters, then decide about prereleases at the end
# based on whether any non-prereleases matched ALL specs.
for spec in self._specs:
iterable = spec.filter(
iterable, prereleases=True if prereleases is None else prereleases
)
if prereleases is not None:
# If we have a forced prereleases value,
# we can immediately return the iterator.
return iter(iterable)
else:
# Handle empty SpecifierSet cases where prereleases is not None.
if prereleases is True:
return iter(iterable)
if prereleases is False:
return (
item
for item in iterable
if (version := _coerce_version(item)) is not None
and not version.is_prerelease
)
# Finally if prereleases is None, apply PEP 440 logic:
# exclude prereleases unless there are no final releases that matched.
filtered: list[UnparsedVersionVar] = []
found_prereleases: list[UnparsedVersionVar] = []
for item in iterable:
parsed_version = _coerce_version(item)
if parsed_version is None:
continue
if parsed_version.is_prerelease:
found_prereleases.append(item)
else:
filtered.append(item)
return iter(filtered if filtered else found_prereleases)
| SpecifierSet |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 397284,
"end": 397570
} | class ____(BatchRequest):
"""
Updates a batch of tasks.
Headers
Content type should be 'application/json-lines'.
"""
_service = "tasks"
_action = "update_batch"
_version = "2.13"
_batched_request_cls = UpdateRequest
| UpdateBatchRequest |
python | getsentry__sentry | src/sentry/api/serializers/base.py | {
"start": 2762,
"end": 4730
} | class ____:
"""A Serializer class contains the logic to serialize a specific type of object."""
def __call__(
self,
obj: Any,
attrs: Mapping[Any, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> Mapping[str, Any] | None:
"""See documentation for `serialize`."""
if obj is None:
return None
return self._serialize(obj, attrs, user, **kwargs)
def get_attrs(
self, item_list: Sequence[Any], user: User | RpcUser | AnonymousUser, **kwargs: Any
) -> MutableMapping[Any, Any]:
"""
Fetch all of the associated data needed to serialize the objects in `item_list`.
:param item_list: List of input objects that should be serialized.
:param user: The user who will be viewing the objects.
:param kwargs: Any
:returns A mapping of items from the `item_list` to an Object.
"""
return {}
def _serialize(
self,
obj: Any,
attrs: Mapping[Any, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> Mapping[str, Any] | None:
try:
return self.serialize(obj, attrs, user, **kwargs)
except Exception:
logger.exception("Failed to serialize", extra={"instance": obj})
return None
def serialize(
self,
obj: Any,
attrs: Mapping[Any, Any],
user: User | RpcUser | AnonymousUser,
**kwargs: Any,
) -> Mapping[str, Any]:
"""
Convert an arbitrary python object `obj` to an object that only contains primitives.
:param obj: An item from `item_list` that was passed to `get_attrs`.
:param attrs: The object in `get_attrs` that corresponds to `obj`.
:param user: The user who will be viewing the objects.
:param kwargs: Any
:returns A serialized version of `obj`.
"""
return {}
| Serializer |
python | matplotlib__matplotlib | lib/matplotlib/collections.py | {
"start": 93090,
"end": 97068
} | class ____(_MeshData, Collection):
r"""
Class for the efficient drawing of a quadrilateral mesh.
A quadrilateral mesh is a grid of M by N adjacent quadrilaterals that are
defined via a (M+1, N+1) grid of vertices. The quadrilateral (m, n) is
defined by the vertices ::
(m+1, n) ----------- (m+1, n+1)
/ /
/ /
/ /
(m, n) -------- (m, n+1)
The mesh need not be regular and the polygons need not be convex.
Parameters
----------
coordinates : (M+1, N+1, 2) array-like
The vertices. ``coordinates[m, n]`` specifies the (x, y) coordinates
of vertex (m, n).
antialiased : bool, default: True
shading : {'flat', 'gouraud'}, default: 'flat'
Notes
-----
Unlike other `.Collection`\s, the default *pickradius* of `.QuadMesh` is 0,
i.e. `~.Artist.contains` checks whether the test point is within any of the
mesh quadrilaterals.
"""
def __init__(self, coordinates, *, antialiased=True, shading='flat',
**kwargs):
kwargs.setdefault("pickradius", 0)
super().__init__(coordinates=coordinates, shading=shading)
Collection.__init__(self, **kwargs)
self._antialiased = antialiased
self._bbox = transforms.Bbox.unit()
self._bbox.update_from_data_xy(self._coordinates.reshape(-1, 2))
self.set_mouseover(False)
def get_paths(self):
if self._paths is None:
self.set_paths()
return self._paths
def set_paths(self):
self._paths = self._convert_mesh_to_paths(self._coordinates)
self.stale = True
def get_datalim(self, transData):
return (self.get_transform() - transData).transform_bbox(self._bbox)
@artist.allow_rasterization
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group(self.__class__.__name__, self.get_gid())
transform = self.get_transform()
offset_trf = self.get_offset_transform()
offsets = self.get_offsets()
if self.have_units():
xs = self.convert_xunits(offsets[:, 0])
ys = self.convert_yunits(offsets[:, 1])
offsets = np.column_stack([xs, ys])
self.update_scalarmappable()
if not transform.is_affine:
coordinates = self._coordinates.reshape((-1, 2))
coordinates = transform.transform(coordinates)
coordinates = coordinates.reshape(self._coordinates.shape)
transform = transforms.IdentityTransform()
else:
coordinates = self._coordinates
if not offset_trf.is_affine:
offsets = offset_trf.transform_non_affine(offsets)
offset_trf = offset_trf.get_affine()
gc = renderer.new_gc()
gc.set_snap(self.get_snap())
self._set_gc_clip(gc)
gc.set_linewidth(self.get_linewidth()[0])
if self._shading == 'gouraud':
triangles, colors = self._convert_mesh_to_triangles(coordinates)
renderer.draw_gouraud_triangles(
gc, triangles, colors, transform.frozen())
else:
renderer.draw_quad_mesh(
gc, transform.frozen(),
coordinates.shape[1] - 1, coordinates.shape[0] - 1,
coordinates, offsets, offset_trf,
# Backends expect flattened rgba arrays (n*m, 4) for fc and ec
self.get_facecolor().reshape((-1, 4)),
self._antialiased, self.get_edgecolors().reshape((-1, 4)))
gc.restore()
renderer.close_group(self.__class__.__name__)
self.stale = False
def get_cursor_data(self, event):
contained, info = self.contains(event)
if contained and self.get_array() is not None:
return self.get_array().ravel()[info["ind"]]
return None
| QuadMesh |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/ddl.py | {
"start": 34021,
"end": 34159
} | class ____(_CreateBase["Sequence"]):
"""Represent a CREATE SEQUENCE statement."""
__visit_name__ = "create_sequence"
| CreateSequence |
python | dask__dask | dask/dataframe/dask_expr/_merge.py | {
"start": 18842,
"end": 24040
} | class ____(Merge, PartitionsFiltered):
_parameters = [
"left",
"right",
"how",
"left_on",
"right_on",
"left_index",
"right_index",
"suffixes",
"indicator",
"_partitions",
"shuffle_left_on",
"shuffle_right_on",
"_npartitions",
]
_defaults = {
"how": "inner",
"left_on": None,
"right_on": None,
"left_index": False,
"right_index": False,
"suffixes": ("_x", "_y"),
"indicator": False,
"_partitions": None,
"shuffle_left_on": None,
"shuffle_right_on": None,
"_npartitions": None,
}
is_broadcast_join = False
def _lower(self):
return None
def _layer(self) -> dict:
from distributed.shuffle._core import (
P2PBarrierTask,
ShuffleId,
barrier_key,
p2p_barrier,
)
from distributed.shuffle._merge import merge_unpack
from distributed.shuffle._shuffle import DataFrameShuffleSpec
dsk = {}
token_left = _tokenize_deterministic(
# Include self._name to ensure that shuffle IDs are unique for individual
# merge operations. Reusing shuffles between merges is dangerous because of
# required coordination and complexity introduced through dynamic clusters.
self._name,
self.left._name,
self.shuffle_left_on,
self.left_index,
)
token_right = _tokenize_deterministic(
# Include self._name to ensure that shuffle IDs are unique for individual
# merge operations. Reusing shuffles between merges is dangerous because of
# required coordination and complexity introduced through dynamic clusters.
self._name,
self.right._name,
self.shuffle_right_on,
self.right_index,
)
_barrier_key_left = barrier_key(ShuffleId(token_left))
_barrier_key_right = barrier_key(ShuffleId(token_right))
transfer_name_left = f"hash-join-transfer-{token_left}"
transfer_name_right = f"hash-join-transfer-{token_right}"
transfer_keys_left = list()
transfer_keys_right = list()
func = create_assign_index_merge_transfer()
for i in range(self.left.npartitions):
t = Task(
(transfer_name_left, i),
func,
TaskRef((self.left._name, i)),
self.shuffle_left_on,
_HASH_COLUMN_NAME,
self.npartitions,
token_left,
i,
self.left_index,
)
dsk[t.key] = t
transfer_keys_left.append(t.ref())
for i in range(self.right.npartitions):
t = Task(
(transfer_name_right, i),
func,
TaskRef((self.right._name, i)),
self.shuffle_right_on,
_HASH_COLUMN_NAME,
self.npartitions,
token_right,
i,
self.right_index,
)
dsk[t.key] = t
transfer_keys_right.append(t.ref())
meta_left = self.left._meta.assign(**{_HASH_COLUMN_NAME: 0})
barrier_left = P2PBarrierTask(
_barrier_key_left,
p2p_barrier,
token_left,
*transfer_keys_left,
spec=DataFrameShuffleSpec(
id=token_left,
npartitions=self.npartitions,
column=_HASH_COLUMN_NAME,
meta=meta_left,
parts_out=self._partitions,
disk=True,
drop_column=True,
),
)
dsk[barrier_left.key] = barrier_left
meta_right = self.right._meta.assign(**{_HASH_COLUMN_NAME: 0})
barrier_right = P2PBarrierTask(
_barrier_key_right,
p2p_barrier,
token_right,
*transfer_keys_right,
spec=DataFrameShuffleSpec(
id=token_right,
npartitions=self.npartitions,
column=_HASH_COLUMN_NAME,
meta=meta_right,
parts_out=self._partitions,
disk=True,
drop_column=True,
),
)
dsk[barrier_right.key] = barrier_right
for part_out in self._partitions:
t = Task(
(self._name, part_out),
merge_unpack,
token_left,
token_right,
part_out,
barrier_left.ref(),
barrier_right.ref(),
self.how,
self.left_on,
self.right_on,
self._meta,
self.suffixes,
self.left_index,
self.right_index,
self.indicator,
)
dsk[t.key] = t
return dsk
def _divisions(self):
return (None,) * (self._npartitions + 1)
def _simplify_up(self, parent, dependents):
return
| HashJoinP2P |
python | walkccc__LeetCode | solutions/1420. Build Array Where You Can Find The Maximum Exactly K Comparisons/1420.py | {
"start": 0,
"end": 947
} | class ____:
def numOfArrays(self, n: int, m: int, k: int) -> int:
MOD = 1_000_000_007
# dp[i][j][k] := the number of ways to build an array of length i, where j
# is the maximum number and k is `search_cost`
dp = [[[0] * (k + 1) for j in range(m + 1)] for _ in range(n + 1)]
for j in range(1, m + 1):
dp[1][j][1] = 1
for i in range(2, n + 1): # for each length
for j in range(1, m + 1): # for each max value
for cost in range(1, k + 1): # for each cost
# 1. Appending any of [1, j] in the i-th position doesn't change the
# maximum and cost.
dp[i][j][cost] = j * dp[i - 1][j][cost] % MOD
# 2. Appending j in the i-th position makes j the new max and cost 1.
for prevMax in range(1, j):
dp[i][j][cost] += dp[i - 1][prevMax][cost - 1]
dp[i][j][cost] %= MOD
return sum(dp[n][j][k] for j in range(1, m + 1)) % MOD
| Solution |
python | ansible__ansible | test/lib/ansible_test/_internal/classification/__init__.py | {
"start": 5434,
"end": 34143
} | class ____:
"""Map file paths to test commands and targets."""
def __init__(self, args: TestConfig) -> None:
self.args = args
self.integration_all_target = get_integration_all_target(self.args)
self.integration_targets = list(walk_integration_targets())
self.module_targets = list(walk_module_targets())
self.compile_targets = list(walk_compile_targets())
self.units_targets = list(walk_units_targets())
self.sanity_targets = list(walk_sanity_targets())
self.powershell_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] in ('.ps1', '.psm1')]
self.csharp_targets = [target for target in self.sanity_targets if os.path.splitext(target.path)[1] == '.cs']
self.units_modules = set(target.module for target in self.units_targets if target.module)
self.units_paths = set(a for target in self.units_targets for a in target.aliases)
self.sanity_paths = set(target.path for target in self.sanity_targets)
self.module_names_by_path = dict((target.path, target.module) for target in self.module_targets)
self.integration_targets_by_name = dict((target.name, target) for target in self.integration_targets)
self.integration_targets_by_alias = dict((a, target) for target in self.integration_targets for a in target.aliases)
self.posix_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'posix/' in target.aliases for m in target.modules)
self.windows_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'windows/' in target.aliases for m in target.modules)
self.network_integration_by_module = dict((m, target.name) for target in self.integration_targets
if 'network/' in target.aliases for m in target.modules)
self.prefixes = load_integration_prefixes()
self.integration_dependencies = analyze_integration_target_dependencies(self.integration_targets)
self.python_module_utils_imports: dict[str, set[str]] = {} # populated on first use to reduce overhead when not needed
self.powershell_module_utils_imports: dict[str, set[str]] = {} # populated on first use to reduce overhead when not needed
self.csharp_module_utils_imports: dict[str, set[str]] = {} # populated on first use to reduce overhead when not needed
self.paths_to_dependent_targets: dict[str, set[IntegrationTarget]] = {}
for target in self.integration_targets:
for path in target.needs_file:
if path not in self.paths_to_dependent_targets:
self.paths_to_dependent_targets[path] = set()
self.paths_to_dependent_targets[path].add(target)
def get_dependent_paths(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path, recursively expanding dependent paths as well."""
unprocessed_paths = set(self.get_dependent_paths_non_recursive(path))
paths = set()
while unprocessed_paths:
queued_paths = list(unprocessed_paths)
paths |= unprocessed_paths
unprocessed_paths = set()
for queued_path in queued_paths:
new_paths = self.get_dependent_paths_non_recursive(queued_path)
for new_path in new_paths:
if new_path not in paths:
unprocessed_paths.add(new_path)
return sorted(paths)
def get_dependent_paths_non_recursive(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path, including dependent integration test target paths."""
paths = self.get_dependent_paths_internal(path)
paths += [target.path + '/' for target in self.paths_to_dependent_targets.get(path, set())]
paths = sorted(set(paths))
return paths
def get_dependent_paths_internal(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path."""
ext = os.path.splitext(os.path.split(path)[1])[1]
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.py':
return self.get_python_module_utils_usage(path)
if ext == '.psm1':
return self.get_powershell_module_utils_usage(path)
if ext == '.cs':
return self.get_csharp_module_utils_usage(path)
if is_subdir(path, data_context().content.integration_targets_path):
return self.get_integration_target_usage(path)
return []
def get_python_module_utils_usage(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path which is a Python module_utils file."""
if not self.python_module_utils_imports:
display.info('Analyzing python module_utils imports...')
before = time.time()
self.python_module_utils_imports = get_python_module_utils_imports(self.compile_targets)
after = time.time()
display.info('Processed %d python module_utils in %d second(s).' % (len(self.python_module_utils_imports), after - before))
name = get_python_module_utils_name(path)
return sorted(self.python_module_utils_imports[name])
def get_powershell_module_utils_usage(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path which is a PowerShell module_utils file."""
if not self.powershell_module_utils_imports:
display.info('Analyzing powershell module_utils imports...')
before = time.time()
self.powershell_module_utils_imports = get_powershell_module_utils_imports(self.powershell_targets)
after = time.time()
display.info('Processed %d powershell module_utils in %d second(s).' % (len(self.powershell_module_utils_imports), after - before))
name = get_powershell_module_utils_name(path)
return sorted(self.powershell_module_utils_imports[name])
def get_csharp_module_utils_usage(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path which is a C# module_utils file."""
if not self.csharp_module_utils_imports:
display.info('Analyzing C# module_utils imports...')
before = time.time()
self.csharp_module_utils_imports = get_csharp_module_utils_imports(self.powershell_targets, self.csharp_targets)
after = time.time()
display.info('Processed %d C# module_utils in %d second(s).' % (len(self.csharp_module_utils_imports), after - before))
name = get_csharp_module_utils_name(path)
return sorted(self.csharp_module_utils_imports[name])
def get_integration_target_usage(self, path: str) -> list[str]:
"""Return a list of paths which depend on the given path which is an integration target file."""
target_name = path.split('/')[3]
dependents = [os.path.join(data_context().content.integration_targets_path, target) + os.path.sep
for target in sorted(self.integration_dependencies.get(target_name, set()))]
return dependents
def classify(self, path: str) -> t.Optional[dict[str, str]]:
"""Classify the given path and return an optional dictionary of the results."""
result = self._classify(path)
# run all tests when no result given
if result is None:
return None
# run sanity on path unless result specified otherwise
if path in self.sanity_paths and 'sanity' not in result:
result['sanity'] = path
return result
def _classify(self, path: str) -> t.Optional[dict[str, str]]:
"""Return the classification for the given path."""
if data_context().content.is_ansible:
return self._classify_ansible(path)
if data_context().content.collection:
return self._classify_collection(path)
return None
def _classify_common(self, path: str) -> t.Optional[dict[str, str]]:
"""Return the classification for the given path using rules common to all layouts."""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal: dict[str, str] = {}
if os.path.sep not in path:
if filename in (
'azure-pipelines.yml',
):
return all_tests(self.args) # test infrastructure, run all tests
if is_subdir(path, '.azure-pipelines'):
return all_tests(self.args) # test infrastructure, run all tests
if is_subdir(path, '.github'):
return minimal
if is_subdir(path, data_context().content.integration_targets_path):
if not os.path.exists(path):
return minimal
target = self.integration_targets_by_name.get(path.split('/')[3])
if not target:
display.warning('Unexpected non-target found: %s' % path)
return minimal
if 'hidden/' in target.aliases:
return minimal # already expanded using get_dependent_paths
return {
'integration': target.name if 'posix/' in target.aliases else None,
'windows-integration': target.name if 'windows/' in target.aliases else None,
'network-integration': target.name if 'network/' in target.aliases else None,
FOCUSED_TARGET: target.name,
}
if is_subdir(path, data_context().content.integration_path):
if dirname == data_context().content.integration_path:
for command in (
'integration',
'windows-integration',
'network-integration',
):
if name == command and ext == '.cfg':
return {
command: self.integration_all_target,
}
if name == command + '.requirements' and ext == '.txt':
return {
command: self.integration_all_target,
}
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
}
if is_subdir(path, data_context().content.sanity_path):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
}
if is_subdir(path, data_context().content.unit_path):
if path in self.units_paths:
return {
'units': path,
}
# changes to files which are not unit tests should trigger tests from the nearest parent directory
test_path = os.path.dirname(path)
while test_path:
if test_path + '/' in self.units_paths:
return {
'units': test_path + '/',
}
test_path = os.path.dirname(test_path)
if is_subdir(path, data_context().content.module_path):
module_name = self.module_names_by_path.get(path)
if module_name:
return {
'units': module_name if module_name in self.units_modules else None,
'integration': self.posix_integration_by_module.get(module_name) if ext == '.py' else None,
'windows-integration': self.windows_integration_by_module.get(module_name) if ext in ['.cs', '.ps1'] else None,
'network-integration': self.network_integration_by_module.get(module_name),
FOCUSED_TARGET: module_name,
}
return minimal
if is_subdir(path, data_context().content.module_utils_path):
if ext == '.cs':
return minimal # already expanded using get_dependent_paths
if ext == '.psm1':
return minimal # already expanded using get_dependent_paths
if ext == '.py':
return minimal # already expanded using get_dependent_paths
if is_subdir(path, data_context().content.plugin_paths['action']):
if ext == '.py':
if name.startswith('net_'):
network_target = 'network/.*_%s' % name[4:]
if any(re.search(r'^%s$' % network_target, alias) for alias in self.integration_targets_by_alias):
return {
'network-integration': network_target,
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if self.prefixes.get(name) == 'network':
network_platform = name
elif name.endswith('_config') and self.prefixes.get(name[:-7]) == 'network':
network_platform = name[:-7]
elif name.endswith('_template') and self.prefixes.get(name[:-9]) == 'network':
network_platform = name[:-9]
else:
network_platform = None
if network_platform:
network_target = 'network/%s/' % network_platform
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['connection']):
units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'connection')
if name == '__init__':
return {
'integration': self.integration_all_target,
'windows-integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': os.path.join(units_dir, ''),
}
units_path = os.path.join(units_dir, 'test_%s.py' % name)
if units_path not in self.units_paths:
units_path = None
integration_name = 'connection_%s' % name
if integration_name not in self.integration_targets_by_name:
integration_name = None
windows_integration_name = 'connection_windows_%s' % name
if windows_integration_name not in self.integration_targets_by_name:
windows_integration_name = None
# entire integration test commands depend on these connection plugins
if name in ['winrm', 'psrp']:
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if name == 'local':
return {
'integration': self.integration_all_target,
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'network_cli':
return {
'network-integration': self.integration_all_target,
'units': units_path,
}
if name == 'paramiko_ssh':
return {
'integration': integration_name,
'network-integration': self.integration_all_target,
'units': units_path,
}
# other connection plugins have isolated integration and unit tests
return {
'integration': integration_name,
'windows-integration': windows_integration_name,
'units': units_path,
}
if is_subdir(path, data_context().content.plugin_paths['doc_fragments']):
return {
'sanity': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['inventory']):
if name == '__init__':
return all_tests(self.args) # broad impact, run all tests
# These inventory plugins are enabled by default (see INVENTORY_ENABLED).
# Without dedicated integration tests for these we must rely on the incidental coverage from other tests.
test_all = [
'host_list',
'script',
'yaml',
'ini',
'auto',
]
if name in test_all:
posix_integration_fallback = get_integration_all_target(self.args)
else:
posix_integration_fallback = None
target = self.integration_targets_by_name.get('inventory_%s' % name)
units_dir = os.path.join(data_context().content.unit_path, 'plugins', 'inventory')
units_path = os.path.join(units_dir, 'test_%s.py' % name)
if units_path not in self.units_paths:
units_path = None
return {
'integration': target.name if target and 'posix/' in target.aliases else posix_integration_fallback,
'windows-integration': target.name if target and 'windows/' in target.aliases else None,
'network-integration': target.name if target and 'network/' in target.aliases else None,
'units': units_path,
FOCUSED_TARGET: target.name if target else None,
}
if is_subdir(path, data_context().content.plugin_paths['filter']):
return self._simple_plugin_tests('filter', name)
if is_subdir(path, data_context().content.plugin_paths['lookup']):
return self._simple_plugin_tests('lookup', name)
if (is_subdir(path, data_context().content.plugin_paths['terminal']) or
is_subdir(path, data_context().content.plugin_paths['cliconf']) or
is_subdir(path, data_context().content.plugin_paths['netconf'])):
if ext == '.py':
if name in self.prefixes and self.prefixes[name] == 'network':
network_target = 'network/%s/' % name
if network_target in self.integration_targets_by_alias:
return {
'network-integration': network_target,
'units': 'all',
}
display.warning('Integration tests for "%s" not found.' % network_target, unique=True)
return {
'units': 'all',
}
return {
'network-integration': self.integration_all_target,
'units': 'all',
}
if is_subdir(path, data_context().content.plugin_paths['test']):
return self._simple_plugin_tests('test', name)
return None
def _classify_collection(self, path: str) -> t.Optional[dict[str, str]]:
"""Return the classification for the given path using rules specific to collections."""
result = self._classify_common(path)
if result is not None:
return result
filename = os.path.basename(path)
dummy, ext = os.path.splitext(filename)
minimal: dict[str, str] = {}
if path.startswith('changelogs/'):
return minimal
if path.startswith('docs/'):
return minimal
if '/' not in path:
if path in (
'.gitignore',
'COPYING',
'LICENSE',
'Makefile',
):
return minimal
if ext in (
'.in',
'.md',
'.rst',
'.toml',
'.txt',
):
return minimal
return None
def _classify_ansible(self, path: str) -> t.Optional[dict[str, str]]:
"""Return the classification for the given path using rules specific to Ansible."""
dirname = os.path.dirname(path)
filename = os.path.basename(path)
name, ext = os.path.splitext(filename)
minimal: dict[str, str] = {}
packaging = {
'integration': 'packaging/',
}
# Early classification that needs to occur before common classification belongs here.
if dirname == '.azure-pipelines/commands':
test_map = {
'cloud.sh': 'integration:cloud/',
'linux.sh': 'integration:all',
'network.sh': 'network-integration:all',
'remote.sh': 'integration:all',
'sanity.sh': 'sanity:all',
'units.sh': 'units:all',
'windows.sh': 'windows-integration:all',
}
test_match = test_map.get(filename)
if test_match:
test_command, test_target = test_match.split(':')
return {
test_command: test_target,
}
cloud_target = f'cloud/{name}/'
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
# Classification common to both ansible and collections.
result = self._classify_common(path)
if result is not None:
return result
# Classification here is specific to ansible, and runs after common classification.
if path.startswith('bin/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('changelogs/'):
return minimal
if path.startswith('hacking/'):
return minimal
if path.startswith('lib/ansible/executor/powershell/'):
units_path = 'test/units/executor/powershell/'
if units_path not in self.units_paths:
units_path = None
return {
'windows-integration': self.integration_all_target,
'units': units_path,
}
if path.startswith('lib/ansible/'):
return all_tests(self.args) # broad impact, run all tests
if path.startswith('licenses/'):
return minimal
if path.startswith('packaging/'):
packaging_target = f'packaging_{os.path.splitext(path.split(os.path.sep)[1])[0]}'
if packaging_target in self.integration_targets_by_name:
return {
'integration': packaging_target,
}
return minimal
if path.startswith('test/ansible_test/'):
return minimal # these tests are not invoked from ansible-test
if path.startswith('test/lib/ansible_test/config/'):
if name.startswith('cloud-config-'):
cloud_target = 'cloud/%s/' % name.split('-')[2].split('.')[0]
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
if path.startswith('test/lib/ansible_test/_data/completion/'):
if path == 'test/lib/ansible_test/_data/completion/docker.txt':
return all_tests(self.args, force=True) # force all tests due to risk of breaking changes in new test environment
if path.startswith('test/lib/ansible_test/_internal/commands/integration/cloud/'):
cloud_target = 'cloud/%s/' % name
if cloud_target in self.integration_targets_by_alias:
return {
'integration': cloud_target,
}
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/lib/ansible_test/_internal/commands/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
'integration': 'ansible-test/', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_internal/commands/units/'):
return {
'units': 'all', # test infrastructure, run all unit tests
'integration': 'ansible-test/', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_data/requirements/'):
if name in (
'integration',
'network-integration',
'windows-integration',
):
return {
name: self.integration_all_target,
}
if name in (
'sanity',
'units',
):
return {
name: 'all',
}
if path.startswith('test/lib/ansible_test/_util/controller/sanity/') or path.startswith('test/lib/ansible_test/_util/target/sanity/'):
return {
'sanity': 'all', # test infrastructure, run all sanity checks
'integration': 'ansible-test/', # run ansible-test self tests
}
if path.startswith('test/lib/ansible_test/_util/target/pytest/'):
return {
'units': 'all', # test infrastructure, run all unit tests
'integration': 'ansible-test/', # run ansible-test self tests
}
if path.startswith('test/lib/'):
return all_tests(self.args) # test infrastructure, run all tests
if path.startswith('test/support/'):
return all_tests(self.args) # test infrastructure, run all tests
if '/' not in path:
if path in (
'.gitattributes',
'.gitignore',
'.mailmap',
'COPYING',
'Makefile',
):
return minimal
if path in (
'MANIFEST.in',
'pyproject.toml',
'requirements.txt',
):
return packaging
if ext in (
'.md',
'.rst',
):
return minimal
return None # unknown, will result in fall-back to run all tests
def _simple_plugin_tests(self, plugin_type: str, plugin_name: str) -> dict[str, t.Optional[str]]:
"""
Return tests for the given plugin type and plugin name.
This function is useful for plugin types which do not require special processing.
"""
if plugin_name == '__init__':
return all_tests(self.args, True)
integration_target = self.integration_targets_by_name.get('%s_%s' % (plugin_type, plugin_name))
if integration_target:
integration_name = integration_target.name
else:
integration_name = None
units_path = os.path.join(data_context().content.unit_path, 'plugins', plugin_type, 'test_%s.py' % plugin_name)
if units_path not in self.units_paths:
units_path = None
return dict(
integration=integration_name,
units=units_path,
)
def all_tests(args: TestConfig, force: bool = False) -> dict[str, str]:
"""Return the targets for each test command when all tests should be run."""
if force:
integration_all_target = 'all'
else:
integration_all_target = get_integration_all_target(args)
return {
'sanity': 'all',
'units': 'all',
'integration': integration_all_target,
'windows-integration': integration_all_target,
'network-integration': integration_all_target,
}
def get_integration_all_target(args: TestConfig) -> str:
"""Return the target to use when all tests should be run."""
if isinstance(args, IntegrationConfig):
return args.changed_all_target
return 'all'
| PathMapper |
python | neetcode-gh__leetcode | python/0682-baseball-game.py | {
"start": 0,
"end": 792
} | class ____:
def calPoints(self, operations: List[str]) -> int:
score_stack = []
for o in operations:
# it is +, D, or C
# if stack isn't of sufficient length, then operation is voided
if o == "+" and len(score_stack) >= 2:
summed = score_stack[-2] + score_stack[-1]
score_stack.append(summed)
elif o == "D" and len(score_stack) >= 1:
doubled = score_stack[-1] * 2
score_stack.append(doubled)
elif o == "C" and len(score_stack) >= 1:
score_stack.pop()
else:
score_stack.append(int(o))
return sum(score_stack) | Solution |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"start": 73219,
"end": 74292
} | class ____(TypedDict, total=False):
"""
:class:`altair.CompositionConfig` ``TypedDict`` wrapper.
Parameters
----------
columns
The number of columns to include in the view composition layout.
**Default value**: ``undefined`` -- An infinite number of columns (a single row)
will be assumed. This is equivalent to ``hconcat`` (for ``concat``) and to using the
``column`` channel (for ``facet`` and ``repeat``).
**Note**:
1) This property is only for:
* the general (wrappable) ``concat`` operator (not ``hconcat``/``vconcat``)
* the ``facet`` and ``repeat`` operator with one field/repetition definition
(without row/column nesting)
2) Setting the ``columns`` to ``1`` is equivalent to ``vconcat`` (for ``concat``)
and to using the ``row`` channel (for ``facet`` and ``repeat``).
spacing
The default spacing in pixels between composed sub-views.
**Default value**: ``20``
"""
columns: float
spacing: float
| CompositionConfigKwds |
python | ansible__ansible | test/lib/ansible_test/_internal/host_configs.py | {
"start": 4683,
"end": 5446
} | class ____(PythonConfig):
"""Configuration for Python in a virtual environment."""
system_site_packages: t.Optional[bool] = None
def apply_defaults(self, context: HostContext, defaults: PosixCompletionConfig) -> None:
"""Apply default settings."""
super().apply_defaults(context, defaults)
if self.system_site_packages is None:
self.system_site_packages = False
@property
def is_managed(self) -> bool:
"""
True if this Python is a managed instance, otherwise False.
Managed instances are used exclusively by ansible-test and can safely have requirements installed without explicit permission from the user.
"""
return True
@dataclasses.dataclass
| VirtualPythonConfig |
python | ray-project__ray | python/ray/data/preprocessors/vectorizer.py | {
"start": 7094,
"end": 13502
} | class ____(Preprocessor):
"""Count the frequency of tokens in a column of strings.
:class:`CountVectorizer` operates on columns that contain strings. For example:
.. code-block::
corpus
0 I dislike Python
1 I like Python
This preprocessor creates a list column for each input column. Each list contains
the frequency counts of tokens in order of their first appearance. For example:
.. code-block::
corpus
0 [1, 1, 1, 0] # Counts for [I, dislike, Python, like]
1 [1, 0, 1, 1] # Counts for [I, dislike, Python, like]
Examples:
>>> import pandas as pd
>>> import ray
>>> from ray.data.preprocessors import CountVectorizer
>>>
>>> df = pd.DataFrame({
... "corpus": [
... "Jimmy likes volleyball",
... "Bob likes volleyball too",
... "Bob also likes fruit jerky"
... ]
... })
>>> ds = ray.data.from_pandas(df) # doctest: +SKIP
>>>
>>> vectorizer = CountVectorizer(["corpus"])
>>> vectorizer.fit_transform(ds).to_pandas() # doctest: +SKIP
corpus
0 [1, 0, 1, 1, 0, 0, 0, 0]
1 [1, 1, 1, 0, 0, 0, 0, 1]
2 [1, 1, 0, 0, 1, 1, 1, 0]
You can limit the number of tokens in the vocabulary with ``max_features``.
>>> vectorizer = CountVectorizer(["corpus"], max_features=3)
>>> vectorizer.fit_transform(ds).to_pandas() # doctest: +SKIP
corpus
0 [1, 0, 1]
1 [1, 1, 1]
2 [1, 1, 0]
:class:`CountVectorizer` can also be used in append mode by providing the
name of the output_columns that should hold the encoded values.
>>> vectorizer = CountVectorizer(["corpus"], output_columns=["corpus_counts"])
>>> vectorizer.fit_transform(ds).to_pandas() # doctest: +SKIP
corpus corpus_counts
0 Jimmy likes volleyball [1, 0, 1, 1, 0, 0, 0, 0]
1 Bob likes volleyball too [1, 1, 1, 0, 0, 0, 0, 1]
2 Bob also likes fruit jerky [1, 1, 0, 0, 1, 1, 1, 0]
Args:
columns: The columns to separately tokenize and count.
tokenization_fn: The function used to generate tokens. This function
should accept a string as input and return a list of tokens as
output. If unspecified, the tokenizer uses a function equivalent to
``lambda s: s.split(" ")``.
max_features: The maximum number of tokens to encode in the transformed
dataset. If specified, only the most frequent tokens are encoded.
output_columns: The names of the transformed columns. If None, the transformed
columns will be the same as the input columns. If not None, the length of
``output_columns`` must match the length of ``columns``, othwerwise an error
will be raised.
""" # noqa: E501
def __init__(
self,
columns: List[str],
tokenization_fn: Optional[Callable[[str], List[str]]] = None,
max_features: Optional[int] = None,
*,
output_columns: Optional[List[str]] = None,
):
super().__init__()
self.columns = columns
self.tokenization_fn = tokenization_fn or simple_split_tokenizer
self.max_features = max_features
self.output_columns = Preprocessor._derive_and_validate_output_columns(
columns, output_columns
)
def _fit(self, dataset: "Dataset") -> Preprocessor:
def stat_fn(key_gen):
def get_pd_value_counts(df: pd.DataFrame) -> List[Counter]:
def get_token_counts(col):
token_series = df[col].apply(self.tokenization_fn)
tokens = token_series.sum()
return Counter(tokens)
return {col: [get_token_counts(col)] for col in self.columns}
value_counts = dataset.map_batches(
get_pd_value_counts, batch_format="pandas"
)
total_counts = {col: Counter() for col in self.columns}
for batch in value_counts.iter_batches(batch_size=None):
for col, counters in batch.items():
for counter in counters:
total_counts[col].update(counter)
def most_common(counter: Counter, n: int):
return Counter(dict(counter.most_common(n)))
top_counts = [
most_common(counter, self.max_features)
for counter in total_counts.values()
]
return {
key_gen(col): counts # noqa
for (col, counts) in zip(self.columns, top_counts)
}
self.stat_computation_plan.add_callable_stat(
stat_fn=lambda key_gen: stat_fn(key_gen),
stat_key_fn=lambda col: f"token_counts({col})",
columns=self.columns,
)
return self
def _transform_pandas(self, df: pd.DataFrame):
result_columns = []
for col, output_col in zip(self.columns, self.output_columns):
token_counts = self.stats_[f"token_counts({col})"]
sorted_tokens = [token for (token, count) in token_counts.most_common()]
tokenized = df[col].map(self.tokenization_fn).map(Counter)
# Create a list to store token frequencies
token_columns = []
for token in sorted_tokens:
series = tokenized.map(lambda val: val[token])
series.name = token
token_columns.append(series)
# Concatenate all token columns into a single list column
if token_columns:
df[output_col] = pd.concat(token_columns, axis=1).values.tolist()
else:
df[output_col] = [[]] * len(df)
result_columns.append(output_col)
return df
def __repr__(self):
fn_name = getattr(self.tokenization_fn, "__name__", self.tokenization_fn)
return (
f"{self.__class__.__name__}(columns={self.columns!r}, "
f"tokenization_fn={fn_name}, max_features={self.max_features!r}, "
f"output_columns={self.output_columns!r})"
)
| CountVectorizer |
python | squidfunk__mkdocs-material | material/plugins/projects/builder/log.py | {
"start": 1529,
"end": 4033
} | class ____(Filter):
# Filter log messages
def filter(self, record):
message = record.getMessage()
return not message.startswith("A 'dirty' build")
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
# Retrieve logger for project
def get_log_for(project: Project):
log = logging.getLogger("".join(["mkdocs.material.projects", project.slug]))
# Ensure logger does not propagate messags to parent logger, or messages
# will be printed multiple times, and attach handler with color formatter
log.propagate = False
if not log.hasHandlers():
log.addHandler(get_log_handler(project))
log.setLevel(get_log_level_for(project))
# Return logger
return log
# Retrieve log level for project
def get_log_level_for(project: Project):
level = logging.INFO
# Determine log level as set in MkDocs - if the build is started with the
# `--quiet` flag, the log level is set to `ERROR` to suppress all messages,
# except for errors. If it's started with `--verbose`, MkDocs sets the log
# level to `DEBUG`, the most verbose of all log levels.
log = logging.getLogger("mkdocs")
for handler in log.handlers:
level = handler.level
break
# Determine if MkDocs was invoked with the `--quiet` flag and the log level
# as configured in the plugin configuration. When `--quiet` is set, or the
# projects plugin configuration disables logging, ignore the configured log
# level and set it to `ERROR` to suppress all messages.
quiet = level == logging.ERROR
level = project.plugin.log_level.upper()
if quiet or not project.plugin.log:
level = logging.ERROR
# Retun log level
return level
# -----------------------------------------------------------------------------
# Retrieve log handler for project
def get_log_handler(project: Project):
handler = logging.StreamHandler()
handler.setFormatter(get_log_formatter(project))
# Add filter to suppress dirty build warning, or we'll get as many of those
# as projects are built - one warning is surely enough, KTHXBYE
handler.addFilter(ProjectsFilter())
return handler
# Retrieve log formatter for project
def get_log_formatter(project: Project):
prefix = style(f"project://{project.slug}", underline = True)
return ColorFormatter(f"[{prefix}] %(message)s")
| ProjectsFilter |
python | pytorch__pytorch | benchmarks/operator_benchmark/pt/instancenorm_test.py | {
"start": 285,
"end": 920
} | class ____(op_bench.TorchBenchmarkBase):
def init(self, dims):
num_channels = dims[1]
self.inputs = {
"input": (torch.rand(*dims) - 0.5) * 256,
"weight": torch.rand(num_channels, dtype=torch.float),
"bias": torch.rand(num_channels, dtype=torch.float),
"eps": 1e-5,
}
def forward(self, input, weight, bias, eps: float):
return F.instance_norm(input, weight=weight, bias=bias, eps=eps)
op_bench.generate_pt_test(instancenorm_configs_short, InstanceNormBenchmark)
if __name__ == "__main__":
op_bench.benchmark_runner.main()
| InstanceNormBenchmark |
python | lepture__authlib | authlib/jose/errors.py | {
"start": 158,
"end": 232
} | class ____(JoseError):
error = "missing_algorithm"
| MissingAlgorithmError |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants.py | {
"start": 2151,
"end": 2414
} | class ____(
collections.namedtuple("_TensorData", ["numpy", "dtype", "index"])):
"""Data about a tensor that was converted to a constant."""
__slots__ = ()
@property
def dtype_attr(self):
return attr_value_pb2.AttrValue(type=self.dtype)
| _TensorData |
python | lazyprogrammer__machine_learning_examples | ab_testing/optimistic.py | {
"start": 505,
"end": 1914
} | class ____:
def __init__(self, p):
# p: the win rate
self.p = p
self.p_estimate = 5.
self.N = 1. # num samples collected so far
def pull(self):
# draw a 1 with probability p
return np.random.random() < self.p
def update(self, x):
self.N += 1.
self.p_estimate = ((self.N - 1)*self.p_estimate + x) / self.N
def experiment():
bandits = [Bandit(p) for p in BANDIT_PROBABILITIES]
rewards = np.zeros(NUM_TRIALS)
for i in range(NUM_TRIALS):
# use optimistic initial values to select the next bandit
j = np.argmax([b.p_estimate for b in bandits])
# pull the arm for the bandit with the largest sample
x = bandits[j].pull()
# update rewards log
rewards[i] = x
# update the distribution for the bandit whose arm we just pulled
bandits[j].update(x)
# print mean estimates for each bandit
for b in bandits:
print("mean estimate:", b.p_estimate)
# print total reward
print("total reward earned:", rewards.sum())
print("overall win rate:", rewards.sum() / NUM_TRIALS)
print("num times selected each bandit:", [b.N for b in bandits])
# plot the results
cumulative_rewards = np.cumsum(rewards)
win_rates = cumulative_rewards / (np.arange(NUM_TRIALS) + 1)
plt.ylim([0, 1])
plt.plot(win_rates)
plt.plot(np.ones(NUM_TRIALS)*np.max(BANDIT_PROBABILITIES))
plt.show()
if __name__ == "__main__":
experiment()
| Bandit |
python | lepture__authlib | authlib/jose/errors.py | {
"start": 2584,
"end": 2781
} | class ____(JoseError):
error = "missing_claim"
def __init__(self, claim):
description = f"Missing '{claim}' claim"
super().__init__(description=description)
| MissingClaimError |
python | tensorflow__tensorflow | tensorflow/python/ops/clustering_ops_test.py | {
"start": 933,
"end": 2048
} | class ____(test.TestCase):
# All but one input point are close to (101, 1). With uniform random sampling,
# it is highly improbable for (-1, -1) to be selected.
def setUp(self):
self._points = np.array([[100., 0.],
[101., 2.],
[102., 0.],
[100., 1.],
[100., 2.],
[101., 0.],
[101., 0.],
[101., 1.],
[102., 0.],
[-1., -1.]]).astype(np.float32)
def runTestWithSeed(self, seed):
with self.cached_session():
sampled_points = clustering_ops.kmeans_plus_plus_initialization(
self._points, 3, seed, (seed % 5) - 1)
self.assertAllClose(
sorted(self.evaluate(sampled_points).tolist()),
[[-1., -1.], [101., 1.], [101., 1.]],
atol=1.0)
def testBasic(self):
for seed in range(100):
self.runTestWithSeed(seed)
@test_util.run_all_in_graph_and_eager_modes
| KmeansPlusPlusInitializationTest |
python | getsentry__sentry | src/sentry/api/event_search.py | {
"start": 15638,
"end": 16393
} | class ____(NamedTuple):
name: str
@property
def is_tag(self) -> bool:
return bool(TAG_KEY_RE.match(self.name)) or (
self.name not in SEARCH_MAP
and self.name not in FIELD_ALIASES
and not self.is_measurement
and not self.is_span_op_breakdown
)
@property
def is_measurement(self) -> bool:
return is_measurement(self.name) and self.name not in SEARCH_MAP
@property
def is_span_op_breakdown(self) -> bool:
return is_span_op_breakdown(self.name) and self.name not in SEARCH_MAP
def _is_wildcard(raw_value: object) -> TypeIs[str]:
if not isinstance(raw_value, str):
return False
return bool(WILDCARD_CHARS.search(raw_value))
| SearchKey |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1147107,
"end": 1147367
} | class ____(VegaLiteSchema):
"""ScaleInvalidDataShowAstheta schema wrapper."""
_schema = {"$ref": '#/definitions/ScaleInvalidDataShowAs<"theta">'}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
| ScaleInvalidDataShowAstheta |
python | redis__redis-py | redis/commands/search/__init__.py | {
"start": 5732,
"end": 5841
} | class ____(AsyncSearchCommands, AsyncioPipeline, Pipeline):
"""AsyncPipeline for the module."""
| AsyncPipeline |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-google/llama_index/readers/google/docs/base.py | {
"start": 1173,
"end": 9026
} | class ____(BasePydanticReader):
"""
Google Docs reader.
Reads a page from Google Docs
"""
is_remote: bool = True
split_on_heading_level: Optional[int] = Field(
default=None,
description="If set the document will be split on the specified heading level.",
)
include_toc: bool = Field(
default=True, description="Include table of contents elements."
)
@classmethod
def class_name(cls) -> str:
return "GoogleDocsReader"
def load_data(self, document_ids: List[str]) -> List[Document]:
"""
Load data from the input directory.
Args:
document_ids (List[str]): a list of document ids.
"""
if document_ids is None:
raise ValueError('Must specify a "document_ids" in `load_kwargs`.')
results = []
for document_id in document_ids:
docs = self._load_doc(document_id)
results.extend(docs)
return results
def _load_doc(self, document_id: str) -> str:
"""
Load a document from Google Docs.
Args:
document_id: the document id.
Returns:
The document text.
"""
credentials = self._get_credentials()
docs_service = discovery.build("docs", "v1", credentials=credentials)
google_doc = docs_service.documents().get(documentId=document_id).execute()
google_doc_content = google_doc.get("body").get("content")
doc_metadata = {"document_id": document_id}
return self._structural_elements_to_docs(google_doc_content, doc_metadata)
def _get_credentials(self) -> Any:
"""
Get valid user credentials from storage.
The file token.json stores the user's access and refresh tokens, and is
created automatically when the authorization flow completes for the first
time.
Returns:
Credentials, the obtained credential.
"""
creds = None
port = 8080
if os.path.exists("token.json"):
creds = Credentials.from_authorized_user_file("token.json", SCOPES)
# If there are no (valid) credentials available, let the user log in.
if not creds or not creds.valid:
if creds and creds.expired and creds.refresh_token:
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
"credentials.json", SCOPES
)
with open("credentials.json") as json_file:
client_config = json.load(json_file)
redirect_uris = client_config["web"].get("redirect_uris", [])
if len(redirect_uris) > 0:
port = int(redirect_uris[0].strip("/").split(":")[-1])
creds = flow.run_local_server(port=port)
# Save the credentials for the next run
with open("token.json", "w") as token:
token.write(creds.to_json())
return creds
def _read_paragraph_element(self, element: Any) -> Any:
"""
Return the text in the given ParagraphElement.
Args:
element: a ParagraphElement from a Google Doc.
"""
text_run = element.get("textRun")
if not text_run:
return ""
return text_run.get("content")
def _read_structural_elements(self, elements: List[Any]) -> Any:
"""
Recurse through a list of Structural Elements.
Read a document's text where text may be in nested elements.
Args:
elements: a list of Structural Elements.
"""
text = ""
for value in elements:
if "paragraph" in value:
elements = value.get("paragraph").get("elements")
for elem in elements:
text += self._read_paragraph_element(elem)
elif "table" in value:
# The text in table cells are in nested Structural Elements
# and tables may be nested.
table = value.get("table")
for row in table.get("tableRows"):
cells = row.get("tableCells")
for cell in cells:
text += self._read_structural_elements(cell.get("content"))
elif "tableOfContents" in value:
# The text in the TOC is also in a Structural Element.
toc = value.get("tableOfContents")
text += self._read_structural_elements(toc.get("content"))
return text
def _determine_heading_level(self, element):
"""
Extracts the heading level, label, and ID from a document element.
Args:
element: a Structural Element.
"""
level = None
heading_key = None
heading_id = None
if self.split_on_heading_level and "paragraph" in element:
style = element.get("paragraph").get("paragraphStyle")
style_type = style.get("namedStyleType", "")
heading_id = style.get("headingId", None)
if style_type == "TITLE":
level = 0
heading_key = "title"
elif style_type.startswith("HEADING_"):
level = int(style_type.split("_")[1])
if level > self.split_on_heading_level:
return None, None, None
heading_key = f"Header {level}"
return level, heading_key, heading_id
def _generate_doc_id(self, metadata: dict):
if "heading_id" in metadata:
heading_id = metadata["heading_id"]
else:
heading_id = "".join(
random.choices(string.ascii_letters + string.digits, k=8)
)
return f"{metadata['document_id']}_{heading_id}"
def _structural_elements_to_docs(
self, elements: List[Any], doc_metadata: dict
) -> Any:
"""
Recurse through a list of Structural Elements.
Split documents on heading if split_on_heading_level is set.
Args:
elements: a list of Structural Elements.
"""
docs = []
current_heading_level = self.split_on_heading_level
metadata = doc_metadata.copy()
text = ""
for value in elements:
element_text = self._read_structural_elements([value])
level, heading_key, heading_id = self._determine_heading_level(value)
if level is not None:
if level == self.split_on_heading_level:
if text.strip():
docs.append(
Document(
id_=self._generate_doc_id(metadata),
text=text,
metadata=metadata.copy(),
)
)
text = ""
if "heading_id" in metadata:
metadata["heading_id"] = heading_id
elif level < current_heading_level:
metadata = doc_metadata.copy()
metadata[heading_key] = element_text
current_heading_level = level
else:
text += element_text
if text:
if docs:
id_ = self._generate_doc_id(metadata)
else:
id_ = metadata["document_id"]
docs.append(Document(id_=id_, text=text, metadata=metadata))
return docs
if __name__ == "__main__":
reader = GoogleDocsReader(split_on_heading_level=1)
docs = reader.load_data(
document_ids=["1UORoHYBKmOdcv4g94znMF0ildBYWiu3C2M2MEsWN4mM"]
)
logger.info(docs)
| GoogleDocsReader |
python | run-llama__llama_index | llama-index-integrations/storage/chat_store/llama-index-storage-chat-store-azurecosmosmongovcore/llama_index/storage/chat_store/azurecosmosmongovcore/base.py | {
"start": 848,
"end": 5452
} | class ____(BaseChatStore, ABC):
"""Creates an Azure Cosmos DB NoSql Chat Store."""
_mongo_client = MongoClient
_database = Database
_collection = Collection
def __init__(
self,
mongo_client: MongoClient,
uri: Optional[str] = None,
host: Optional[str] = None,
port: Optional[int] = None,
db_name: Optional[str] = None,
collection_name: Optional[str] = None,
):
super().__init__(
mongo_client=mongo_client,
uri=uri,
host=host,
port=port,
db_name=db_name,
)
self._mongo_client = mongo_client
self._uri = uri
self._host = host
self._port = port
self._database = self._mongo_client[db_name]
self._collection = self._mongo_client[db_name][collection_name]
@classmethod
def from_connection_string(
cls,
connection_string: str,
db_name: Optional[str] = None,
collection_name: Optional[str] = None,
):
"""Creates an instance of AzureCosmosMongoVCoreChatStore using a connection string."""
# Parse the MongoDB URI
parsed_uri = urllib.parse.urlparse(connection_string)
# Extract username and password, and perform url_encoding
username = urllib.parse.quote_plus(parsed_uri.username)
password = urllib.parse.quote_plus(parsed_uri.password)
encoded_conn_string = f"mongodb+srv://{username}:{password}@{parsed_uri.hostname}/?{parsed_uri.query}"
mongo_client = MongoClient(encoded_conn_string, appname=APP_NAME)
return cls(
mongo_client=mongo_client,
db_name=db_name,
collection_name=collection_name,
)
@classmethod
def from_host_and_port(
cls,
host: str,
port: int,
db_name: Optional[str] = None,
collection_name: Optional[str] = None,
) -> "AzureCosmosMongoVCoreChatStore":
"""Initializes AzureCosmosMongoVCoreChatStore from an endpoint url and key."""
mongo_client = MongoClient(host=host, port=port, appname=APP_NAME)
return cls(
mongo_client=mongo_client,
host=host,
port=port,
db_name=db_name,
collection_name=collection_name,
)
def set_messages(self, key: str, messages: List[ChatMessage]) -> None:
"""Set messages for a key."""
self._collection.updateOne(
{"_id": key},
{"$set": {"messages": _messages_to_dict(messages)}},
upsert=True,
)
def get_messages(self, key: str) -> List[ChatMessage]:
"""Get messages for a key."""
response = self._collection.find_one({"_id": key})
if response is not None:
message_history = response["messages"]
else:
message_history = []
return [_dict_to_message(message) for message in message_history]
def add_message(self, key: str, message: ChatMessage) -> None:
"""Add a message for a key."""
current_messages = _messages_to_dict(self.get_messages(key))
current_messages.append(_message_to_dict(message))
self._collection.insert_one(
{
"id": key,
"messages": current_messages,
}
)
def delete_messages(self, key: str) -> Optional[List[ChatMessage]]:
"""Delete messages for a key."""
messages_to_delete = self.get_messages(key)
self._collection.delete_one({"_id": key})
return messages_to_delete
def delete_message(self, key: str, idx: int) -> Optional[ChatMessage]:
"""Delete specific message for a key."""
current_messages = self.get_messages(key)
try:
message_to_delete = current_messages[idx]
del current_messages[idx]
self.set_messages(key, current_messages)
return message_to_delete
except IndexError:
logger.error(
IndexError(f"No message exists at index, {idx}, for key {key}")
)
return None
def delete_last_message(self, key: str) -> Optional[ChatMessage]:
"""Delete last message for a key."""
return self.delete_message(key, -1)
def get_keys(self) -> List[str]:
"""Get all keys."""
return [doc["id"] for doc in self._collection.find({}, {"id": 1})]
@classmethod
def class_name(cls) -> str:
"""Get class name."""
return "AzureCosmosMongoVCoreChatStore"
| AzureCosmosMongoVCoreChatStore |
python | gevent__gevent | src/gevent/events.py | {
"start": 9713,
"end": 9986
} | class ____(Interface):
"""
The root for all monkey-patch events gevent emits.
"""
source = Attribute("The source object containing the patches.")
target = Attribute("The destination object to be patched.")
@implementer(IGeventPatchEvent)
| IGeventPatchEvent |
python | fastai__fastai | fastai/test_utils.py | {
"start": 1171,
"end": 1778
} | class ____(Module):
def __init__(self): self.a,self.b = nn.Parameter(torch.randn(1)),nn.Parameter(torch.randn(1))
def forward(self, x): return x*self.a + self.b
# %% ../nbs/97_test_utils.ipynb 6
@delegates(Learner.__init__)
def synth_learner(n_trn=10, n_val=2, cuda=False, lr=1e-3, data=None, model=None, **kwargs):
if data is None: data=synth_dbunch(n_train=n_trn,n_valid=n_val, cuda=cuda)
if model is None: model=RegModel()
return Learner(data, model, lr=lr, loss_func=MSELossFlat(),
opt_func=partial(SGD, mom=0.9), **kwargs)
# %% ../nbs/97_test_utils.ipynb 7
| RegModel |
python | huggingface__transformers | src/transformers/models/led/modeling_led.py | {
"start": 90959,
"end": 101887
} | class ____(LEDPreTrainedModel, GenerationMixin):
base_model_prefix = "led"
_keys_to_ignore_on_load_missing = ["final_logits_bias"]
_tied_weights_keys = {
"lm_head.weight": "led.shared.weight",
}
def __init__(self, config: LEDConfig):
super().__init__(config)
self.led = LEDModel(config)
self.register_buffer("final_logits_bias", torch.zeros((1, self.led.shared.num_embeddings)))
self.lm_head = nn.Linear(config.d_model, self.led.shared.num_embeddings, bias=False)
# Initialize weights and apply final processing
self.post_init()
def resize_token_embeddings(
self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None, mean_resizing: bool = True
) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of, mean_resizing)
self._resize_final_logits_bias(new_embeddings.weight.shape[0])
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = torch.zeros((1, new_num_tokens - old_num_tokens), device=self.final_logits_bias.device)
new_bias = torch.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
decoder_input_ids: Optional[torch.LongTensor] = None,
decoder_attention_mask: Optional[torch.LongTensor] = None,
encoder_outputs: Optional[tuple[tuple[torch.FloatTensor]]] = None,
global_attention_mask: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
decoder_inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
cache_position: Optional[torch.Tensor] = None,
) -> Union[tuple[torch.Tensor], LEDSeq2SeqLMOutput]:
r"""
decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Indices of decoder input sequence tokens in the vocabulary.
Indices can be obtained using [`LedTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
LED uses the `eos_token_id` as the starting token for `decoder_input_ids` generation. If `past_key_values`
is used, optionally only the last `decoder_input_ids` have to be input (see `past_key_values`).
decoder_attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
be used by default.
If you want to change padding behavior, you should read [`modeling_led._prepare_decoder_inputs`] and modify
to your needs. See diagram 1 in [the paper](https://huggingface.co/papers/1910.13461) for more information on the
default strategy.
global_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
Mask to decide the attention given on each token, local attention or global attention for the encoder.
Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is
important for task-specific finetuning because it makes the model more flexible at representing the task.
For example, for classification, the <s> token should be given global attention. For QA, all question
tokens should also have global attention. Please refer to the [Longformer
paper](https://huggingface.co/papers/2004.05150) for more details. Mask values selected in `[0, 1]`:
- 0 for local attention (a sliding window attention),
- 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them).
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example Summarization:
```python
>>> import torch
>>> from transformers import AutoTokenizer, LEDForConditionalGeneration
>>> model = LEDForConditionalGeneration.from_pretrained("allenai/led-large-16384-arxiv")
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/led-large-16384-arxiv")
>>> ARTICLE_TO_SUMMARIZE = '''Transformers (Vaswani et al., 2017) have achieved state-of-the-art
... results in a wide range of natural language tasks including generative language modeling
... (Dai et al., 2019; Radford et al., 2019) and discriminative ... language understanding (Devlin et al., 2019).
... This success is partly due to the self-attention component which enables the network to capture contextual
... information from the entire sequence. While powerful, the memory and computational requirements of
... self-attention grow quadratically with sequence length, making it infeasible (or very expensive) to
... process long sequences. To address this limitation, we present Longformer, a modified Transformer
... architecture with a self-attention operation that scales linearly with the sequence length, making it
... versatile for processing long documents (Fig 1). This is an advantage for natural language tasks such as
... long document classification, question answering (QA), and coreference resolution, where existing approaches
... partition or shorten the long context into smaller sequences that fall within the typical 512 token limit
... of BERT-style pretrained models. Such partitioning could potentially result in loss of important
... cross-partition information, and to mitigate this problem, existing methods often rely on complex
... architectures to address such interactions. On the other hand, our proposed Longformer is able to build
... contextual representations of the entire context using multiple layers of attention, reducing the need for
... task-specific architectures.'''
>>> inputs = tokenizer.encode(ARTICLE_TO_SUMMARIZE, return_tensors="pt")
>>> # Global attention on the first token (cf. Beltagy et al. 2020)
>>> global_attention_mask = torch.zeros_like(inputs)
>>> global_attention_mask[:, 0] = 1
>>> # Generate Summary
>>> summary_ids = model.generate(inputs, global_attention_mask=global_attention_mask, num_beams=3, max_length=32)
>>> print(tokenizer.decode(summary_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=True))
```
Example Conditional generation :
```python
>>> from transformers import AutoTokenizer, LEDForConditionalGeneration
>>> tokenizer = AutoTokenizer.from_pretrained("allenai/led-base-16384")
>>> TXT = "My friends are <mask> but they eat too many carbs."
>>> model = LEDForConditionalGeneration.from_pretrained("allenai/led-base-16384")
>>> input_ids = tokenizer([TXT], return_tensors="pt")["input_ids"]
>>> prediction = model.generate(input_ids)[0]
>>> print(tokenizer.decode(prediction, skip_special_tokens=True))
```
"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if labels is not None:
if use_cache:
logger.warning("The `use_cache` argument is changed to `False` since `labels` is provided.")
use_cache = False
if decoder_input_ids is None and decoder_inputs_embeds is None:
decoder_input_ids = shift_tokens_right(
labels, self.config.pad_token_id, self.config.decoder_start_token_id
)
outputs = self.led(
input_ids,
attention_mask=attention_mask,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
encoder_outputs=encoder_outputs,
global_attention_mask=global_attention_mask,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
decoder_inputs_embeds=decoder_inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
cache_position=cache_position,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))
if not return_dict:
output = (lm_logits,) + outputs[1:]
return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
return LEDSeq2SeqLMOutput(
loss=masked_lm_loss,
logits=lm_logits,
past_key_values=outputs.past_key_values,
decoder_hidden_states=outputs.decoder_hidden_states,
decoder_attentions=outputs.decoder_attentions,
cross_attentions=outputs.cross_attentions,
encoder_last_hidden_state=outputs.encoder_last_hidden_state,
encoder_hidden_states=outputs.encoder_hidden_states,
encoder_attentions=outputs.encoder_attentions,
encoder_global_attentions=outputs.encoder_global_attentions,
)
def prepare_decoder_input_ids_from_labels(self, labels: torch.Tensor):
return shift_tokens_right(labels, self.config.pad_token_id, self.config.decoder_start_token_id)
@auto_docstring(
custom_intro="""
LED model with a sequence classification/head on top (a linear layer on top of the pooled output) e.g. for GLUE
tasks.
"""
)
| LEDForConditionalGeneration |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 42923,
"end": 43250
} | class ____(fftw_info):
section = 'fftw'
dir_env_var = 'FFTW'
ver_info = [{'name':'sfftw threads',
'libs':['srfftw_threads', 'sfftw_threads'],
'includes':['sfftw_threads.h', 'srfftw_threads.h'],
'macros':[('SCIPY_SFFTW_THREADS_H', None)]}]
| sfftw_threads_info |
python | PrefectHQ__prefect | tests/cli/test_start_server.py | {
"start": 12184,
"end": 14738
} | class ____:
@pytest.mark.skipif(
sys.platform == "win32",
reason="SIGTERM is only used in non-Windows environments",
)
async def test_sigint_shutsdown_cleanly(self):
async with start_server_process() as server_process:
server_process.send_signal(signal.SIGINT)
with anyio.fail_after(SHUTDOWN_TIMEOUT):
exit_code = await server_process.wait()
assert exit_code == 0, (
"After one sigint, the process should exit successfully"
)
server_process.out.seek(0)
out = server_process.out.read().decode()
assert "Application shutdown complete." in out, (
"When sending a SIGINT, the application should shutdown cleanly. "
f"Output:\n{out}"
)
@pytest.mark.skipif(
sys.platform == "win32",
reason="SIGTERM is only used in non-Windows environments",
)
async def test_sigterm_shutsdown_cleanly(self):
async with start_server_process() as server_process:
server_process.send_signal(signal.SIGTERM)
with anyio.fail_after(SHUTDOWN_TIMEOUT):
exit_code = await server_process.wait()
assert exit_code == -signal.SIGTERM, (
"After a sigterm, the server process should indicate it was terminated"
)
server_process.out.seek(0)
out = server_process.out.read().decode()
assert "Application shutdown complete." in out, (
"When sending a SIGTERM, the application should shutdown cleanly. "
f"Output:\n{out}"
)
@pytest.mark.skipif(
sys.platform != "win32",
reason="CTRL_BREAK_EVENT is only defined in Windows",
)
async def test_ctrl_break_shutsdown_cleanly(self):
async with start_server_process() as server_process:
server_process.send_signal(signal.SIGINT)
with anyio.fail_after(SHUTDOWN_TIMEOUT):
exit_code = await server_process.wait()
assert exit_code == 0, (
"After a ctrl-break, the process should exit successfully"
)
server_process.out.seek(0)
out = server_process.out.read().decode()
assert "Sending CTRL_BREAK_EVENT" in out, (
"When sending a SIGINT, the main process should send a"
f" CTRL_BREAK_EVENT to the uvicorn subprocess. Output:\n{out}"
)
| TestUvicornSignalForwarding |
python | huggingface__transformers | tests/models/superglue/test_modeling_superglue.py | {
"start": 1256,
"end": 4356
} | class ____:
def __init__(
self,
parent,
batch_size=2,
image_width=80,
image_height=60,
keypoint_detector_config=None,
hidden_size: int = 64,
keypoint_encoder_sizes: list[int] = [32, 64],
gnn_layers_types: list[str] = ["self", "cross"] * 2,
num_attention_heads: int = 4,
sinkhorn_iterations: int = 100,
matching_threshold: float = 0.2,
):
if keypoint_detector_config is None:
keypoint_detector_config = {
"encoder_hidden_sizes": [32, 64],
"decoder_hidden_size": 64,
"keypoint_decoder_dim": 65,
"descriptor_decoder_dim": 64,
"keypoint_threshold": 0.005,
"max_keypoints": 256,
"nms_radius": 4,
"border_removal_distance": 4,
}
self.parent = parent
self.batch_size = batch_size
self.image_width = image_width
self.image_height = image_height
self.keypoint_detector_config = keypoint_detector_config
self.hidden_size = hidden_size
self.keypoint_encoder_sizes = keypoint_encoder_sizes
self.gnn_layers_types = gnn_layers_types
self.num_attention_heads = num_attention_heads
self.sinkhorn_iterations = sinkhorn_iterations
self.matching_threshold = matching_threshold
def prepare_config_and_inputs(self):
# SuperGlue expects a grayscale image as input
pixel_values = floats_tensor([self.batch_size, 2, 3, self.image_height, self.image_width])
config = self.get_config()
return config, pixel_values
def get_config(self):
return SuperGlueConfig(
keypoint_detector_config=self.keypoint_detector_config,
hidden_size=self.hidden_size,
keypoint_encoder_sizes=self.keypoint_encoder_sizes,
gnn_layers_types=self.gnn_layers_types,
num_attention_heads=self.num_attention_heads,
sinkhorn_iterations=self.sinkhorn_iterations,
matching_threshold=self.matching_threshold,
)
def create_and_check_model(self, config, pixel_values):
model = SuperGlueForKeypointMatching(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
maximum_num_matches = result.mask.shape[-1]
self.parent.assertEqual(
result.keypoints.shape,
(self.batch_size, 2, maximum_num_matches, 2),
)
self.parent.assertEqual(
result.matches.shape,
(self.batch_size, 2, maximum_num_matches),
)
self.parent.assertEqual(
result.matching_scores.shape,
(self.batch_size, 2, maximum_num_matches),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| SuperGlueModelTester |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 87894,
"end": 89047
} | class ____(Response):
"""
Response of tasks.archive endpoint.
:param archived: Indicates number of archived tasks
:type archived: int
"""
_service = "tasks"
_action = "archive"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"archived": {
"description": "Indicates number of archived tasks",
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, archived: Optional[int] = None, **kwargs: Any) -> None:
super(ArchiveResponse, self).__init__(**kwargs)
self.archived = archived
@schema_property("archived")
def archived(self) -> Optional[int]:
return self._property_archived
@archived.setter
def archived(self, value: Optional[int]) -> None:
if value is None:
self._property_archived = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "archived", six.integer_types)
self._property_archived = value
| ArchiveResponse |
python | kamyu104__LeetCode-Solutions | Python/minimum-domino-rotations-for-equal-row.py | {
"start": 48,
"end": 423
} | class ____(object):
def minDominoRotations(self, A, B):
"""
:type A: List[int]
:type B: List[int]
:rtype: int
"""
intersect = reduce(set.__and__, [set(d) for d in itertools.izip(A, B)])
if not intersect:
return -1
x = intersect.pop()
return min(len(A)-A.count(x), len(B)-B.count(x))
| Solution |
python | pytorch__pytorch | torch/testing/_internal/optests/generate_tests.py | {
"start": 16904,
"end": 26700
} | class ____(TorchFunctionMode):
"""
For a given test, OpCheckMode intercepts calls to operators and runs
test_util(op, args, kwargs) for each intercepted (op, args, kwargs).
"""
def __init__(
self,
namespaces: list[str],
test_util_name: str,
test_util: Callable,
failures_dict: "FailuresDict",
test_name: str,
failures_dict_path: str,
):
# We will intercept calls to ops with these namespaces
self.namespaces = namespaces
# The test utility function. Its signature should be (op, args, kwargs) -> None.
# Examples of test utilities are: schema_check, make_fx_check
self.test_util = test_util
self.test_util_name = test_util_name
# The name of the test that is running this OpCheckMode.
self.test_name = test_name
# Maps qualname -> test_name -> skip/xfail
# Tells us if we should skip a test or assert that there is a failure.
self.failures_dict = failures_dict
# Location of the failures dict. Makes it so that the error message is better.
self.failures_dict_path = failures_dict_path
# OpCheckMode suppresses errors, collects them here, and then raises them on exit.
# Maps qualname -> List[(Exception, func, maybe args, maybe kwargs)]
self.seen_ops_to_errors = {}
def maybe_raise_errors_on_exit(self) -> None:
# Check expected failures first
for qualname in self.seen_ops_to_errors:
option = self.failures_dict.get_status(qualname, self.test_name)
if len(self.seen_ops_to_errors[qualname]) == 0:
if should_update_failures_dict():
self.failures_dict.set_status(
qualname, self.test_name, "xsuccess", comment=""
)
else:
if option == "xfail":
raise OpCheckError(
f"generate_opcheck_tests: Unexpected success for operator "
f"{qualname} on test {self.test_name}. This may mean that "
f"you have fixed this test failure. Please rerun the test with "
f"PYTORCH_OPCHECK_ACCEPT=1 to automatically update the test runner "
f"or manually remove the "
f"expected failure in the failure dict at "
f"{self.failures_dict_path}"
f"For more details, see "
f"{GDOC}"
)
continue
failed_ops = []
for qualname in self.seen_ops_to_errors:
option = self.failures_dict.get_status(qualname, self.test_name)
if option != "xsuccess":
continue
if len(self.seen_ops_to_errors[qualname]) == 0:
continue
failed_ops.append(qualname)
if not failed_ops:
return
if should_update_failures_dict():
for op in failed_ops:
self.failures_dict.set_status(op, self.test_name, "xfail")
return
# Raise from the first error but also report about all of them to make
# recording xfails easier.
ex, op, args, kwargs = self.seen_ops_to_errors[failed_ops[0]][0]
repro_command = generate_repro(
self.test_util_name, op, args, kwargs, save_data=should_print_better_repro()
)
raise OpCheckError(
f"Test generated by `generate_opcheck_tests`, {self.test_name}, "
f"failed on operators {failed_ops}. This usually means that the "
f"operators are not implemented correctly and may lead to silently "
f"incorrect behavior. Set PYTORCH_OPCHECK_PRINT_BETTER_REPRO=1 for a standalone repro, "
f"or please see "
f"{GDOC} "
f"for more recommendations. "
f"To reproduce this problem locally, try to run the following:\n{repro_command}"
) from ex
def __enter__(self, *args, **kwargs):
self.prev_is_opcheck_mode = _is_inside_opcheck_mode.value
self.prev_dynamo_disable = os.environ.get("TORCHDYNAMO_DISABLE", "")
_is_inside_opcheck_mode.value = True
os.environ["TORCHDYNAMO_DISABLE"] = "1"
return super().__enter__(*args, **kwargs)
def __exit__(self, *args, **kwargs):
_is_inside_opcheck_mode.value = self.prev_is_opcheck_mode
os.environ["TORCHDYNAMO_DISABLE"] = self.prev_dynamo_disable
try:
self.maybe_raise_errors_on_exit()
if should_update_failures_dict():
self.failures_dict.save()
finally:
result = super().__exit__(*args, **kwargs)
return result
def run_test_util(self, op, args, kwargs):
try:
self.test_util(op, args, kwargs, copy_inputs=False)
except torch._subclasses.fake_tensor.UnsupportedFakeTensorException:
# We might get here if the input is already a FakeTensor
# or if we're in a torch.compile block. Just ignore these
# since we can't handle them and reporting them as failures
# is too noisy.
pass
def __torch_function__(self, func, types, args=(), kwargs=None):
kwargs = kwargs if kwargs else {}
# Only intercept calls to operators
if not isinstance(func, (torch._ops.OpOverloadPacket, torch._ops.OpOverload)):
return func(*args, **kwargs)
if (
torch.jit.is_tracing()
or torch.jit.is_scripting()
or torch._dynamo.is_compiling()
):
return func(*args, **kwargs)
# Pre-existing code may not use the .default overload. If we see an
# OpOverloadPacket and we cannot resolve the overload, then we just throw
# and ask the user to clarify. Otherwise, we attempt to resolve the overload.
if isinstance(func, torch._ops.OpOverloadPacket):
func = resolve_unique_overload_or_throw(func)
qualname = func.name()
ns = qualname.split("::")[0]
if ns not in self.namespaces:
return func(*args, **kwargs)
args_c, kwargs_c = deepcopy_tensors((args, kwargs))
result = func(*args, **kwargs)
option = self.failures_dict.get_status(qualname, self.test_name)
if option == "xsuccess" or option == "xfail":
# Suppress all errors during execution. Raise them during __exit__.
try:
if qualname not in self.seen_ops_to_errors:
self.seen_ops_to_errors[qualname] = []
self.run_test_util(func, args_c, kwargs_c)
except Exception as ex:
if should_print_better_repro():
self.seen_ops_to_errors[qualname].append((ex, func, args, kwargs))
else:
self.seen_ops_to_errors[qualname].append((ex, func, None, None))
elif option == "skip":
pass
return result
def should_print_better_repro() -> None:
"""If set, the tests generated by `generate_opcheck_tests` will print a
repro command on failure.
In order to print the repro command, we need to save some tensors to disk.
These will be saved under the following directory:
{tempfile.gettempdir()}/pytorch_opcheck_safe_to_delete/.
Although this is a temp folder, it will usually not automatically get cleaned
up, so you'll need to manually delete it.
"""
key = "PYTORCH_OPCHECK_PRINT_BETTER_REPRO"
if key not in os.environ:
return False
value = os.environ[key]
return value == "1" or value == 1
def opcheck(
op: Union[torch._ops.OpOverload, torch._ops.OpOverloadPacket, CustomOpDef],
args: tuple[Any, ...],
kwargs: Optional[dict[str, Any]] = None,
*,
test_utils: Union[str, Sequence[str]] = DEFAULT_TEST_UTILS,
raise_exception: bool = True,
rtol: Optional[float] = None,
atol: Optional[float] = None,
) -> dict[str, str]:
"""See torch.library.opcheck for docstring"""
if (rtol is None) ^ (atol is None):
raise ValueError(
"opcheck(op, ...): if you specify one of rtol/atol, you must specify both"
)
if kwargs is None:
kwargs = {}
if isinstance(op, CustomOpDef):
op = op._opoverload
if isinstance(op, torch._ops.OpOverloadPacket):
op = resolve_unique_overload_or_throw(op)
if not isinstance(op, torch._ops.OpOverload):
raise ValueError(
f"opcheck(op, ...): op must be instance of torch._ops.OpOverload, "
f"e.g. torch.ops.aten.sin.default, got {type(op)}"
)
if test_utils == "ALL":
test_utils = tuple(ALL_TEST_UTILS.keys())
if isinstance(test_utils, str):
test_utils = (test_utils,)
if not isinstance(test_utils, (tuple, list)) or not set(test_utils).issubset(
ALL_TEST_UTILS.keys()
):
raise ValueError(
f"opcheck(op, ..., test_utils={test_utils}), expected test_utils "
f"to be subset of {tuple(ALL_TEST_UTILS.keys())} but it was not"
)
results_dict = {}
for test_util in test_utils:
tester = ALL_TEST_UTILS[test_util]
try:
tester(op, args, kwargs, rtol=rtol, atol=atol)
results_dict[test_util] = "SUCCESS"
except Exception as ex:
if raise_exception:
raise OpCheckError(
f"opcheck(op, ...): {test_util} failed with {ex} "
f"(scroll up for stack trace)"
) from ex
results_dict[test_util] = ex
return results_dict
| OpCheckMode |
python | redis__redis-py | tests/test_maint_notifications_handling.py | {
"start": 93103,
"end": 104454
} | class ____(
TestMaintenanceNotificationsBase
):
"""Integration tests for maintenance notifications handling with real connection pool."""
def setup_method(self):
"""Set up test fixtures with mocked sockets."""
super().setup_method()
self.orig_host = "test.address.com"
ips = ["1.2.3.4", "5.6.7.8", "9.10.11.12"]
ips = ips * 3
# Mock socket creation to return our mock sockets
def mock_socket_getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
if host == self.orig_host:
ip_address = ips.pop(0)
else:
ip_address = host
# Return the standard getaddrinfo format
# (family, type, proto, canonname, sockaddr)
return [
(
socket.AF_INET,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
"",
(ip_address, port),
)
]
self.getaddrinfo_patcher = patch(
"socket.getaddrinfo", side_effect=mock_socket_getaddrinfo
)
self.getaddrinfo_patcher.start()
def teardown_method(self):
"""Clean up test fixtures."""
super().teardown_method()
self.getaddrinfo_patcher.stop()
@pytest.mark.parametrize("pool_class", [ConnectionPool, BlockingConnectionPool])
def test_migrating_after_moving_multiple_proxies(self, pool_class):
""" """
# Setup
pool = pool_class(
host=self.orig_host,
port=12345,
max_connections=10,
protocol=3, # Required for maintenance notifications
maint_notifications_config=self.config,
)
pool_handler = pool._maint_notifications_pool_handler
# Create and release some connections
key1 = "1.2.3.4"
key2 = "5.6.7.8"
key3 = "9.10.11.12"
in_use_connections = {key1: [], key2: [], key3: []}
# Create 7 connections
for _ in range(7):
conn = pool.get_connection()
in_use_connections[conn.getpeername()].append(conn)
for _, conns in in_use_connections.items():
while len(conns) > 1:
pool.release(conns.pop())
# Send MOVING notification to con with ip = key1
conn = in_use_connections[key1][0]
pool_handler.set_connection(conn)
new_ip = "13.14.15.16"
pool_handler.handle_notification(
NodeMovingNotification(
id=1, new_node_host=new_ip, new_node_port=6379, ttl=1
)
)
# validate in use connection and ip1
Helpers.validate_in_use_connections_state(
in_use_connections[key1],
expected_state=MaintenanceState.MOVING,
expected_host_address=new_ip,
expected_socket_timeout=self.config.relaxed_timeout,
expected_socket_connect_timeout=self.config.relaxed_timeout,
expected_orig_host_address=self.orig_host,
expected_orig_socket_timeout=None,
expected_orig_socket_connect_timeout=None,
expected_current_socket_timeout=self.config.relaxed_timeout,
expected_current_peername=key1,
)
# validate free connections for ip1
changed_free_connections = 0
if isinstance(pool, BlockingConnectionPool):
free_connections = [conn for conn in pool.pool.queue if conn is not None]
elif isinstance(pool, ConnectionPool):
free_connections = pool._available_connections
for conn in free_connections:
if conn.host == new_ip:
changed_free_connections += 1
assert conn.maintenance_state == MaintenanceState.MOVING
assert conn.host == new_ip
assert conn.socket_timeout == self.config.relaxed_timeout
assert conn.socket_connect_timeout == self.config.relaxed_timeout
assert conn.orig_host_address == self.orig_host
assert conn.orig_socket_timeout is None
assert conn.orig_socket_connect_timeout is None
else:
assert conn.maintenance_state == MaintenanceState.NONE
assert conn.host == self.orig_host
assert conn.socket_timeout is None
assert conn.socket_connect_timeout is None
assert conn.orig_host_address == self.orig_host
assert conn.orig_socket_timeout is None
assert conn.orig_socket_connect_timeout is None
assert changed_free_connections == 2
assert len(free_connections) == 4
# Send second MOVING notification to con with ip = key2
conn = in_use_connections[key2][0]
pool_handler.set_connection(conn)
new_ip_2 = "17.18.19.20"
pool_handler.handle_notification(
NodeMovingNotification(
id=2, new_node_host=new_ip_2, new_node_port=6379, ttl=2
)
)
# validate in use connection and ip2
Helpers.validate_in_use_connections_state(
in_use_connections[key2],
expected_state=MaintenanceState.MOVING,
expected_host_address=new_ip_2,
expected_socket_timeout=self.config.relaxed_timeout,
expected_socket_connect_timeout=self.config.relaxed_timeout,
expected_orig_host_address=self.orig_host,
expected_orig_socket_timeout=None,
expected_orig_socket_connect_timeout=None,
expected_current_socket_timeout=self.config.relaxed_timeout,
expected_current_peername=key2,
)
# validate free connections for ip2
changed_free_connections = 0
if isinstance(pool, BlockingConnectionPool):
free_connections = [conn for conn in pool.pool.queue if conn is not None]
elif isinstance(pool, ConnectionPool):
free_connections = pool._available_connections
for conn in free_connections:
if conn.host == new_ip_2:
changed_free_connections += 1
assert conn.maintenance_state == MaintenanceState.MOVING
assert conn.host == new_ip_2
assert conn.socket_timeout == self.config.relaxed_timeout
assert conn.socket_connect_timeout == self.config.relaxed_timeout
assert conn.orig_host_address == self.orig_host
assert conn.orig_socket_timeout is None
assert conn.orig_socket_connect_timeout is None
# here I can't validate the other connections since some of
# them are in MOVING state from the first notification
# and some are in NONE state
assert changed_free_connections == 1
# MIGRATING notification on connection that has already been marked as MOVING
conn = in_use_connections[key2][0]
conn_notification_handler = conn._maint_notifications_connection_handler
conn_notification_handler.handle_notification(
NodeMigratingNotification(id=3, ttl=1)
)
# validate connection does not lose its MOVING state
assert conn.maintenance_state == MaintenanceState.MOVING
# MIGRATED notification
conn_notification_handler.handle_notification(NodeMigratedNotification(id=3))
# validate connection does not lose its MOVING state and relaxed timeout
assert conn.maintenance_state == MaintenanceState.MOVING
assert conn.socket_timeout == self.config.relaxed_timeout
# Send Migrating notification to con with ip = key3
conn = in_use_connections[key3][0]
conn_notification_handler = conn._maint_notifications_connection_handler
conn_notification_handler.handle_notification(
NodeMigratingNotification(id=3, ttl=1)
)
# validate connection is in MIGRATING state
assert conn.maintenance_state == MaintenanceState.MAINTENANCE
assert conn.socket_timeout == self.config.relaxed_timeout
# Send MIGRATED notification to con with ip = key3
conn_notification_handler.handle_notification(NodeMigratedNotification(id=3))
# validate connection is in MOVING state
assert conn.maintenance_state == MaintenanceState.NONE
assert conn.socket_timeout is None
# sleep to expire only the first MOVING notifications
sleep(1.3)
# validate only the connections affected by the first MOVING notification
# have lost their MOVING state
Helpers.validate_in_use_connections_state(
in_use_connections[key1],
expected_state=MaintenanceState.NONE,
expected_host_address=self.orig_host,
expected_socket_timeout=None,
expected_socket_connect_timeout=None,
expected_orig_host_address=self.orig_host,
expected_orig_socket_timeout=None,
expected_orig_socket_connect_timeout=None,
expected_current_socket_timeout=None,
expected_current_peername=key1,
)
Helpers.validate_in_use_connections_state(
in_use_connections[key2],
expected_state=MaintenanceState.MOVING,
expected_host_address=new_ip_2,
expected_socket_timeout=self.config.relaxed_timeout,
expected_socket_connect_timeout=self.config.relaxed_timeout,
expected_orig_host_address=self.orig_host,
expected_orig_socket_timeout=None,
expected_orig_socket_connect_timeout=None,
expected_current_socket_timeout=self.config.relaxed_timeout,
expected_current_peername=key2,
)
Helpers.validate_in_use_connections_state(
in_use_connections[key3],
expected_state=MaintenanceState.NONE,
expected_should_reconnect=False,
expected_host_address=self.orig_host,
expected_socket_timeout=None,
expected_socket_connect_timeout=None,
expected_orig_host_address=self.orig_host,
expected_orig_socket_timeout=None,
expected_orig_socket_connect_timeout=None,
expected_current_socket_timeout=None,
expected_current_peername=key3,
)
# TODO validate free connections
# sleep to expire the second MOVING notifications
sleep(1)
# validate all connections have lost their MOVING state
Helpers.validate_in_use_connections_state(
[
*in_use_connections[key1],
*in_use_connections[key2],
*in_use_connections[key3],
],
expected_state=MaintenanceState.NONE,
expected_should_reconnect="any",
expected_host_address=self.orig_host,
expected_socket_timeout=None,
expected_socket_connect_timeout=None,
expected_orig_host_address=self.orig_host,
expected_orig_socket_timeout=None,
expected_orig_socket_connect_timeout=None,
expected_current_socket_timeout=None,
expected_current_peername="any",
)
# TODO validate free connections
| TestMaintenanceNotificationsHandlingMultipleProxies |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_illinois_zip.py | {
"start": 1751,
"end": 4094
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid Illinois zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_illinois_zip": ["61240", "61412", "61641", "62053"],
"invalid_illinois_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_illinois_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_illinois_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_illinois_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidIllinoisZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidIllinoisZip |
python | matplotlib__matplotlib | lib/matplotlib/scale.py | {
"start": 25976,
"end": 26449
} | class ____(Transform):
input_dims = output_dims = 1
def __init__(self, nonpositive='mask'):
super().__init__()
self._nonpositive = nonpositive
def transform_non_affine(self, values):
"""logistic transform (base 10)"""
return 1.0 / (1 + 10**(-values))
def inverted(self):
return LogitTransform(self._nonpositive)
def __str__(self):
return f"{type(self).__name__}({self._nonpositive!r})"
| LogisticTransform |
python | openai__openai-python | src/openai/types/evals/run_create_params.py | {
"start": 6301,
"end": 6608
} | class ____(
TypedDict, total=False
):
text: Required[str]
"""The text output from the model."""
type: Required[Literal["output_text"]]
"""The type of the output text. Always `output_text`."""
| DataSourceCreateEvalResponsesRunDataSourceInputMessagesTemplateTemplateEvalItemContentOutputText |
python | huggingface__transformers | tests/models/deepseek_vl/test_modeling_deepseek_vl.py | {
"start": 1287,
"end": 4270
} | class ____:
def __init__(
self,
parent,
batch_size=2,
seq_length=25,
num_channels=3,
initializer_range=0.02,
is_training=True,
use_cache=False,
text_config={
"num_hidden_layers": 2,
"vocab_size": 99,
"hidden_size": 16,
"intermediate_size": 37,
"max_position_embeddings": 512,
"num_attention_heads": 4,
"pad_token_id": 1,
},
vision_config={
"num_hidden_layers": 1,
"hidden_size": 16,
"intermediate_size": 37,
"image_size": 32,
"patch_size": 8,
"hidden_act": "gelu",
"vision_use_head": False,
"num_attention_heads": 4,
},
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.num_channels = num_channels
self.initializer_range = initializer_range
self.is_training = is_training
self.use_cache = use_cache
self.text_config = text_config
self.vision_config = vision_config
self.vision_config["num_channels"] = self.num_channels
self.num_hidden_layers = text_config["num_hidden_layers"]
self.vocab_size = text_config["vocab_size"]
self.hidden_size = text_config["hidden_size"]
self.num_attention_heads = text_config["num_attention_heads"]
self.image_size = vision_config["image_size"]
self.num_image_tokens = 16
self.pad_token_id = text_config["pad_token_id"]
self.image_token_id = 0
def get_config(self):
return DeepseekVLConfig(
text_config=self.text_config,
vision_config=self.vision_config,
image_token_id=self.image_token_id,
)
def prepare_config_and_inputs(self):
config = self.get_config()
# create text and vision inputs
input_ids = ids_tensor([self.batch_size, self.seq_length], config.text_config.vocab_size - 2) + 1
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
pixel_values = floats_tensor(
[
self.batch_size,
self.num_channels,
self.image_size,
self.image_size,
]
)
# fill image_tokens
input_ids[input_ids == self.num_image_tokens] = config.text_config.pad_token_id
input_ids[:, : self.num_image_tokens] = self.image_token_id
return config, input_ids, attention_mask, pixel_values
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, attention_mask, pixel_values = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask, "pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| DeepseekVLModelTester |
python | kamyu104__LeetCode-Solutions | Python/substring-matching-pattern.py | {
"start": 40,
"end": 1160
} | class ____(object):
def hasMatch(self, s, p):
"""
:type s: str
:type p: str
:rtype: bool
"""
def getPrefix(pattern):
prefix = [-1]*len(pattern)
j = -1
for i in xrange(1, len(pattern)):
while j+1 > 0 and pattern[j+1] != pattern[i]:
j = prefix[j]
if pattern[j+1] == pattern[i]:
j += 1
prefix[i] = j
return prefix
def KMP(text, pattern, i):
prefix = getPrefix(pattern)
j = -1
for i in xrange(i, len(text)):
while j+1 > 0 and pattern[j+1] != text[i]:
j = prefix[j]
if pattern[j+1] == text[i]:
j += 1
if j+1 == len(pattern):
return i-j
return -1
i = 0
for x in p.split('*'):
if not x:
continue
i = KMP(s, x, i)
if i == -1:
return False
i += len(x)
return True
| Solution |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_cloud_base.py | {
"start": 1352,
"end": 1922
} | class ____:
def test_handles_deepcopy_with_method_default(self):
op = GoogleSampleOperator(task_id=TASK_ID)
copied_op = copy.deepcopy(op)
assert copied_op.retry == DEFAULT
assert copied_op.config is None
def test_handles_deepcopy_with_non_default_retry(self):
op = GoogleSampleOperator(task_id=TASK_ID, retry=Retry(deadline=30), config={"config": "value"})
copied_op = copy.deepcopy(op)
assert copied_op.retry.deadline == 30
assert copied_op.config == {"config": "value"}
| TestGoogleCloudBaseOperator |
python | apache__avro | lang/py/avro/io.py | {
"start": 41548,
"end": 52013
} | class ____:
"""DatumWriter for generic python objects."""
_writers_schema: Optional[avro.schema.Schema]
def __init__(self, writers_schema: Optional[avro.schema.Schema] = None) -> None:
self._writers_schema = writers_schema
@property
def writers_schema(self) -> Optional[avro.schema.Schema]:
return self._writers_schema
@writers_schema.setter
def writers_schema(self, writers_schema: avro.schema.Schema) -> None:
self._writers_schema = writers_schema
def write(self, datum: object, encoder: BinaryEncoder) -> None:
if self.writers_schema is None:
raise avro.errors.IONotReadyException("Cannot write without a writer's schema.")
validate(self.writers_schema, datum, raise_on_error=True)
self.write_data(self.writers_schema, datum, encoder)
def write_data(self, writers_schema: avro.schema.Schema, datum: object, encoder: BinaryEncoder) -> None:
# function dispatch to write datum
logical_type = getattr(writers_schema, "logical_type", None)
if writers_schema.type == "null":
if datum is None:
return encoder.write_null(datum)
raise avro.errors.AvroTypeException(writers_schema, datum)
if writers_schema.type == "boolean":
if isinstance(datum, bool):
return encoder.write_boolean(datum)
raise avro.errors.AvroTypeException(writers_schema, datum)
if writers_schema.type == "string":
if isinstance(datum, str):
return encoder.write_utf8(datum)
raise avro.errors.AvroTypeException(writers_schema, datum)
if writers_schema.type == "int":
if logical_type == avro.constants.DATE:
if isinstance(datum, datetime.date):
return encoder.write_date_int(datum)
warnings.warn(avro.errors.IgnoredLogicalType(f"{datum} is not a date type"))
elif logical_type == avro.constants.TIME_MILLIS:
if isinstance(datum, datetime.time):
return encoder.write_time_millis_int(datum)
warnings.warn(avro.errors.IgnoredLogicalType(f"{datum} is not a time type"))
if isinstance(datum, int):
return encoder.write_int(datum)
raise avro.errors.AvroTypeException(writers_schema, datum)
if writers_schema.type == "long":
if logical_type == avro.constants.TIME_MICROS:
if isinstance(datum, datetime.time):
return encoder.write_time_micros_long(datum)
warnings.warn(avro.errors.IgnoredLogicalType(f"{datum} is not a time type"))
elif logical_type == avro.constants.TIMESTAMP_MILLIS:
if isinstance(datum, datetime.datetime):
return encoder.write_timestamp_millis_long(datum)
warnings.warn(avro.errors.IgnoredLogicalType(f"{datum} is not a datetime type"))
elif logical_type == avro.constants.TIMESTAMP_MICROS:
if isinstance(datum, datetime.datetime):
return encoder.write_timestamp_micros_long(datum)
warnings.warn(avro.errors.IgnoredLogicalType(f"{datum} is not a datetime type"))
if isinstance(datum, int):
return encoder.write_long(datum)
raise avro.errors.AvroTypeException(writers_schema, datum)
if writers_schema.type == "float":
if isinstance(datum, (int, float)):
return encoder.write_float(datum)
raise avro.errors.AvroTypeException(writers_schema, datum)
if writers_schema.type == "double":
if isinstance(datum, (int, float)):
return encoder.write_double(datum)
raise avro.errors.AvroTypeException(writers_schema, datum)
if writers_schema.type == "bytes":
if logical_type == "decimal":
scale = writers_schema.get_prop("scale")
if not (isinstance(scale, int) and scale >= 0):
warnings.warn(avro.errors.IgnoredLogicalType(f"Invalid decimal scale {scale}. Must be a non-negative integer."))
elif not isinstance(datum, decimal.Decimal):
warnings.warn(avro.errors.IgnoredLogicalType(f"{datum} is not a decimal type"))
else:
return encoder.write_decimal_bytes(datum, scale)
if isinstance(datum, bytes):
return encoder.write_bytes(datum)
raise avro.errors.AvroTypeException(writers_schema, datum)
if isinstance(writers_schema, avro.schema.FixedSchema):
if logical_type == "decimal":
scale = writers_schema.get_prop("scale")
size = writers_schema.size
if not (isinstance(scale, int) and scale >= 0):
warnings.warn(avro.errors.IgnoredLogicalType(f"Invalid decimal scale {scale}. Must be a non-negative integer."))
elif not isinstance(datum, decimal.Decimal):
warnings.warn(avro.errors.IgnoredLogicalType(f"{datum} is not a decimal type"))
else:
return encoder.write_decimal_fixed(datum, scale, size)
if isinstance(datum, bytes):
return self.write_fixed(writers_schema, datum, encoder)
raise avro.errors.AvroTypeException(writers_schema, datum)
if isinstance(writers_schema, avro.schema.EnumSchema):
if isinstance(datum, str):
return self.write_enum(writers_schema, datum, encoder)
raise avro.errors.AvroTypeException(writers_schema, datum)
if isinstance(writers_schema, avro.schema.ArraySchema):
if isinstance(datum, Sequence):
return self.write_array(writers_schema, datum, encoder)
raise avro.errors.AvroTypeException(writers_schema, datum)
if isinstance(writers_schema, avro.schema.MapSchema):
if isinstance(datum, Mapping):
return self.write_map(writers_schema, datum, encoder)
raise avro.errors.AvroTypeException(writers_schema, datum)
if isinstance(writers_schema, avro.schema.UnionSchema):
return self.write_union(writers_schema, datum, encoder)
if isinstance(writers_schema, avro.schema.RecordSchema):
if isinstance(datum, Mapping):
return self.write_record(writers_schema, datum, encoder)
raise avro.errors.AvroTypeException(writers_schema, datum)
raise avro.errors.AvroException(f"Unknown type: {writers_schema.type}")
def write_fixed(self, writers_schema: avro.schema.FixedSchema, datum: bytes, encoder: BinaryEncoder) -> None:
"""
Fixed instances are encoded using the number of bytes declared
in the schema.
"""
return encoder.write(datum)
def write_enum(self, writers_schema: avro.schema.EnumSchema, datum: str, encoder: BinaryEncoder) -> None:
"""
An enum is encoded by a int, representing the zero-based position
of the symbol in the schema.
"""
index_of_datum = writers_schema.symbols.index(datum)
return encoder.write_int(index_of_datum)
def write_array(self, writers_schema: avro.schema.ArraySchema, datum: Sequence[object], encoder: BinaryEncoder) -> None:
"""
Arrays are encoded as a series of blocks.
Each block consists of a long count value,
followed by that many array items.
A block with count zero indicates the end of the array.
Each item is encoded per the array's item schema.
If a block's count is negative,
then the count is followed immediately by a long block size,
indicating the number of bytes in the block.
The actual count in this case
is the absolute value of the count written.
"""
if len(datum) > 0:
encoder.write_long(len(datum))
for item in datum:
self.write_data(writers_schema.items, item, encoder)
return encoder.write_long(0)
def write_map(self, writers_schema: avro.schema.MapSchema, datum: Mapping[str, object], encoder: BinaryEncoder) -> None:
"""
Maps are encoded as a series of blocks.
Each block consists of a long count value,
followed by that many key/value pairs.
A block with count zero indicates the end of the map.
Each item is encoded per the map's value schema.
If a block's count is negative,
then the count is followed immediately by a long block size,
indicating the number of bytes in the block.
The actual count in this case
is the absolute value of the count written.
"""
if len(datum) > 0:
encoder.write_long(len(datum))
for key, val in datum.items():
encoder.write_utf8(key)
self.write_data(writers_schema.values, val, encoder)
return encoder.write_long(0)
def write_union(self, writers_schema: avro.schema.UnionSchema, datum: object, encoder: BinaryEncoder) -> None:
"""
A union is encoded by first writing an int value indicating
the zero-based position within the union of the schema of its value.
The value is then encoded per the indicated schema within the union.
"""
# resolve union
index_of_schema = -1
for i, candidate_schema in enumerate(writers_schema.schemas):
if validate(candidate_schema, datum):
index_of_schema = i
if index_of_schema < 0:
raise avro.errors.AvroTypeException(writers_schema, datum)
# write data
encoder.write_long(index_of_schema)
return self.write_data(writers_schema.schemas[index_of_schema], datum, encoder)
def write_record(self, writers_schema: avro.schema.RecordSchema, datum: Mapping[str, object], encoder: BinaryEncoder) -> None:
"""
A record is encoded by encoding the values of its fields
in the order that they are declared. In other words, a record
is encoded as just the concatenation of the encodings of its fields.
Field values are encoded per their schema.
"""
for field in writers_schema.fields:
self.write_data(field.type, datum.get(field.name), encoder)
| DatumWriter |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/zip_test.py | {
"start": 8582,
"end": 14518
} | class ____(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(index=[-1, 3, 4])))
def testInvalidIndex(self, index):
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(1, 4), dataset_ops.Dataset.range(4, 7)))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
combinations.combine(index=[-1, 0])))
def testEmptyDataset(self, index):
dataset = dataset_ops.Dataset.zip(
datasets=(dataset_ops.Dataset.from_tensor_slices([]),
dataset_ops.Dataset.from_tensor_slices([])))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=index))
@combinations.generate(
combinations.times(test_base.default_test_combinations()))
def testZipBasic(self):
dataset = dataset_ops.Dataset.zip(
(dataset_ops.Dataset.range(1, 4), dataset_ops.Dataset.range(4, 7)))
expected_dataset = [(1, 4), (2, 5), (3, 6)]
for i in range(3):
self.assertEqual(
self.evaluate(random_access.at(dataset, index=i)),
expected_dataset[i])
@combinations.generate(
combinations.times(test_base.default_test_combinations()))
def testZipBasicWithoutTuple(self):
dataset = dataset_ops.Dataset.zip(
dataset_ops.Dataset.range(1, 4), dataset_ops.Dataset.range(4, 7)
)
expected_dataset = [(1, 4), (2, 5), (3, 6)]
for i in range(3):
self.assertEqual(
self.evaluate(random_access.at(dataset, index=i)), expected_dataset[i]
)
@combinations.generate(
combinations.times(test_base.default_test_combinations())
)
def testZipEqual(self):
components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
dataset = _dataset_factory(components)
for i in range(4):
results = self.evaluate(random_access.at(dataset, index=i))
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=4))
@combinations.generate(test_base.default_test_combinations())
def testZipUnequal(self):
components = [[1, 2, 3, 4], [1, 2, 3, 4, 5], [1.0, 2.0]]
dataset = _dataset_factory(components)
for i in range(2):
results = self.evaluate(random_access.at(dataset, index=i))
for component, result_component in zip(components, results):
self.assertAllEqual(component[i], result_component)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=2))
@combinations.generate(test_base.default_test_combinations())
def testNested(self):
components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0])
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component)
for component in components
]
dataset = dataset_ops.Dataset.zip((datasets[0], (datasets[1], datasets[2])))
for i in range(4):
result1, (result2,
result3) = self.evaluate(random_access.at(dataset, index=i))
self.assertAllEqual(components[0][i], result1)
self.assertAllEqual(components[1][i], result2)
self.assertAllEqual(components[2][i], result3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=4))
@combinations.generate(test_base.default_test_combinations())
def testNestedWithoutTuple(self):
components = [
np.tile(np.array([[1], [2], [3], [4]]), 20),
np.tile(np.array([[12], [13], [14], [15]]), 22),
np.array([37.0, 38.0, 39.0, 40.0]),
]
datasets = [
dataset_ops.Dataset.from_tensor_slices(component)
for component in components
]
dataset = dataset_ops.Dataset.zip(datasets[0], (datasets[1], datasets[2]))
for i in range(4):
result1, (result2, result3) = self.evaluate(
random_access.at(dataset, index=i)
)
self.assertAllEqual(components[0][i], result1)
self.assertAllEqual(components[1][i], result2)
self.assertAllEqual(components[2][i], result3)
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=4))
@combinations.generate(test_base.default_test_combinations())
def testNamedTuple(self):
Foo = collections.namedtuple("Foo", ["x", "y"])
x = Foo(x=dataset_ops.Dataset.range(3), y=dataset_ops.Dataset.range(3, 6))
dataset = dataset_ops.Dataset.zip(x)
expected = [Foo(x=0, y=3), Foo(x=1, y=4), Foo(x=2, y=5)]
for i in range(3):
self.assertAllEqual(
self.evaluate(random_access.at(dataset, index=i)), expected[i])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=4))
@combinations.generate(test_base.default_test_combinations())
def testAttrs(self):
if attr is None:
self.skipTest("attr module is not available.")
@attr.s
class Foo:
x = attr.ib()
y = attr.ib()
x = Foo(x=dataset_ops.Dataset.range(3), y=dataset_ops.Dataset.range(3, 6))
dataset = dataset_ops.Dataset.zip(x)
expected = [Foo(x=0, y=3), Foo(x=1, y=4), Foo(x=2, y=5)]
for i in range(3):
self.assertAllEqual(
self.evaluate(random_access.at(dataset, index=i)), expected[i])
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(random_access.at(dataset, index=4))
| ZipRandomAccessTest |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 143537,
"end": 149317
} | class ____:
def test_noniterable_hook_raises(self):
def failure_hook():
pass
with pytest.raises(
TypeError,
match=re.escape(
"Expected iterable for 'on_failure'; got function instead. Please"
" provide a list of hooks to 'on_failure':\n\n"
"@flow(on_failure=[hook1, hook2])\ndef my_flow():\n\tpass"
),
):
@flow(on_failure=failure_hook)
def flow1():
pass
def test_noncallable_hook_raises(self):
with pytest.raises(
TypeError,
match=re.escape(
"Expected callables in 'on_failure'; got str instead. Please provide a"
" list of hooks to 'on_failure':\n\n"
"@flow(on_failure=[hook1, hook2])\ndef my_flow():\n\tpass"
),
):
@flow(on_failure=["test"])
def flow1():
pass
def test_callable_noncallable_hook_raises(self):
def failure_hook():
pass
with pytest.raises(
TypeError,
match=re.escape(
"Expected callables in 'on_failure'; got str instead. Please provide a"
" list of hooks to 'on_failure':\n\n"
"@flow(on_failure=[hook1, hook2])\ndef my_flow():\n\tpass"
),
):
@flow(on_failure=[failure_hook, "test"])
def flow2():
pass
def test_decorated_on_failure_hooks_run_on_failure(self):
my_mock = MagicMock()
@task
def my_task():
raise Exception("oops")
@my_task.on_failure
def failed1(task, task_run, state):
my_mock("failed1")
@my_task.on_failure
def failed2(task, task_run, state):
my_mock("failed2")
@flow
def my_flow():
future = my_task.submit()
future.wait()
return future.state
with pytest.raises(Exception, match="oops"):
state = my_flow()
assert state.type == StateType.FAILED
assert my_mock.call_args_list == [call("failed1"), call("failed2")]
def test_on_failure_hooks_run_on_failure(self):
my_mock = MagicMock()
def failed1(task, task_run, state):
my_mock("failed1")
def failed2(task, task_run, state):
my_mock("failed2")
@task(on_failure=[failed1, failed2])
def my_task():
raise Exception("oops")
@flow
def my_flow():
future = my_task.submit()
future.wait()
return future.state
with pytest.raises(Exception, match="oops"):
state = my_flow()
assert state.type == StateType.FAILED
assert my_mock.call_args_list == [call("failed1"), call("failed2")]
def test_on_failure_hooks_dont_run_on_completed(self):
my_mock = MagicMock()
def failed1(task, task_run, state):
my_mock("failed1")
def failed2(task, task_run, state):
my_mock("failed2")
@task(on_failure=[failed1, failed2])
def my_task():
pass
@flow
def my_flow():
future = my_task.submit()
future.wait()
return future.state
state = my_flow()
assert state.type == StateType.COMPLETED
assert my_mock.call_args_list == []
def test_other_failure_hooks_run_if_a_hook_fails(self):
my_mock = MagicMock()
def failed1(task, task_run, state):
my_mock("failed1")
def exception_hook(task, task_run, state):
raise Exception("bad hook")
def failed2(task, task_run, state):
my_mock("failed2")
@task(on_failure=[failed1, exception_hook, failed2])
def my_task():
raise Exception("oops")
@flow
def my_flow():
future = my_task.submit()
future.wait()
return future.state
with pytest.raises(Exception, match="oops"):
state = my_flow()
assert state.type == StateType.FAILED
assert my_mock.call_args_list == [call("failed1"), call("failed2")]
@pytest.mark.parametrize(
"hook1, hook2",
[
(create_hook, create_hook),
(create_hook, create_async_hook),
(create_async_hook, create_hook),
(create_async_hook, create_async_hook),
],
)
def test_on_failure_hooks_work_with_sync_and_async_functions(self, hook1, hook2):
my_mock = MagicMock()
hook1_with_mock = hook1(my_mock)
hook2_with_mock = hook2(my_mock)
@task(on_failure=[hook1_with_mock, hook2_with_mock])
def my_task():
raise Exception("oops")
@flow
def my_flow():
future = my_task.submit()
future.wait()
return future.state
with pytest.raises(Exception, match="oops"):
state = my_flow()
assert state.type == StateType.FAILED
assert my_mock.call_args_list == [call(), call()]
def test_failure_hooks_dont_run_on_retries(self):
my_mock = MagicMock()
def failed1(task, task_run, state):
my_mock("failed1")
@task(retries=2, on_failure=[failed1])
def my_task():
raise Exception("oops")
@flow
def my_flow():
future = my_task.submit()
future.wait()
return future.state
state = my_flow(return_state=True)
assert state.type == StateType.FAILED
assert my_mock.call_args_list == [call("failed1")]
| TestTaskHooksOnFailure |
python | pypa__warehouse | warehouse/utils/enum.py | {
"start": 78,
"end": 396
} | class ____(str, enum.Enum):
"""Base class for Enum with string value and display label."""
label: str
# Name = "value", _("Label")
def __new__(cls, value: str, label: str) -> Self:
obj = str.__new__(cls, value)
obj._value_ = value
obj.label = label
return obj
| StrLabelEnum |
python | encode__django-rest-framework | tests/test_views.py | {
"start": 2389,
"end": 2841
} | class ____(TestCase):
def setUp(self):
self.view = basic_view
def test_400_parse_error(self):
request = factory.post('/', 'f00bar', content_type='application/json')
response = self.view(request)
expected = {
'detail': JSON_ERROR
}
assert response.status_code == status.HTTP_400_BAD_REQUEST
assert sanitise_json_error(response.data) == expected
| FunctionBasedViewIntegrationTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1232730,
"end": 1234639
} | class ____(sgqlc.types.Type, Node):
"""Entries in a MergeQueue"""
__schema__ = github_schema
__field_names__ = (
"base_commit",
"enqueued_at",
"enqueuer",
"estimated_time_to_merge",
"head_commit",
"jump",
"merge_queue",
"position",
"pull_request",
"solo",
"state",
)
base_commit = sgqlc.types.Field(Commit, graphql_name="baseCommit")
"""The base commit for this entry"""
enqueued_at = sgqlc.types.Field(sgqlc.types.non_null(DateTime), graphql_name="enqueuedAt")
"""The date and time this entry was added to the merge queue"""
enqueuer = sgqlc.types.Field(sgqlc.types.non_null(Actor), graphql_name="enqueuer")
"""The actor that enqueued this entry"""
estimated_time_to_merge = sgqlc.types.Field(Int, graphql_name="estimatedTimeToMerge")
"""The estimated time in seconds until this entry will be merged"""
head_commit = sgqlc.types.Field(Commit, graphql_name="headCommit")
"""The head commit for this entry"""
jump = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="jump")
"""Whether this pull request should jump the queue"""
merge_queue = sgqlc.types.Field(MergeQueue, graphql_name="mergeQueue")
"""The merge queue that this entry belongs to"""
position = sgqlc.types.Field(sgqlc.types.non_null(Int), graphql_name="position")
"""The position of this entry in the queue"""
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
"""The pull request that will be added to a merge group"""
solo = sgqlc.types.Field(sgqlc.types.non_null(Boolean), graphql_name="solo")
"""Does this pull request need to be deployed on its own"""
state = sgqlc.types.Field(sgqlc.types.non_null(MergeQueueEntryState), graphql_name="state")
"""The state of this entry in the queue"""
| MergeQueueEntry |
python | ray-project__ray | python/ray/_common/tests/test_formatters.py | {
"start": 4042,
"end": 5149
} | class ____:
def test_record_with_user_provided_context(self):
formatter = TextFormatter()
record = logging.makeLogRecord({"user": "ray"})
formatted = formatter.format(record)
assert "user=ray" in formatted
def test_record_with_exception(self):
formatter = TextFormatter()
record = logging.LogRecord(
name="test_logger",
level=logging.INFO,
pathname="test.py",
lineno=1000,
msg="Test message",
args=None,
exc_info=None,
)
formatted = formatter.format(record)
for s in ["INFO", "Test message", "test.py:1000", "--"]:
assert s in formatted
def test_record_with_valid_additional_log_standard_attrs(self, shutdown_only):
formatter = TextFormatter()
formatter.set_additional_log_standard_attrs(["name"])
record = logging.makeLogRecord({})
formatted = formatter.format(record)
assert "name=" in formatted
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| TestTextFormatter |
python | python-visualization__folium | folium/features.py | {
"start": 48821,
"end": 51042
} | class ____(GeoJsonDetail):
"""
Create a popup feature to bind to each element of a GeoJson layer based on
its attributes.
Parameters
----------
fields: list or tuple.
Labels of GeoJson/TopoJson 'properties' or GeoPandas GeoDataFrame
columns you'd like to display.
aliases: list/tuple of strings, same length/order as fields, default None.
Optional aliases you'd like to display in the tooltip as field name
instead of the keys of `fields`.
labels: bool, default True.
Set to False to disable displaying the field names or aliases.
localize: bool, default False.
This will use JavaScript's .toLocaleString() to format 'clean' values
as strings for the user's location; i.e. 1,000,000.00 comma separators,
float truncation, etc.
Available for most of JavaScript's primitive types (any data you'll
serve into the template).
style: str, default None.
HTML inline style properties like font and colors. Will be applied to
a div with the text in it.
Examples
---
gjson = folium.GeoJson(gdf).add_to(m)
folium.features.GeoJsonPopup(fields=['NAME'],
labels=False
).add_to(gjson)
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
{{ this._parent.get_name() }}.bindPopup("""
+ GeoJsonDetail.base_template
+ """,{{ this.popup_options | tojavascript }});
{% endmacro %}
"""
)
def __init__(
self,
fields: Sequence[str],
aliases: Optional[Sequence[str]] = None,
labels: bool = True,
style: str = "margin: auto;",
class_name: str = "foliumpopup",
localize: bool = True,
**kwargs: TypeJsonValue,
):
super().__init__(
fields=fields,
aliases=aliases,
labels=labels,
localize=localize,
class_name=class_name,
style=style,
)
self._name = "GeoJsonPopup"
kwargs.update({"class_name": self.class_name})
self.popup_options = kwargs
| GeoJsonPopup |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_imsi.py | {
"start": 855,
"end": 1842
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.to_be_valid_imsi"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_imsi(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidImsi |
python | apache__airflow | providers/google/tests/unit/google/cloud/sensors/test_dataform.py | {
"start": 1433,
"end": 6524
} | class ____:
@pytest.mark.parametrize(
("expected_status", "current_status", "sensor_return"),
[
(WorkflowInvocationAction.State.SUCCEEDED, WorkflowInvocationAction.State.SUCCEEDED, True),
(WorkflowInvocationAction.State.SUCCEEDED, WorkflowInvocationAction.State.RUNNING, False),
],
)
@mock.patch("airflow.providers.google.cloud.sensors.dataform.DataformHook")
def test_poke(
self,
mock_hook: mock.MagicMock,
expected_status: WorkflowInvocationAction.State,
current_status: WorkflowInvocationAction.State,
sensor_return: bool,
):
target = Target(database="", schema="", name=TEST_TARGET_NAME)
workflow_invocation_action = WorkflowInvocationAction(target=target, state=current_status)
mock_query_workflow_invocation_actions = mock_hook.return_value.query_workflow_invocation_actions
mock_query_workflow_invocation_actions.return_value = [workflow_invocation_action]
task = DataformWorkflowInvocationActionStateSensor(
task_id=TEST_TASK_ID,
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
repository_id=TEST_REPOSITORY_ID,
workflow_invocation_id=TEST_WORKFLOW_INVOCATION_ID,
target_name=TEST_TARGET_NAME,
expected_statuses=[expected_status],
failure_statuses=[],
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
results = task.poke(mock.MagicMock())
assert sensor_return == results
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID, impersonation_chain=TEST_IMPERSONATION_CHAIN
)
mock_query_workflow_invocation_actions.assert_called_once_with(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
repository_id=TEST_REPOSITORY_ID,
workflow_invocation_id=TEST_WORKFLOW_INVOCATION_ID,
)
@mock.patch("airflow.providers.google.cloud.sensors.dataform.DataformHook")
def test_target_state_failure_raises_exception(self, mock_hook: mock.MagicMock):
target = Target(database="", schema="", name=TEST_TARGET_NAME)
workflow_invocation_action = WorkflowInvocationAction(
target=target, state=WorkflowInvocationAction.State.FAILED
)
mock_query_workflow_invocation_actions = mock_hook.return_value.query_workflow_invocation_actions
mock_query_workflow_invocation_actions.return_value = [workflow_invocation_action]
task = DataformWorkflowInvocationActionStateSensor(
task_id=TEST_TASK_ID,
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
repository_id=TEST_REPOSITORY_ID,
workflow_invocation_id=TEST_WORKFLOW_INVOCATION_ID,
target_name=TEST_TARGET_NAME,
expected_statuses=[WorkflowInvocationAction.State.SUCCEEDED],
failure_statuses=[WorkflowInvocationAction.State.FAILED],
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
with pytest.raises(AirflowException):
task.poke(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID, impersonation_chain=TEST_IMPERSONATION_CHAIN
)
mock_query_workflow_invocation_actions.assert_called_once_with(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
repository_id=TEST_REPOSITORY_ID,
workflow_invocation_id=TEST_WORKFLOW_INVOCATION_ID,
)
@mock.patch("airflow.providers.google.cloud.sensors.dataform.DataformHook")
def test_target_not_found_raises_exception(self, mock_hook: mock.MagicMock):
mock_query_workflow_invocation_actions = mock_hook.return_value.query_workflow_invocation_actions
mock_query_workflow_invocation_actions.return_value = []
task = DataformWorkflowInvocationActionStateSensor(
task_id=TEST_TASK_ID,
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
repository_id=TEST_REPOSITORY_ID,
workflow_invocation_id=TEST_WORKFLOW_INVOCATION_ID,
target_name=TEST_TARGET_NAME,
expected_statuses=[WorkflowInvocationAction.State.SUCCEEDED],
failure_statuses=[WorkflowInvocationAction.State.FAILED],
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
with pytest.raises(AirflowException):
task.poke(mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID, impersonation_chain=TEST_IMPERSONATION_CHAIN
)
mock_query_workflow_invocation_actions.assert_called_once_with(
project_id=TEST_PROJECT_ID,
region=TEST_REGION,
repository_id=TEST_REPOSITORY_ID,
workflow_invocation_id=TEST_WORKFLOW_INVOCATION_ID,
)
| TestDataformWorkflowInvocationActionStateSensor |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/shortcuts/progress_bar/formatters.py | {
"start": 5900,
"end": 6876
} | class ____(Formatter):
"""
Display the progress as text. E.g. "8/20"
"""
template = HTML("<current>{current:>3}</current>/<total>{total:>3}</total>")
def format(
self,
progress_bar: ProgressBar,
progress: ProgressBarCounter[object],
width: int,
) -> AnyFormattedText:
return self.template.format(
current=progress.items_completed, total=progress.total or "?"
)
def get_width(self, progress_bar: ProgressBar) -> AnyDimension:
all_lengths = [
len("{:>3}".format(c.total or "?")) for c in progress_bar.counters
]
all_lengths.append(1)
return D.exact(max(all_lengths) * 2 + 1)
def _format_timedelta(timedelta: datetime.timedelta) -> str:
"""
Return hh:mm:ss, or mm:ss if the amount of hours is zero.
"""
result = f"{timedelta}".split(".")[0]
if result.startswith("0:"):
result = result[2:]
return result
| Progress |
python | getsentry__sentry | fixtures/safe_migrations_apps/bad_flow_change_char_type_that_unsafe_app/migrations/0002_change_type_from_char120_to_char100.py | {
"start": 153,
"end": 469
} | class ____(CheckedMigration):
dependencies = [
("bad_flow_change_char_type_that_unsafe_app", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="testtable",
name="field",
field=models.CharField(max_length=100),
),
]
| Migration |
python | gevent__gevent | src/gevent/pool.py | {
"start": 1132,
"end": 11662
} | class ____(object):
# Internal, non-public API class.
# Provides mixin methods for implementing mapping pools. Subclasses must define:
__slots__ = ()
def spawn(self, func, *args, **kwargs):
"""
A function that runs *func* with *args* and *kwargs*, potentially
asynchronously. Return a value with a ``get`` method that blocks
until the results of func are available, and a ``rawlink`` method
that calls a callback when the results are available.
If this object has an upper bound on how many asyncronously executing
tasks can exist, this method may block until a slot becomes available.
"""
raise NotImplementedError()
def _apply_immediately(self):
"""
should the function passed to apply be called immediately,
synchronously?
"""
raise NotImplementedError()
def _apply_async_use_greenlet(self):
"""
Should apply_async directly call Greenlet.spawn(), bypassing
`spawn`?
Return true when self.spawn would block.
"""
raise NotImplementedError()
def _apply_async_cb_spawn(self, callback, result):
"""
Run the given callback function, possibly
asynchronously, possibly synchronously.
"""
raise NotImplementedError()
def apply_cb(self, func, args=None, kwds=None, callback=None):
"""
:meth:`apply` the given *func(\\*args, \\*\\*kwds)*, and, if a *callback* is given, run it with the
results of *func* (unless an exception was raised.)
The *callback* may be called synchronously or asynchronously. If called
asynchronously, it will not be tracked by this group. (:class:`Group` and :class:`Pool`
call it asynchronously in a new greenlet; :class:`~gevent.threadpool.ThreadPool` calls
it synchronously in the current greenlet.)
"""
result = self.apply(func, args, kwds)
if callback is not None:
self._apply_async_cb_spawn(callback, result)
return result
def apply_async(self, func, args=None, kwds=None, callback=None):
"""
A variant of the :meth:`apply` method which returns a :class:`~.Greenlet` object.
When the returned greenlet gets to run, it *will* call :meth:`apply`,
passing in *func*, *args* and *kwds*.
If *callback* is specified, then it should be a callable which
accepts a single argument. When the result becomes ready
callback is applied to it (unless the call failed).
This method will never block, even if this group is full (that is,
even if :meth:`spawn` would block, this method will not).
.. caution:: The returned greenlet may or may not be tracked
as part of this group, so :meth:`joining <join>` this group is
not a reliable way to wait for the results to be available or
for the returned greenlet to run; instead, join the returned
greenlet.
.. tip:: Because :class:`~.ThreadPool` objects do not track greenlets, the returned
greenlet will never be a part of it. To reduce overhead and improve performance,
:class:`Group` and :class:`Pool` may choose to track the returned
greenlet. These are implementation details that may change.
"""
if args is None:
args = ()
if kwds is None:
kwds = {}
if self._apply_async_use_greenlet():
# cannot call self.spawn() directly because it will block
# XXX: This is always the case for ThreadPool, but for Group/Pool
# of greenlets, this is only the case when they are full...hence
# the weasely language about "may or may not be tracked". Should we make
# Group/Pool always return true as well so it's never tracked by any
# implementation? That would simplify that logic, but could increase
# the total number of greenlets in the system and add a layer of
# overhead for the simple cases when the pool isn't full.
return Greenlet.spawn(self.apply_cb, func, args, kwds, callback)
greenlet = self.spawn(func, *args, **kwds)
if callback is not None:
greenlet.link(pass_value(callback))
return greenlet
def apply(self, func, args=None, kwds=None):
"""
Rough quivalent of the :func:`apply()` builtin function blocking until
the result is ready and returning it.
The ``func`` will *usually*, but not *always*, be run in a way
that allows the current greenlet to switch out (for example,
in a new greenlet or thread, depending on implementation). But
if the current greenlet or thread is already one that was
spawned by this pool, the pool may choose to immediately run
the `func` synchronously.
Any exception ``func`` raises will be propagated to the caller of ``apply`` (that is,
this method will raise the exception that ``func`` raised).
"""
if args is None:
args = ()
if kwds is None:
kwds = {}
if self._apply_immediately():
return func(*args, **kwds)
return self.spawn(func, *args, **kwds).get()
def __map(self, func, iterable):
return [g.get() for g in
[self.spawn(func, i) for i in iterable]]
def map(self, func, iterable):
"""Return a list made by applying the *func* to each element of
the iterable.
.. seealso:: :meth:`imap`
"""
# We can't return until they're all done and in order. It
# wouldn't seem to much matter what order we wait on them in,
# so the simple, fast (50% faster than imap) solution would be:
# return [g.get() for g in
# [self.spawn(func, i) for i in iterable]]
# If the pool size is unlimited (or more than the len(iterable)), this
# is equivalent to imap (spawn() will never block, all of them run concurrently,
# we call get() in the order the iterable was given).
# Now lets imagine the pool if is limited size. Suppose the
# func is time.sleep, our pool is limited to 3 threads, and
# our input is [10, 1, 10, 1, 1] We would start three threads,
# one to sleep for 10, one to sleep for 1, and the last to
# sleep for 10. We would block starting the fourth thread. At
# time 1, we would finish the second thread and start another
# one for time 1. At time 2, we would finish that one and
# start the last thread, and then begin executing get() on the first
# thread.
# Because it's spawn that blocks, this is *also* equivalent to what
# imap would do.
# The one remaining difference is that imap runs in its own
# greenlet, potentially changing the way the event loop runs.
# That's easy enough to do.
g = Greenlet.spawn(self.__map, func, iterable)
return g.get()
def map_cb(self, func, iterable, callback=None):
result = self.map(func, iterable)
if callback is not None:
callback(result) # pylint:disable=not-callable
return result
def map_async(self, func, iterable, callback=None):
"""
A variant of the map() method which returns a Greenlet object that is executing
the map function.
If callback is specified then it should be a callable which accepts a
single argument.
"""
return Greenlet.spawn(self.map_cb, func, iterable, callback)
def __imap(self, cls, func, *iterables, **kwargs):
# Python 2 doesn't support the syntax that lets us mix varargs and
# a named kwarg, so we have to unpack manually
maxsize = kwargs.pop('maxsize', None)
if kwargs:
raise TypeError("Unsupported keyword arguments")
return cls.spawn(func, izip(*iterables), spawn=self.spawn,
_zipped=True, maxsize=maxsize)
def imap(self, func, *iterables, **kwargs):
"""
imap(func, *iterables, maxsize=None) -> iterable
An equivalent of :func:`itertools.imap`, operating in parallel.
The *func* is applied to each element yielded from each
iterable in *iterables* in turn, collecting the result.
If this object has a bound on the number of active greenlets it can
contain (such as :class:`Pool`), then at most that number of tasks will operate
in parallel.
:keyword int maxsize: If given and not-None, specifies the maximum number of
finished results that will be allowed to accumulate awaiting the reader;
more than that number of results will cause map function greenlets to begin
to block. This is most useful if there is a great disparity in the speed of
the mapping code and the consumer and the results consume a great deal of resources.
.. note:: This is separate from any bound on the number of active parallel
tasks, though they may have some interaction (for example, limiting the
number of parallel tasks to the smallest bound).
.. note:: Using a bound is slightly more computationally expensive than not using a bound.
.. tip:: The :meth:`imap_unordered` method makes much better
use of this parameter. Some additional, unspecified,
number of objects may be required to be kept in memory
to maintain order by this function.
:return: An iterable object.
.. versionchanged:: 1.1b3
Added the *maxsize* keyword parameter.
.. versionchanged:: 1.1a1
Accept multiple *iterables* to iterate in parallel.
"""
return self.__imap(IMap, func, *iterables, **kwargs)
def imap_unordered(self, func, *iterables, **kwargs):
"""
imap_unordered(func, *iterables, maxsize=None) -> iterable
The same as :meth:`imap` except that the ordering of the results
from the returned iterator should be considered in arbitrary
order.
This is lighter weight than :meth:`imap` and should be preferred if order
doesn't matter.
.. seealso:: :meth:`imap` for more details.
"""
return self.__imap(IMapUnordered, func, *iterables, **kwargs)
| GroupMappingMixin |
python | joke2k__faker | faker/providers/lorem/fr_FR/__init__.py | {
"start": 68,
"end": 26958
} | class ____(LoremProvider):
"""Implement lorem provider for ``fr_FR`` locale.
Word list is drawn from the French Education Ministry's website Eduscol. The
"lexical frequency list" can be found in the source(s) below.
Sources:
- http://eduscol.education.fr/cid47915/liste-des-mots-classee-par-ordre-alphabetique.html
"""
word_list = (
"à",
"abandonner",
"abattre",
"abri",
"absence",
"absolu",
"absolument",
"accent",
"accepter",
"accompagner",
"accomplir",
"accord",
"accorder",
"accrocher",
"accuser",
"acheter",
"achever",
"acte",
"action",
"admettre",
"adresser",
"affaire",
"affirmer",
"afin de",
"âgé",
"âge",
"agent",
"agir",
"agiter",
"ah",
"aide",
"aider",
"aile",
"ailleurs",
"aimer",
"ainsi",
"air",
"ajouter",
"aller",
"allumer",
"alors",
"âme",
"amener",
"ami",
"amour",
"amuser",
"an",
"ancien",
"anglais",
"anglais",
"angoisse",
"animal",
"animer",
"année",
"annoncer",
"apercevoir",
"apparaître",
"apparence",
"appartement",
"appartenir",
"appel",
"appeler",
"apporter",
"apprendre",
"approcher",
"appuyer",
"après",
"après",
"arbre",
"argent",
"arme",
"armée",
"armer",
"arracher",
"arrêter",
"arrière",
"arrivée",
"arriver",
"art",
"article",
"as",
"aspect",
"asseoir",
"assez",
"assister",
"assurer",
"attacher",
"attaquer",
"atteindre",
"attendre",
"attention",
"attirer",
"attitude",
"au",
"aucun",
"aucun",
"aujourd'hui",
"auprès",
"auquel",
"aussi",
"aussitôt",
"autant",
"auteur",
"autorité",
"autour",
"autre",
"autre",
"autrefois",
"autrement",
"avance",
"avancer",
"avant",
"avant",
"avec",
"avec",
"avenir",
"aventure",
"avis",
"avoir",
"avouer",
"baisser",
"banc",
"bande",
"barbe",
"bas",
"bas",
"bas",
"bataille",
"battre",
"beau",
"beau",
"beau",
"beaucoup",
"beauté",
"beaux",
"besoin",
"bête",
"bien",
"bien",
"bientôt",
"billet",
"blanc",
"blanc",
"bleu",
"blond",
"boire",
"bois",
"bon",
"bon",
"bonheur",
"bord",
"bouche",
"bout",
"branche",
"bras",
"briller",
"briser",
"bruit",
"brûler",
"brusquement",
"bureau",
"but",
"ça",
"cabinet",
"cacher",
"calme",
"calme",
"calmer",
"camarade",
"campagne",
"capable",
"car",
"caractère",
"caresser",
"carte",
"cas",
"casser",
"cause",
"causer",
"ce",
"ce",
"ceci",
"céder",
"cela",
"celui",
"cent",
"centre",
"cependant",
"cercle",
"certain",
"certain",
"certainement",
"certes",
"cerveau",
"cesse",
"cesser",
"chacun",
"chaîne",
"chair",
"chaise",
"chaleur",
"chambre",
"champ",
"chance",
"changement",
"changer",
"chant",
"chanter",
"chaque",
"charge",
"charger",
"chasse",
"chasser",
"chat",
"chaud",
"chef",
"chemin",
"chemise",
"cher",
"chercher",
"cheval",
"cheveu",
"chez",
"chien",
"chiffre",
"choisir",
"choix",
"chose",
"chute",
"ci",
"ciel",
"cinq",
"cinquante",
"circonstance",
"clair",
"claire",
"classe",
"clef",
"coeur",
"coin",
"colère",
"colline",
"colon",
"combat",
"combien",
"commander",
"comme",
"comme",
"commencement",
"commencer",
"comment",
"comment",
"commun",
"compagnie",
"compagnon",
"complet",
"complètement",
"composer",
"comprendre",
"compte",
"compter",
"conclure",
"condamner",
"condition",
"conduire",
"confiance",
"confier",
"confondre",
"connaissance",
"connaître",
"conscience",
"conseil",
"consentir",
"considérer",
"construire",
"consulter",
"contenir",
"content",
"contenter",
"continuer",
"contraire",
"contre",
"convenir",
"conversation",
"corde",
"corps",
"côte",
"côté",
"cou",
"couche",
"coucher",
"couler",
"couleur",
"coup",
"couper",
"cour",
"courage",
"courant",
"courir",
"cours",
"course",
"court",
"coûter",
"couvrir",
"craindre",
"crainte",
"créer",
"creuser",
"cri",
"crier",
"crise",
"croire",
"croiser",
"croix",
"cruel",
"cuisine",
"curieux",
"curiosité",
"d'abord",
"dame",
"danger",
"dangereux",
"dans",
"danser",
"d'autres",
"d'autres",
"davantage",
"de",
"de",
"debout",
"début",
"déchirer",
"décider",
"déclarer",
"découvrir",
"décrire",
"défaut",
"défendre",
"dégager",
"dehors",
"dehors",
"déjà",
"delà",
"demain",
"demain",
"demande",
"demander",
"demeurer",
"demi",
"dent",
"départ",
"dépasser",
"déposer",
"depuis",
"depuis",
"dernier",
"dernier",
"derrière",
"dès",
"descendre",
"désert",
"désespoir",
"désigner",
"désir",
"désirer",
"désormais",
"dessiner",
"dessus",
"détacher",
"détail",
"détruire",
"deux",
"devant",
"devant",
"devenir",
"deviner",
"devoir",
"devoir",
"dieu",
"différent",
"difficile",
"digne",
"dimanche",
"dire",
"direction",
"diriger",
"discours",
"discussion",
"discuter",
"disparaître",
"disposer",
"distance",
"distinguer",
"divers",
"dix",
"docteur",
"doigt",
"dominer",
"donc",
"donner",
"dont",
"dormir",
"dos",
"double",
"doucement",
"douceur",
"douleur",
"doute",
"douter",
"doux",
"douze",
"drame",
"dresser",
"droit",
"droit",
"droite",
"droite",
"drôle",
"du",
"dur",
"durant",
"durer",
"eau",
"eaux",
"écarter",
"échapper",
"éclairer",
"éclat",
"éclater",
"école",
"écouter",
"écraser",
"écrire",
"effacer",
"effet",
"effort",
"égal",
"également",
"eh",
"élément",
"élever",
"elle",
"éloigner",
"embrasser",
"emmener",
"émotion",
"empêcher",
"empire",
"employer",
"emporter",
"en",
"en",
"en",
"encore",
"endormir",
"endroit",
"énergie",
"enfance",
"enfant",
"enfermer",
"enfin",
"enfoncer",
"engager",
"enlever",
"ennemi",
"énorme",
"ensemble",
"ensemble",
"ensuite",
"entendre",
"entier",
"entourer",
"entraîner",
"entre",
"entrée",
"entrer",
"entretenir",
"envelopper",
"envie",
"environ",
"envoyer",
"épais",
"épaule",
"époque",
"éprouver",
"erreur",
"escalier",
"espace",
"espèce",
"espérer",
"espoir",
"esprit",
"essayer",
"essuyer",
"est",
"et",
"établir",
"étage",
"étaler",
"état",
"etc",
"été",
"éteindre",
"étendre",
"étendue",
"éternel",
"étoile",
"étonner",
"étouffer",
"étrange",
"étranger",
"étranger",
"être",
"être",
"étroit",
"étude",
"étudier",
"événement",
"éviter",
"examiner",
"exécuter",
"exemple",
"exiger",
"existence",
"exister",
"expérience",
"expliquer",
"exposer",
"expression",
"exprimer",
"extraordinaire",
"face",
"facile",
"façon",
"faible",
"faim",
"faire",
"fait",
"fait",
"falloir",
"famille",
"fatigue",
"fatiguer",
"faute",
"fauteuil",
"faux",
"faveur",
"femme",
"fenêtre",
"fer",
"ferme",
"fermer",
"fête",
"feu",
"feuille",
"fidèle",
"fier",
"figure",
"figurer",
"fil",
"fille",
"fils",
"fin",
"fin",
"fine",
"finir",
"fixe",
"fixer",
"flamme",
"fleur",
"flot",
"foi",
"fois",
"folie",
"fonction",
"fond",
"fonder",
"force",
"forcer",
"forêt",
"forme",
"former",
"fort",
"fort",
"fortune",
"fou",
"foule",
"frais",
"franc",
"français",
"français",
"franchir",
"françois",
"frapper",
"frère",
"froid",
"froid",
"front",
"fruit",
"fuir",
"fumée",
"fumer",
"fusil",
"gagner",
"garçon",
"garde",
"garder",
"gauche",
"gauche",
"général",
"général",
"genou",
"genre",
"gens",
"geste",
"glace",
"glisser",
"gloire",
"goût",
"goutte",
"gouvernement",
"grâce",
"grâce",
"grain",
"grand",
"grand",
"grandir",
"grave",
"gris",
"gros",
"groupe",
"guère",
"guerre",
"habiller",
"habitant",
"habiter",
"habitude",
"haine",
"haïr",
"hasard",
"haut",
"haut",
"haut",
"haute",
"hauteur",
"herbe",
"hésiter",
"heure",
"heureux",
"hier",
"histoire",
"hiver",
"homme",
"honneur",
"honte",
"horizon",
"hors",
"hôtel",
"huit",
"humain",
"humide",
"ici",
"idée",
"ignorer",
"il",
"île",
"image",
"imaginer",
"immense",
"immobile",
"importance",
"important",
"importer",
"imposer",
"impossible",
"impression",
"inconnu",
"indiquer",
"inquiéter",
"inquiétude",
"inspirer",
"installer",
"instant",
"instinct",
"intelligence",
"intention",
"intéresser",
"intérêt",
"intérieur",
"intérieur",
"interroger",
"interrompre",
"inutile",
"inventer",
"inviter",
"jamais",
"jambe",
"jardin",
"jaune",
"je",
"jeter",
"jeu",
"jeune",
"jeune",
"jeunesse",
"joie",
"joindre",
"joli",
"joue",
"jouer",
"jour",
"journal",
"journée",
"juge",
"juger",
"jusque",
"juste",
"justice",
"là",
"large",
"larme",
"le",
"le",
"léger",
"lendemain",
"lentement",
"lequel",
"lettre",
"leur",
"leur",
"lever",
"lèvre",
"liberté",
"libre",
"lien",
"lier",
"lieu",
"ligne",
"lire",
"lisser",
"lit",
"livre",
"livrer",
"loi",
"loin",
"long",
"long",
"longtemps",
"lors",
"lorsque",
"loup",
"lourd",
"lueur",
"lui",
"lumière",
"l'un",
"l'une",
"lune",
"lutte",
"lutter",
"machine",
"madame",
"magnifique",
"main",
"maintenant",
"maintenir",
"mais",
"maison",
"maître",
"mal",
"mal",
"malade",
"maladie",
"malgré",
"malheur",
"manger",
"manier",
"manquer",
"marchand",
"marché",
"marche",
"marcher",
"mari",
"mari",
"mariage",
"marier",
"marquer",
"masse",
"matière",
"matin",
"mauvais",
"me",
"médecin",
"meilleur",
"mêler",
"membre",
"même",
"même",
"même",
"mémoire",
"menacer",
"mener",
"mensonge",
"mentir",
"mer",
"mériter",
"mesure",
"métier",
"mettre",
"midi",
"mien",
"mieux",
"milieu",
"militaire",
"mille",
"million",
"mince",
"mine",
"ministre",
"minute",
"miser",
"mode",
"moi",
"moindre",
"moins",
"mois",
"moitié",
"moment",
"mon",
"monde",
"monsieur",
"montagne",
"monter",
"montrer",
"morceau",
"mort",
"mort",
"mot",
"mourir",
"mouvement",
"moyen",
"moyen",
"muet",
"mur",
"musique",
"naissance",
"naître",
"nation",
"nature",
"naturel",
"naturellement",
"ne",
"nécessaire",
"nerveux",
"neuf",
"neuf",
"nez",
"ni",
"noir",
"noir",
"noire",
"nom",
"nombre",
"nombreux",
"nommer",
"non",
"nord",
"note",
"notre",
"nourrir",
"nous",
"nouveau",
"nouveau",
"nu",
"nuage",
"nuit",
"nul",
"obéir",
"objet",
"obliger",
"observer",
"obtenir",
"occasion",
"occuper",
"odeur",
"oeil",
"oeuvre",
"officier",
"offrir",
"oh",
"oiseau",
"ombre",
"on",
"oncle",
"or",
"or",
"ordre",
"oreille",
"oser",
"ou",
"où",
"oublier",
"oui",
"ouvert",
"ouvrage",
"ouvrir",
"page",
"pain",
"paix",
"palais",
"papa",
"papier",
"paquet",
"par",
"paraître",
"parce que",
"parcourir",
"pareil",
"parent",
"parfaitement",
"parfois",
"parler",
"parmi",
"parole",
"part",
"partager",
"parti",
"particulier",
"partie",
"partir",
"partout",
"parvenir",
"pas",
"pas",
"passage",
"passé",
"passé",
"passer",
"passion",
"patron",
"paupière",
"pauvre",
"pauvre",
"payer",
"pays",
"paysage",
"paysan",
"peau",
"peine",
"pencher",
"pendant",
"pendre",
"pénétrer",
"pensée",
"penser",
"perdre",
"perdu",
"père",
"permettre",
"personnage",
"personne",
"personne",
"perte",
"peser",
"petit",
"petit",
"peu",
"peuple",
"peur",
"phrase",
"pièce",
"pied",
"pierre",
"pitié",
"place",
"placer",
"plaindre",
"plaine",
"plaire",
"plaisir",
"plan",
"planche",
"plante",
"plein",
"plein",
"pleurer",
"plonger",
"pluie",
"plus",
"plusieurs",
"plutôt",
"poche",
"poésie",
"poète",
"poids",
"point",
"point",
"pointe",
"poitrine",
"police",
"politique",
"politique",
"pont",
"port",
"porte",
"porter",
"portier",
"poser",
"position",
"posséder",
"possible",
"poste",
"pour",
"pourquoi",
"pourquoi",
"poursuivre",
"pourtant",
"pousser",
"poussière",
"pouvoir",
"pouvoir",
"précéder",
"précieux",
"précipiter",
"précis",
"préférer",
"premier",
"premier",
"prendre",
"préparer",
"près",
"près",
"présence",
"présent",
"présent",
"présenter",
"président",
"presque",
"presser",
"prêt",
"prétendre",
"prêter",
"preuve",
"prévenir",
"prévoir",
"prier",
"prière",
"prince",
"principe",
"printemps",
"prison",
"prix",
"problème",
"prochain",
"produire",
"professeur",
"profiter",
"profond",
"profondément",
"projet",
"promener",
"promettre",
"prononcer",
"propos",
"proposer",
"propre",
"protéger",
"prouver",
"public",
"public",
"puis",
"puis",
"puisque",
"puissance",
"puissant",
"pur",
"qualité",
"quand",
"quant à",
"quarante",
"quart",
"quartier",
"quatre",
"que",
"que",
"quel",
"quelque",
"quelque",
"quelqu'un",
"question",
"queue",
"qui",
"quinze",
"quitter",
"quoi",
"race",
"raconter",
"raison",
"ramasser",
"ramener",
"rang",
"rapide",
"rapidement",
"rappeler",
"rapport",
"rapporter",
"rare",
"rassurer",
"rayon",
"réalité",
"recevoir",
"recherche",
"réclamer",
"recommencer",
"reconnaître",
"recueillir",
"reculer",
"réduire",
"réel",
"réfléchir",
"réflexion",
"refuser",
"regard",
"regarder",
"règle",
"regretter",
"rejeter",
"rejoindre",
"relation",
"relever",
"religion",
"remarquer",
"remercier",
"remettre",
"remonter",
"remplacer",
"remplir",
"rencontre",
"rencontrer",
"rendre",
"renoncer",
"rentrer",
"renverser",
"répandre",
"repas",
"répéter",
"répondre",
"réponse",
"reposer",
"repousser",
"reprendre",
"représenter",
"réserver",
"résistance",
"résister",
"résoudre",
"respect",
"respecter",
"respirer",
"ressembler",
"reste",
"rester",
"résultat",
"retenir",
"retirer",
"retomber",
"retour",
"retourner",
"retrouver",
"réunir",
"réussir",
"rêve",
"réveiller",
"révéler",
"revenir",
"rêver",
"revoir",
"révolution",
"riche",
"rideau",
"rien",
"rire",
"rire",
"risquer",
"robe",
"roche",
"rocher",
"roi",
"rôle",
"roman",
"rompre",
"rond",
"rose",
"rose",
"rouge",
"rouge",
"rouler",
"route",
"rue",
"ruine",
"sable",
"sac",
"saint",
"saint",
"saisir",
"saison",
"salle",
"saluer",
"salut",
"sang",
"sans",
"santé",
"satisfaire",
"sauter",
"sauvage",
"sauver",
"savoir",
"savoir",
"scène",
"science",
"se",
"sec",
"second",
"seconde",
"secours",
"secret",
"secret",
"secrétaire",
"seigneur",
"sein",
"selon",
"semaine",
"semblable",
"sembler",
"sens",
"sentier",
"sentiment",
"sentir",
"séparer",
"sept",
"sérieux",
"serrer",
"service",
"servir",
"seuil",
"seul",
"seulement",
"si",
"si",
"siècle",
"siège",
"sien",
"signe",
"signer",
"signifier",
"silence",
"silencieux",
"simple",
"simplement",
"situation",
"six",
"social",
"société",
"soi",
"soin",
"soir",
"soirée",
"soit",
"sol",
"soldat",
"soleil",
"solitude",
"sombre",
"somme",
"sommeil",
"sommet",
"son",
"son",
"songer",
"sonner",
"sorte",
"sortir",
"sou",
"soudain",
"souffler",
"souffrance",
"souffrir",
"souhaiter",
"soulever",
"soumettre",
"source",
"sourd",
"sourire",
"sourire",
"sous",
"soutenir",
"souvenir",
"souvenir",
"souvent",
"spectacle",
"subir",
"succès",
"sueur",
"suffire",
"suite",
"suivant",
"suivre",
"sujet",
"supérieur",
"supporter",
"supposer",
"sûr",
"sur",
"surprendre",
"surtout",
"surveiller",
"système",
"table",
"tâche",
"tache",
"taille",
"taire",
"tandis que",
"tant",
"tantôt",
"tapis",
"tard",
"te",
"tel",
"tellement",
"témoin",
"tempête",
"temps",
"tendre",
"tendre",
"tenir",
"tenter",
"terme",
"terminer",
"terrain",
"terre",
"terreur",
"terrible",
"tête",
"théâtre",
"tirer",
"titre",
"toi",
"toile",
"toit",
"tombe",
"tomber",
"ton",
"ton",
"tôt",
"toucher",
"toujours",
"tour",
"tourner",
"tout",
"tout",
"tout",
"tout",
"toute",
"trace",
"tracer",
"train",
"traîner",
"trait",
"traiter",
"tranquille",
"transformer",
"travail",
"travailler",
"travers",
"traverser",
"trembler",
"trente",
"très",
"trésor",
"triste",
"trois",
"troisième",
"tromper",
"trop",
"trou",
"troubler",
"trouver",
"tu",
"tuer",
"type",
"un",
"un",
"un",
"unique",
"usage",
"user",
"vague",
"vague",
"vaincre",
"valeur",
"valoir",
"vaste",
"veille",
"veiller",
"vendre",
"venir",
"vent",
"ventre",
"véritable",
"vérité",
"verre",
"vers",
"vers",
"verser",
"vert",
"vêtement",
"vêtir",
"victime",
"vide",
"vide",
"vie",
"vieil",
"vieillard",
"vieux",
"vieux",
"vif",
"village",
"ville",
"vin",
"vingt",
"violence",
"violent",
"visage",
"visible",
"vision",
"visite",
"vite",
"vivant",
"vivre",
"voici",
"voie",
"voilà",
"voile",
"voir",
"voisin",
"voisin",
"voiture",
"voix",
"vol",
"voler",
"volonté",
"votre",
"vouloir",
"vous",
"voyage",
"voyager",
"vrai",
"vraiment",
"vue",
"y",
"yeux",
)
parts_of_speech: Dict[str, tuple] = {}
| Provider |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/loop18.py | {
"start": 168,
"end": 203
} | class ____:
parent: Self | None
| A |
python | openai__openai-python | src/openai/types/fine_tuning/fine_tuning_job_wandb_integration.py | {
"start": 202,
"end": 1026
} | class ____(BaseModel):
project: str
"""The name of the project that the new run will be created under."""
entity: Optional[str] = None
"""The entity to use for the run.
This allows you to set the team or username of the WandB user that you would
like associated with the run. If not set, the default entity for the registered
WandB API key is used.
"""
name: Optional[str] = None
"""A display name to set for the run.
If not set, we will use the Job ID as the name.
"""
tags: Optional[List[str]] = None
"""A list of tags to be attached to the newly created run.
These tags are passed through directly to WandB. Some default tags are generated
by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}".
"""
| FineTuningJobWandbIntegration |
python | joke2k__faker | faker/providers/bank/no_NO/__init__.py | {
"start": 42,
"end": 185
} | class ____(BankProvider):
"""Implement bank provider for ``no_NO`` locale."""
bban_format = "###########"
country_code = "NO"
| Provider |
python | celery__celery | celery/bin/base.py | {
"start": 8513,
"end": 9174
} | class ____(click.Choice):
"""Log level option."""
def __init__(self):
"""Initialize the log level option with the relevant choices."""
super().__init__(('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL', 'FATAL'))
def convert(self, value, param, ctx):
if isinstance(value, numbers.Integral):
return value
value = value.upper()
value = super().convert(value, param, ctx)
return mlevel(value)
JSON_ARRAY = JsonArray()
JSON_OBJECT = JsonObject()
ISO8601 = ISO8601DateTime()
ISO8601_OR_FLOAT = ISO8601DateTimeOrFloat()
LOG_LEVEL = LogLevel()
COMMA_SEPARATED_LIST = CommaSeparatedList()
| LogLevel |
python | ansible__ansible | lib/ansible/_internal/_templating/_jinja_bits.py | {
"start": 16825,
"end": 17200
} | class ____(_ambient_context.AmbientContextBase):
"""
This context is active during Ansible's explicit compilation of templates/expressions, but not during Jinja's runtime compilation.
Historically, Ansible-specific pre-processing like `escape_backslashes` was not applied to imported/included templates.
"""
escape_backslashes: bool
| _TemplateCompileContext |
python | huggingface__transformers | src/transformers/utils/chat_template_utils.py | {
"start": 23051,
"end": 23630
} | class ____:
"""This class is intended to just be used internally for pipelines and not exposed to users. We convert chats
to this format because the rest of the pipeline code tends to assume that lists of messages are
actually a batch of samples rather than messages in the same conversation."""
def __init__(self, messages: dict):
for message in messages:
if not is_valid_message(message):
raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.")
self.messages = messages
| Chat |
python | PrefectHQ__prefect | tests/blocks/test_block_reference.py | {
"start": 2937,
"end": 6598
} | class ____:
@pytest.fixture
def ParamBlock(self) -> Type[Block]:
# Ignore warning caused by matching key in registry due to block fixture
warnings.filterwarnings("ignore", category=UserWarning)
class ParamBlock(Block):
a: int
b: str
return ParamBlock
@pytest.fixture
def OtherParamBlock(self) -> Type[Block]:
# Ignore warning caused by matching key in registry due to block fixture
warnings.filterwarnings("ignore", category=UserWarning)
class OtherParamBlock(Block):
a: int
b: str
return OtherParamBlock
def test_flow_with_block_params(self, ParamBlock):
ref_block = ParamBlock(a=10, b="foo")
ref_block.save("param-block")
@flow
def flow_with_block_param(block: ParamBlock) -> int:
return block.a
assert (
flow_with_block_param({"$ref": str(ref_block._block_document_id)})
== ref_block.a
)
assert (
flow_with_block_param(
{"$ref": {"block_document_id": str(ref_block._block_document_id)}}
)
== ref_block.a
)
def test_flow_with_invalid_block_param_type(self, ParamBlock, OtherParamBlock):
ref_block = OtherParamBlock(a=10, b="foo")
ref_block.save("other-param-block")
@flow
def flow_with_block_param(block: ParamBlock) -> int:
return block.a
with pytest.raises(
ParameterTypeError, match="Flow run received invalid parameters"
):
flow_with_block_param({"$ref": str(ref_block._block_document_id)})
def test_flow_with_nested_block_params(self, ParamBlock):
class NestedParamBlock(Block):
inner_block: ParamBlock
nested_block = NestedParamBlock(inner_block=ParamBlock(a=12, b="foo"))
nested_block.save("nested-block")
@flow
def flow_with_nested_block_param(block: NestedParamBlock):
return block.inner_block.a
assert (
flow_with_nested_block_param(
{"$ref": {"block_document_id": str(nested_block._block_document_id)}}
)
== nested_block.inner_block.a
)
def test_flow_with_block_param_in_basemodel(self, ParamBlock):
class ParamModel(pydantic.BaseModel):
block: ParamBlock
param_block = ParamBlock(a=12, b="foo")
param_block.save("param-block")
@flow
def flow_with_block_param_in_basemodel(param: ParamModel):
return param.block.a
assert (
flow_with_block_param_in_basemodel(
{
"block": {
"$ref": {
"block_document_id": str(param_block._block_document_id)
}
}
}
)
== param_block.a
)
def test_async_flow_with_block_params(self, ParamBlock):
ref_block = ParamBlock(a=10, b="foo")
ref_block.save("param-block")
@flow
async def flow_with_block_param(block: ParamBlock) -> int:
return block.a
assert (
asyncio.run(
flow_with_block_param({"$ref": str(ref_block._block_document_id)})
)
== ref_block.a
)
assert (
asyncio.run(
flow_with_block_param(
{"$ref": {"block_document_id": str(ref_block._block_document_id)}}
)
)
== ref_block.a
)
| TestFlowWithBlockParam |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess1.py | {
"start": 619,
"end": 847
} | class ____:
bar = DescriptorA[str]()
@classmethod
def func1(cls):
a: DescriptorA[str] = cls.bar
reveal_type(ClassA.bar, expected_text="DescriptorA[str]")
reveal_type(ClassA().bar, expected_text="str")
| ClassA |
python | great-expectations__great_expectations | great_expectations/expectations/metrics/column_aggregate_metrics/column_proportion_of_unique_values.py | {
"start": 1490,
"end": 3125
} | class ____(ColumnAggregateMetricProvider):
metric_name = "column.unique_proportion"
@metric_value(engine=PandasExecutionEngine)
def _pandas(*args, metrics, **kwargs):
return unique_proportion(metrics)
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(*args, metrics, **kwargs):
return unique_proportion(metrics)
@metric_value(engine=SparkDFExecutionEngine)
def _spark(*args, metrics, **kwargs):
return unique_proportion(metrics)
@classmethod
@override
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
dependencies["column.distinct_values.count"] = MetricConfiguration(
metric_name="column.distinct_values.count",
metric_domain_kwargs=metric.metric_domain_kwargs,
)
dependencies[
f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}"
] = MetricConfiguration(
metric_name=f"column_values.nonnull.{SummarizationMetricNameSuffixes.UNEXPECTED_COUNT.value}",
metric_domain_kwargs=metric.metric_domain_kwargs,
)
return dependencies
| ColumnUniqueProportion |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_dev_tests/ai_review_tests/test_ai_review_summarize_smoke.py | {
"start": 219,
"end": 5666
} | class ____:
"""Basic smoke tests for the ai-review-summarize command."""
def test_import_and_basic_structure(self):
"""Test that command can be imported and has expected structure."""
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
assert ai_review_summarize is not None
assert ai_review_summarize.name == "ai-review-summarize"
assert callable(ai_review_summarize)
def test_help_command(self):
"""Test that help command works."""
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--help"])
assert result.exit_code == 0
assert "ai-review-summarize" in result.output
assert "--diff-range" in result.output
assert "--format" in result.output
assert "--confidence-threshold" in result.output
def test_basic_json_output(self):
"""Test basic command with mocked diff summary."""
with patch(
"automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary"
) as mock_diff:
mock_diff.return_value = SmartDiffSummary(
change_category=ChangeType.NEW_FEATURE,
files_changed=1,
additions=10,
deletions=2,
functions=[],
classes=[],
imports=[],
key_implementation_details="Test details",
api_changes=[],
summary_confidence=0.8,
needs_detailed_review=False,
)
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "json"])
assert result.exit_code == 0
mock_diff.assert_called_once()
def test_human_format_output(self):
"""Test command with human-readable format."""
with patch(
"automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary"
) as mock_diff:
mock_diff.return_value = SmartDiffSummary(
change_category=ChangeType.BUG_FIX,
files_changed=2,
additions=15,
deletions=5,
functions=[],
classes=[],
imports=[],
key_implementation_details="Bug fix details",
api_changes=[],
summary_confidence=0.9,
needs_detailed_review=False,
)
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "human"])
assert result.exit_code == 0
mock_diff.assert_called_once()
def test_invalid_format(self):
"""Test command with invalid format."""
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--format", "invalid"])
assert result.exit_code != 0
assert "Invalid value" in result.output
def test_confidence_threshold_parameter(self):
"""Test that confidence threshold parameter is accepted."""
with patch(
"automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary"
) as mock_diff:
mock_diff.return_value = SmartDiffSummary(
change_category=ChangeType.REFACTOR,
files_changed=3,
additions=20,
deletions=10,
functions=[],
classes=[],
imports=[],
key_implementation_details="Refactor details",
api_changes=[],
summary_confidence=0.5,
needs_detailed_review=True,
)
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--confidence-threshold", "0.3"])
# Should not crash with valid threshold
assert result.exit_code == 0
mock_diff.assert_called_once()
def test_custom_diff_range(self):
"""Test command with custom diff range."""
with patch(
"automation.dagster_dev.commands.ai_review_summarize.get_smart_diff_summary"
) as mock_diff:
mock_diff.return_value = SmartDiffSummary(
change_category=ChangeType.TESTS,
files_changed=1,
additions=25,
deletions=0,
functions=[],
classes=[],
imports=[],
key_implementation_details="Test additions",
api_changes=[],
summary_confidence=0.7,
needs_detailed_review=False,
)
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--diff-range", "HEAD~2..HEAD"])
assert result.exit_code == 0
mock_diff.assert_called_once()
| TestAiReviewSummarizeSmoke |
python | pandas-dev__pandas | asv_bench/benchmarks/io/csv.py | {
"start": 10815,
"end": 11210
} | class ____(StringIORewind):
params = ["c", "python"]
param_names = ["engine"]
def setup(self, engine):
data = ["A,B,C"] + (["1,2,3 # comment"] * 100000)
self.StringIO_input = StringIO("\n".join(data))
def time_comment(self, engine):
read_csv(
self.data(self.StringIO_input), comment="#", header=None, names=list("abc")
)
| ReadCSVComment |
python | plotly__plotly.py | plotly/graph_objs/scatter3d/line/colorbar/_title.py | {
"start": 233,
"end": 4021
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "scatter3d.line.colorbar"
_path_str = "scatter3d.line.colorbar.title"
_valid_props = {"font", "side", "text"}
@property
def font(self):
"""
Sets this color bar's title font.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter3d.line.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.scatter3d.line.colorbar.title.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def side(self):
"""
Determines the location of color bar's title with respect to
the color bar. Defaults to "top" when `orientation` if "v" and
defaults to "right" when `orientation` if "h".
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
Any
"""
return self["side"]
@side.setter
def side(self, val):
self["side"] = val
@property
def text(self):
"""
Sets the title of the color bar.
The 'text' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["text"]
@text.setter
def text(self, val):
self["text"] = val
@property
def _prop_descriptions(self):
return """\
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
"""
def __init__(self, arg=None, font=None, side=None, text=None, **kwargs):
"""
Construct a new Title object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.scatter3d.line
.colorbar.Title`
font
Sets this color bar's title font.
side
Determines the location of color bar's title with
respect to the color bar. Defaults to "top" when
`orientation` if "v" and defaults to "right" when
`orientation` if "h".
text
Sets the title of the color bar.
Returns
-------
Title
"""
super().__init__("title")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.scatter3d.line.colorbar.Title
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter3d.line.colorbar.Title`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("font", arg, font)
self._set_property("side", arg, side)
self._set_property("text", arg, text)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Title |
python | doocs__leetcode | solution/0700-0799/0718.Maximum Length of Repeated Subarray/Solution.py | {
"start": 0,
"end": 423
} | class ____:
def findLength(self, nums1: List[int], nums2: List[int]) -> int:
m, n = len(nums1), len(nums2)
f = [[0] * (n + 1) for _ in range(m + 1)]
ans = 0
for i in range(1, m + 1):
for j in range(1, n + 1):
if nums1[i - 1] == nums2[j - 1]:
f[i][j] = f[i - 1][j - 1] + 1
ans = max(ans, f[i][j])
return ans
| Solution |
python | mlflow__mlflow | mlflow/data/spark_dataset_source.py | {
"start": 190,
"end": 2110
} | class ____(DatasetSource):
"""
Represents the source of a dataset stored in a spark table.
"""
def __init__(
self,
path: str | None = None,
table_name: str | None = None,
sql: str | None = None,
):
if (path, table_name, sql).count(None) != 2:
raise MlflowException(
'Must specify exactly one of "path", "table_name", or "sql"',
INVALID_PARAMETER_VALUE,
)
self._path = path
self._table_name = table_name
self._sql = sql
@staticmethod
def _get_source_type() -> str:
return "spark"
def load(self, **kwargs):
"""Loads the dataset source as a Spark Dataset Source.
Returns:
An instance of ``pyspark.sql.DataFrame``.
"""
from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
if self._path:
return spark.read.parquet(self._path)
if self._table_name:
return spark.read.table(self._table_name)
if self._sql:
return spark.sql(self._sql)
@staticmethod
def _can_resolve(raw_source: Any):
return False
@classmethod
def _resolve(cls, raw_source: str) -> "SparkDatasetSource":
raise NotImplementedError
def to_dict(self) -> dict[Any, Any]:
info = {}
if self._path is not None:
info["path"] = self._path
elif self._table_name is not None:
info["table_name"] = self._table_name
elif self._sql is not None:
info["sql"] = self._sql
return info
@classmethod
def from_dict(cls, source_dict: dict[Any, Any]) -> "SparkDatasetSource":
return cls(
path=source_dict.get("path"),
table_name=source_dict.get("table_name"),
sql=source_dict.get("sql"),
)
| SparkDatasetSource |
python | getsentry__sentry | tests/snuba/search/test_backend.py | {
"start": 1849,
"end": 3914
} | class ____(SnubaTestCase):
@property
def backend(self) -> SnubaSearchBackendBase:
raise NotImplementedError(self)
def build_search_filter(self, query, projects=None, user=None, environments=None):
user = user if user is not None else self.user
projects = projects if projects is not None else [self.project]
return convert_query_values(parse_search_query(query), projects, user, environments)
def make_query(
self,
projects=None,
search_filter_query=None,
user=None,
environments=None,
sort_by="date",
limit=None,
count_hits=False,
date_from=None,
date_to=None,
cursor=None,
aggregate_kwargs=None,
):
search_filters = []
projects = projects if projects is not None else [self.project]
if search_filter_query is not None:
search_filters = self.build_search_filter(
search_filter_query, projects, user=user, environments=environments
)
kwargs = {}
if limit is not None:
kwargs["limit"] = limit
if aggregate_kwargs:
kwargs["aggregate_kwargs"] = {"trends": {**aggregate_kwargs}}
return self.backend.query(
projects,
search_filters=search_filters,
environments=environments,
count_hits=count_hits,
sort_by=sort_by,
date_from=date_from,
date_to=date_to,
cursor=cursor,
referrer=Referrer.TESTING_TEST,
**kwargs,
)
def store_event(self, data, *args, **kwargs):
event = super().store_event(data, *args, **kwargs)
environment_name = data.get("environment")
if environment_name:
GroupEnvironment.objects.filter(
group_id=event.group_id,
environment__name=environment_name,
first_seen__gt=event.datetime,
).update(first_seen=event.datetime)
return event
| SharedSnubaMixin |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/random_shuffle_queue_test.py | {
"start": 1358,
"end": 51354
} | class ____(test.TestCase):
def setUp(self):
# Useful for debugging when a test times out.
super(RandomShuffleQueueTest, self).setUp()
tf_logging.error("Starting: %s", self._testMethodName)
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
def tearDown(self):
super(RandomShuffleQueueTest, self).tearDown()
tf_logging.error("Finished: %s", self._testMethodName)
def testEnqueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
self.assertAllEqual(0, q.size())
enqueue_op.run()
self.assertAllEqual(1, q.size())
def testEnqueueWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=tensor_shape.TensorShape([3, 2]))
enqueue_correct_op = q.enqueue(([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]],))
enqueue_correct_op.run()
self.assertAllEqual(1, q.size())
with self.assertRaises(ValueError):
q.enqueue(([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],))
def testEnqueueManyWithShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(
10, 5, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (2,)])
q.enqueue_many([[1, 2, 3, 4], [[1, 1], [2, 2], [3, 3], [4, 4]]]).run()
self.assertAllEqual(4, q.size())
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shapes=tensor_shape.TensorShape([3]))
q2.enqueue(([1, 2, 3],))
q2.enqueue_many(([[1, 2, 3]],))
def testScalarShapes(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, [dtypes_lib.int32, dtypes_lib.int32], shapes=[(), (1,)])
q.enqueue_many([[1, 2, 3, 4], [[5], [6], [7], [8]]]).run()
q.enqueue([9, [10]]).run()
dequeue_t = q.dequeue()
results = []
for _ in range(2):
a, b = self.evaluate(dequeue_t)
results.append((a, b))
a, b = self.evaluate(q.dequeue_many(3))
for i in range(3):
results.append((a[i], b[i]))
self.assertItemsEqual([(1, [5]), (2, [6]), (3, [7]), (4, [8]), (9, [10])],
results)
def testParallelEnqueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Run one producer thread for each element in elems.
def enqueue(enqueue_op):
self.evaluate(enqueue_op)
threads = [
self.checkedThread(
target=enqueue, args=(e,)) for e in enqueue_ops
]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# Dequeue every element using a single thread.
results = []
for _ in range(len(elems)):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testParallelDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
# Enqueue every element using a single thread.
for enqueue_op in enqueue_ops:
enqueue_op.run()
# Run one consumer thread for each element in elems.
results = []
def dequeue():
results.append(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in enqueue_ops]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, results)
def testDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
vals = [dequeued_t.eval() for _ in range(len(elems))]
self.assertItemsEqual(elems, vals)
def testEnqueueAndBlockingDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(3, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0]
enqueue_ops = [q.enqueue((x,)) for x in elems]
dequeued_t = q.dequeue()
def enqueue():
# The enqueue_ops should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
for enqueue_op in enqueue_ops:
self.evaluate(enqueue_op)
results = []
def dequeue():
for _ in range(len(elems)):
results.append(self.evaluate(dequeued_t))
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, results)
def testMultiEnqueueAndDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.int32, dtypes_lib.float32))
elems = [(5, 10.0), (10, 20.0), (15, 30.0)]
enqueue_ops = [q.enqueue((x, y)) for x, y in elems]
dequeued_t = q.dequeue()
for enqueue_op in enqueue_ops:
enqueue_op.run()
results = []
for _ in range(len(elems)):
x, y = self.evaluate(dequeued_t)
results.append((x, y))
self.assertItemsEqual(elems, results)
def testQueueSizeEmpty(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
self.assertEqual(0, q.size().eval())
def testQueueSizeAfterEnqueueAndDequeue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue()
size = q.size()
self.assertEqual([], size.get_shape())
enqueue_op.run()
self.assertEqual([1], self.evaluate(size))
dequeued_t.op.run()
self.assertEqual([0], self.evaluate(size))
def testEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + elems, results)
def testEmptyEnqueueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32)
empty_t = constant_op.constant(
[], dtype=dtypes_lib.float32, shape=[0, 2, 3])
enqueue_op = q.enqueue_many((empty_t,))
size_t = q.size()
self.assertEqual(0, self.evaluate(size_t))
enqueue_op.run()
self.assertEqual(0, self.evaluate(size_t))
def testEmptyDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_many(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueUpTo(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, shapes=())
enqueue_op = q.enqueue((10.0,))
dequeued_t = q.dequeue_up_to(0)
self.assertEqual([], self.evaluate(dequeued_t).tolist())
enqueue_op.run()
self.assertEqual([], self.evaluate(dequeued_t).tolist())
def testEmptyDequeueManyWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_many(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueMany
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testEmptyDequeueUpToWithNoShape(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
enqueue_op = q.enqueue((constant_op.constant(
[10.0, 20.0], shape=(1, 2)),))
dequeued_t = q.dequeue_up_to(0)
# Expect the operation to fail due to the shape not being constrained.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
enqueue_op.run()
# RandomShuffleQueue does not make any attempt to support DequeueUpTo
# with unspecified shapes, even if a shape could be inferred from the
# elements enqueued.
with self.assertRaisesOpError(
"require the components to have specified shapes"):
self.evaluate(dequeued_t)
def testMultiEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32))
float_elems = [10.0, 20.0, 30.0, 40.0]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue()
enqueue_op.run()
enqueue_op.run()
results = []
for _ in range(8):
float_val, int_val = self.evaluate(dequeued_t)
results.append((float_val, [int_val[0], int_val[1]]))
expected = list(zip(float_elems, int_elems)) * 2
self.assertItemsEqual(expected, results)
def testDequeueMany(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testDequeueUpToNoBlocking(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(5)
enqueue_op.run()
results = self.evaluate(dequeued_t).tolist()
results.extend(dequeued_t.eval())
self.assertItemsEqual(elems, results)
def testMultiDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_many(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
self.assertEqual(float_val.shape, dequeued_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_t[1].get_shape())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testMultiDequeueUpToNoBlocking(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
10, 0, (dtypes_lib.float32, dtypes_lib.int32), shapes=((), (2,)))
float_elems = [
10.0, 20.0, 30.0, 40.0, 50.0, 60.0, 70.0, 80.0, 90.0, 100.0
]
int_elems = [[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12], [13, 14],
[15, 16], [17, 18], [19, 20]]
enqueue_op = q.enqueue_many((float_elems, int_elems))
dequeued_t = q.dequeue_up_to(4)
dequeued_single_t = q.dequeue()
enqueue_op.run()
results = []
float_val, int_val = self.evaluate(dequeued_t)
# dequeue_up_to has undefined shape.
self.assertEqual([None], dequeued_t[0].get_shape().as_list())
self.assertEqual([None, 2], dequeued_t[1].get_shape().as_list())
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_t)
results.extend(zip(float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
self.assertEqual(float_val.shape, dequeued_single_t[0].get_shape())
self.assertEqual(int_val.shape, dequeued_single_t[1].get_shape())
results.append((float_val, int_val.tolist()))
float_val, int_val = self.evaluate(dequeued_single_t)
results.append((float_val, int_val.tolist()))
self.assertItemsEqual(zip(float_elems, int_elems), results)
def testHighDimension(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.int32, (
(4, 4, 4, 4)))
elems = np.array([[[[[x] * 4] * 4] * 4] * 4 for x in range(10)], np.int32)
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(10)
enqueue_op.run()
self.assertItemsEqual(dequeued_t.eval().tolist(), elems.tolist())
def testParallelEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(100)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(1000)
# Enqueue 100 items in parallel on 10 threads.
def enqueue():
self.evaluate(enqueue_op)
threads = [self.checkedThread(target=enqueue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(dequeued_t.eval(), elems * 10)
def testParallelDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
1000, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(1000)]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(100)
enqueue_op.run()
# Dequeue 100 items in parallel on 10 threads.
dequeued_elems = []
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t))
threads = [self.checkedThread(target=dequeue) for _ in range(10)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testParallelDequeueUpToRandomPartition(self):
with self.cached_session() as sess:
dequeue_sizes = [random.randint(50, 150) for _ in range(10)]
total_elements = sum(dequeue_sizes)
q = data_flow_ops.RandomShuffleQueue(
total_elements, 0, dtypes_lib.float32, shapes=())
elems = [10.0 * x for x in range(total_elements)]
enqueue_op = q.enqueue_many((elems,))
dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]
enqueue_op.run()
# Dequeue random number of items in parallel on 10 threads.
dequeued_elems = []
def dequeue(dequeue_op):
dequeued_elems.extend(self.evaluate(dequeue_op))
threads = []
for dequeue_op in dequeue_ops:
threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
for thread in threads:
thread.start()
for thread in threads:
thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_many(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testBlockingDequeueUpTo(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
dequeued_t = q.dequeue_up_to(4)
dequeued_elems = []
def enqueue():
# The enqueue_op should run after the dequeue op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.evaluate(enqueue_op)
def dequeue():
dequeued_elems.extend(self.evaluate(dequeued_t).tolist())
enqueue_thread = self.checkedThread(target=enqueue)
dequeue_thread = self.checkedThread(target=dequeue)
enqueue_thread.start()
dequeue_thread.start()
enqueue_thread.join()
dequeue_thread.join()
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueManyWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesMany
# that number of elements.
dequeued_t = q.dequeue_many(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueUpToWithTensorParameter(self):
with self.cached_session():
# Define a first queue that contains integer counts.
dequeue_counts = [random.randint(1, 10) for _ in range(100)]
count_q = data_flow_ops.RandomShuffleQueue(100, 0, dtypes_lib.int32)
enqueue_counts_op = count_q.enqueue_many((dequeue_counts,))
total_count = sum(dequeue_counts)
# Define a second queue that contains total_count elements.
elems = [random.randint(0, 100) for _ in range(total_count)]
q = data_flow_ops.RandomShuffleQueue(total_count, 0, dtypes_lib.int32, (
(),))
enqueue_elems_op = q.enqueue_many((elems,))
# Define a subgraph that first dequeues a count, then DequeuesUpTo
# that number of elements.
dequeued_t = q.dequeue_up_to(count_q.dequeue())
enqueue_counts_op.run()
enqueue_elems_op.run()
dequeued_elems = []
for _ in dequeue_counts:
dequeued_elems.extend(dequeued_t.eval())
self.assertItemsEqual(elems, dequeued_elems)
def testDequeueFromClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 2, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
close_op.run()
results = [dequeued_t.eval() for _ in elems]
expected = [[elem] for elem in elems]
self.assertItemsEqual(expected, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
def testBlockingDequeueFromClosedQueue(self):
with self.cached_session() as sess:
min_size = 2
q = data_flow_ops.RandomShuffleQueue(10, min_size, dtypes_lib.float32)
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue()
enqueue_op.run()
results = []
# Manually dequeue until we hit min_size.
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
def blocking_dequeue():
results.append(self.evaluate(dequeued_t))
results.append(self.evaluate(dequeued_t))
self.assertItemsEqual(elems, results)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=blocking_dequeue)
dequeue_thread.start()
time.sleep(0.1)
# The dequeue thread blocked when it hit the min_size requirement.
self.assertEqual(len(results), 2)
close_op.run()
dequeue_thread.join()
# Once the queue is closed, the min_size requirement is lifted.
self.assertEqual(len(results), 4)
def testBlockingDequeueFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
close_op = q.close()
dequeued_t = q.dequeue()
finished = [] # Needs to be a mutable type
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
finished.append(True)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(finished), 0)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(finished), 1)
def testBlockingDequeueManyFromClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
enqueue_op.run()
progress = [] # Must be mutable
def dequeue():
self.assertItemsEqual(elems, self.evaluate(dequeued_t))
progress.append(1)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
progress.append(2)
self.assertEqual(len(progress), 0)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
for _ in range(100):
time.sleep(0.01)
if len(progress) == 1:
break
self.assertEqual(len(progress), 1)
time.sleep(0.01)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(progress), 2)
def testBlockingDequeueUpToFromClosedQueueReturnsRemainder(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEqual(3, len(results))
results.extend(self.evaluate(dequeued_t))
self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueUpToSmallerThanMinAfterDequeue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(
capacity=10,
min_after_dequeue=2,
dtypes=dtypes_lib.float32,
shapes=((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_up_to(3)
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEqual(3, len(results))
# min_after_dequeue is 2, we ask for 3 elements, and we end up only
# getting the remaining 1.
results.extend(self.evaluate(dequeued_t))
self.assertEqual(4, len(results))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertItemsEqual(results, elems)
def testBlockingDequeueManyFromClosedQueueWithElementsRemaining(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
dequeued_t = q.dequeue_many(3)
cleanup_dequeue_t = q.dequeue_many(q.size())
enqueue_op.run()
results = []
def dequeue():
results.extend(self.evaluate(dequeued_t))
self.assertEqual(len(results), 3)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
# While the last dequeue failed, we want to insure that it returns
# any elements that it potentially reserved to dequeue. Thus the
# next cleanup should return a single element.
results.extend(self.evaluate(cleanup_dequeue_t))
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
self.assertEqual(len(results), 4)
def testBlockingDequeueManyFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_many(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testBlockingDequeueUpToFromClosedEmptyQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
close_op = q.close()
dequeued_t = q.dequeue_up_to(4)
def dequeue():
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.OutOfRangeError,
"is closed and has insufficient"):
self.evaluate(dequeued_t)
dequeue_thread = self.checkedThread(target=dequeue)
dequeue_thread.start()
# The close_op should run after the dequeue_thread has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
close_op.run()
dequeue_thread.join()
def testEnqueueToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 4, dtypes_lib.float32)
enqueue_op = q.enqueue((10.0,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testEnqueueManyToClosedQueue(self):
with self.cached_session():
q = data_flow_ops.RandomShuffleQueue(10, 5, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
close_op = q.close()
enqueue_op.run()
close_op.run()
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.CancelledError, "is closed"):
enqueue_op.run()
def testBlockingEnqueueToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0], results)
# There wasn't room for 50.0 in the queue when the first element was
# dequeued.
self.assertNotEqual(50.0, results[0])
thread.join()
def testBlockingEnqueueManyToFullQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
dequeued_t = q.dequeue()
enqueue_op.run()
def blocking_enqueue():
self.evaluate(blocking_enqueue_op)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The dequeue ops should run after the blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
for _ in elems:
time.sleep(0.01)
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
results.append(dequeued_t.eval())
self.assertItemsEqual(elems + [50.0, 60.0], results)
# There wasn't room for 50.0 or 60.0 in the queue when the first
# element was dequeued.
self.assertNotEqual(50.0, results[0])
self.assertNotEqual(60.0, results[0])
# Similarly for 60.0 and the second element.
self.assertNotEqual(60.0, results[1])
thread.join()
def testBlockingEnqueueToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0, 40.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue((50.0,))
dequeued_t = q.dequeue()
close_op = q.close()
enqueue_op.run()
def blocking_enqueue():
# Expect the operation to succeed since it will complete
# before the queue is closed.
self.evaluate(blocking_enqueue_op)
# Expect the operation to fail due to the queue being closed.
with self.assertRaisesRegex(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# The close_op should run after the first blocking_enqueue_op has blocked.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Wait for the close op to block before unblocking the enqueue.
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
results = []
# Dequeue to unblock the first blocking_enqueue_op, after which the
# close will complete.
results.append(dequeued_t.eval())
self.assertTrue(results[0] in elems)
thread2.join()
thread1.join()
def testBlockingEnqueueManyToClosedQueue(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(4, 0, dtypes_lib.float32, ((),))
elems = [10.0, 20.0, 30.0]
enqueue_op = q.enqueue_many((elems,))
blocking_enqueue_op = q.enqueue_many(([50.0, 60.0],))
close_op = q.close()
size_t = q.size()
enqueue_op.run()
self.assertEqual(size_t.eval(), 3)
def blocking_enqueue():
# This will block until the dequeue after the close.
self.evaluate(blocking_enqueue_op)
thread1 = self.checkedThread(target=blocking_enqueue)
thread1.start()
# First blocking_enqueue_op of blocking_enqueue has enqueued 1 of 2
# elements, and is blocked waiting for one more element to be dequeue.
for i in range(50):
queue_size = self.evaluate(size_t)
if queue_size == 4:
break
elif i == 49:
self.fail(
"Blocking enqueue op did not execute within the expected time.")
time.sleep(0.1)
def blocking_close():
self.evaluate(close_op)
thread2 = self.checkedThread(target=blocking_close)
thread2.start()
# Unblock the first blocking_enqueue_op in blocking_enqueue.
q.dequeue().eval()
thread2.join()
thread1.join()
# At this point the close operation will complete, so the next enqueue
# will fail.
with self.assertRaisesRegex(errors_impl.CancelledError, "closed"):
self.evaluate(blocking_enqueue_op)
def testSharedQueueSameSession(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1.enqueue((10.0,)).run()
# TensorFlow TestCase adds a default graph seed (=87654321). We check if
# the seed computed from the default graph seed is reproduced.
seed = 887634792
q2 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=seed)
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q2.dequeue().eval(), 10.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
q2.enqueue((20.0,)).run()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
self.assertEqual(q1.dequeue().eval(), 20.0)
self.assertEqual(q1_size_t.eval(), 0)
self.assertEqual(q2_size_t.eval(), 0)
def testSharedQueueSameSessionGraphSeedNone(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(
1,
0,
dtypes_lib.float32, ((),),
shared_name="shared_queue",
seed=98765432)
q1.enqueue((10.0,)).run()
# If both graph and op seeds are not provided, the default value must be
# used, and in case a shared queue is already created, the second queue op
# must accept any previous seed value.
random_seed.set_random_seed(None)
q2 = data_flow_ops.RandomShuffleQueue(
1, 0, dtypes_lib.float32, ((),), shared_name="shared_queue")
q1_size_t = q1.size()
q2_size_t = q2.size()
self.assertEqual(q1_size_t.eval(), 1)
self.assertEqual(q2_size_t.eval(), 1)
def testIncompatibleSharedQueueErrors(self):
with self.cached_session():
q_a_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_a")
q_a_2 = data_flow_ops.RandomShuffleQueue(
15, 5, dtypes_lib.float32, shared_name="q_a")
q_a_1.queue_ref.op.run()
with self.assertRaisesOpError("capacity"):
q_a_2.queue_ref.op.run()
q_b_1 = data_flow_ops.RandomShuffleQueue(
10, 0, dtypes_lib.float32, shared_name="q_b")
q_b_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_b")
q_b_1.queue_ref.op.run()
with self.assertRaisesOpError("min_after_dequeue"):
q_b_2.queue_ref.op.run()
q_c_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_c")
q_c_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, shared_name="q_c")
q_c_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_c_2.queue_ref.op.run()
q_d_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_d")
q_d_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_d")
q_d_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_d_2.queue_ref.op.run()
q_e_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_e")
q_e_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_e")
q_e_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_e_2.queue_ref.op.run()
q_f_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 3)], shared_name="q_f")
q_f_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shapes=[(1, 1, 2, 4)], shared_name="q_f")
q_f_1.queue_ref.op.run()
with self.assertRaisesOpError("component shapes"):
q_f_2.queue_ref.op.run()
q_g_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, shared_name="q_g")
q_g_2 = data_flow_ops.RandomShuffleQueue(
10, 5, (dtypes_lib.float32, dtypes_lib.int32), shared_name="q_g")
q_g_1.queue_ref.op.run()
with self.assertRaisesOpError("component types"):
q_g_2.queue_ref.op.run()
q_h_1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=12, shared_name="q_h")
q_h_2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.float32, seed=21, shared_name="q_h")
q_h_1.queue_ref.op.run()
with self.assertRaisesOpError("random seeds"):
q_h_2.queue_ref.op.run()
def testSelectQueue(self):
with self.cached_session():
num_queues = 10
qlist = []
for _ in range(num_queues):
qlist.append(
data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32))
# Enqueue/Dequeue into a dynamically selected queue
for _ in range(20):
index = np.random.randint(num_queues)
q = data_flow_ops.RandomShuffleQueue.from_list(index, qlist)
q.enqueue((10.,)).run()
self.assertEqual(q.dequeue().eval(), 10.0)
def testSelectQueueOutOfRange(self):
with self.cached_session():
q1 = data_flow_ops.RandomShuffleQueue(10, 0, dtypes_lib.float32)
q2 = data_flow_ops.RandomShuffleQueue(15, 0, dtypes_lib.float32)
enq_q = data_flow_ops.RandomShuffleQueue.from_list(3, [q1, q2])
with self.assertRaisesOpError("is not in"):
enq_q.dequeue().eval()
def _blockingDequeue(self, sess, dequeue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_op)
def _blockingDequeueMany(self, sess, dequeue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_many_op)
def _blockingDequeueUpTo(self, sess, dequeue_up_to_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(dequeue_up_to_op)
def _blockingEnqueue(self, sess, enqueue_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_op)
def _blockingEnqueueMany(self, sess, enqueue_many_op):
with self.assertRaisesOpError("was cancelled"):
self.evaluate(enqueue_many_op)
def testResetOfBlockingOperation(self):
# We need each thread to keep its own device stack or the device scopes
# won't be properly nested.
ops.get_default_graph().switch_to_thread_local()
with self.cached_session() as sess:
q_empty = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, (
(),))
dequeue_op = q_empty.dequeue()
dequeue_many_op = q_empty.dequeue_many(1)
dequeue_up_to_op = q_empty.dequeue_up_to(1)
q_full = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.float32, ((),))
sess.run(q_full.enqueue_many(([1.0, 2.0, 3.0, 4.0, 5.0],)))
enqueue_op = q_full.enqueue((6.0,))
enqueue_many_op = q_full.enqueue_many(([6.0],))
threads = [
self.checkedThread(
self._blockingDequeue, args=(sess, dequeue_op)),
self.checkedThread(
self._blockingDequeueMany, args=(sess, dequeue_many_op)),
self.checkedThread(
self._blockingDequeueUpTo, args=(sess, dequeue_up_to_op)),
self.checkedThread(
self._blockingEnqueue, args=(sess, enqueue_op)),
self.checkedThread(
self._blockingEnqueueMany, args=(sess, enqueue_many_op))
]
for t in threads:
t.start()
time.sleep(0.1)
sess.close() # Will cancel the blocked operations.
for t in threads:
t.join()
def testDequeueManyInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_many(5)
deq2 = q2.dequeue_many(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueUpToInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue_up_to(5)
deq2 = q2.dequeue_up_to(5)
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
results[0].extend(deq1.eval())
results[1].extend(deq2.eval())
q1.close().run()
q2.close().run()
results[2].extend(deq1.eval())
results[3].extend(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testDequeueInDifferentOrders(self):
with self.cached_session():
# Specify seeds to make the test deterministic
# (https://en.wikipedia.org/wiki/Taxicab_number).
q1 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=1729)
q2 = data_flow_ops.RandomShuffleQueue(
10, 5, dtypes_lib.int32, ((),), seed=87539319)
enq1 = q1.enqueue_many(([1, 2, 3, 4, 5],))
enq2 = q2.enqueue_many(([1, 2, 3, 4, 5],))
deq1 = q1.dequeue()
deq2 = q2.dequeue()
enq1.run()
enq1.run()
enq2.run()
enq2.run()
results = [[], [], [], []]
for _ in range(5):
results[0].append(deq1.eval())
results[1].append(deq2.eval())
q1.close().run()
q2.close().run()
for _ in range(5):
results[2].append(deq1.eval())
results[3].append(deq2.eval())
# No two should match
for i in range(1, 4):
for j in range(i):
self.assertNotEqual(results[i], results[j])
def testBigEnqueueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(5, 0, dtypes_lib.int32, ((),))
elem = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
enq = q.enqueue_many((elem,))
deq = q.dequeue()
size_op = q.size()
enq_done = []
def blocking_enqueue():
enq_done.append(False)
# This will fill the queue and then block until enough dequeues happen.
self.evaluate(enq)
enq_done.append(True)
thread = self.checkedThread(target=blocking_enqueue)
thread.start()
# The enqueue should start and then block.
results = []
results.append(deq.eval()) # Will only complete after the enqueue starts.
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
for _ in range(3):
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 1)
self.assertEqual(self.evaluate(size_op), 5)
# This dequeue will unblock the thread.
results.append(deq.eval())
time.sleep(0.1)
self.assertEqual(len(enq_done), 2)
thread.join()
for i in range(5):
self.assertEqual(size_op.eval(), 5 - i)
results.append(deq.eval())
self.assertEqual(size_op.eval(), 5 - i - 1)
self.assertItemsEqual(elem, results)
def testBigDequeueMany(self):
with self.cached_session() as sess:
q = data_flow_ops.RandomShuffleQueue(2, 0, dtypes_lib.int32, ((),))
elem = np.arange(4, dtype=np.int32)
enq_list = [q.enqueue((e,)) for e in elem]
deq = q.dequeue_many(4)
results = []
def blocking_dequeue():
# Will only complete after 4 enqueues complete.
results.extend(self.evaluate(deq))
thread = self.checkedThread(target=blocking_dequeue)
thread.start()
# The dequeue should start and then block.
for enq in enq_list:
# TODO(mrry): Figure out how to do this without sleeping.
time.sleep(0.1)
self.assertEqual(len(results), 0)
self.evaluate(enq)
# Enough enqueued to unblock the dequeue
thread.join()
self.assertItemsEqual(elem, results)
if __name__ == "__main__":
test.main()
| RandomShuffleQueueTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeAlias2.py | {
"start": 223,
"end": 269
} | class ____(Base):
pass
Mix = Union[A, B]
| B |
python | kamyu104__LeetCode-Solutions | Python/reverse-substrings-between-each-pair-of-parentheses.py | {
"start": 658,
"end": 1091
} | class ____(object):
def reverseParentheses(self, s):
"""
:type s: str
:rtype: str
"""
stk = [[]]
for c in s:
if c == '(':
stk.append([])
elif c == ')':
end = stk.pop()
end.reverse()
stk[-1].extend(end)
else:
stk[-1].append(c)
return "".join(stk.pop())
| Solution2 |
python | kamyu104__LeetCode-Solutions | Python/cycle-length-queries-in-a-tree.py | {
"start": 45,
"end": 480
} | class ____(object):
def cycleLengthQueries(self, n, queries):
"""
:type n: int
:type queries: List[List[int]]
:rtype: List[int]
"""
result = []
for x, y in queries:
cnt = 1
while x != y:
if x > y:
x, y = y, x
y //= 2
cnt += 1
result.append(cnt)
return result
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 29048,
"end": 33367
} | class ____(IncrementalMixin, GithubStream):
"""
API docs: https://docs.github.com/en/rest/commits/commits?apiVersion=2022-11-28#list-commits
Pull commits from each branch of each repository, tracking state for each branch
"""
primary_key = "sha"
cursor_field = "created_at"
slice_keys = ["repository", "branch"]
def __init__(self, branches_to_pull: List[str], **kwargs):
super().__init__(**kwargs)
kwargs.pop("start_date")
self.branches_to_repos = {}
self.branches_to_pull = set(branches_to_pull)
self.branches_stream = Branches(**kwargs)
self.repositories_stream = RepositoryStats(**kwargs)
def request_params(self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = super(IncrementalMixin, self).request_params(stream_state=stream_state, stream_slice=stream_slice, **kwargs)
since = self.get_starting_point(stream_state=stream_state, stream_slice=stream_slice)
if since:
params["since"] = since
params["sha"] = stream_slice["branch"]
return params
def stream_slices(self, **kwargs) -> Iterable[Optional[Mapping[str, Any]]]:
self._validate_branches_to_pull()
for stream_slice in super().stream_slices(**kwargs):
repository = stream_slice["repository"]
for branch in self.branches_to_repos.get(repository, []):
yield {"branch": branch, "repository": repository}
def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]:
record = super().transform(record=record, stream_slice=stream_slice)
# Record of the `commits` stream doesn't have an updated_at/created_at field at the top level (so we could
# just write `record["updated_at"]` or `record["created_at"]`). Instead each record has such value in
# `commit.author.date`. So the easiest way is to just enrich the record returned from API with top level
# field `created_at` and use it as cursor_field.
# Include the branch in the record
record["created_at"] = record["commit"]["author"]["date"]
record["branch"] = stream_slice["branch"]
return record
def _get_updated_state(self, current_stream_state: MutableMapping[str, Any], latest_record: Mapping[str, Any]):
repository = latest_record["repository"]
branch = latest_record["branch"]
updated_state = latest_record[self.cursor_field]
stream_state_value = current_stream_state.get(repository, {}).get(branch, {}).get(self.cursor_field)
if stream_state_value:
updated_state = max(updated_state, stream_state_value)
current_stream_state.setdefault(repository, {}).setdefault(branch, {})[self.cursor_field] = updated_state
return current_stream_state
def _validate_branches_to_pull(self):
# Get the default branch for each repository
default_branches = {}
for stream_slice in self.repositories_stream.stream_slices(sync_mode=SyncMode.full_refresh):
for repo_stats in self.repositories_stream.read_records(stream_slice=stream_slice, sync_mode=SyncMode.full_refresh):
default_branches[repo_stats["full_name"]] = repo_stats["default_branch"]
all_branches = []
for stream_slice in self.branches_stream.stream_slices(sync_mode=SyncMode.full_refresh):
for branch in self.branches_stream.read_records(sync_mode=SyncMode.full_refresh, stream_slice=stream_slice):
all_branches.append(f"{branch['repository']}/{branch['name']}")
# Create mapping of repository to list of branches to pull commits for
# If no branches are specified for a repo, use its default branch
for repo in self.repositories:
repo_branches = []
for branch in self.branches_to_pull:
branch_parts = branch.split("/", 2)
if "/".join(branch_parts[:2]) == repo and branch in all_branches:
repo_branches.append(branch_parts[-1])
if not repo_branches:
repo_branches = [default_branches[repo]]
self.branches_to_repos[repo] = repo_branches
| Commits |
python | doocs__leetcode | solution/1300-1399/1395.Count Number of Teams/Solution.py | {
"start": 0,
"end": 325
} | class ____:
def numTeams(self, rating: List[int]) -> int:
ans, n = 0, len(rating)
for i, b in enumerate(rating):
l = sum(a < b for a in rating[:i])
r = sum(c > b for c in rating[i + 1 :])
ans += l * r
ans += (i - l) * (n - i - 1 - r)
return ans
| Solution |
python | realpython__materials | celery-async-tasks/source_code_final/feedback/forms.py | {
"start": 79,
"end": 412
} | class ____(forms.Form):
email = forms.EmailField(label="Email Address")
message = forms.CharField(
label="Message", widget=forms.Textarea(attrs={"rows": 5})
)
def send_email(self):
send_feedback_email_task.delay(
self.cleaned_data["email"], self.cleaned_data["message"]
)
| FeedbackForm |
python | realpython__materials | python-type-checking/hearts.py | {
"start": 976,
"end": 2408
} | class ____(Sequence[Card]):
def __init__(self, cards: List[Card]) -> None:
self.cards = cards
@classmethod
def create(cls, shuffle: bool = False) -> "Deck":
"""Create a new deck of 52 cards"""
cards = [Card(s, r) for r in Card.RANKS for s in Card.SUITS]
if shuffle:
random.shuffle(cards)
return cls(cards)
def play(self, card: Card) -> None:
"""Play one card by removing it from the deck"""
self.cards.remove(card)
def deal(self, num_hands: int) -> Tuple["Deck", ...]:
"""Deal the cards in the deck into a number of hands"""
return tuple(self[i::num_hands] for i in range(num_hands))
def add_cards(self, cards: List[Card]) -> None:
"""Add a list of cards to the deck"""
self.cards += cards
def __len__(self) -> int:
return len(self.cards)
@overload
def __getitem__(self, key: int) -> Card: ...
@overload
def __getitem__(self, key: slice) -> "Deck": ...
def __getitem__(self, key: Union[int, slice]) -> Union[Card, "Deck"]:
if isinstance(key, int):
return self.cards[key]
elif isinstance(key, slice):
cls = self.__class__
return cls(self.cards[key])
else:
raise TypeError("Indices must be integers or slices")
def __repr__(self) -> str:
return " ".join(repr(c) for c in self.cards)
| Deck |
python | django__django | tests/admin_utils/models.py | {
"start": 1135,
"end": 1417
} | class ____(models.Model):
num = models.PositiveSmallIntegerField()
parent = models.ForeignKey("self", models.DB_CASCADE, null=True)
def __str__(self):
return str(self.num)
class Meta:
required_db_features = {"supports_on_delete_db_cascade"}
| DBCascade |
python | nedbat__coveragepy | tests/test_concurrency.py | {
"start": 12309,
"end": 14746
} | class ____(CoverageTest):
"""Tests of what happens if the requested concurrency isn't installed."""
@pytest.mark.parametrize("module", ["eventlet", "gevent", "greenlet"])
def test_missing_module(self, module: str) -> None:
self.make_file("prog.py", "a = 1")
sys.modules[module] = None # type: ignore[assignment]
msg = rf"Couldn't trace with concurrency={module}, the module isn't installed."
with pytest.raises(ConfigError, match=msg):
self.command_line(f"run --concurrency={module} prog.py")
SQUARE_OR_CUBE_WORK = """
def work(x):
# Use different lines in different subprocesses.
if x % 2:
y = x*x
else:
y = x*x*x
return y
"""
SUM_RANGE_WORK = """
def work(x):
return sum_range((x+1)*100)
"""
MULTI_CODE = """
# Above this will be a definition of work().
import multiprocessing
import os
import time
import sys
def process_worker_main(args):
# Need to pause, or the tasks go too quickly, and some processes
# in the pool don't get any work, and then don't record data.
ret = work(*args)
time.sleep(0.1)
return os.getpid(), ret
if __name__ == "__main__": # pragma: no branch
# This if is on a single line so we can get 100% coverage
# even if we have no arguments.
if len(sys.argv) > 1: multiprocessing.set_start_method(sys.argv[1])
pool = multiprocessing.Pool({NPROCS})
inputs = [(x,) for x in range({UPTO})]
outputs = pool.imap_unordered(process_worker_main, inputs)
pids = set()
total = 0
for pid, sq in outputs:
pids.add(pid)
total += sq
print(f"{{len(pids)}} pids, {{total = }}")
pool.close()
pool.join()
"""
@pytest.fixture(params=["fork", "spawn"], name="start_method")
def start_method_fixture(request: pytest.FixtureRequest) -> str:
"""Parameterized fixture to choose the start_method for multiprocessing."""
start_method: str = request.param
if start_method not in multiprocessing.get_all_start_methods():
# Windows doesn't support "fork".
pytest.skip(f"start_method={start_method} not supported here")
return start_method
# Sometimes a test fails due to inherent randomness. Try more times.
# @pytest.mark.flaky(max_runs=30)
| WithoutConcurrencyModuleTest |
python | plotly__plotly.py | plotly/io/_base_renderers.py | {
"start": 1595,
"end": 1982
} | class ____(MimetypeRenderer):
"""
Renderer to display figures as JSON hierarchies. This renderer is
compatible with JupyterLab and VSCode.
mime type: 'application/json'
"""
def to_mimebundle(self, fig_dict):
value = json.loads(to_json(fig_dict, validate=False, remove_uids=False))
return {"application/json": value}
# Plotly mimetype
| JsonRenderer |
python | scipy__scipy | scipy/signal/tests/test_windows.py | {
"start": 26380,
"end": 27576
} | class ____:
def test_basic(self, xp):
xp_assert_close(windows.nuttall(6, sym=False, xp=xp),
xp.asarray([0.0003628, 0.0613345, 0.5292298, 1.0, 0.5292298,
0.0613345], dtype=xp.float64))
xp_assert_close(windows.nuttall(7, sym=False, xp=xp),
xp.asarray([0.0003628, 0.03777576895352025,
0.3427276199688195,
0.8918518610776603, 0.8918518610776603,
0.3427276199688196, 0.0377757689535203],
dtype=xp.float64))
xp_assert_close(windows.nuttall(6, xp=xp),
xp.asarray([0.0003628, 0.1105152530498718,
0.7982580969501282, 0.7982580969501283,
0.1105152530498719, 0.0003628], dtype=xp.float64))
xp_assert_close(windows.nuttall(7, True, xp=xp),
xp.asarray([0.0003628, 0.0613345, 0.5292298, 1.0,
0.5292298, 0.0613345, 0.0003628], dtype=xp.float64))
@make_xp_test_case(windows.parzen)
| TestNuttall |
python | apache__airflow | devel-common/src/sphinx_exts/operators_and_hooks_ref.py | {
"start": 12950,
"end": 14865
} | class ____(Directive):
"""The base directive for OperatorsHooksReferenceDirective and TransfersReferenceDirective"""
optional_arguments = 1
option_spec = {"tags": directives.unchanged, "header-separator": directives.unchanged_required}
def run(self):
tags_arg = self.options.get("tags")
tags = {t.strip() for t in tags_arg.split(",")} if tags_arg else None
header_separator = self.options.get("header-separator")
new_content = self.render_content(tags=tags, header_separator=header_separator)
with switch_source_input(self.state, self.content):
new_content = StringList(new_content.splitlines(), source="")
node: Element = nodes.section()
# necessary so that the child nodes get the right source/line set
node.document = self.state.document
nested_parse_with_titles(self.state, new_content, node)
# record all filenames as dependencies -- this will at least
# partially make automatic invalidation possible
for filepath in get_all_provider_yaml_paths():
self.state.document.settings.record_dependencies.add(filepath)
return node.children
def render_content(self, *, tags: set[str] | None, header_separator: str = DEFAULT_HEADER_SEPARATOR):
"""Return content in RST format"""
raise NotImplementedError("You need to override render_content method.")
def _common_render_list_content(*, header_separator: str, resource_type: str, template: str):
tabular_data = {
provider["package-name"]: {
"name": provider["name"],
resource_type: provider.get(resource_type, []),
}
for provider in load_package_data()
if provider.get(resource_type) is not None
}
return _render_template(template, items=tabular_data, header_separator=header_separator)
| BaseJinjaReferenceDirective |
python | realpython__materials | python-property/currency_v2.py | {
"start": 23,
"end": 565
} | class ____:
def __init__(self, units, cents):
self._total_cents = units * CENTS_PER_UNIT + cents
@property
def units(self):
return self._total_cents // CENTS_PER_UNIT
@units.setter
def units(self, value):
self._total_cents = self.cents + value * CENTS_PER_UNIT
@property
def cents(self):
return self._total_cents % CENTS_PER_UNIT
@cents.setter
def cents(self, value):
self._total_cents = self.units * CENTS_PER_UNIT + value
# Currency implementation...
| Currency |
python | prakhar1989__Algorithms | tests/lcs_test.py | {
"start": 28,
"end": 405
} | class ____(unittest.TestCase):
def test_lcs(self):
self.assertEqual(lcs.longest_common_subsequence("ABCD", "BBDABXYDCCAD"), (4, "ABCD"))
self.assertEqual(lcs.longest_common_subsequence("BANANA", "ATANA"), (4, "AANA"))
self.assertEqual(lcs.longest_common_subsequence("ABCDEFG", "BDGK"), (3, "BDG"))
if __name__ == "__main__":
unittest.main()
| TestLCS |
python | cython__cython | runtests.py | {
"start": 85719,
"end": 86508
} | class ____(object):
# This is an exclude selector so it can override the (include) selectors.
# It may not provide uniform distribution (in time or count), but is a
# determanistic partition of the tests which is important.
# Random seed to improve the hash distribution.
_seed = base64.b64decode(b'2ged1EtsGz/GkisJr22UcLeP6n9XIaA5Vby2wM49Wvg=')
def __init__(self, shard_num, shard_count):
self.shard_num = shard_num
self.shard_count = shard_count
def __call__(self, testname, tags=None, _hash=zlib.crc32):
# Cannot use simple hash() here as shard processes might use different hash seeds.
# CRC32 is fast and simple.
return _hash(self._seed + testname.encode()) % self.shard_count != self.shard_num
| ShardExcludeSelector |
python | pytorch__pytorch | test/cpp/aoti_inference/test.py | {
"start": 138,
"end": 559
} | class ____(torch.nn.Module):
def __init__(self, device, size=4):
super().__init__()
self.w_pre = torch.randn(size, size, device=device)
self.w_add = torch.randn(size, size, device=device)
def forward(self, x):
w_transpose = torch.transpose(self.w_pre, 0, 1)
w_relu = torch.nn.functional.relu(w_transpose)
w = w_relu + self.w_add
return torch.matmul(x, w)
| Net |
python | pypa__hatch | tests/backend/builders/test_wheel.py | {
"start": 11846,
"end": 15389
} | class ____:
def test_default(self, isolation):
builder = WheelBuilder(str(isolation))
assert builder.config.shared_data == builder.config.shared_data == {}
def test_invalid_type(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-data": 42}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(TypeError, match="Field `tool.hatch.build.targets.wheel.shared-data` must be a mapping"):
_ = builder.config.shared_data
def test_absolute(self, isolation):
config = {
"tool": {
"hatch": {"build": {"targets": {"wheel": {"shared-data": {str(isolation / "source"): "/target/"}}}}}
}
}
builder = WheelBuilder(str(isolation), config=config)
assert builder.config.shared_data == {str(isolation / "source"): "target"}
def test_relative(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-data": {"../source": "/target/"}}}}}}}
builder = WheelBuilder(str(isolation / "foo"), config=config)
assert builder.config.shared_data == {str(isolation / "source"): "target"}
def test_source_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-data": {"": "/target/"}}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(
ValueError,
match="Source #1 in field `tool.hatch.build.targets.wheel.shared-data` cannot be an empty string",
):
_ = builder.config.shared_data
def test_relative_path_not_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-data": {"source": 0}}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(
TypeError,
match="Path for source `source` in field `tool.hatch.build.targets.wheel.shared-data` must be a string",
):
_ = builder.config.shared_data
def test_relative_path_empty_string(self, isolation):
config = {"tool": {"hatch": {"build": {"targets": {"wheel": {"shared-data": {"source": ""}}}}}}}
builder = WheelBuilder(str(isolation), config=config)
with pytest.raises(
ValueError,
match=(
"Path for source `source` in field `tool.hatch.build.targets.wheel.shared-data` "
"cannot be an empty string"
),
):
_ = builder.config.shared_data
def test_order(self, isolation):
config = {
"tool": {
"hatch": {
"build": {
"targets": {
"wheel": {
"shared-data": {
"../very-nested": "target1/embedded",
"../source1": "/target2/",
"../source2": "/target1/",
}
}
}
}
}
}
}
builder = WheelBuilder(str(isolation / "foo"), config=config)
assert builder.config.shared_data == {
str(isolation / "source2"): "target1",
str(isolation / "very-nested"): f"target1{os.sep}embedded",
str(isolation / "source1"): "target2",
}
| TestSharedData |
python | pydantic__pydantic | pydantic/_internal/_namespace_utils.py | {
"start": 5443,
"end": 12878
} | class ____:
"""A class responsible for the namespaces resolving logic for annotations evaluation.
This class handles the namespace logic when evaluating annotations mainly for class objects.
It holds a stack of classes that are being inspected during the core schema building,
and the `types_namespace` property exposes the globals and locals to be used for
type annotation evaluation. Additionally -- if no class is present in the stack -- a
fallback globals and locals can be provided using the `namespaces_tuple` argument
(this is useful when generating a schema for a simple annotation, e.g. when using
`TypeAdapter`).
The namespace creation logic is unfortunately flawed in some cases, for backwards
compatibility reasons and to better support valid edge cases. See the description
for the `parent_namespace` argument and the example for more details.
Args:
namespaces_tuple: The default globals and locals to use if no class is present
on the stack. This can be useful when using the `GenerateSchema` class
with `TypeAdapter`, where the "type" being analyzed is a simple annotation.
parent_namespace: An optional parent namespace that will be added to the locals
with the lowest priority. For a given class defined in a function, the locals
of this function are usually used as the parent namespace:
```python {lint="skip" test="skip"}
from pydantic import BaseModel
def func() -> None:
SomeType = int
class Model(BaseModel):
f: 'SomeType'
# when collecting fields, an namespace resolver instance will be created
# this way:
# ns_resolver = NsResolver(parent_namespace={'SomeType': SomeType})
```
For backwards compatibility reasons and to support valid edge cases, this parent
namespace will be used for *every* type being pushed to the stack. In the future,
we might want to be smarter by only doing so when the type being pushed is defined
in the same module as the parent namespace.
Example:
```python {lint="skip" test="skip"}
ns_resolver = NsResolver(
parent_namespace={'fallback': 1},
)
class Sub:
m: 'Model'
class Model:
some_local = 1
sub: Sub
ns_resolver = NsResolver()
# This is roughly what happens when we build a core schema for `Model`:
with ns_resolver.push(Model):
ns_resolver.types_namespace
#> NamespacesTuple({'Sub': Sub}, {'Model': Model, 'some_local': 1})
# First thing to notice here, the model being pushed is added to the locals.
# Because `NsResolver` is being used during the model definition, it is not
# yet added to the globals. This is useful when resolving self-referencing annotations.
with ns_resolver.push(Sub):
ns_resolver.types_namespace
#> NamespacesTuple({'Sub': Sub}, {'Sub': Sub, 'Model': Model})
# Second thing to notice: `Sub` is present in both the globals and locals.
# This is not an issue, just that as described above, the model being pushed
# is added to the locals, but it happens to be present in the globals as well
# because it is already defined.
# Third thing to notice: `Model` is also added in locals. This is a backwards
# compatibility workaround that allows for `Sub` to be able to resolve `'Model'`
# correctly (as otherwise models would have to be rebuilt even though this
# doesn't look necessary).
```
"""
def __init__(
self,
namespaces_tuple: NamespacesTuple | None = None,
parent_namespace: MappingNamespace | None = None,
) -> None:
self._base_ns_tuple = namespaces_tuple or NamespacesTuple({}, {})
self._parent_ns = parent_namespace
self._types_stack: list[type[Any] | TypeAliasType] = []
@cached_property
def types_namespace(self) -> NamespacesTuple:
"""The current global and local namespaces to be used for annotations evaluation."""
if not self._types_stack:
# TODO: should we merge the parent namespace here?
# This is relevant for TypeAdapter, where there are no types on the stack, and we might
# need access to the parent_ns. Right now, we sidestep this in `type_adapter.py` by passing
# locals to both parent_ns and the base_ns_tuple, but this is a bit hacky.
# we might consider something like:
# if self._parent_ns is not None:
# # Hacky workarounds, see class docstring:
# # An optional parent namespace that will be added to the locals with the lowest priority
# locals_list: list[MappingNamespace] = [self._parent_ns, self._base_ns_tuple.locals]
# return NamespacesTuple(self._base_ns_tuple.globals, LazyLocalNamespace(*locals_list))
return self._base_ns_tuple
typ = self._types_stack[-1]
globalns = get_module_ns_of(typ)
locals_list: list[MappingNamespace] = []
# Hacky workarounds, see class docstring:
# An optional parent namespace that will be added to the locals with the lowest priority
if self._parent_ns is not None:
locals_list.append(self._parent_ns)
if len(self._types_stack) > 1:
first_type = self._types_stack[0]
locals_list.append({first_type.__name__: first_type})
# Adding `__type_params__` *before* `vars(typ)`, as the latter takes priority
# (see https://github.com/python/cpython/pull/120272).
# TODO `typ.__type_params__` when we drop support for Python 3.11:
type_params: tuple[_TypeVarLike, ...] = getattr(typ, '__type_params__', ())
if type_params:
# Adding `__type_params__` is mostly useful for generic classes defined using
# PEP 695 syntax *and* using forward annotations (see the example in
# https://github.com/python/cpython/issues/114053). For TypeAliasType instances,
# it is way less common, but still required if using a string annotation in the alias
# value, e.g. `type A[T] = 'T'` (which is not necessary in most cases).
locals_list.append({t.__name__: t for t in type_params})
# TypeAliasType instances don't have a `__dict__` attribute, so the check
# is necessary:
if hasattr(typ, '__dict__'):
locals_list.append(vars(typ))
# The `len(self._types_stack) > 1` check above prevents this from being added twice:
locals_list.append({typ.__name__: typ})
return NamespacesTuple(globalns, LazyLocalNamespace(*locals_list))
@contextmanager
def push(self, typ: type[Any] | TypeAliasType, /) -> Generator[None]:
"""Push a type to the stack."""
self._types_stack.append(typ)
# Reset the cached property:
self.__dict__.pop('types_namespace', None)
try:
yield
finally:
self._types_stack.pop()
self.__dict__.pop('types_namespace', None)
| NsResolver |
python | huggingface__transformers | src/transformers/models/detr/modeling_detr.py | {
"start": 3945,
"end": 6398
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` are provided)):
Total loss as a linear combination of a negative log-likehood (cross-entropy) for class prediction and a
bounding box loss. The latter is defined as a linear combination of the L1 loss and the generalized
scale-invariant IoU loss.
loss_dict (`Dict`, *optional*):
A dictionary containing the individual losses. Useful for logging.
logits (`torch.FloatTensor` of shape `(batch_size, num_queries, num_classes + 1)`):
Classification logits (including no-object) for all queries.
pred_boxes (`torch.FloatTensor` of shape `(batch_size, num_queries, 4)`):
Normalized boxes coordinates for all queries, represented as (center_x, center_y, width, height). These
values are normalized in [0, 1], relative to the size of each individual image in the batch (disregarding
possible padding). You can use [`~DetrImageProcessor.post_process_object_detection`] to retrieve the
unnormalized bounding boxes.
auxiliary_outputs (`list[Dict]`, *optional*):
Optional, only returned when auxiliary losses are activated (i.e. `config.auxiliary_loss` is set to `True`)
and labels are provided. It is a list of dictionaries containing the two above keys (`logits` and
`pred_boxes`) for each decoder layer.
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the decoder of the model.
"""
loss: Optional[torch.FloatTensor] = None
loss_dict: Optional[dict] = None
logits: Optional[torch.FloatTensor] = None
pred_boxes: Optional[torch.FloatTensor] = None
auxiliary_outputs: Optional[list[dict]] = None
last_hidden_state: Optional[torch.FloatTensor] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor]] = None
cross_attentions: Optional[tuple[torch.FloatTensor]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Output type of [`DetrForSegmentation`].
"""
)
| DetrObjectDetectionOutput |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/define_relationship_attributes/tutorial001_py310.py | {
"start": 292,
"end": 1975
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id")
team: Team | None = Relationship(back_populates="heroes")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team=team_z_force
)
hero_rusty_man = Hero(
name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
hero_spider_boy.team = team_preventers
session.add(hero_spider_boy)
session.commit()
def main():
create_db_and_tables()
create_heroes()
if __name__ == "__main__":
main()
| Hero |
python | tensorflow__tensorflow | tensorflow/python/tpu/tpu_embedding_v3_utils_test.py | {
"start": 1701,
"end": 9018
} | class ____(test.TestCase, parameterized.TestCase):
def test_unpadding(self):
self.assertAllEqual(
v3_utils.remove_padding_from_sc(
array_ops.ones((4, 5)), variable_shape=(3, 2)
),
array_ops.ones((3, 2)),
)
x = array_ops.reshape(math_ops.range(12), (3, 4))
self.assertAllEqual(
v3_utils.remove_padding_from_sc(x, variable_shape=(2, 2)),
tf_constant([[0, 1], [4, 5]]),
)
self.assertAllEqual(
v3_utils.remove_padding_from_sc(x, variable_shape=(3, 5)),
x,
)
@parameterized.named_parameters(
("one", 8, 4, 4), ("two", 27, 6, 3), ("three", 128, 8, 4)
)
def test_unshuffle_one_table_basic(self, vocab, dim, num_sc):
# input vocab should be multiple of num_sc
self.assertEqual(vocab % num_sc, 0)
x, shards = create_test_table_shards(
TestTable(vocab=vocab, dim=dim, shift=0), num_sc
)
x_sharded = array_ops.concat(shards, axis=0)
self.assertAllEqual(
v3_utils.unshuffle_from_sc_to_cpu(
t=x_sharded,
num_sparse_cores=num_sc,
offset_in_shard=0,
size_in_shard=vocab // num_sc,
shard_rotation=0,
),
x,
)
def test_unshuffle_stacking_basic(self):
num_sc = 4
ta = TestTable(vocab=12, dim=4, shift=0)
tb = TestTable(vocab=32, dim=4, shift=1)
x, x_shards = create_test_table_shards(ta, num_sc)
y, y_shards = create_test_table_shards(tb, num_sc)
stacked_shards = [
array_ops.concat([i, j], axis=0) for i, j in zip(x_shards, y_shards)
]
stacked = array_ops.concat(stacked_shards, axis=0)
self.assertAllEqual(
v3_utils.unshuffle_from_sc_to_cpu(
t=stacked,
num_sparse_cores=num_sc,
offset_in_shard=0,
size_in_shard=ta.vocab // num_sc,
shard_rotation=ta.shift,
),
x,
)
self.assertAllEqual(
v3_utils.unshuffle_from_sc_to_cpu(
t=stacked,
num_sparse_cores=num_sc,
offset_in_shard=ta.vocab // num_sc,
size_in_shard=tb.vocab // num_sc,
shard_rotation=tb.shift,
),
y,
)
def test_unshuffle_stacking_many_tables(self):
num_sc = 4
tables = [
TestTable(vocab=12, dim=4, shift=0),
TestTable(vocab=32, dim=4, shift=1),
TestTable(vocab=32, dim=4, shift=2),
TestTable(vocab=32, dim=4, shift=3),
TestTable(vocab=32, dim=4, shift=4),
TestTable(vocab=32, dim=4, shift=5),
]
u, u_shards = create_test_table_shards(tables[0], num_sc)
v, v_shards = create_test_table_shards(tables[1], num_sc)
w, w_shards = create_test_table_shards(tables[2], num_sc)
x, x_shards = create_test_table_shards(tables[3], num_sc)
y, y_shards = create_test_table_shards(tables[4], num_sc)
z, z_shards = create_test_table_shards(tables[5], num_sc)
stacked_shards = [
array_ops.concat([i, j, k, l, m, n], axis=0)
for i, j, k, l, m, n in zip(
u_shards, v_shards, w_shards, x_shards, y_shards, z_shards
)
]
stacked = array_ops.concat(stacked_shards, axis=0)
self.assertAllEqual(
v3_utils.unshuffle_from_sc_to_cpu(
t=stacked,
num_sparse_cores=num_sc,
offset_in_shard=0,
size_in_shard=tables[0].vocab // num_sc,
shard_rotation=tables[0].shift,
),
u,
)
self.assertAllEqual(
v3_utils.unshuffle_from_sc_to_cpu(
t=stacked,
num_sparse_cores=num_sc,
offset_in_shard=tables[0].vocab // num_sc,
size_in_shard=tables[1].vocab // num_sc,
shard_rotation=tables[1].shift,
),
v,
)
self.assertAllEqual(
v3_utils.unshuffle_from_sc_to_cpu(
t=stacked,
num_sparse_cores=num_sc,
offset_in_shard=(tables[0].vocab + tables[1].vocab) // num_sc,
size_in_shard=tables[2].vocab // num_sc,
shard_rotation=tables[2].shift,
),
w,
)
self.assertAllEqual(
v3_utils.unshuffle_from_sc_to_cpu(
t=stacked,
num_sparse_cores=num_sc,
offset_in_shard=(
tables[0].vocab + tables[1].vocab + tables[2].vocab
)
// num_sc,
size_in_shard=tables[3].vocab // num_sc,
shard_rotation=tables[3].shift,
),
x,
)
self.assertAllEqual(
v3_utils.unshuffle_from_sc_to_cpu(
t=stacked,
num_sparse_cores=num_sc,
offset_in_shard=(
tables[0].vocab
+ tables[1].vocab
+ tables[2].vocab
+ tables[3].vocab
)
// num_sc,
size_in_shard=tables[4].vocab // num_sc,
shard_rotation=tables[4].shift,
),
y,
)
self.assertAllEqual(
v3_utils.unshuffle_from_sc_to_cpu(
t=stacked,
num_sparse_cores=num_sc,
offset_in_shard=(
tables[0].vocab
+ tables[1].vocab
+ tables[2].vocab
+ tables[3].vocab
+ tables[4].vocab
)
// num_sc,
size_in_shard=tables[5].vocab // num_sc,
shard_rotation=tables[5].shift,
),
z,
)
def test_index_mapping_one_table(self):
num_sc = 4
x, shards = create_test_table_shards(
TestTable(vocab=12, dim=4, shift=0), num_sc
)
indices = tf_constant([1, 2, 5, 7, 9])
shard_idx, position_in_shard = v3_utils.map_indices_in_shard(
num_sparse_cores=num_sc,
offset_in_shard=0,
shard_rotation=0,
row_indices=indices,
)
self.assertAllEqual(
shard_idx,
indices % num_sc,
)
self.assertAllEqual(
[x[i] for i in indices],
[shards[j][k] for j, k in zip(shard_idx, position_in_shard)],
)
def test_index_mapping_stacked_tables(self):
num_sc = 4
ta = TestTable(vocab=12, dim=4, shift=0)
tb = TestTable(vocab=32, dim=4, shift=1)
x, x_shards = create_test_table_shards(ta, num_sc)
y, y_shards = create_test_table_shards(tb, num_sc, table_data_start=100)
stacked_shards = [
array_ops.concat([i, j], axis=0) for i, j in zip(x_shards, y_shards)
]
indices_ta = tf_constant([1, 2, 7, 9, 11])
shard_idx, position_in_shard = v3_utils.map_indices_in_shard(
num_sparse_cores=num_sc,
offset_in_shard=0,
shard_rotation=ta.shift,
row_indices=indices_ta,
)
self.assertAllEqual(
[x[i] for i in indices_ta],
[stacked_shards[j][k] for j, k in zip(shard_idx, position_in_shard)],
)
indices_tb = tf_constant([1, 2, 7, 9, 15, 27])
shard_idx, position_in_shard = v3_utils.map_indices_in_shard(
num_sparse_cores=num_sc,
offset_in_shard=ta.vocab // num_sc,
shard_rotation=tb.shift,
row_indices=indices_tb,
)
self.assertAllEqual(
[y[i] for i in indices_tb],
[stacked_shards[j][k] for j, k in zip(shard_idx, position_in_shard)],
)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
test.main()
| TpuEmbeddingV3UtilsTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table25.py | {
"start": 315,
"end": 1641
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table25.xlsx")
def test_create_file_style_is_none(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("C:F", 10.288)
worksheet.add_table("C3:F13", {"style": None})
workbook.close()
self.assertExcelEqual()
def test_create_file_style_is_blank(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("C:F", 10.288)
worksheet.add_table("C3:F13", {"style": ""})
workbook.close()
self.assertExcelEqual()
def test_create_file_style_is_none_str(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.set_column("C:F", 10.288)
worksheet.add_table("C3:F13", {"style": "None"})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | joke2k__faker | tests/providers/test_currency.py | {
"start": 3152,
"end": 3817
} | class ____:
"""Test az_AZ currency provider"""
num_samples = 100
@classmethod
def setup_class(cls):
from faker.providers.currency.az_AZ import Provider as AzAzCurrencyProvider
cls.provider = AzAzCurrencyProvider
cls.currencies = cls.provider.currencies
def test_currency(self, faker, num_samples):
for _ in range(num_samples):
cur = faker.currency()
assert isinstance(cur, tuple) and cur in self.currencies
def test_pricetag(self, faker, num_samples):
for _ in range(num_samples):
pricetag = faker.pricetag()
assert isinstance(pricetag, str)
| TestAzAz |
python | pandas-dev__pandas | pandas/core/strings/accessor.py | {
"start": 4591,
"end": 133744
} | class ____(NoNewAttributesMixin):
"""
Vectorized string functions for Series and Index.
NAs stay NA unless handled otherwise by a particular method.
Patterned after Python's string methods, with some inspiration from
R's stringr package.
Parameters
----------
data : Series or Index
The content of the Series or Index.
See Also
--------
Series.str : Vectorized string functions for Series.
Index.str : Vectorized string functions for Index.
Examples
--------
>>> s = pd.Series(["A_Str_Series"])
>>> s
0 A_Str_Series
dtype: str
>>> s.str.split("_")
0 [A, Str, Series]
dtype: object
>>> s.str.replace("_", "")
0 AStrSeries
dtype: str
"""
# Note: see the docstring in pandas.core.strings.__init__
# for an explanation of the implementation.
# TODO: Dispatch all the methods
# Currently the following are not dispatched to the array
# * cat
# * extractall
def __init__(self, data) -> None:
from pandas.core.arrays.string_ import StringDtype
self._inferred_dtype = self._validate(data)
self._is_categorical = isinstance(data.dtype, CategoricalDtype)
self._is_string = isinstance(data.dtype, StringDtype)
self._data = data
self._index = self._name = None
if isinstance(data, ABCSeries):
self._index = data.index
self._name = data.name
# ._values.categories works for both Series/Index
self._parent = data._values.categories if self._is_categorical else data
# save orig to blow up categoricals to the right type
self._orig = data
self._freeze()
@staticmethod
def _validate(data):
"""
Auxiliary function for StringMethods, infers and checks dtype of data.
This is a "first line of defence" at the creation of the StringMethods-
object, and just checks that the dtype is in the
*union* of the allowed types over all string methods below; this
restriction is then refined on a per-method basis using the decorator
@forbid_nonstring_types (more info in the corresponding docstring).
This really should exclude all series/index with any non-string values,
but that isn't practical for performance reasons until we have a str
dtype (GH 9343 / 13877)
Parameters
----------
data : The content of the Series
Returns
-------
dtype : inferred dtype of data
"""
if isinstance(data, ABCMultiIndex):
raise AttributeError(
"Can only use .str accessor with Index, not MultiIndex"
)
# see _libs/lib.pyx for list of inferred types
allowed_types = ["string", "empty", "bytes", "mixed", "mixed-integer"]
data = extract_array(data)
values = getattr(data, "categories", data) # categorical / normal
inferred_dtype = lib.infer_dtype(values, skipna=True)
if inferred_dtype not in allowed_types:
raise AttributeError(
f"Can only use .str accessor with string values, not {inferred_dtype}"
)
return inferred_dtype
def __getitem__(self, key):
result = self._data.array._str_getitem(key)
return self._wrap_result(result)
def __iter__(self) -> Iterator:
raise TypeError(f"'{type(self).__name__}' object is not iterable")
def _wrap_result(
self,
result,
name=None,
expand: bool | None = None,
fill_value=np.nan,
returns_string: bool = True,
dtype=None,
):
from pandas import (
Index,
MultiIndex,
)
if not hasattr(result, "ndim") or not hasattr(result, "dtype"):
if isinstance(result, ABCDataFrame):
result = result.__finalize__(self._orig, name="str")
return result
assert result.ndim < 3
# We can be wrapping a string / object / categorical result, in which
# case we'll want to return the same dtype as the input.
# Or we can be wrapping a numeric output, in which case we don't want
# to return a StringArray.
# Ideally the array method returns the right array type.
if expand is None:
# infer from ndim if expand is not specified
expand = result.ndim != 1
elif expand is True and not isinstance(self._orig, ABCIndex):
# required when expand=True is explicitly specified
# not needed when inferred
if isinstance(result.dtype, ArrowDtype):
import pyarrow as pa
from pandas.core.arrays.arrow.array import ArrowExtensionArray
value_lengths = pa.compute.list_value_length(result._pa_array)
max_len = pa.compute.max(value_lengths).as_py()
min_len = pa.compute.min(value_lengths).as_py()
if result._hasna:
# ArrowExtensionArray.fillna doesn't work for list scalars
result = ArrowExtensionArray(
result._pa_array.fill_null([None] * max_len)
)
if min_len < max_len:
# append nulls to each scalar list element up to max_len
result = ArrowExtensionArray(
pa.compute.list_slice(
result._pa_array,
start=0,
stop=max_len,
return_fixed_size_list=True,
)
)
if name is None:
name = range(max_len)
result = (
pa.compute.list_flatten(result._pa_array)
.to_numpy()
.reshape(len(result), max_len)
)
result = {
label: ArrowExtensionArray(pa.array(res))
for label, res in zip(name, result.T, strict=True)
}
elif is_object_dtype(result):
def cons_row(x):
if is_list_like(x):
return x
else:
return [x]
result = [cons_row(x) for x in result]
if result and not self._is_string:
# propagate nan values to match longest sequence (GH 18450)
max_len = max(len(x) for x in result)
result = [
x * max_len if len(x) == 0 or x[0] is np.nan else x
for x in result
]
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
if expand is False:
# if expand is False, result should have the same name
# as the original otherwise specified
if name is None:
name = getattr(result, "name", None)
if name is None:
# do not use logical or, _orig may be a DataFrame
# which has "name" column
name = self._orig.name
# Wait until we are sure result is a Series or Index before
# checking attributes (GH 12180)
if isinstance(self._orig, ABCIndex):
# if result is a boolean np.array, return the np.array
# instead of wrapping it into a boolean Index (GH 8875)
if is_bool_dtype(result):
return result
if expand:
result = list(result)
out: Index = MultiIndex.from_tuples(result, names=name)
if out.nlevels == 1:
# We had all tuples of length-one, which are
# better represented as a regular Index.
out = out.get_level_values(0)
return out
else:
return Index(result, name=name, dtype=dtype)
else:
index = self._orig.index
# This is a mess.
_dtype: DtypeObj | str | None = dtype
vdtype = getattr(result, "dtype", None)
if _dtype is not None:
pass
elif self._is_string:
if is_bool_dtype(vdtype):
_dtype = result.dtype
elif returns_string:
_dtype = self._orig.dtype
else:
_dtype = vdtype
elif vdtype is not None:
_dtype = vdtype
if expand:
cons = self._orig._constructor_expanddim
result = cons(result, columns=name, index=index, dtype=_dtype)
else:
# Must be a Series
cons = self._orig._constructor
result = cons(result, name=name, index=index, dtype=_dtype)
result = result.__finalize__(self._orig, method="str")
if name is not None and result.ndim == 1:
# __finalize__ might copy over the original name, but we may
# want the new name (e.g. str.extract).
result.name = name
return result
def _get_series_list(self, others):
"""
Auxiliary function for :meth:`str.cat`. Turn potentially mixed input
into a list of Series (elements without an index must match the length
of the calling Series/Index).
Parameters
----------
others : Series, DataFrame, np.ndarray, list-like or list-like of
Objects that are either Series, Index or np.ndarray (1-dim).
Returns
-------
list of Series
Others transformed into list of Series.
"""
from pandas import (
DataFrame,
Series,
)
# self._orig is either Series or Index
idx = self._orig if isinstance(self._orig, ABCIndex) else self._orig.index
# Generally speaking, all objects without an index inherit the index
# `idx` of the calling Series/Index - i.e. must have matching length.
# Objects with an index (i.e. Series/Index/DataFrame) keep their own.
if isinstance(others, ABCSeries):
return [others]
elif isinstance(others, ABCIndex):
return [Series(others, index=idx, dtype=others.dtype)]
elif isinstance(others, ABCDataFrame):
return [others[x] for x in others]
elif isinstance(others, np.ndarray) and others.ndim == 2:
others = DataFrame(others, index=idx)
return [others[x] for x in others]
elif is_list_like(others, allow_sets=False):
try:
others = list(others) # ensure iterators do not get read twice etc
except TypeError:
# e.g. ser.str, raise below
pass
else:
# in case of list-like `others`, all elements must be
# either Series/Index/np.ndarray (1-dim)...
if all(
isinstance(x, (ABCSeries, ABCIndex, ExtensionArray))
or (isinstance(x, np.ndarray) and x.ndim == 1)
for x in others
):
los: list[Series] = []
while others: # iterate through list and append each element
los = los + self._get_series_list(others.pop(0))
return los
# ... or just strings
elif all(not is_list_like(x) for x in others):
return [Series(others, index=idx)]
raise TypeError(
"others must be Series, Index, DataFrame, np.ndarray "
"or list-like (either containing only strings or "
"containing only objects of type Series/Index/"
"np.ndarray[1-dim])"
)
@forbid_nonstring_types(["bytes", "mixed", "mixed-integer"])
def cat(
self,
others=None,
sep: str | None = None,
na_rep=None,
join: AlignJoin = "left",
) -> str | Series | Index:
"""
Concatenate strings in the Series/Index with given separator.
If `others` is specified, this function concatenates the Series/Index
and elements of `others` element-wise.
If `others` is not passed, then all values in the Series/Index are
concatenated into a single string with a given `sep`.
Parameters
----------
others : Series, Index, DataFrame, np.ndarray or list-like
Series, Index, DataFrame, np.ndarray (one- or two-dimensional) and
other list-likes of strings must have the same length as the
calling Series/Index, with the exception of indexed objects (i.e.
Series/Index/DataFrame) if `join` is not None.
If others is a list-like that contains a combination of Series,
Index or np.ndarray (1-dim), then all elements will be unpacked and
must satisfy the above criteria individually.
If others is None, the method returns the concatenation of all
strings in the calling Series/Index.
sep : str, default ''
The separator between the different elements/columns. By default
the empty string `''` is used.
na_rep : str or None, default None
Representation that is inserted for all missing values:
- If `na_rep` is None, and `others` is None, missing values in the
Series/Index are omitted from the result.
- If `na_rep` is None, and `others` is not None, a row containing a
missing value in any of the columns (before concatenation) will
have a missing value in the result.
join : {'left', 'right', 'outer', 'inner'}, default 'left'
Determines the join-style between the calling Series/Index and any
Series/Index/DataFrame in `others` (objects without an index need
to match the length of the calling Series/Index). To disable
alignment, use `.values` on any Series/Index/DataFrame in `others`.
Returns
-------
str, Series or Index
If `others` is None, `str` is returned, otherwise a `Series/Index`
(same type as caller) of objects is returned.
See Also
--------
split : Split each string in the Series/Index.
join : Join lists contained as elements in the Series/Index.
Examples
--------
When not passing `others`, all values are concatenated into a single
string:
>>> s = pd.Series(["a", "b", np.nan, "d"])
>>> s.str.cat(sep=" ")
'a b d'
By default, NA values in the Series are ignored. Using `na_rep`, they
can be given a representation:
>>> s.str.cat(sep=" ", na_rep="?")
'a b ? d'
If `others` is specified, corresponding values are concatenated with
the separator. Result will be a Series of strings.
>>> s.str.cat(["A", "B", "C", "D"], sep=",")
0 a,A
1 b,B
2 NaN
3 d,D
dtype: str
Missing values will remain missing in the result, but can again be
represented using `na_rep`
>>> s.str.cat(["A", "B", "C", "D"], sep=",", na_rep="-")
0 a,A
1 b,B
2 -,C
3 d,D
dtype: str
If `sep` is not specified, the values are concatenated without
separation.
>>> s.str.cat(["A", "B", "C", "D"], na_rep="-")
0 aA
1 bB
2 -C
3 dD
dtype: str
Series with different indexes can be aligned before concatenation. The
`join`-keyword works as in other methods.
>>> t = pd.Series(["d", "a", "e", "c"], index=[3, 0, 4, 2])
>>> s.str.cat(t, join="left", na_rep="-")
0 aa
1 b-
2 -c
3 dd
dtype: str
>>>
>>> s.str.cat(t, join="outer", na_rep="-")
0 aa
1 b-
2 -c
3 dd
4 -e
dtype: str
>>>
>>> s.str.cat(t, join="inner", na_rep="-")
0 aa
2 -c
3 dd
dtype: str
>>>
>>> s.str.cat(t, join="right", na_rep="-")
3 dd
0 aa
4 -e
2 -c
dtype: str
For more examples, see :ref:`here <text.concatenate>`.
"""
# TODO: dispatch
from pandas import (
Index,
Series,
concat,
)
if isinstance(others, str):
raise ValueError("Did you mean to supply a `sep` keyword?")
if sep is None:
sep = ""
if isinstance(self._orig, ABCIndex):
data = Series(self._orig, index=self._orig, dtype=self._orig.dtype)
else: # Series
data = self._orig
# concatenate Series/Index with itself if no "others"
if others is None:
# error: Incompatible types in assignment (expression has type
# "ndarray", variable has type "Series")
data = ensure_object(data) # type: ignore[assignment]
na_mask = isna(data)
if na_rep is None and na_mask.any():
return sep.join(data[~na_mask])
elif na_rep is not None and na_mask.any():
return sep.join(np.where(na_mask, na_rep, data))
else:
return sep.join(data)
try:
# turn anything in "others" into lists of Series
others = self._get_series_list(others)
except ValueError as err: # do not catch TypeError raised by _get_series_list
raise ValueError(
"If `others` contains arrays or lists (or other "
"list-likes without an index), these must all be "
"of the same length as the calling Series/Index."
) from err
# align if required
if any(not data.index.equals(x.index) for x in others):
# Need to add keys for uniqueness in case of duplicate columns
others = concat(
others,
axis=1,
join=(join if join == "inner" else "outer"),
keys=range(len(others)),
sort=False,
)
data, others = data.align(others, join=join)
others = [others[x] for x in others] # again list of Series
all_cols = [ensure_object(x) for x in [data] + others]
na_masks = np.array([isna(x) for x in all_cols])
union_mask = np.logical_or.reduce(na_masks, axis=0)
if na_rep is None and union_mask.any():
# no na_rep means NaNs for all rows where any column has a NaN
# only necessary if there are actually any NaNs
result = np.empty(len(data), dtype=object)
np.putmask(result, union_mask, np.nan)
not_masked = ~union_mask
result[not_masked] = cat_safe([x[not_masked] for x in all_cols], sep)
elif na_rep is not None and union_mask.any():
# fill NaNs with na_rep in case there are actually any NaNs
all_cols = [
np.where(nm, na_rep, col)
for nm, col in zip(na_masks, all_cols, strict=True)
]
result = cat_safe(all_cols, sep)
else:
# no NaNs - can just concatenate
result = cat_safe(all_cols, sep)
out: Index | Series
if isinstance(self._orig.dtype, CategoricalDtype):
# We need to infer the new categories.
dtype = self._orig.dtype.categories.dtype
else:
dtype = self._orig.dtype
if isinstance(self._orig, ABCIndex):
# add dtype for case that result is all-NA
if isna(result).all():
dtype = object # type: ignore[assignment]
out = Index(result, dtype=dtype, name=self._orig.name)
else: # Series
res_ser = Series(
result, dtype=dtype, index=data.index, name=self._orig.name, copy=False
)
out = res_ser.__finalize__(self._orig, method="str_cat")
return out
_shared_docs["str_split"] = r"""
Split strings around given separator/delimiter.
Splits the string in the Series/Index from the %(side)s,
at the specified delimiter string.
Parameters
----------
pat : str%(pat_regex)s, optional
%(pat_description)s.
If not specified, split on whitespace.
n : int, default -1 (all)
Limit number of splits in output.
``None``, 0 and -1 will be interpreted as return all splits.
expand : bool, default False
Expand the split strings into separate columns.
- If ``True``, return DataFrame/MultiIndex expanding dimensionality.
- If ``False``, return Series/Index, containing lists of strings.
%(regex_argument)s
Returns
-------
Series, Index, DataFrame or MultiIndex
Type matches caller unless ``expand=True`` (see Notes).
%(raises_split)s
See Also
--------
Series.str.split : Split strings around given separator/delimiter.
Series.str.rsplit : Splits string around given separator/delimiter,
starting from the right.
Series.str.join : Join lists contained as elements in the Series/Index
with passed delimiter.
str.split : Standard library version for split.
str.rsplit : Standard library version for rsplit.
Notes
-----
The handling of the `n` keyword depends on the number of found splits:
- If found splits > `n`, make first `n` splits only
- If found splits <= `n`, make all splits
- If for a certain row the number of found splits < `n`,
append `None` for padding up to `n` if ``expand=True``
If using ``expand=True``, Series and Index callers return DataFrame and
MultiIndex objects, respectively.
%(regex_pat_note)s
Examples
--------
>>> s = pd.Series(
... [
... "this is a regular sentence",
... "https://docs.python.org/3/tutorial/index.html",
... np.nan
... ]
... )
>>> s
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html
2 NaN
dtype: str
In the default setting, the string is split by whitespace.
>>> s.str.split()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
Without the `n` parameter, the outputs of `rsplit` and `split`
are identical.
>>> s.str.rsplit()
0 [this, is, a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `n` parameter can be used to limit the number of splits on the
delimiter. The outputs of `split` and `rsplit` are different.
>>> s.str.split(n=2)
0 [this, is, a regular sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
>>> s.str.rsplit(n=2)
0 [this is a, regular, sentence]
1 [https://docs.python.org/3/tutorial/index.html]
2 NaN
dtype: object
The `pat` parameter can be used to split by other characters.
>>> s.str.split(pat="/")
0 [this is a regular sentence]
1 [https:, , docs.python.org, 3, tutorial, index...
2 NaN
dtype: object
When using ``expand=True``, the split elements will expand out into
separate columns. If NaN is present, it is propagated throughout
the columns during the split.
>>> s.str.split(expand=True)
0 1 2 3 4
0 this is a regular sentence
1 https://docs.python.org/3/tutorial/index.html NaN NaN NaN NaN
2 NaN NaN NaN NaN NaN
For slightly more complex use cases like splitting the html document name
from a url, a combination of parameter settings can be used.
>>> s.str.rsplit("/", n=1, expand=True)
0 1
0 this is a regular sentence NaN
1 https://docs.python.org/3/tutorial index.html
2 NaN NaN
%(regex_examples)s"""
@Appender(
    _shared_docs["str_split"]
    % {
        "side": "beginning",
        "pat_regex": " or compiled regex",
        "pat_description": "String or regular expression to split on",
        "regex_argument": """
    regex : bool, default None
        Determines if the passed-in pattern is a regular expression:

        - If ``True``, assumes the passed-in pattern is a regular expression
        - If ``False``, treats the pattern as a literal string.
        - If ``None`` and `pat` length is 1, treats `pat` as a literal string.
        - If ``None`` and `pat` length is not 1, treats `pat` as a regular expression.
        - Cannot be set to False if `pat` is a compiled regex

        .. versionadded:: 1.4.0
    """,
        "raises_split": """
    Raises
    ------
    ValueError
        * if `regex` is False and `pat` is a compiled regex
    """,
        "regex_pat_note": """
    Use of `regex =False` with a `pat` as a compiled regex will raise an error.
    """,
        "method": "split",
        "regex_examples": r"""
    Remember to escape special characters when explicitly using regular expressions.

    >>> s = pd.Series(["foo and bar plus baz"])
    >>> s.str.split(r"and|plus", expand=True)
        0   1   2
    0   foo bar baz

    Regular expressions can be used to handle urls or file names.
    When `pat` is a string and ``regex=None`` (the default), the given `pat` is compiled
    as a regex only if ``len(pat) != 1``.

    >>> s = pd.Series(['foojpgbar.jpg'])
    >>> s.str.split(r".", expand=True)
               0    1
    0  foojpgbar  jpg

    >>> s.str.split(r"\.jpg", expand=True)
               0 1
    0  foojpgbar

    When ``regex=True``, `pat` is interpreted as a regex

    >>> s.str.split(r"\.jpg", regex=True, expand=True)
               0 1
    0  foojpgbar

    A compiled regex can be passed as `pat`

    >>> import re
    >>> s.str.split(re.compile(r"\.jpg"), expand=True)
               0 1
    0  foojpgbar

    When ``regex=False``, `pat` is interpreted as the string itself

    >>> s.str.split(r"\.jpg", regex=False, expand=True)
                   0
    0  foojpgbar.jpg
    """,
    }
)
@forbid_nonstring_types(["bytes"])
def split(
    self,
    pat: str | re.Pattern | None = None,
    *,
    n=-1,
    expand: bool = False,
    regex: bool | None = None,
):
    # A compiled pattern is inherently a regular expression, so
    # regex=False contradicts it.  NOTE(review): the original message
    # said "replacement pattern" (copy-pasted from ``replace``); here
    # ``pat`` is the split pattern, so the message is corrected.
    if regex is False and is_re(pat):
        raise ValueError(
            "Cannot use a compiled regex as pattern with regex=False"
        )
    if is_re(pat):
        # A compiled pattern implies regex semantics even when the
        # ``regex`` argument was left unset.
        regex = True
    result = self._data.array._str_split(pat, n, expand, regex)
    if self._data.dtype == "category":
        # Categorical input: carry the categories' dtype through.
        dtype = self._data.dtype.categories.dtype
    else:
        dtype = object if self._data.dtype == object else None
    return self._wrap_result(
        result, expand=expand, returns_string=expand, dtype=dtype
    )
@Appender(
    _shared_docs["str_split"]
    % {
        "side": "end",
        "pat_regex": "",
        "pat_description": "String to split on",
        "regex_argument": "",
        "raises_split": "",
        "regex_pat_note": "",
        "method": "rsplit",
        "regex_examples": "",
    }
)
@forbid_nonstring_types(["bytes"])
def rsplit(self, pat=None, *, n=-1, expand: bool = False):
    # Delegate the right-to-left split to the array implementation,
    # then box the result with the appropriate dtype.
    split_values = self._data.array._str_rsplit(pat, n=n)
    if self._data.dtype == object:
        result_dtype = object
    else:
        result_dtype = None
    return self._wrap_result(
        split_values, expand=expand, returns_string=expand, dtype=result_dtype
    )
_shared_docs["str_partition"] = """
Split the string at the %(side)s occurrence of `sep`.
This method splits the string at the %(side)s occurrence of `sep`,
and returns 3 elements containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found, return %(return)s.
Parameters
----------
sep : str, default whitespace
String to split on.
expand : bool, default True
If True, return DataFrame/MultiIndex expanding dimensionality.
If False, return Series/Index.
Returns
-------
DataFrame/MultiIndex or Series/Index of objects
Returns appropriate type based on `expand` parameter with strings
split based on the `sep` parameter.
See Also
--------
%(also)s
Series.str.split : Split strings around given separators.
str.partition : Standard library version.
Examples
--------
>>> s = pd.Series(['Linda van der Berg', 'George Pitt-Rivers'])
>>> s
0 Linda van der Berg
1 George Pitt-Rivers
dtype: str
>>> s.str.partition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by the last space instead of the first one:
>>> s.str.rpartition()
0 1 2
0 Linda van der Berg
1 George Pitt-Rivers
To partition by something different than a space:
>>> s.str.partition('-')
0 1 2
0 Linda van der Berg
1 George Pitt - Rivers
To return a Series containing tuples instead of a DataFrame:
>>> s.str.partition('-', expand=False)
0 (Linda van der Berg, , )
1 (George Pitt, -, Rivers)
dtype: object
Also available on indices:
>>> idx = pd.Index(['X 123', 'Y 999'])
>>> idx
Index(['X 123', 'Y 999'], dtype='str')
Which will create a MultiIndex:
>>> idx.str.partition()
MultiIndex([('X', ' ', '123'),
('Y', ' ', '999')],
)
Or an index with tuples with ``expand=False``:
>>> idx.str.partition(expand=False)
Index([('X', ' ', '123'), ('Y', ' ', '999')], dtype='object')
"""
@Appender(
    _shared_docs["str_partition"]
    % {
        "side": "first",
        "return": "3 elements containing the string itself, followed by two "
        "empty strings",
        "also": "rpartition : Split the string at the last occurrence of `sep`.",
    }
)
@forbid_nonstring_types(["bytes"])
def partition(self, sep: str = " ", expand: bool = True):
    # Compute the three-way split at the array level, then wrap.
    parts = self._data.array._str_partition(sep, expand)
    if self._data.dtype == "category":
        # Categorical input: carry the categories' dtype through.
        out_dtype = self._data.dtype.categories.dtype
    elif self._data.dtype == object:
        out_dtype = object
    else:
        out_dtype = None
    return self._wrap_result(
        parts, expand=expand, returns_string=expand, dtype=out_dtype
    )
@Appender(
    _shared_docs["str_partition"]
    % {
        "side": "last",
        "return": "3 elements containing two empty strings, followed by the "
        "string itself",
        "also": "partition : Split the string at the first occurrence of `sep`.",
    }
)
@forbid_nonstring_types(["bytes"])
def rpartition(self, sep: str = " ", expand: bool = True):
    # Same as ``partition`` but splitting at the last occurrence.
    parts = self._data.array._str_rpartition(sep, expand)
    if self._data.dtype == "category":
        # Categorical input: carry the categories' dtype through.
        out_dtype = self._data.dtype.categories.dtype
    elif self._data.dtype == object:
        out_dtype = object
    else:
        out_dtype = None
    return self._wrap_result(
        parts, expand=expand, returns_string=expand, dtype=out_dtype
    )
def get(self, i):
    """
    Extract element from each component at specified position or with specified key.

    Extract element from lists, tuples, dict, or strings in each element in the
    Series/Index.

    Parameters
    ----------
    i : int or hashable dict label
        Position or key of element to extract.

    Returns
    -------
    Series or Index
        Series or Index where each value is the extracted element from
        the corresponding input component.

    See Also
    --------
    Series.str.extract : Extract capture groups in the regex as columns
        in a DataFrame.

    Examples
    --------
    >>> s = pd.Series(
    ...     [
    ...         "String",
    ...         (1, 2, 3),
    ...         ["a", "b", "c"],
    ...         123,
    ...         -456,
    ...         {1: "Hello", "2": "World"},
    ...     ]
    ... )
    >>> s
    0                        String
    1                     (1, 2, 3)
    2                     [a, b, c]
    3                           123
    4                          -456
    5    {1: 'Hello', '2': 'World'}
    dtype: object

    >>> s.str.get(1)
    0        t
    1        2
    2        b
    3      NaN
    4      NaN
    5    Hello
    dtype: object

    >>> s.str.get(-1)
    0       g
    1       3
    2       c
    3     NaN
    4     NaN
    5    None
    dtype: object

    Return element with given key

    >>> s = pd.Series(
    ...     [
    ...         {"name": "Hello", "value": "World"},
    ...         {"name": "Goodbye", "value": "Planet"},
    ...     ]
    ... )
    >>> s.str.get("name")
    0      Hello
    1    Goodbye
    dtype: object
    """
    # Positional/key extraction happens at the array level; the wrapper
    # only boxes the result back into a Series/Index.
    return self._wrap_result(self._data.array._str_get(i))
@forbid_nonstring_types(["bytes"])
def join(self, sep: str):
    """
    Join lists contained as elements in the Series/Index with passed delimiter.

    If the elements of a Series are lists themselves, join the content of these
    lists using the delimiter passed to the function.
    This function is an equivalent to :meth:`str.join`.

    Parameters
    ----------
    sep : str
        Delimiter to use between list entries.

    Returns
    -------
    Series/Index: object
        The list entries concatenated by intervening occurrences of the
        delimiter.

    Raises
    ------
    AttributeError
        If the supplied Series contains neither strings nor lists.

    See Also
    --------
    str.join : Standard library version of this method.
    Series.str.split : Split strings around given separator/delimiter.

    Notes
    -----
    If any of the list items is not a string object, the result of the join
    will be `NaN`.

    Examples
    --------
    Example with a list that contains non-string elements.

    >>> s = pd.Series(
    ...     [
    ...         ["lion", "elephant", "zebra"],
    ...         [1.1, 2.2, 3.3],
    ...         ["cat", np.nan, "dog"],
    ...         ["cow", 4.5, "goat"],
    ...         ["duck", ["swan", "fish"], "guppy"],
    ...     ]
    ... )
    >>> s
    0        [lion, elephant, zebra]
    1                [1.1, 2.2, 3.3]
    2                [cat, nan, dog]
    3               [cow, 4.5, goat]
    4    [duck, [swan, fish], guppy]
    dtype: object

    Join all lists using a '-'. The lists containing object(s) of types other
    than str will produce a NaN.

    >>> s.str.join("-")
    0    lion-elephant-zebra
    1                    NaN
    2                    NaN
    3                    NaN
    4                    NaN
    dtype: object
    """
    # The element-wise join is implemented on the backing array.
    joined = self._data.array._str_join(sep)
    return self._wrap_result(joined)
@forbid_nonstring_types(["bytes"])
def contains(
    self,
    pat,
    case: bool = True,
    flags: int = 0,
    na=lib.no_default,
    regex: bool = True,
):
    r"""
    Test if pattern or regex is contained within a string of a Series or Index.

    Return boolean Series or Index based on whether a given pattern or regex is
    contained within a string of a Series or Index.

    Parameters
    ----------
    pat : str
        Character sequence or regular expression.
    case : bool, default True
        If True, case sensitive.
    flags : int, default 0 (no flags)
        Flags to pass through to the re module, e.g. re.IGNORECASE.
    na : scalar, optional
        Fill value for missing values. The default depends on dtype of the
        array. For the ``"str"`` dtype, ``False`` is used. For object
        dtype, ``numpy.nan`` is used. For the nullable ``StringDtype``,
        ``pandas.NA`` is used.
    regex : bool, default True
        If True, assumes the pat is a regular expression.

        If False, treats the pat as a literal string.

    Returns
    -------
    Series or Index of boolean values
        A Series or Index of boolean values indicating whether the
        given pattern is contained within the string of each element
        of the Series or Index.

    See Also
    --------
    match : Analogous, but stricter, relying on re.match instead of re.search.
    Series.str.startswith : Test if the start of each string element matches a
        pattern.
    Series.str.endswith : Same as startswith, but tests the end of string.

    Examples
    --------
    Returning a Series of booleans using only a literal pattern.

    >>> s1 = pd.Series(["Mouse", "dog", "house and parrot", "23", np.nan])
    >>> s1.str.contains("og", regex=False)
    0    False
    1     True
    2    False
    3    False
    4    False
    dtype: bool

    Returning an Index of booleans using only a literal pattern.

    >>> ind = pd.Index(["Mouse", "dog", "house and parrot", "23.0", np.nan])
    >>> ind.str.contains("23", regex=False)
    array([False, False, False,  True, False])

    Specifying case sensitivity using `case`.

    >>> s1.str.contains("oG", case=True, regex=True)
    0    False
    1    False
    2    False
    3    False
    4    False
    dtype: bool

    Returning 'house' or 'dog' when either expression occurs in a string.

    >>> s1.str.contains("house|dog", regex=True)
    0    False
    1     True
    2     True
    3    False
    4    False
    dtype: bool

    Ignoring case sensitivity using `flags` with regex.

    >>> import re
    >>> s1.str.contains("PARROT", flags=re.IGNORECASE, regex=True)
    0    False
    1    False
    2     True
    3    False
    4    False
    dtype: bool

    Returning any digit using regular expression.

    >>> s1.str.contains("\\d", regex=True)
    0    False
    1    False
    2    False
    3     True
    4    False
    dtype: bool

    Ensure `pat` is a not a literal pattern when `regex` is set to True.
    Note in the following example one might expect only `s2[1]` and `s2[3]` to
    return `True`. However, '.0' as a regex matches any character
    followed by a 0.

    >>> s2 = pd.Series(["40", "40.0", "41", "41.0", "35"])
    >>> s2.str.contains(".0", regex=True)
    0     True
    1     True
    2    False
    3     True
    4    False
    dtype: bool
    """
    if regex:
        # Warn when the pattern defines capture groups: ``contains``
        # only reports a boolean match, so the groups would be
        # silently discarded.
        if re.compile(pat).groups:
            warnings.warn(
                "This pattern is interpreted as a regular expression, and has "
                "match groups. To actually get the groups, use str.extract.",
                UserWarning,
                stacklevel=find_stack_level(),
            )
    mask = self._data.array._str_contains(pat, case, flags, na, regex)
    return self._wrap_result(mask, fill_value=na, returns_string=False)
@forbid_nonstring_types(["bytes"])
def match(
    self,
    pat: str | re.Pattern,
    case: bool | lib.NoDefault = lib.no_default,
    flags: int | lib.NoDefault = lib.no_default,
    na=lib.no_default,
):
    """
    Determine if each string starts with a match of a regular expression.

    Determines whether each string in the Series or Index starts with a
    match to a specified regular expression. This function is especially
    useful for validating prefixes, such as ensuring that codes, tags, or
    identifiers begin with a specific pattern.

    Parameters
    ----------
    pat : str or compiled regex
        Character sequence or regular expression.
    case : bool, default True
        If True, case sensitive.
    flags : int, default 0 (no flags)
        Regex module flags, e.g. re.IGNORECASE.
    na : scalar, optional
        Fill value for missing values. The default depends on dtype of the
        array. For the ``"str"`` dtype, ``False`` is used. For object
        dtype, ``numpy.nan`` is used. For the nullable ``StringDtype``,
        ``pandas.NA`` is used.

    Returns
    -------
    Series/Index/array of boolean values
        A Series, Index, or array of boolean values indicating whether the start
        of each string matches the pattern. The result will be of the same type
        as the input.

    See Also
    --------
    fullmatch : Stricter matching that requires the entire string to match.
    contains : Analogous, but less strict, relying on re.search instead of
        re.match.
    extract : Extract matched groups.

    Examples
    --------
    >>> ser = pd.Series(["horse", "eagle", "donkey"])
    >>> ser.str.match("e")
    0   False
    1   True
    2   False
    dtype: bool
    """
    # Reconcile an explicitly passed ``flags`` value with a possibly
    # pre-compiled pattern; compiled patterns carry their own flags.
    if flags is not lib.no_default:
        # pat.flags will have re.U regardless, so we need to add it here
        # before checking for a match
        flags = flags | re.U
        if is_re(pat):
            # Explicit flags must agree with the compiled pattern's own.
            if pat.flags != flags:
                raise ValueError(
                    "Cannot both specify 'flags' and pass a compiled regexp "
                    "object with conflicting flags"
                )
        else:
            # Bake the flags into a compiled pattern now.
            pat = re.compile(pat, flags=flags)
            # set flags=0 to ensure that when we call
            # re.compile(pat, flags=flags) the constructor does not raise.
            flags = 0
    else:
        # No explicit flags requested.
        flags = 0
    # Resolve ``case``: default is case-sensitive unless the compiled
    # pattern itself carries re.IGNORECASE.
    if case is lib.no_default:
        if is_re(pat):
            case = not bool(pat.flags & re.IGNORECASE)
        else:
            # Case-sensitive default
            case = True
    elif is_re(pat):
        # An explicit ``case`` must agree with the compiled pattern.
        implicit_case = not bool(pat.flags & re.IGNORECASE)
        if implicit_case != case:
            # GH#62240
            raise ValueError(
                "Cannot both specify 'case' and pass a compiled regexp "
                "object with conflicting case-sensitivity"
            )
    result = self._data.array._str_match(pat, case=case, flags=flags, na=na)
    return self._wrap_result(result, fill_value=na, returns_string=False)
@forbid_nonstring_types(["bytes"])
def fullmatch(self, pat, case: bool = True, flags: int = 0, na=lib.no_default):
    """
    Determine if each string entirely matches a regular expression.

    Checks if each string in the Series or Index fully matches the
    specified regular expression pattern. This function is useful when the
    requirement is for an entire string to conform to a pattern, such as
    validating formats like phone numbers or email addresses.

    Parameters
    ----------
    pat : str
        Character sequence or regular expression.
    case : bool, default True
        If True, case sensitive.
    flags : int, default 0 (no flags)
        Regex module flags, e.g. re.IGNORECASE.
    na : scalar, optional
        Fill value for missing values. The default depends on dtype of the
        array. For the ``"str"`` dtype, ``False`` is used. For object
        dtype, ``numpy.nan`` is used. For the nullable ``StringDtype``,
        ``pandas.NA`` is used.

    Returns
    -------
    Series/Index/array of boolean values
        The function returns a Series, Index, or array of boolean values,
        where True indicates that the entire string matches the regular
        expression pattern and False indicates that it does not.

    See Also
    --------
    match : Similar, but also returns `True` when only a *prefix* of the string
        matches the regular expression.
    extract : Extract matched groups.

    Examples
    --------
    >>> ser = pd.Series(["cat", "duck", "dove"])
    >>> ser.str.fullmatch(r"d.+")
    0   False
    1    True
    2    True
    dtype: bool
    """
    # Whole-string matching is delegated to the backing array.
    return self._wrap_result(
        self._data.array._str_fullmatch(pat, case=case, flags=flags, na=na),
        fill_value=na,
        returns_string=False,
    )
@forbid_nonstring_types(["bytes"])
def replace(
    self,
    pat: str | re.Pattern | dict,
    repl: str | Callable | None = None,
    n: int = -1,
    case: bool | None = None,
    flags: int = 0,
    regex: bool = False,
):
    r"""
    Replace each occurrence of pattern/regex in the Series/Index.

    Equivalent to :meth:`str.replace` or :func:`re.sub`, depending on
    the regex value.

    Parameters
    ----------
    pat : str, compiled regex, or a dict
        String can be a character sequence or regular expression.
        Dictionary contains <key : value> pairs of strings to be replaced
        along with the updated value.
    repl : str or callable
        Replacement string or a callable. The callable is passed the regex
        match object and must return a replacement string to be used.
        Must have a value of None if `pat` is a dict
        See :func:`re.sub`.
    n : int, default -1 (all)
        Number of replacements to make from start.
    case : bool, default None
        Determines if replace is case sensitive:

        - If True, case sensitive (the default if `pat` is a string)
        - Set to False for case insensitive
        - Cannot be set if `pat` is a compiled regex.

    flags : int, default 0 (no flags)
        Regex module flags, e.g. re.IGNORECASE. Cannot be set if `pat` is a compiled
        regex.
    regex : bool, default False
        Determines if the passed-in pattern is a regular expression:

        - If True, assumes the passed-in pattern is a regular expression.
        - If False, treats the pattern as a literal string
        - Cannot be set to False if `pat` is a compiled regex or `repl` is
          a callable.

    Returns
    -------
    Series or Index of object
        A copy of the object with all matching occurrences of `pat` replaced by
        `repl`.

    Raises
    ------
    ValueError
        * if `regex` is False and `repl` is a callable or `pat` is a compiled
          regex
        * if `pat` is a compiled regex and `case` or `flags` is set
        * if `pat` is a dictionary and `repl` is not None.

    See Also
    --------
    Series.str.replace : Method to replace occurrences of a substring with another
        substring.
    Series.str.extract : Extract substrings using a regular expression.
    Series.str.findall : Find all occurrences of a pattern or regex in each string.
    Series.str.split : Split each string by a specified delimiter or pattern.

    Notes
    -----
    When `pat` is a compiled regex, all flags should be included in the
    compiled regex. Use of `case`, `flags`, or `regex=False` with a compiled
    regex will raise an error.

    Examples
    --------
    When `pat` is a dictionary, every key in `pat` is replaced
    with its corresponding value:

    >>> pd.Series(["A", "B", np.nan]).str.replace(pat={"A": "a", "B": "b"})
    0      a
    1      b
    2    NaN
    dtype: str

    When `pat` is a string and `regex` is True, the given `pat`
    is compiled as a regex. When `repl` is a string, it replaces matching
    regex patterns as with :meth:`re.sub`. NaN value(s) in the Series are
    left as is:

    >>> pd.Series(["foo", "fuz", np.nan]).str.replace("f.", "ba", regex=True)
    0    bao
    1    baz
    2    NaN
    dtype: str

    When `pat` is a string and `regex` is False, every `pat` is replaced with
    `repl` as with :meth:`str.replace`:

    >>> pd.Series(["f.o", "fuz", np.nan]).str.replace("f.", "ba", regex=False)
    0    bao
    1    fuz
    2    NaN
    dtype: str

    When `repl` is a callable, it is called on every `pat` using
    :func:`re.sub`. The callable should expect one positional argument
    (a regex object) and return a string.

    To get the idea:

    >>> pd.Series(["foo", "fuz", np.nan]).str.replace("f", repr, regex=True)
    0    <re.Match object; span=(0, 1), match='f'>oo
    1    <re.Match object; span=(0, 1), match='f'>uz
    2                                             NaN
    dtype: str

    Reverse every lowercase alphabetic word:

    >>> repl = lambda m: m.group(0)[::-1]
    >>> ser = pd.Series(["foo 123", "bar baz", np.nan])
    >>> ser.str.replace(r"[a-z]+", repl, regex=True)
    0    oof 123
    1    rab zab
    2        NaN
    dtype: str

    Using regex groups (extract second group and swap case):

    >>> pat = r"(?P<one>\w+) (?P<two>\w+) (?P<three>\w+)"
    >>> repl = lambda m: m.group("two").swapcase()
    >>> ser = pd.Series(["One Two Three", "Foo Bar Baz"])
    >>> ser.str.replace(pat, repl, regex=True)
    0    tWO
    1    bAR
    dtype: str

    Using a compiled regex with flags

    >>> import re
    >>> regex_pat = re.compile(r"FUZ", flags=re.IGNORECASE)
    >>> pd.Series(["foo", "fuz", np.nan]).str.replace(regex_pat, "bar", regex=True)
    0    foo
    1    bar
    2    NaN
    dtype: str
    """
    # Dict mode maps old -> new directly; ``repl`` must then be unused.
    if isinstance(pat, dict) and repl is not None:
        raise ValueError("repl cannot be used when pat is a dictionary")

    # Check whether repl is valid (GH 13438, GH 15055)
    if not isinstance(pat, dict) and not (isinstance(repl, str) or callable(repl)):
        raise TypeError("repl must be a string or callable")

    is_compiled_re = is_re(pat)
    # NOTE(review): ``regex is None`` looks unreachable given the typed
    # default ``regex: bool = False``; kept defensively for callers that
    # pass None explicitly — confirm before removing.
    if regex or regex is None:
        # A compiled pattern already carries its case/flags settings.
        if is_compiled_re and (case is not None or flags != 0):
            raise ValueError(
                "case and flags cannot be set when pat is a compiled regex"
            )

    elif is_compiled_re:
        raise ValueError(
            "Cannot use a compiled regex as replacement pattern with regex=False"
        )
    elif callable(repl):
        # Callables are only supported through re.sub semantics.
        raise ValueError("Cannot use a callable replacement when regex=False")

    if case is None:
        # Default to case-sensitive matching.
        case = True

    res_output = self._data
    # Normalize the scalar form to the dict form so both share one loop.
    if not isinstance(pat, dict):
        pat = {pat: repl}

    # Apply each replacement in sequence; each pass re-wraps the result
    # so subsequent replacements see the already-updated values.
    for key, value in pat.items():
        result = res_output.array._str_replace(
            key, value, n=n, case=case, flags=flags, regex=regex
        )
        res_output = self._wrap_result(result)

    return res_output
@forbid_nonstring_types(["bytes"])
def repeat(self, repeats):
    """
    Duplicate each string in the Series or Index.

    Duplicates each string in the Series or Index, either by applying the
    same repeat count to all elements or by using different repeat values
    for each element.

    Parameters
    ----------
    repeats : int or sequence of int
        Same value for all (int) or different value per (sequence).

    Returns
    -------
    Series or pandas.Index
        Series or Index of repeated string objects specified by
        input parameter repeats.

    See Also
    --------
    Series.str.lower : Convert all characters in each string to lowercase.
    Series.str.upper : Convert all characters in each string to uppercase.
    Series.str.title : Convert each string to title case (capitalizing the first
        letter of each word).
    Series.str.strip : Remove leading and trailing whitespace from each string.
    Series.str.replace : Replace occurrences of a substring with another substring
        in each string.
    Series.str.ljust : Left-justify each string in the Series/Index by padding with
        a specified character.
    Series.str.rjust : Right-justify each string in the Series/Index by padding with
        a specified character.

    Examples
    --------
    >>> s = pd.Series(["a", "b", "c"])
    >>> s
    0    a
    1    b
    2    c
    dtype: str

    Single int repeats string in Series

    >>> s.str.repeat(repeats=2)
    0    aa
    1    bb
    2    cc
    dtype: str

    Sequence of int repeats corresponding string in Series

    >>> s.str.repeat(repeats=[1, 2, 3])
    0      a
    1     bb
    2    ccc
    dtype: str
    """
    # Element-wise repetition is implemented on the backing array.
    repeated = self._data.array._str_repeat(repeats)
    return self._wrap_result(repeated)
@forbid_nonstring_types(["bytes"])
def pad(
    self,
    width: int,
    side: Literal["left", "right", "both"] = "left",
    fillchar: str = " ",
):
    """
    Pad strings in the Series/Index up to width.

    This function pads strings in a Series or Index to a specified width,
    filling the extra space with a character of your choice. It provides
    flexibility in positioning the padding, allowing it to be added to the
    left, right, or both sides. This is useful for formatting strings to
    align text or ensure consistent string lengths in data processing.

    Parameters
    ----------
    width : int
        Minimum width of resulting string; additional characters will be filled
        with character defined in `fillchar`.
    side : {'left', 'right', 'both'}, default 'left'
        Side from which to fill resulting string.
    fillchar : str, default ' '
        Additional character for filling, default is whitespace.

    Returns
    -------
    Series or Index of object
        Returns Series or Index with minimum number of char in object.

    See Also
    --------
    Series.str.rjust : Fills the left side of strings with an arbitrary
        character. Equivalent to ``Series.str.pad(side='left')``.
    Series.str.ljust : Fills the right side of strings with an arbitrary
        character. Equivalent to ``Series.str.pad(side='right')``.
    Series.str.center : Fills both sides of strings with an arbitrary
        character. Equivalent to ``Series.str.pad(side='both')``.
    Series.str.zfill : Pad strings in the Series/Index by prepending '0'
        character. Equivalent to ``Series.str.pad(side='left', fillchar='0')``.

    Examples
    --------
    >>> s = pd.Series(["caribou", "tiger"])
    >>> s
    0    caribou
    1      tiger
    dtype: str

    >>> s.str.pad(width=10)
    0       caribou
    1         tiger
    dtype: str

    >>> s.str.pad(width=10, side="right", fillchar="-")
    0    caribou---
    1    tiger-----
    dtype: str

    >>> s.str.pad(width=10, side="both", fillchar="-")
    0    -caribou--
    1    --tiger---
    dtype: str
    """
    # Validate ``fillchar``: it must be a string of exactly one character.
    if not isinstance(fillchar, str):
        raise TypeError(
            f"fillchar must be a character, not {type(fillchar).__name__}"
        )
    if len(fillchar) != 1:
        raise TypeError("fillchar must be a character, not str")
    # Validate ``width`` before dispatching to the array implementation.
    if not is_integer(width):
        raise TypeError(
            f"width must be of integer type, not {type(width).__name__}"
        )
    padded = self._data.array._str_pad(width, side=side, fillchar=fillchar)
    return self._wrap_result(padded)
_shared_docs["str_pad"] = """
Pad %(side)s side of strings in the Series/Index.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
width : int
Minimum width of resulting string; additional characters will be filled
with ``fillchar``.
fillchar : str
Additional character for filling, default is whitespace.
Returns
-------
Series/Index of objects.
A Series or Index where the strings are modified by :meth:`str.%(method)s`.
See Also
--------
Series.str.rjust : Fills the left side of strings with an arbitrary
character.
Series.str.ljust : Fills the right side of strings with an arbitrary
character.
Series.str.center : Fills both sides of strings with an arbitrary
character.
Series.str.zfill : Pad strings in the Series/Index by prepending '0'
character.
Examples
--------
For Series.str.center:
>>> ser = pd.Series(['dog', 'bird', 'mouse'])
>>> ser.str.center(8, fillchar='.')
0 ..dog...
1 ..bird..
2 .mouse..
dtype: str
For Series.str.ljust:
>>> ser = pd.Series(['dog', 'bird', 'mouse'])
>>> ser.str.ljust(8, fillchar='.')
0 dog.....
1 bird....
2 mouse...
dtype: str
For Series.str.rjust:
>>> ser = pd.Series(['dog', 'bird', 'mouse'])
>>> ser.str.rjust(8, fillchar='.')
0 .....dog
1 ....bird
2 ...mouse
dtype: str
"""
@Appender(_shared_docs["str_pad"] % {"side": "left and right", "method": "center"})
@forbid_nonstring_types(["bytes"])
def center(self, width: int, fillchar: str = " "):
    # Centering is padding applied to both sides.
    return self.pad(width, "both", fillchar)
@Appender(_shared_docs["str_pad"] % {"side": "right", "method": "ljust"})
@forbid_nonstring_types(["bytes"])
def ljust(self, width: int, fillchar: str = " "):
    # Left-justify == pad on the right.
    return self.pad(width, "right", fillchar)
@Appender(_shared_docs["str_pad"] % {"side": "left", "method": "rjust"})
@forbid_nonstring_types(["bytes"])
def rjust(self, width: int, fillchar: str = " "):
    # Right-justify == pad on the left.
    return self.pad(width, "left", fillchar)
@forbid_nonstring_types(["bytes"])
def zfill(self, width: int):
    """
    Pad strings in the Series/Index by prepending '0' characters.

    Strings in the Series/Index are padded with '0' characters on the
    left of the string to reach a total string length  `width`. Strings
    in the Series/Index with length greater or equal to `width` are
    unchanged.

    Parameters
    ----------
    width : int
        Minimum length of resulting string; strings with length less
        than `width` be prepended with '0' characters.

    Returns
    -------
    Series/Index of objects.
        A Series or Index where the strings are prepended with '0' characters.

    See Also
    --------
    Series.str.rjust : Fills the left side of strings with an arbitrary
        character.
    Series.str.ljust : Fills the right side of strings with an arbitrary
        character.
    Series.str.pad : Fills the specified sides of strings with an arbitrary
        character.
    Series.str.center : Fills both sides of strings with an arbitrary
        character.

    Notes
    -----
    Differs from :meth:`str.zfill` which has special handling
    for '+'/'-' in the string.

    Examples
    --------
    >>> s = pd.Series(["-1", "1", "1000", 10, np.nan])
    >>> s
    0      -1
    1       1
    2    1000
    3      10
    4     NaN
    dtype: object

    Note that ``10`` and ``NaN`` are not strings, therefore they are
    converted to ``NaN``. The minus sign in ``'-1'`` is treated as a
    special character and the zero is added to the right of it
    (:meth:`str.zfill` would have moved it to the left). ``1000``
    remains unchanged as it is longer than `width`.

    >>> s.str.zfill(3)
    0     -01
    1     001
    2    1000
    3     NaN
    4     NaN
    dtype: object
    """
    # Validate ``width`` up front; the padding itself happens at the
    # array level.
    if not is_integer(width):
        raise TypeError(
            f"width must be of integer type, not {type(width).__name__}"
        )
    zero_filled = self._data.array._str_zfill(width)
    return self._wrap_result(zero_filled)
def slice(self, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series or Index.

    Slicing substrings from strings in a Series or Index helps extract
    specific portions of data, making it easier to analyze or manipulate
    text. This is useful for tasks like parsing structured text fields or
    isolating parts of strings with a consistent format.

    Parameters
    ----------
    start : int, optional
        Start position for slice operation.
    stop : int, optional
        Stop position for slice operation.
    step : int, optional
        Step size for slice operation.

    Returns
    -------
    Series or Index of object
        Series or Index from sliced substring from original string object.

    See Also
    --------
    Series.str.slice_replace : Replace a slice with a string.
    Series.str.get : Return element at position.
        Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
        being the position.

    Examples
    --------
    >>> s = pd.Series(["koala", "dog", "chameleon"])
    >>> s
    0        koala
    1          dog
    2    chameleon
    dtype: str

    >>> s.str.slice(start=1)
    0        oala
    1          og
    2    hameleon
    dtype: str

    >>> s.str.slice(start=-1)
    0    a
    1    g
    2    n
    dtype: str

    >>> s.str.slice(stop=2)
    0    ko
    1    do
    2    ch
    dtype: str

    >>> s.str.slice(step=2)
    0      kaa
    1       dg
    2    caeen
    dtype: str

    >>> s.str.slice(start=0, stop=5, step=3)
    0    kl
    1     d
    2    cm
    dtype: str

    Equivalent behaviour to:

    >>> s.str[0:5:3]
    0    kl
    1     d
    2    cm
    dtype: str
    """
    # The slice itself is computed at the array level; this wrapper only
    # boxes the result back into a Series/Index.
    return self._wrap_result(self._data.array._str_slice(start, stop, step))
@forbid_nonstring_types(["bytes"])
def slice_replace(self, start=None, stop=None, repl=None):
    """
    Replace a positional slice of a string with another value.

    This function allows replacing specific parts of a string in a Series
    or Index by specifying start and stop positions. It is useful for
    modifying substrings in a controlled way, such as updating sections of
    text based on their positions or patterns.

    Parameters
    ----------
    start : int, optional
        Left index position to use for the slice. If not specified (None),
        the slice is unbounded on the left, i.e. slice from the start
        of the string.
    stop : int, optional
        Right index position to use for the slice. If not specified (None),
        the slice is unbounded on the right, i.e. slice until the
        end of the string.
    repl : str, optional
        String for replacement. If not specified (None), the sliced region
        is replaced with an empty string.

    Returns
    -------
    Series or Index
        Same type as the original object.

    See Also
    --------
    Series.str.slice : Just slicing without replacement.

    Examples
    --------
    >>> s = pd.Series(["a", "ab", "abc", "abdc", "abcde"])
    >>> s
    0        a
    1       ab
    2      abc
    3     abdc
    4    abcde
    dtype: str

    Specify just `start`, meaning replace `start` until the end of the
    string with `repl`.

    >>> s.str.slice_replace(1, repl="X")
    0    aX
    1    aX
    2    aX
    3    aX
    4    aX
    dtype: str

    Specify just `stop`, meaning the start of the string to `stop` is replaced
    with `repl`, and the rest of the string is included.

    >>> s.str.slice_replace(stop=2, repl="X")
    0       X
    1       X
    2      Xc
    3     Xdc
    4    Xcde
    dtype: str

    Specify `start` and `stop`, meaning the slice from `start` to `stop` is
    replaced with `repl`. Everything before or after `start` and `stop` is
    included as is.

    >>> s.str.slice_replace(start=1, stop=3, repl="X")
    0      aX
    1      aX
    2      aX
    3     aXc
    4    aXde
    dtype: str
    """
    # The replacement is computed at the array level; this wrapper only
    # boxes the result.
    replaced = self._data.array._str_slice_replace(start, stop, repl)
    return self._wrap_result(replaced)
def decode(
self, encoding, errors: str = "strict", dtype: str | DtypeObj | None = None
):
"""
Decode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.decode` in python2 and :meth:`bytes.decode` in
python3.
Parameters
----------
encoding : str
Specifies the encoding to be used.
errors : str, optional
Specifies the error handling scheme.
Possible values are those supported by :meth:`bytes.decode`.
dtype : str or dtype, optional
The dtype of the result. When not ``None``, must be either a string or
object dtype. When ``None``, the dtype of the result is determined by
``pd.options.future.infer_string``.
.. versionadded:: 2.3.0
Returns
-------
Series or Index
A Series or Index with decoded strings.
See Also
--------
Series.str.encode : Encodes strings into bytes in a Series/Index.
Examples
--------
For Series:
>>> ser = pd.Series([b"cow", b"123", b"()"])
>>> ser.str.decode("ascii")
0 cow
1 123
2 ()
dtype: str
"""
if dtype is not None and not is_string_dtype(dtype):
raise ValueError(f"dtype must be string or object, got {dtype=}")
if dtype is None and using_string_dtype():
dtype = "str"
# TODO: Add a similar _bytes interface.
if encoding in _cpython_optimized_decoders:
# CPython optimized implementation
f = lambda x: x.decode(encoding, errors)
else:
decoder = codecs.getdecoder(encoding)
f = lambda x: decoder(x, errors)[0]
arr = self._data.array
result = arr._str_map(f)
return self._wrap_result(result, dtype=dtype)
@forbid_nonstring_types(["bytes"])
def encode(self, encoding, errors: str = "strict"):
"""
Encode character string in the Series/Index using indicated encoding.
Equivalent to :meth:`str.encode`.
Parameters
----------
encoding : str
Specifies the encoding to be used.
errors : str, optional
Specifies the error handling scheme.
Possible values are those supported by :meth:`str.encode`.
Returns
-------
Series/Index of objects
A Series or Index with strings encoded into bytes.
See Also
--------
Series.str.decode : Decodes bytes into strings in a Series/Index.
Examples
--------
>>> ser = pd.Series(["cow", "123", "()"])
>>> ser.str.encode(encoding="ascii")
0 b'cow'
1 b'123'
2 b'()'
dtype: object
"""
result = self._data.array._str_encode(encoding, errors)
return self._wrap_result(result, returns_string=False)
_shared_docs["str_strip"] = r"""
Remove %(position)s characters.
Strip whitespaces (including newlines) or a set of specified characters
from each string in the Series/Index from %(side)s.
Replaces any non-strings in Series with NaNs.
Equivalent to :meth:`str.%(method)s`.
Parameters
----------
to_strip : str or None, default None
Specifying the set of characters to be removed.
All combinations of this set of characters will be stripped.
If None then whitespaces are removed.
Returns
-------
Series or Index of object
Series or Index with the strings being stripped from the %(side)s.
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index.
Series.str.lstrip : Remove leading characters in Series/Index.
Series.str.rstrip : Remove trailing characters in Series/Index.
Examples
--------
>>> s = pd.Series(['1. Ant. ', '2. Bee!\n', '3. Cat?\t', np.nan, 10, True])
>>> s
0 1. Ant.
1 2. Bee!\n
2 3. Cat?\t
3 NaN
4 10
5 True
dtype: object
>>> s.str.strip()
0 1. Ant.
1 2. Bee!
2 3. Cat?
3 NaN
4 NaN
5 NaN
dtype: object
>>> s.str.lstrip('123.')
0 Ant.
1 Bee!\n
2 Cat?\t
3 NaN
4 NaN
5 NaN
dtype: object
>>> s.str.rstrip('.!? \n\t')
0 1. Ant
1 2. Bee
2 3. Cat
3 NaN
4 NaN
5 NaN
dtype: object
>>> s.str.strip('123.!? \n\t')
0 Ant
1 Bee
2 Cat
3 NaN
4 NaN
5 NaN
dtype: object
"""
@Appender(
_shared_docs["str_strip"]
% {
"side": "left and right sides",
"method": "strip",
"position": "leading and trailing",
}
)
@forbid_nonstring_types(["bytes"])
def strip(self, to_strip=None):
result = self._data.array._str_strip(to_strip)
return self._wrap_result(result)
@Appender(
_shared_docs["str_strip"]
% {"side": "left side", "method": "lstrip", "position": "leading"}
)
@forbid_nonstring_types(["bytes"])
def lstrip(self, to_strip=None):
result = self._data.array._str_lstrip(to_strip)
return self._wrap_result(result)
@Appender(
_shared_docs["str_strip"]
% {"side": "right side", "method": "rstrip", "position": "trailing"}
)
@forbid_nonstring_types(["bytes"])
def rstrip(self, to_strip=None):
result = self._data.array._str_rstrip(to_strip)
return self._wrap_result(result)
_shared_docs["str_removefix"] = r"""
Remove a %(side)s from an object series.
If the %(side)s is not present, the original string will be returned.
Parameters
----------
%(side)s : str
Remove the %(side)s of the string.
Returns
-------
Series/Index: object
The Series or Index with given %(side)s removed.
See Also
--------
Series.str.remove%(other_side)s : Remove a %(other_side)s from an object series.
Examples
--------
>>> s = pd.Series(["str_foo", "str_bar", "no_prefix"])
>>> s
0 str_foo
1 str_bar
2 no_prefix
dtype: str
>>> s.str.removeprefix("str_")
0 foo
1 bar
2 no_prefix
dtype: str
>>> s = pd.Series(["foo_str", "bar_str", "no_suffix"])
>>> s
0 foo_str
1 bar_str
2 no_suffix
dtype: str
>>> s.str.removesuffix("_str")
0 foo
1 bar
2 no_suffix
dtype: str
"""
@Appender(
_shared_docs["str_removefix"] % {"side": "prefix", "other_side": "suffix"}
)
@forbid_nonstring_types(["bytes"])
def removeprefix(self, prefix: str):
result = self._data.array._str_removeprefix(prefix)
return self._wrap_result(result)
@Appender(
_shared_docs["str_removefix"] % {"side": "suffix", "other_side": "prefix"}
)
@forbid_nonstring_types(["bytes"])
def removesuffix(self, suffix: str):
result = self._data.array._str_removesuffix(suffix)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def wrap(
self,
width: int,
expand_tabs: bool = True,
tabsize: int = 8,
replace_whitespace: bool = True,
drop_whitespace: bool = True,
initial_indent: str = "",
subsequent_indent: str = "",
fix_sentence_endings: bool = False,
break_long_words: bool = True,
break_on_hyphens: bool = True,
max_lines: int | None = None,
placeholder: str = " [...]",
):
r"""
Wrap strings in Series/Index at specified line width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int, optional
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
tabsize : int, optional
If expand_tabs is true, then all tab characters in text will be
expanded to zero or more spaces, depending on the current column
and the given tab size (default: 8).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
initial_indent : str, optional
String that will be prepended to the first line of wrapped output.
Counts towards the length of the first line. The empty string is
not indented (default: '').
subsequent_indent : str, optional
String that will be prepended to all lines of wrapped output except
the first. Counts towards the length of each line except the first
(default: '').
fix_sentence_endings : bool, optional
If true, TextWrapper attempts to detect sentence endings and ensure
that sentences are always separated by exactly two spaces. This is
generally desired for text in a monospaced font. However, the sentence
detection algorithm is imperfect: it assumes that a sentence ending
consists of a lowercase letter followed by one of '.', '!', or '?',
possibly followed by one of '"' or "'", followed by a space. One
problem with this algorithm is that it is unable to detect the
difference between “Dr.” in `[...] Dr. Frankenstein's monster [...]`
and “Spot.” in `[...] See Spot. See Spot run [...]`
Since the sentence detection algorithm relies on string.lowercase
for the definition of “lowercase letter”, and a convention of using
two spaces after a period to separate sentences on the same line,
it is specific to English-language texts (default: False).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
max_lines : int, optional
If not None, then the output will contain at most max_lines lines, with
placeholder appearing at the end of the output (default: None).
placeholder : str, optional
String that will appear at the end of the output text if it has been
truncated (default: ' [...]').
Returns
-------
Series or Index
A Series or Index where the strings are wrapped at the specified line width.
See Also
--------
Series.str.strip : Remove leading and trailing characters in Series/Index.
Series.str.lstrip : Remove leading characters in Series/Index.
Series.str.rstrip : Remove trailing characters in Series/Index.
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(["line to be wrapped", "another line to be wrapped"])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: str
"""
result = self._data.array._str_wrap(
width=width,
expand_tabs=expand_tabs,
tabsize=tabsize,
replace_whitespace=replace_whitespace,
drop_whitespace=drop_whitespace,
initial_indent=initial_indent,
subsequent_indent=subsequent_indent,
fix_sentence_endings=fix_sentence_endings,
break_long_words=break_long_words,
break_on_hyphens=break_on_hyphens,
max_lines=max_lines,
placeholder=placeholder,
)
return self._wrap_result(result)
@forbid_nonstring_types(["bytes"])
def get_dummies(
self,
sep: str = "|",
dtype: NpDtype | None = None,
):
"""
Return DataFrame of dummy/indicator variables for Series.
Each string in Series is split by sep and returned as a DataFrame
of dummy/indicator variables.
Parameters
----------
sep : str, default "|"
String to split on.
dtype : dtype, default np.int64
Data type for new columns. Only a single dtype is allowed.
Returns
-------
DataFrame
Dummy variables corresponding to values of the Series.
See Also
--------
get_dummies : Convert categorical variable into dummy/indicator
variables.
Examples
--------
>>> pd.Series(["a|b", "a", "a|c"]).str.get_dummies()
a b c
0 1 1 0
1 1 0 0
2 1 0 1
>>> pd.Series(["a|b", np.nan, "a|c"]).str.get_dummies()
a b c
0 1 1 0
1 0 0 0
2 1 0 1
>>> pd.Series(["a|b", np.nan, "a|c"]).str.get_dummies(dtype=bool)
a b c
0 True True False
1 False False False
2 True False True
"""
from pandas.core.frame import DataFrame
if dtype is not None and not (is_numeric_dtype(dtype) or is_bool_dtype(dtype)):
raise ValueError("Only numeric or boolean dtypes are supported for 'dtype'")
# we need to cast to Series of strings as only that has all
# methods available for making the dummies...
result, name = self._data.array._str_get_dummies(sep, dtype)
if is_extension_array_dtype(dtype):
return self._wrap_result(
DataFrame(result, columns=name, dtype=dtype),
name=name,
returns_string=False,
)
return self._wrap_result(
result,
name=name,
expand=True,
returns_string=False,
)
@forbid_nonstring_types(["bytes"])
def translate(self, table):
"""
Map all characters in the string through the given mapping table.
This method is equivalent to the standard :meth:`str.translate`
method for strings. It maps each character in the string to a new
character according to the translation table provided. Unmapped
characters are left unchanged, while characters mapped to None
are removed.
Parameters
----------
table : dict
Table is a mapping of Unicode ordinals to Unicode ordinals, strings, or
None. Unmapped characters are left untouched.
Characters mapped to None are deleted. :meth:`str.maketrans` is a
helper function for making translation tables.
Returns
-------
Series or Index
A new Series or Index with translated strings.
See Also
--------
Series.str.replace : Replace occurrences of pattern/regex in the
Series with some other string.
Index.str.replace : Replace occurrences of pattern/regex in the
Index with some other string.
Examples
--------
>>> ser = pd.Series(["El niño", "Françoise"])
>>> mytable = str.maketrans({"ñ": "n", "ç": "c"})
>>> ser.str.translate(mytable)
0 El nino
1 Francoise
dtype: str
"""
result = self._data.array._str_translate(table)
dtype = object if self._data.dtype == "object" else None
return self._wrap_result(result, dtype=dtype)
@forbid_nonstring_types(["bytes"])
def count(self, pat, flags: int = 0):
r"""
Count occurrences of pattern in each string of the Series/Index.
This function is used to count the number of times a particular regex
pattern is repeated in each of the string elements of the
:class:`~pandas.Series`.
Parameters
----------
pat : str
Valid regular expression.
flags : int, default 0, meaning no flags
Flags for the `re` module. For a complete list, `see here
<https://docs.python.org/3/howto/regex.html#compilation-flags>`_.
Returns
-------
Series or Index
Same type as the calling object containing the integer counts.
See Also
--------
re : Standard library module for regular expressions.
str.count : Standard library version, without regular expression support.
Notes
-----
Some characters need to be escaped when passing in `pat`.
eg. ``'$'`` has a special meaning in regex and must be escaped when
finding this literal character.
Examples
--------
>>> s = pd.Series(["A", "B", "Aaba", "Baca", np.nan, "CABA", "cat"])
>>> s.str.count("a")
0 0.0
1 0.0
2 2.0
3 2.0
4 NaN
5 0.0
6 1.0
dtype: float64
Escape ``'$'`` to find the literal dollar sign.
>>> s = pd.Series(["$", "B", "Aab$", "$$ca", "C$B$", "cat"])
>>> s.str.count("\\$")
0 1
1 0
2 1
3 2
4 2
5 0
dtype: int64
This is also available on Index
>>> pd.Index(["A", "A", "Aaba", "cat"]).str.count("a")
Index([0, 0, 2, 1], dtype='int64')
"""
result = self._data.array._str_count(pat, flags)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def startswith(
self, pat: str | tuple[str, ...], na: Scalar | lib.NoDefault = lib.no_default
) -> Series | Index:
"""
Test if the start of each string element matches a pattern.
Equivalent to :meth:`str.startswith`.
Parameters
----------
pat : str or tuple[str, ...]
Character sequence or tuple of strings. Regular expressions are not
accepted.
na : scalar, optional
Object shown if element tested is not a string. The default depends
on dtype of the array. For the ``"str"`` dtype, ``False`` is used.
For object dtype, ``numpy.nan`` is used. For the nullable
``StringDtype``, ``pandas.NA`` is used.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the start of each string element.
See Also
--------
str.startswith : Python standard library string method.
Series.str.endswith : Same as startswith, but tests the end of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(["bat", "Bear", "cat", np.nan])
>>> s
0 bat
1 Bear
2 cat
3 NaN
dtype: str
>>> s.str.startswith("b")
0 True
1 False
2 False
3 False
dtype: bool
>>> s.str.startswith(("b", "B"))
0 True
1 True
2 False
3 False
dtype: bool
"""
if not isinstance(pat, (str, tuple)):
msg = f"expected a string or tuple, not {type(pat).__name__}"
raise TypeError(msg)
result = self._data.array._str_startswith(pat, na=na)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def endswith(
self, pat: str | tuple[str, ...], na: Scalar | lib.NoDefault = lib.no_default
) -> Series | Index:
"""
Test if the end of each string element matches a pattern.
Equivalent to :meth:`str.endswith`.
Parameters
----------
pat : str or tuple[str, ...]
Character sequence or tuple of strings. Regular expressions are not
accepted.
na : scalar, optional
Object shown if element tested is not a string. The default depends
on dtype of the array. For the ``"str"`` dtype, ``False`` is used.
For object dtype, ``numpy.nan`` is used. For the nullable
``StringDtype``, ``pandas.NA`` is used.
Returns
-------
Series or Index of bool
A Series of booleans indicating whether the given pattern matches
the end of each string element.
See Also
--------
str.endswith : Python standard library string method.
Series.str.startswith : Same as endswith, but tests the start of string.
Series.str.contains : Tests if string element contains a pattern.
Examples
--------
>>> s = pd.Series(["bat", "bear", "caT", np.nan])
>>> s
0 bat
1 bear
2 caT
3 NaN
dtype: str
>>> s.str.endswith("t")
0 True
1 False
2 False
3 False
dtype: bool
>>> s.str.endswith(("t", "T"))
0 True
1 False
2 True
3 False
dtype: bool
"""
if not isinstance(pat, (str, tuple)):
msg = f"expected a string or tuple, not {type(pat).__name__}"
raise TypeError(msg)
result = self._data.array._str_endswith(pat, na=na)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def findall(self, pat, flags: int = 0):
"""
Find all occurrences of pattern or regular expression in the Series/Index.
Equivalent to applying :func:`re.findall` to all the elements in the
Series/Index.
Parameters
----------
pat : str
Pattern or regular expression.
flags : int, default 0
Flags from ``re`` module, e.g. `re.IGNORECASE` (default is 0, which
means no flags).
Returns
-------
Series/Index of lists of strings
All non-overlapping matches of pattern or regular expression in each
string of this Series/Index.
See Also
--------
count : Count occurrences of pattern or regular expression in each string
of the Series/Index.
extractall : For each string in the Series, extract groups from all matches
of regular expression and return a DataFrame with one row for each
match and one column for each group.
re.findall : The equivalent ``re`` function to all non-overlapping matches
of pattern or regular expression in string, as a list of strings.
Examples
--------
>>> s = pd.Series(["Lion", "Monkey", "Rabbit"])
The search for the pattern 'Monkey' returns one match:
>>> s.str.findall("Monkey")
0 []
1 [Monkey]
2 []
dtype: object
On the other hand, the search for the pattern 'MONKEY' doesn't return any
match:
>>> s.str.findall("MONKEY")
0 []
1 []
2 []
dtype: object
Flags can be added to the pattern or regular expression. For instance,
to find the pattern 'MONKEY' ignoring the case:
>>> import re
>>> s.str.findall("MONKEY", flags=re.IGNORECASE)
0 []
1 [Monkey]
2 []
dtype: object
When the pattern matches more than one string in the Series, all matches
are returned:
>>> s.str.findall("on")
0 [on]
1 [on]
2 []
dtype: object
Regular expressions are supported too. For instance, the search for all the
strings ending with the word 'on' is shown next:
>>> s.str.findall("on$")
0 [on]
1 []
2 []
dtype: object
If the pattern is found more than once in the same string, then a list of
multiple strings is returned:
>>> s.str.findall("b")
0 []
1 []
2 [b, b]
dtype: object
"""
result = self._data.array._str_findall(pat, flags)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def extract(
self, pat: str, flags: int = 0, expand: bool = True
) -> DataFrame | Series | Index:
r"""
Extract capture groups in the regex `pat` as columns in a DataFrame.
For each subject string in the Series, extract groups from the
first match of regular expression `pat`.
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
Flags from the ``re`` module, e.g. ``re.IGNORECASE``, that
modify regular expression matching for things like case,
spaces, etc. For more details, see :mod:`re`.
expand : bool, default True
If True, return DataFrame with one column per capture group.
If False, return a Series/Index if there is one capture group
or DataFrame if there are multiple capture groups.
Returns
-------
DataFrame or Series or Index
A DataFrame with one row for each subject string, and one
column for each group. Any capture group names in regular
expression pat will be used for column names; otherwise
capture group numbers will be used. The dtype of each result
column is always object, even when no match is found. If
``expand=False`` and pat has only one capture group, then
return a Series (if subject is a Series) or Index (if subject
is an Index).
See Also
--------
extractall : Returns all matches (not just the first match).
Examples
--------
A pattern with two groups will return a DataFrame with two columns.
Non-matches will be NaN.
>>> s = pd.Series(["a1", "b2", "c3"])
>>> s.str.extract(r"([ab])(\d)")
0 1
0 a 1
1 b 2
2 NaN NaN
A pattern may contain optional groups.
>>> s.str.extract(r"([ab])?(\d)")
0 1
0 a 1
1 b 2
2 NaN 3
Named groups will become column names in the result.
>>> s.str.extract(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
0 a 1
1 b 2
2 NaN NaN
A pattern with one group will return a DataFrame with one column
if expand=True.
>>> s.str.extract(r"[ab](\d)", expand=True)
0
0 1
1 2
2 NaN
A pattern with one group will return a Series if expand=False.
>>> s.str.extract(r"[ab](\d)", expand=False)
0 1
1 2
2 NaN
dtype: str
"""
from pandas import DataFrame
if not isinstance(expand, bool):
raise ValueError("expand must be True or False")
regex = re.compile(pat, flags=flags)
if regex.groups == 0:
raise ValueError("pattern contains no capture groups")
if not expand and regex.groups > 1 and isinstance(self._data, ABCIndex):
raise ValueError("only one regex group is supported with Index")
obj = self._data
result_dtype = _result_dtype(obj)
returns_df = regex.groups > 1 or expand
if returns_df:
name = None
columns = _get_group_names(regex)
if obj.array.size == 0:
result = DataFrame(columns=columns, dtype=result_dtype)
else:
result_list = self._data.array._str_extract(
pat, flags=flags, expand=returns_df
)
result_index: Index | None
if isinstance(obj, ABCSeries):
result_index = obj.index
else:
result_index = None
result = DataFrame(
result_list, columns=columns, index=result_index, dtype=result_dtype
)
else:
name = _get_single_group_name(regex)
result = self._data.array._str_extract(pat, flags=flags, expand=returns_df)
return self._wrap_result(result, name=name, dtype=result_dtype)
@forbid_nonstring_types(["bytes"])
def extractall(self, pat, flags: int = 0) -> DataFrame:
r"""
Extract capture groups in the regex `pat` as columns in DataFrame.
For each subject string in the Series, extract groups from all
matches of regular expression pat. When each subject string in the
Series has exactly one match, extractall(pat).xs(0, level='match')
is the same as extract(pat).
Parameters
----------
pat : str
Regular expression pattern with capturing groups.
flags : int, default 0 (no flags)
A ``re`` module flag, for example ``re.IGNORECASE``. These allow
to modify regular expression matching for things like case, spaces,
etc. Multiple flags can be combined with the bitwise OR operator,
for example ``re.IGNORECASE | re.MULTILINE``.
Returns
-------
DataFrame
A ``DataFrame`` with one row for each match, and one column for each
group. Its rows have a ``MultiIndex`` with first levels that come from
the subject ``Series``. The last level is named 'match' and indexes the
matches in each item of the ``Series``. Any capture group names in
regular expression pat will be used for column names; otherwise capture
group numbers will be used.
See Also
--------
extract : Returns first match only (not all matches).
Examples
--------
A pattern with one group will return a DataFrame with one column.
Indices with no matches will not appear in the result.
>>> s = pd.Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
>>> s.str.extractall(r"[ab](\d)")
0
match
A 0 1
1 2
B 0 1
Capture group names are used for column names of the result.
>>> s.str.extractall(r"[ab](?P<digit>\d)")
digit
match
A 0 1
1 2
B 0 1
A pattern with two groups will return a DataFrame with two columns.
>>> s.str.extractall(r"(?P<letter>[ab])(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
Optional groups that do not match are NaN in the result.
>>> s.str.extractall(r"(?P<letter>[ab])?(?P<digit>\d)")
letter digit
match
A 0 a 1
1 a 2
B 0 b 1
C 0 NaN 1
"""
# TODO: dispatch
return str_extractall(self._orig, pat, flags)
_shared_docs["find"] = """
Return %(side)s indexes in each strings in the Series/Index.
Each of returned indexes corresponds to the position where the
substring is fully contained between [start:end]. Return -1 on
failure. Equivalent to standard :meth:`str.%(method)s`.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of int.
A Series (if the input is a Series) or an Index (if the input is an
Index) of the %(side)s indexes corresponding to the positions where the
substring is found in each string of the input.
See Also
--------
%(also)s
Examples
--------
For Series.str.find:
>>> ser = pd.Series(["_cow_", "duck_", "do_v_e"])
>>> ser.str.find("_")
0 0
1 4
2 2
dtype: int64
For Series.str.rfind:
>>> ser = pd.Series(["_cow_", "duck_", "do_v_e"])
>>> ser.str.rfind("_")
0 4
1 4
2 4
dtype: int64
"""
@Appender(
_shared_docs["find"]
% {
"side": "lowest",
"method": "find",
"also": "rfind : Return highest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def find(self, sub, start: int = 0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_find(sub, start, end)
return self._wrap_result(result, returns_string=False)
@Appender(
_shared_docs["find"]
% {
"side": "highest",
"method": "rfind",
"also": "find : Return lowest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def rfind(self, sub, start: int = 0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_rfind(sub, start=start, end=end)
return self._wrap_result(result, returns_string=False)
@forbid_nonstring_types(["bytes"])
def normalize(self, form):
"""
Return the Unicode normal form for the strings in the Series/Index.
For more information on the forms, see the
:func:`unicodedata.normalize`.
Parameters
----------
form : {'NFC', 'NFKC', 'NFD', 'NFKD'}
Unicode form.
Returns
-------
Series/Index of objects
A Series or Index of strings in the same Unicode form specified by `form`.
The returned object retains the same type as the input (Series or Index),
and contains the normalized strings.
See Also
--------
Series.str.upper : Convert all characters in each string to uppercase.
Series.str.lower : Convert all characters in each string to lowercase.
Series.str.title : Convert each string to title case (capitalizing the
first letter of each word).
Series.str.strip : Remove leading and trailing whitespace from each string.
Series.str.replace : Replace occurrences of a substring with another substring
in each string.
Examples
--------
>>> ser = pd.Series(["ñ"])
>>> ser.str.normalize("NFC") == ser.str.normalize("NFD")
0 False
dtype: bool
"""
result = self._data.array._str_normalize(form)
return self._wrap_result(result)
_shared_docs["index"] = """
Return %(side)s indexes in each string in Series/Index.
Each of the returned indexes corresponds to the position where the
substring is fully contained between [start:end]. This is the same
as ``str.%(similar)s`` except instead of returning -1, it raises a
ValueError when the substring is not found. Equivalent to standard
``str.%(method)s``.
Parameters
----------
sub : str
Substring being searched.
start : int
Left edge index.
end : int
Right edge index.
Returns
-------
Series or Index of object
Returns a Series or an Index of the %(side)s indexes
in each string of the input.
See Also
--------
%(also)s
Examples
--------
For Series.str.index:
>>> ser = pd.Series(["horse", "eagle", "donkey"])
>>> ser.str.index("e")
0 4
1 0
2 4
dtype: int64
For Series.str.rindex:
>>> ser = pd.Series(["Deer", "eagle", "Sheep"])
>>> ser.str.rindex("e")
0 2
1 4
2 3
dtype: int64
"""
@Appender(
_shared_docs["index"]
% {
"side": "lowest",
"similar": "find",
"method": "index",
"also": "rindex : Return highest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def index(self, sub, start: int = 0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_index(sub, start=start, end=end)
return self._wrap_result(result, returns_string=False)
@Appender(
_shared_docs["index"]
% {
"side": "highest",
"similar": "rfind",
"method": "rindex",
"also": "index : Return lowest indexes in each strings.",
}
)
@forbid_nonstring_types(["bytes"])
def rindex(self, sub, start: int = 0, end=None):
if not isinstance(sub, str):
msg = f"expected a string object, not {type(sub).__name__}"
raise TypeError(msg)
result = self._data.array._str_rindex(sub, start=start, end=end)
return self._wrap_result(result, returns_string=False)
def len(self):
"""
Compute the length of each element in the Series/Index.
The element may be a sequence (such as a string, tuple or list) or a collection
(such as a dictionary).
Returns
-------
Series or Index of int
A Series or Index of integer values indicating the length of each
element in the Series or Index.
See Also
--------
str.len : Python built-in function returning the length of an object.
Series.size : Returns the length of the Series.
Examples
--------
Returns the length (number of characters) in a string. Returns the
number of entries for dictionaries, lists or tuples.
>>> s = pd.Series(
... ["dog", "", 5, {"foo": "bar"}, [2, 3, 5, 7], ("one", "two", "three")]
... )
>>> s
0 dog
1
2 5
3 {'foo': 'bar'}
4 [2, 3, 5, 7]
5 (one, two, three)
dtype: object
>>> s.str.len()
0 3.0
1 0.0
2 NaN
3 1.0
4 4.0
5 3.0
dtype: float64
"""
result = self._data.array._str_len()
return self._wrap_result(result, returns_string=False)
_shared_docs["casemethods"] = """
Convert strings in the Series/Index to %(type)s.
%(version)s
Equivalent to :meth:`str.%(method)s`.
Returns
-------
Series or Index of objects
A Series or Index where the strings are modified by :meth:`str.%(method)s`.
See Also
--------
Series.str.lower : Converts all characters to lowercase.
Series.str.upper : Converts all characters to uppercase.
Series.str.title : Converts first character of each word to uppercase and
remaining to lowercase.
Series.str.capitalize : Converts first character to uppercase and
remaining to lowercase.
Series.str.swapcase : Converts uppercase to lowercase and lowercase to
uppercase.
Series.str.casefold: Removes all case distinctions in the string.
Examples
--------
>>> s = pd.Series(['lower', 'CAPITALS', 'this is a sentence', 'SwApCaSe'])
>>> s
0 lower
1 CAPITALS
2 this is a sentence
3 SwApCaSe
dtype: str
>>> s.str.lower()
0 lower
1 capitals
2 this is a sentence
3 swapcase
dtype: str
>>> s.str.upper()
0 LOWER
1 CAPITALS
2 THIS IS A SENTENCE
3 SWAPCASE
dtype: str
>>> s.str.title()
0 Lower
1 Capitals
2 This Is A Sentence
3 Swapcase
dtype: str
>>> s.str.capitalize()
0 Lower
1 Capitals
2 This is a sentence
3 Swapcase
dtype: str
>>> s.str.swapcase()
0 LOWER
1 capitals
2 THIS IS A SENTENCE
3 sWaPcAsE
dtype: str
"""
# Types:
# cases:
# upper, lower, title, capitalize, swapcase, casefold
# boolean:
# isalpha, isnumeric isalnum isdigit isdecimal isspace islower
# isupper istitle isascii
# _doc_args holds dict of strings to use in substituting casemethod docs
_doc_args: dict[str, dict[str, str]] = {}
_doc_args["lower"] = {"type": "lowercase", "method": "lower", "version": ""}
_doc_args["upper"] = {"type": "uppercase", "method": "upper", "version": ""}
_doc_args["title"] = {"type": "titlecase", "method": "title", "version": ""}
_doc_args["capitalize"] = {
"type": "be capitalized",
"method": "capitalize",
"version": "",
}
_doc_args["swapcase"] = {
"type": "be swapcased",
"method": "swapcase",
"version": "",
}
_doc_args["casefold"] = {
"type": "be casefolded",
"method": "casefold",
"version": "",
}
@Appender(_shared_docs["casemethods"] % _doc_args["lower"])
@forbid_nonstring_types(["bytes"])
def lower(self):
    # Delegate to the backing array's lowercase kernel, then rewrap as a
    # Series/Index matching the caller.
    return self._wrap_result(self._data.array._str_lower())
@Appender(_shared_docs["casemethods"] % _doc_args["upper"])
@forbid_nonstring_types(["bytes"])
def upper(self):
    # Delegate to the backing array's uppercase kernel, then rewrap as a
    # Series/Index matching the caller.
    return self._wrap_result(self._data.array._str_upper())
@Appender(_shared_docs["casemethods"] % _doc_args["title"])
@forbid_nonstring_types(["bytes"])
def title(self):
    # Delegate to the backing array's titlecase kernel, then rewrap as a
    # Series/Index matching the caller.
    return self._wrap_result(self._data.array._str_title())
@Appender(_shared_docs["casemethods"] % _doc_args["capitalize"])
@forbid_nonstring_types(["bytes"])
def capitalize(self):
    # Delegate to the backing array's capitalize kernel, then rewrap as a
    # Series/Index matching the caller.
    return self._wrap_result(self._data.array._str_capitalize())
@Appender(_shared_docs["casemethods"] % _doc_args["swapcase"])
@forbid_nonstring_types(["bytes"])
def swapcase(self):
    # Delegate to the backing array's swapcase kernel, then rewrap as a
    # Series/Index matching the caller.
    return self._wrap_result(self._data.array._str_swapcase())
@Appender(_shared_docs["casemethods"] % _doc_args["casefold"])
@forbid_nonstring_types(["bytes"])
def casefold(self):
    # Delegate to the backing array's casefold kernel, then rewrap as a
    # Series/Index matching the caller.
    return self._wrap_result(self._data.array._str_casefold())
# Shared numpydoc skeleton for the str.is*() boolean methods; the
# %(type)s / %(method)s placeholders are filled from _doc_args below.
_shared_docs["ismethods"] = """
Check whether all characters in each string are %(type)s.
This is equivalent to running the Python string method
:meth:`str.%(method)s` for each element of the Series/Index. If a string
has zero characters, ``False`` is returned for that check.
Returns
-------
Series or Index of bool
Series or Index of boolean values with the same length as the original
Series/Index.
"""
# Per-method "See Also"/"Examples" section appended to the shared skeleton.
_shared_docs["isalpha"] = """
See Also
--------
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isascii : Check whether all characters are ascii.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalpha()
0 True
1 False
2 False
3 False
dtype: bool
"""
# "See Also"/"Examples" section for str.isnumeric(), appended to the
# shared "ismethods" skeleton. Fix: the cross-reference pointed at
# nonexistent ``str.is_numeric``; the Python method is ``str.isnumeric``.
_shared_docs["isnumeric"] = """
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isascii : Check whether all characters are ascii.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
The ``s.str.isnumeric`` method is the same as ``s3.str.isdigit`` but
also includes other characters that can represent quantities such as
unicode fractions.
>>> s1 = pd.Series(['one', 'one1', '1', '', '³', '⅕'])
>>> s1.str.isnumeric()
0 False
1 False
2 True
3 False
4 True
5 True
dtype: bool
For a string to be considered numeric, all its characters must have a Unicode
numeric property matching :py:meth:`str.isnumeric`. As a consequence,
the following cases are **not** recognized as numeric:
- **Decimal numbers** (e.g., "1.1"): due to period ``"."``
- **Negative numbers** (e.g., "-5"): due to minus sign ``"-"``
- **Scientific notation** (e.g., "1e3"): due to characters like ``"e"``
>>> s2 = pd.Series(["1.1", "-5", "1e3"])
>>> s2.str.isnumeric()
0 False
1 False
2 False
dtype: bool
"""
# Per-method "See Also"/"Examples" sections; each is appended to the
# shared "ismethods" skeleton when building the str.is*() docstrings below.
_shared_docs["isalnum"] = """
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isascii : Check whether all characters are ascii.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
>>> s1 = pd.Series(['one', 'one1', '1', ''])
>>> s1.str.isalnum()
0 True
1 True
2 True
3 False
dtype: bool
Note that checks against characters mixed with any additional punctuation
or whitespace will evaluate to false for an alphanumeric check.
>>> s2 = pd.Series(['A B', '1.5', '3,000'])
>>> s2.str.isalnum()
0 False
1 False
2 False
dtype: bool
"""
_shared_docs["isdecimal"] = """
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isascii : Check whether all characters are ascii.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
The ``s3.str.isdecimal`` method checks for characters used to form
numbers in base 10.
>>> s3 = pd.Series(['23', '³', '⅕', ''])
>>> s3.str.isdecimal()
0 True
1 False
2 False
3 False
dtype: bool
"""
_shared_docs["isdigit"] = """
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isascii : Check whether all characters are ascii.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Notes
-----
Similar to ``str.isdecimal`` but also includes special digits, like
superscripted and subscripted digits in unicode.
The exact behavior of this method, i.e. which unicode characters are
considered as digits, depends on the backend used for string operations,
and there can be small differences.
For example, Python considers the ³ superscript character as a digit, but
not the ⅕ fraction character, while PyArrow considers both as digits. For
simple (ascii) decimal numbers, the behaviour is consistent.
Examples
--------
>>> s3 = pd.Series(['23', '³', '⅕', ''])
>>> s3.str.isdigit()
0 True
1 True
2 True
3 False
dtype: bool
"""
_shared_docs["isspace"] = """
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isascii : Check whether all characters are ascii.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
>>> s4 = pd.Series([' ', '\\t\\r\\n ', ''])
>>> s4.str.isspace()
0 True
1 True
2 False
dtype: bool
"""
_shared_docs["islower"] = """
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.isascii : Check whether all characters are ascii.
Series.str.isupper : Check whether all characters are uppercase.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.islower()
0 True
1 False
2 False
3 False
dtype: bool
"""
_shared_docs["isupper"] = """
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isascii : Check whether all characters are ascii.
Series.str.istitle : Check whether all characters are titlecase.
Examples
--------
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.isupper()
0 False
1 False
2 True
3 False
dtype: bool
"""
_shared_docs["istitle"] = """
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.isascii : Check whether all characters are ascii.
Series.str.isupper : Check whether all characters are uppercase.
Examples
--------
The ``s5.str.istitle`` method checks for whether all words are in title
case (whether only the first letter of each word is capitalized). Words are
assumed to be as any sequence of non-numeric characters separated by
whitespace characters.
>>> s5 = pd.Series(['leopard', 'Golden Eagle', 'SNAKE', ''])
>>> s5.str.istitle()
0 False
1 True
2 False
3 False
dtype: bool
"""
# "See Also"/"Examples" section for str.isascii(), appended to the shared
# "ismethods" skeleton. Fix: the "Examples" underline was 12 dashes; a
# numpydoc section underline must match the heading length (8 dashes),
# otherwise the section is not parsed/rendered correctly.
_shared_docs["isascii"] = """
See Also
--------
Series.str.isalpha : Check whether all characters are alphabetic.
Series.str.isnumeric : Check whether all characters are numeric.
Series.str.isalnum : Check whether all characters are alphanumeric.
Series.str.isdigit : Check whether all characters are digits.
Series.str.isdecimal : Check whether all characters are decimal.
Series.str.isspace : Check whether all characters are whitespace.
Series.str.islower : Check whether all characters are lowercase.
Series.str.istitle : Check whether all characters are titlecase.
Series.str.isupper : Check whether all characters are uppercase.
Examples
--------
The ``s5.str.isascii`` method checks for whether all characters are ascii
characters, which includes digits 0-9, capital and lowercase letters A-Z,
and some other special characters.
>>> s5 = pd.Series(['ö', 'see123', 'hello world', ''])
>>> s5.str.isascii()
0 False
1 True
2 True
3 True
dtype: bool
"""
# Substitutions for the %(type)s / %(method)s placeholders in the shared
# "ismethods" docstring skeleton, one entry per str.is*() method.
_doc_args["isalnum"] = {"type": "alphanumeric", "method": "isalnum"}
_doc_args["isalpha"] = {"type": "alphabetic", "method": "isalpha"}
_doc_args["isdigit"] = {"type": "digits", "method": "isdigit"}
_doc_args["isspace"] = {"type": "whitespace", "method": "isspace"}
_doc_args["islower"] = {"type": "lowercase", "method": "islower"}
_doc_args["isascii"] = {"type": "ascii", "method": "isascii"}
_doc_args["isupper"] = {"type": "uppercase", "method": "isupper"}
_doc_args["istitle"] = {"type": "titlecase", "method": "istitle"}
_doc_args["isnumeric"] = {"type": "numeric", "method": "isnumeric"}
_doc_args["isdecimal"] = {"type": "decimal", "method": "isdecimal"}
# force _noarg_wrapper return type with dtype=np.dtype(bool) (GH 29624)
# Each accessor method below is generated by _map_and_wrap from the
# corresponding backing-array kernel, with its docstring assembled from
# the shared skeleton plus the per-method section above.
isalnum = _map_and_wrap(
    "isalnum",
    docstring=_shared_docs["ismethods"] % _doc_args["isalnum"]
    + _shared_docs["isalnum"],
)
isalpha = _map_and_wrap(
    "isalpha",
    docstring=_shared_docs["ismethods"] % _doc_args["isalpha"]
    + _shared_docs["isalpha"],
)
isdigit = _map_and_wrap(
    "isdigit",
    docstring=_shared_docs["ismethods"] % _doc_args["isdigit"]
    + _shared_docs["isdigit"],
)
isspace = _map_and_wrap(
    "isspace",
    docstring=_shared_docs["ismethods"] % _doc_args["isspace"]
    + _shared_docs["isspace"],
)
islower = _map_and_wrap(
    "islower",
    docstring=_shared_docs["ismethods"] % _doc_args["islower"]
    + _shared_docs["islower"],
)
isascii = _map_and_wrap(
    "isascii",
    docstring=_shared_docs["ismethods"] % _doc_args["isascii"]
    + _shared_docs["isascii"],
)
isupper = _map_and_wrap(
    "isupper",
    docstring=_shared_docs["ismethods"] % _doc_args["isupper"]
    + _shared_docs["isupper"],
)
istitle = _map_and_wrap(
    "istitle",
    docstring=_shared_docs["ismethods"] % _doc_args["istitle"]
    + _shared_docs["istitle"],
)
isnumeric = _map_and_wrap(
    "isnumeric",
    docstring=_shared_docs["ismethods"] % _doc_args["isnumeric"]
    + _shared_docs["isnumeric"],
)
isdecimal = _map_and_wrap(
    "isdecimal",
    docstring=_shared_docs["ismethods"] % _doc_args["isdecimal"]
    + _shared_docs["isdecimal"],
)
def cat_safe(list_of_columns: list[npt.NDArray[np.object_]], sep: str):
    """
    Auxiliary function for :meth:`str.cat`.

    Same signature as cat_core, but handles TypeErrors in concatenation, which
    happen if the arrays in list_of_columns have the wrong dtypes or content.

    Parameters
    ----------
    list_of_columns : list of numpy arrays
        List of arrays to be concatenated with sep;
        these arrays may not contain NaNs!
    sep : string
        The separator string for concatenating the columns.

    Returns
    -------
    nd.array
        The concatenation of list_of_columns with sep.

    Raises
    ------
    TypeError
        If any column contains non-string (and non-missing) values.
    """
    try:
        return cat_core(list_of_columns, sep)
    except TypeError:
        # if there are any non-string values (wrong dtype or hidden behind
        # object dtype), np.sum will fail; catch and raise a better message
        for column in list_of_columns:
            dtype = lib.infer_dtype(column, skipna=True)
            if dtype not in ["string", "empty"]:
                raise TypeError(
                    "Concatenation requires list-likes containing only "
                    "strings (or missing values). Offending values found in "
                    f"column {dtype}"
                ) from None
        # No offending column identified. Previously control fell through to
        # ``return result`` with ``result`` unbound, masking the real error
        # with an UnboundLocalError; re-raise the original TypeError instead.
        raise
def cat_core(list_of_columns: list, sep: str):
    """
    Auxiliary function for :meth:`str.cat`.

    Parameters
    ----------
    list_of_columns : list of numpy arrays
        List of arrays to be concatenated with sep;
        these arrays may not contain NaNs!
    sep : string
        The separator string for concatenating the columns.

    Returns
    -------
    nd.array
        The concatenation of list_of_columns with sep.
    """
    if sep == "":
        # Nothing to interleave: reduce the columns directly with ``+``
        # (element-wise string concatenation via np.sum over axis 0).
        stacked = np.asarray(list_of_columns, dtype=object)
        return np.sum(stacked, axis=0)
    # Interleave the separator between consecutive columns, then reduce the
    # (2n - 1)-row object array with ``+`` along axis 0.
    interleaved = [sep] * (2 * len(list_of_columns) - 1)
    interleaved[::2] = list_of_columns
    stacked = np.asarray(interleaved, dtype=object)
    return np.sum(stacked, axis=0)
def _result_dtype(arr):
# workaround #27953
# ideally we just pass `dtype=arr.dtype` unconditionally, but this fails
# when the list of values is empty.
from pandas.core.arrays.string_ import StringDtype
if isinstance(arr.dtype, (ArrowDtype, StringDtype)):
return arr.dtype
return object
def _get_single_group_name(regex: re.Pattern) -> Hashable:
if regex.groupindex:
return next(iter(regex.groupindex))
else:
return None
def _get_group_names(regex: re.Pattern) -> list[Hashable] | range:
"""
Get named groups from compiled regex.
Unnamed groups are numbered.
Parameters
----------
regex : compiled regex
Returns
-------
list of column labels
"""
rng = range(regex.groups)
names = {v: k for k, v in regex.groupindex.items()}
if not names:
return rng
result: list[Hashable] = [names.get(1 + i, i) for i in rng]
arr = np.array(result)
if arr.dtype.kind == "i" and lib.is_range_indexer(arr, len(arr)):
return rng
return result
def str_extractall(arr, pat, flags: int = 0) -> DataFrame:
    """
    Extract all matches of capture groups in ``pat`` from each string.

    Parameters
    ----------
    arr : Series or Index
        The strings to search; an Index is converted to a Series first.
    pat : str
        Regular expression pattern; must contain at least one capture group.
    flags : int, default 0
        Flags passed to :func:`re.compile` (e.g. ``re.IGNORECASE``).

    Returns
    -------
    DataFrame
        One row per match, one column per capture group, indexed by the
        original index plus an extra ``match`` level numbering the matches
        within each subject string.

    Raises
    ------
    ValueError
        If ``pat`` contains no capture groups.
    """
    regex = re.compile(pat, flags=flags)
    # the regex must contain capture groups.
    if regex.groups == 0:
        raise ValueError("pattern contains no capture groups")
    if isinstance(arr, ABCIndex):
        # Normalize an Index to a Series with a default RangeIndex so that
        # .items() below yields (position, value) pairs.
        arr = arr.to_series().reset_index(drop=True).astype(arr.dtype)
    columns = _get_group_names(regex)
    match_list = []
    index_list = []
    is_mi = arr.index.nlevels > 1
    for subject_key, subject in arr.items():
        # Non-string entries (e.g. NaN) are skipped entirely.
        if isinstance(subject, str):
            if not is_mi:
                # Wrap scalar keys in a tuple so they concatenate uniformly
                # with the match counter when building the result key.
                subject_key = (subject_key,)
            for match_i, match_tuple in enumerate(regex.findall(subject)):
                if isinstance(match_tuple, str):
                    # findall returns bare strings for single-group patterns.
                    match_tuple = (match_tuple,)
                # Empty-string captures represent non-participating groups
                # and become missing values.
                na_tuple = [np.nan if group == "" else group for group in match_tuple]
                match_list.append(na_tuple)
                result_key = tuple(subject_key + (match_i,))
                index_list.append(result_key)
    from pandas import MultiIndex

    index = MultiIndex.from_tuples(index_list, names=arr.index.names + ["match"])
    dtype = _result_dtype(arr)
    result = arr._constructor_expanddim(
        match_list, index=index, columns=columns, dtype=dtype
    )
    return result
| StringMethods |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.