language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | encode__django-rest-framework | tests/test_filters.py | {
"start": 32808,
"end": 35269
} | class ____(TestCase):
def setUp(self):
for idx in range(3):
username = {0: 'userA', 1: 'userB', 2: 'userC'}[idx]
password = {0: 'passA', 1: 'passC', 2: 'passB'}[idx]
SensitiveOrderingFilterModel(username=username, password=password).save()
def test_order_by_serializer_fields(self):
for serializer_cls in [
SensitiveDataSerializer1,
SensitiveDataSerializer2,
SensitiveDataSerializer3
]:
class OrderingListView(generics.ListAPIView):
queryset = SensitiveOrderingFilterModel.objects.all().order_by('username')
filter_backends = (filters.OrderingFilter,)
serializer_class = serializer_cls
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': '-username'})
response = view(request)
if serializer_cls == SensitiveDataSerializer3:
username_field = 'user'
else:
username_field = 'username'
# Note: Inverse username ordering correctly applied.
assert response.data == [
{'id': 3, username_field: 'userC'},
{'id': 2, username_field: 'userB'},
{'id': 1, username_field: 'userA'},
]
def test_cannot_order_by_non_serializer_fields(self):
for serializer_cls in [
SensitiveDataSerializer1,
SensitiveDataSerializer2,
SensitiveDataSerializer3
]:
class OrderingListView(generics.ListAPIView):
queryset = SensitiveOrderingFilterModel.objects.all().order_by('username')
filter_backends = (filters.OrderingFilter,)
serializer_class = serializer_cls
view = OrderingListView.as_view()
request = factory.get('/', {'ordering': 'password'})
response = view(request)
if serializer_cls == SensitiveDataSerializer3:
username_field = 'user'
else:
username_field = 'username'
# Note: The passwords are not in order. Default ordering is used.
assert response.data == [
{'id': 1, username_field: 'userA'}, # PassB
{'id': 2, username_field: 'userB'}, # PassC
{'id': 3, username_field: 'userC'}, # PassA
]
| SensitiveOrderingFilterTests |
python | getsentry__sentry | src/sentry/auth_v2/endpoints/base.py | {
"start": 170,
"end": 567
} | class ____(BasePermission):
def has_permission(self, request: Request, view: object) -> bool:
if settings.IS_DEV:
return True
# WARN: If the secret is not set on production, we must fail the request.
if not settings.AUTH_V2_SECRET:
return False
return request.META.get("HTTP_X_SENTRY_AUTH_V2") == settings.AUTH_V2_SECRET
| AuthV2Permission |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/padded_batch_test.py | {
"start": 16253,
"end": 17993
} | class ____(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations(),
combinations.combine(symbolic_checkpoint=[False, True])))
def test(self, verify_fn, symbolic_checkpoint):
def build_dataset(seq_lens):
dataset = dataset_ops.Dataset.from_tensor_slices(seq_lens).map(
lambda x: array_ops.fill([x], x)).padded_batch(
batch_size=4, padded_shapes=[-1])
options = options_lib.Options()
options.experimental_symbolic_checkpoint = symbolic_checkpoint
dataset = dataset.with_options(options)
return dataset
seq_lens = np.random.randint(1, 20, size=(32,)).astype(np.int32)
verify_fn(self, lambda: build_dataset(seq_lens), num_outputs=8)
@combinations.generate(
combinations.times(test_base.default_test_combinations(),
checkpoint_test_base.default_test_combinations()))
def testNonDefaultPadding(self, verify_fn):
def build_dataset(seq_lens):
def fill_tuple(x):
filled = array_ops.fill([x], x)
return (filled, string_ops.as_string(filled))
padded_shape = [-1]
return dataset_ops.Dataset.from_tensor_slices(seq_lens).map(
fill_tuple).padded_batch(
batch_size=4,
padded_shapes=(padded_shape, padded_shape),
padding_values=(-1, '<end>'))
seq_lens = np.random.randint(1, 20, size=(32,)).astype(np.int32)
verify_fn(self, lambda: build_dataset(seq_lens), num_outputs=8)
if __name__ == '__main__':
test.main()
| PaddedBatchCheckpointTest |
python | sphinx-doc__sphinx | tests/roots/test-ext-autosummary-ext/dummy_module.py | {
"start": 561,
"end": 1320
} | class ____:
"""My C class
with class_attr attribute
"""
#: This is a class attribute
#:
#: value is integer.
class_attr = 42
def __init__(self):
#: This is an instance attribute
#:
#: value is a string
self.instance_attr = '42'
def _prop_attr_get(self):
"""This is a function docstring
return value is string.
"""
return 'spam egg'
prop_attr1 = property(_prop_attr_get)
prop_attr2 = property(_prop_attr_get)
"""
This is a attribute docstring
value is string.
"""
class C2:
"""This is a nested inner class docstring"""
def func(arg_, *args, **kwargs):
"""Test function take an argument ended with underscore."""
| C |
python | rapidsai__cudf | python/cudf/cudf/core/accessors/base_accessor.py | {
"start": 351,
"end": 3328
} | class ____(NotIterable):
_parent: Series | Index
def __init__(self, parent: Series | Index):
self._parent = parent
self._column = self._parent._column
@overload
def _return_or_inplace(
self,
new_col,
inplace: Literal[True],
expand: bool = False,
retain_index: bool = True,
) -> None: ...
@overload
def _return_or_inplace(
self,
new_col,
inplace: Literal[False],
expand: bool = False,
retain_index: bool = True,
) -> Series | Index: ...
@overload
def _return_or_inplace(
self,
new_col,
expand: bool = False,
retain_index: bool = True,
) -> Series | Index: ...
@overload
def _return_or_inplace(
self,
new_col,
inplace: bool = False,
expand: bool = False,
retain_index: bool = True,
) -> Series | Index | None: ...
def _return_or_inplace(
self, new_col, inplace=False, expand=False, retain_index=True
):
"""
Returns an object of the type of the column owner or updates the column
of the owner (Series or Index) to mimic an inplace operation
"""
if inplace:
self._parent._mimic_inplace(
type(self._parent)._from_column(
new_col, name=self._parent.name
),
inplace=True,
)
return None
else:
if expand:
# This branch indicates the passed as new_col
# is a Table
table = new_col
if isinstance(self._parent, cudf.Index):
idx = self._parent._constructor_expanddim._from_data(table)
idx.names = None
return idx
else:
df = self._parent._constructor_expanddim._from_data(
data=table,
index=self._parent.index,
attrs=self._parent.attrs,
)
if len(table) == 0:
df._data.rangeindex = True
return df
elif isinstance(self._parent, cudf.Series):
return cudf.Series._from_column(
new_col,
name=self._parent.name,
index=self._parent.index if retain_index else None,
attrs=self._parent.attrs,
)
elif isinstance(self._parent, cudf.Index):
return cudf.Index._from_column(new_col, name=self._parent.name)
else:
return self._parent._mimic_inplace(new_col, inplace=False)
def __setattr__(self, key, value):
if key in {"_parent", "_column"}:
super().__setattr__(key, value)
else:
raise AttributeError(f"You cannot add any new attribute '{key}'")
| BaseAccessor |
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/load/test_dump.py | {
"start": 1186,
"end": 2230
} | class ____:
pass
def test_person(snapshot: Any) -> None:
p = Person(secret="parrot party") # noqa: S106
assert dumps(p, pretty=True) == snapshot
sp = SpecialPerson(another_secret="Wooo", secret="Hmm") # noqa: S106
assert dumps(sp, pretty=True) == snapshot
assert Person.lc_id() == ["tests", "unit_tests", "load", "test_dump", "Person"]
assert SpecialPerson.lc_id() == ["my", "special", "namespace", "SpecialPerson"]
def test_typeerror() -> None:
assert (
dumps({(1, 2): 3}) == "{"
'"lc": 1, '
'"type": "not_implemented", '
'"id": ["builtins", "dict"], '
'"repr": "{(1, 2): 3}"'
"}"
)
def test_person_with_kwargs(snapshot: Any) -> None:
person = Person(secret="parrot party") # noqa: S106
assert dumps(person, separators=(",", ":")) == snapshot
def test_person_with_invalid_kwargs() -> None:
person = Person(secret="parrot party") # noqa: S106
with pytest.raises(TypeError):
dumps(person, invalid_kwarg="hello")
| NotSerializable |
python | pyinstaller__pyinstaller | tests/unit/test_modulegraph/testpkg-edgedata/script_from_import.py | {
"start": 72,
"end": 1551
} | class ____:
from pkg import toplevel_class_existing
from pkg import toplevel_class_nonexisting
if a == b:
from pkg import toplevel_conditional_existing
from pkg import toplevel_conditional_nonexisting
try:
from pkg import toplevel_conditional_import_existing, toplevel_conditional_import_nonexisting
except:
from pkg import toplevel_conditional_import2_existing
from pkg import toplevel_conditional_import2_nonexisting
try:
from pkg import toplevel_import_existing, toplevel_import_nonexisting
except:
from pkg import toplevel_import2_existing
from pkg import toplevel_import2_nonexisting
def function():
from pkg import function_existing, function_nonexisting
class MyClass:
from pkg import function_class_existing, function_class_nonexisting
if a == b:
from pkg import function_conditional_existing
from pkg import function_conditional_nonexisting
try:
from pkg import function_conditional_import_existing
from pkg import function_conditional_import_nonexisting
except:
from pkg import function_conditional_import2_existing
from pkg import function_conditional_import2_nonexisting
try:
from pkg import function_import_existing
from pkg import function_import_nonexisting
except:
from pkg import function_import2_existing
from pkg import function_import2_nonexisting
| MyClass |
python | pdm-project__pdm | src/pdm/cli/utils.py | {
"start": 6368,
"end": 7049
} | class ____(ArgumentParser):
"""A subclass of argparse.ArgumentParser that raises
parsing error rather than exiting.
This does the same as passing exit_on_error=False on Python 3.9+
"""
def _parse_known_args(
self, arg_strings: list[str], namespace: argparse.Namespace, *args: Any, **kwargs: Any
) -> tuple[argparse.Namespace, list[str]]:
try:
return super()._parse_known_args(arg_strings, namespace, *args, **kwargs)
except argparse.ArgumentError as e:
# We raise a dedicated error to avoid being caught by the caller
raise PdmArgumentError(e) from e
@dc.dataclass(frozen=True)
| ErrorArgumentParser |
python | pypa__setuptools | setuptools/config/_validate_pyproject/fastjsonschema_exceptions.py | {
"start": 157,
"end": 1480
} | class ____(JsonSchemaException):
"""
Exception raised by validation function. Available properties:
* ``message`` containing human-readable information what is wrong (e.g. ``data.property[index] must be smaller than or equal to 42``),
* invalid ``value`` (e.g. ``60``),
* ``name`` of a path in the data structure (e.g. ``data.property[index]``),
* ``path`` as an array in the data structure (e.g. ``['data', 'property', 'index']``),
* the whole ``definition`` which the ``value`` has to fulfil (e.g. ``{'type': 'number', 'maximum': 42}``),
* ``rule`` which the ``value`` is breaking (e.g. ``maximum``)
* and ``rule_definition`` (e.g. ``42``).
.. versionchanged:: 2.14.0
Added all extra properties.
"""
def __init__(self, message, value=None, name=None, definition=None, rule=None):
super().__init__(message)
self.message = message
self.value = value
self.name = name
self.definition = definition
self.rule = rule
@property
def path(self):
return [item for item in SPLIT_RE.split(self.name) if item != '']
@property
def rule_definition(self):
if not self.rule or not self.definition:
return None
return self.definition.get(self.rule)
| JsonSchemaValueException |
python | pytorch__pytorch | torch/distributed/rpc/server_process_global_profiler.py | {
"start": 297,
"end": 8566
} | class ____(profile):
"""
It has the same API as ``torch.autograd.profiler.profile`` class,
except that it enables profiling on all threads running RPC server request callbacks.
Context manager that manages autograd profiler state and holds a summary of results.
Under the hood it just records events of functions being executed in C++ and
exposes those events to Python. You can wrap any code into it and it will
only report runtime of PyTorch functions.
Note: profiler is thread local and is automatically propagated into the async tasks
Args:
enabled (bool, optional): Setting this to False makes this context manager a no-op.
Default: ``True``.
use_cuda (bool, optional): Enables timing of CUDA events as well using the cudaEvent API.
Adds approximately 4us of overhead to each tensor operation.
Default: ``False``
record_shapes (bool, optional): If shapes recording is set, information
about input dimensions will be collected. This allows one to see which
dimensions have been used under the hood and further group by them
using prof.key_averages(group_by_input_shape=True). Please note that
shape recording might skew your profiling data. It is recommended to
use separate runs with and without shape recording to validate the timing.
Most likely the skew will be negligible for bottom most events (in a case
of nested function calls). But for higher level functions the total
self cpu time might be artificially increased because of the shape
collection.
profile_memory (bool, optional): Whether to report memory usage, default: ``False``
.. warning::
Enabling memory profiling incurs additional profiler overhead
.. warning::
Due to some CUDA multiprocessing limitations (see :ref:`multiprocessing-cuda-note`),
one cannot use the profiler with ``use_cuda = True`` to benchmark
DataLoaders with ``num_workers > 0``. If you wish to benchmark data loading,
please use ``use_cuda = False`` or ``num_workers = 0``.
Example:
>>> # xdoctest: +SKIP
>>> # On worker 0:
>>> import torch
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker0", rank=0, world_size=2)
>>> x, y = torch.tensor(1), torch.tensor(2)
>>> outer_profile_rref = rpc.remote(
... dst_worker_name, rpc._server_process_global_profile
... )
>>> outer_profile_rref.rpc_sync().__enter__()
>>> rpc.rpc_sync(dst_worker_name, torch.add, (x, y))
>>> inner_profile_rref = rpc.remote(
... dst_worker_name, rpc._server_process_global_profile
... )
>>> inner_profile_rref.rpc_sync().__enter__()
>>> rpc.rpc_sync(dst_worker_name, torch.sub, (x, y))
>>> inner_profile_rref.rpc_sync().__exit__(None, None, None)
>>> outer_profile_rref.rpc_sync().__exit__(None, None, None)
>>> print(inner_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 85.06% 76.275us 100.00% 89.667us 89.667us 1
empty 14.94% 13.392us 14.94% 13.392us 13.392us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 89.667us
>>> print(outer_profile_rref.rpc_sync().key_averages())
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Name Self CPU total % Self CPU total CPU total % CPU total CPU time avg Number of Calls
--------- --------------- --------------- --------------- --------------- --------------- ---------------
sub 35.65% 76.275us 41.91% 89.667us 89.667us 1
empty 12.67% 27.101us 12.67% 27.101us 13.551us 2
add 51.68% 110.550us 58.09% 124.259us 124.259us 1
--------- --------------- --------------- --------------- --------------- --------------- ---------------
Self CPU time total: 213.926us
>>> rpc.shutdown()
>>> # On worker 1:
>>> import torch.distributed.rpc as rpc
>>> rpc.init_rpc("worker1", rank=1, world_size=2)
>>> # wait for worker 0 to finish work, and then shutdown.
>>> rpc.shutdown()
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __enter__(self):
"""
Turn on server-side process-global profiling.
This enables thread-local profiler on all RPC threads running server-side request callbacks.
"""
if not self.enabled:
return
if self.entered: # type: ignore[has-type]
raise RuntimeError("autograd profiler traces are not reentrant")
self.entered = True
profiler_kind = (
torch.autograd.ProfilerState.CUDA
if self.use_cuda
else torch.autograd.ProfilerState.CPU
)
profiler_config = torch.autograd.ProfilerConfig(
profiler_kind,
self.record_shapes,
self.profile_memory,
False,
False,
False,
torch.profiler._ExperimentalConfig(),
)
_enable_server_process_global_profiler(profiler_config)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
Turn off server-side process-global profiling.
Aggregate all profiling events recorded by RPC threads.
These attributes are assigned on exiting context.
Attributes:
function_events (torch.autograd.profiler.EventList). It's a list that has helper
methods, like 1) show record items in a pretty-print table.
2) do averaging by grouping on keys. 3) and more.
process_global_function_events (List[torch.autograd.profiler.FunctionEvent]).
It's a list of ``FunctionEvent`` elements. Every element is a profiling result
of an RPC request handling within the profiling range.
"""
if not self.enabled:
return
process_global_events = _disable_server_process_global_profiler()
# Every element in this list is a thread profiling result from an RPC request handling.
process_global_function_events = []
for thread_local_events in process_global_events:
# Parse from ``Event``s to ``FunctionEvent``s.
thread_local_function_events = (
torch.autograd.profiler_legacy._parse_legacy_records(
thread_local_events
)
)
thread_local_function_events.sort(
key=lambda function_event: [
function_event.time_range.start,
-(function_event.time_range.end),
]
)
process_global_function_events.append(thread_local_function_events)
flattened_function_events = list(
itertools.chain.from_iterable(process_global_function_events)
)
# pyrefly: ignore [bad-assignment]
self.function_events = torch.autograd.profiler_util.EventList(
flattened_function_events,
use_device="cuda" if self.use_cuda else None,
profile_memory=self.profile_memory,
)
# pyrefly: ignore [missing-attribute]
self.function_events._build_tree()
self.process_global_function_events = process_global_function_events
return False
| _server_process_global_profile |
python | astropy__astropy | astropy/io/typing.py | {
"start": 700,
"end": 1155
} | class ____(Protocol[_T_co]):
"""A file-like object that supports reading with a method ``read``.
This is a :class:`~typing.Protocol` that can be used to annotate file-like
objects. It is also runtime-checkable and can be used with :func:`isinstance`.
See :func:`~typing.runtime_checkable` for more information about how runtime
checking with Protocols works.
"""
def read(self) -> _T_co: ...
@runtime_checkable
| ReadableFileLike |
python | django__django | tests/dates/models.py | {
"start": 315,
"end": 540
} | class ____(models.Model):
article = models.ForeignKey(Article, models.CASCADE, related_name="comments")
text = models.TextField()
pub_date = models.DateField()
approval_date = models.DateField(null=True)
| Comment |
python | getsentry__sentry | src/sentry/api/serializers/models/group.py | {
"start": 3737,
"end": 3913
} | class ____(TypedDict, total=False):
isUnhandled: bool
count: str
userCount: int
firstSeen: datetime | None
lastSeen: datetime | None
| BaseGroupResponseOptional |
python | getsentry__sentry | src/sentry/interfaces/http.py | {
"start": 1683,
"end": 5973
} | class ____(Interface):
"""
The Request information is stored in the Http interface. Two arguments
are required: ``url`` and ``method``.
The ``env`` variable is a compounded dictionary of HTTP headers as well
as environment information passed from the webserver. Sentry will explicitly
look for ``REMOTE_ADDR`` in ``env`` for things which require an IP address.
The ``data`` variable should only contain the request body (not the query
string). It can either be a dictionary (for standard HTTP requests) or a
raw request body.
>>> {
>>> "url": "http://absolute.uri/foo",
>>> "method": "POST",
>>> "data": "foo=bar",
>>> "query_string": "hello=world",
>>> "cookies": "foo=bar",
>>> "headers": [
>>> ["Content-Type", "text/html"]
>>> ],
>>> "env": {
>>> "REMOTE_ADDR": "192.168.0.1"
>>> }
>>> }
.. note:: This interface can be passed as the 'request' key in addition
to the full interface path.
"""
display_score = 1000
score = 800
path = "request"
FORM_TYPE = "application/x-www-form-urlencoded"
@classmethod
def to_python(cls, data, **kwargs):
data.setdefault("query_string", [])
for key in (
"api_target",
"method",
"url",
"fragment",
"cookies",
"headers",
"data",
"env",
"inferred_content_type",
):
data.setdefault(key, None)
return super().to_python(data, **kwargs)
def to_json(self):
return prune_empty_keys(
{
"apiTarget": self.api_target,
"method": self.method,
"url": self.url,
"query_string": self.query_string or None,
"fragment": self.fragment or None,
"cookies": self.cookies or None,
"headers": self.headers or None,
"data": self.data,
"env": self.env or None,
"inferred_content_type": self.inferred_content_type,
}
)
@property
def full_url(self):
url = self.url
if url:
if self.query_string:
url = url + "?" + safe_urlencode(get_path(self.query_string, filter=True))
if self.fragment:
url = url + "#" + self.fragment
return url
def to_email_html(self, event, **kwargs):
return render_to_string(
"sentry/partial/interfaces/http_email.html",
{
"event": event,
"url": self.full_url,
"short_url": self.url,
"method": self.method,
"query_string": safe_urlencode(get_path(self.query_string, filter=True)),
"fragment": self.fragment,
},
)
def get_title(self):
return _("Request")
def get_api_context(self, is_public=False, platform=None):
if is_public:
return {}
cookies = self.cookies or ()
if isinstance(cookies, dict):
cookies = sorted(self.cookies.items())
headers = self.headers or ()
if isinstance(headers, dict):
headers = sorted(self.headers.items())
data = {
"apiTarget": self.api_target,
"method": self.method,
"url": self.url,
"query": self.query_string,
"fragment": self.fragment,
"data": self.data,
"headers": headers,
"cookies": cookies,
"env": self.env or None,
"inferredContentType": self.inferred_content_type,
}
return data
def get_api_meta(self, meta, is_public=False, platform=None):
if is_public:
return None
return {
"": meta.get(""),
"apiTarget": meta.get("api_target"),
"method": meta.get("method"),
"url": meta.get("url"),
"query": meta.get("query_string"),
"data": meta.get("data"),
"headers": meta.get("headers"),
"cookies": meta.get("cookies"),
"env": meta.get("env"),
}
| Http |
python | python__mypy | mypy/nodes.py | {
"start": 87401,
"end": 88443
} | class ____(Expression):
"""Type application expr[type, ...]"""
__slots__ = ("expr", "types")
__match_args__ = ("expr", "types")
expr: Expression
types: list[mypy.types.Type]
def __init__(self, expr: Expression, types: list[mypy.types.Type]) -> None:
super().__init__()
self.expr = expr
self.types = types
def accept(self, visitor: ExpressionVisitor[T]) -> T:
return visitor.visit_type_application(self)
# Variance of a type variable. For example, T in the definition of
# List[T] is invariant, so List[int] is not a subtype of List[object],
# and also List[object] is not a subtype of List[int].
#
# The T in Iterable[T] is covariant, so Iterable[int] is a subtype of
# Iterable[object], but not vice versa.
#
# If T is contravariant in Foo[T], Foo[object] is a subtype of
# Foo[int], but not vice versa.
INVARIANT: Final = 0
COVARIANT: Final = 1
CONTRAVARIANT: Final = 2
VARIANCE_NOT_READY: Final = 3 # Variance hasn't been inferred (using Python 3.12 syntax)
| TypeApplication |
python | pydata__xarray | asv_bench/benchmarks/dataset_io.py | {
"start": 410,
"end": 2726
} | class ____:
"""
A few examples that benchmark reading/writing a single netCDF file with
xarray
"""
timeout = 300.0
repeat = 1
number = 5
def make_ds(self):
# single Dataset
self.ds = xr.Dataset()
self.nt = 1000
self.nx = 90
self.ny = 45
self.block_chunks = {
"time": self.nt / 4,
"lon": self.nx / 3,
"lat": self.ny / 3,
}
self.time_chunks = {"time": int(self.nt / 36)}
times = pd.date_range("1970-01-01", periods=self.nt, freq="D")
lons = xr.DataArray(
np.linspace(0, 360, self.nx),
dims=("lon",),
attrs={"units": "degrees east", "long_name": "longitude"},
)
lats = xr.DataArray(
np.linspace(-90, 90, self.ny),
dims=("lat",),
attrs={"units": "degrees north", "long_name": "latitude"},
)
self.ds["foo"] = xr.DataArray(
randn((self.nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="foo",
attrs={"units": "foo units", "description": "a description"},
)
self.ds["bar"] = xr.DataArray(
randn((self.nt, self.nx, self.ny), frac_nan=0.2),
coords={"lon": lons, "lat": lats, "time": times},
dims=("time", "lon", "lat"),
name="bar",
attrs={"units": "bar units", "description": "a description"},
)
self.ds["baz"] = xr.DataArray(
randn((self.nx, self.ny), frac_nan=0.2).astype(np.float32),
coords={"lon": lons, "lat": lats},
dims=("lon", "lat"),
name="baz",
attrs={"units": "baz units", "description": "a description"},
)
self.ds.attrs = {"history": "created for xarray benchmarking"}
self.oinds = {
"time": randint(0, self.nt, 120),
"lon": randint(0, self.nx, 20),
"lat": randint(0, self.ny, 10),
}
self.vinds = {
"time": xr.DataArray(randint(0, self.nt, 120), dims="x"),
"lon": xr.DataArray(randint(0, self.nx, 120), dims="x"),
"lat": slice(3, 20),
}
| IOSingleNetCDF |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 50326,
"end": 50914
} | class ____(nn.Module):
def __init__(self, dim: int, context_dim: int, spatial_merge_size: int = 2) -> None:
super().__init__()
self.hidden_size = context_dim * (spatial_merge_size**2)
self.ln_q = Qwen2RMSNorm(context_dim, eps=1e-6)
self.mlp = nn.Sequential(
nn.Linear(self.hidden_size, self.hidden_size),
nn.GELU(),
nn.Linear(self.hidden_size, dim),
)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.mlp(self.ln_q(x).view(-1, self.hidden_size))
return x
| Qwen2_5OmniPatchMerger |
python | ansible__ansible | test/integration/targets/inventory_cache/plugins/inventory/cache_host.py | {
"start": 580,
"end": 1718
} | class ____(BaseInventoryPlugin, Cacheable):
NAME = 'cache_host'
def verify_file(self, path):
if not path.endswith(('cache_host.yml', 'cache_host.yaml',)):
return False
return super(InventoryModule, self).verify_file(path)
def parse(self, inventory, loader, path, cache=None):
super(InventoryModule, self).parse(inventory, loader, path)
self._read_config_data(path)
cache_key = self.get_cache_key(path)
# user has enabled cache and the cache is not being flushed
read_cache = self.get_option('cache') and cache
# user has enabled cache and the cache is being flushed
update_cache = self.get_option('cache') and not cache
host = None
if read_cache:
try:
host = self._cache[cache_key]
except KeyError:
# cache expired
update_cache = True
if host is None:
host = 'testhost{0}'.format(random.randint(0, 50))
self.inventory.add_host(host, 'all')
if update_cache:
self._cache[cache_key] = host
| InventoryModule |
python | falconry__falcon | falcon/bench/queues/api.py | {
"start": 758,
"end": 1010
} | class ____:
def process_request(self, req, resp):
req.context.request_id = '<generate ID>'
def process_response(self, req, resp, resource, req_succeeded):
resp.set_header('X-Request-ID', req.context.request_id)
| RequestIDComponent |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 678385,
"end": 678759
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("Issue", graphql_name="node")
"""The item at the end of the edge."""
| IssueEdge |
python | crytic__slither | slither/core/variables/structure_variable.py | {
"start": 170,
"end": 502
} | class ____(Variable):
def __init__(self) -> None:
super().__init__()
self._structure: Optional["Structure"] = None
def set_structure(self, structure: "Structure") -> None:
self._structure = structure
@property
def structure(self) -> "Structure":
return self._structure
| StructureVariable |
python | langchain-ai__langchain | libs/core/tests/unit_tests/fake/callbacks.py | {
"start": 332,
"end": 1242
} | class ____(BaseModel):
"""Base fake callback handler for testing."""
starts: int = 0
ends: int = 0
errors: int = 0
errors_args: list[Any] = []
text: int = 0
ignore_llm_: bool = False
ignore_chain_: bool = False
ignore_agent_: bool = False
ignore_retriever_: bool = False
ignore_chat_model_: bool = False
# to allow for similar callback handlers that are not technically equal
fake_id: str | None = None
# add finer-grained counters for easier debugging of failing tests
chain_starts: int = 0
chain_ends: int = 0
llm_starts: int = 0
llm_ends: int = 0
llm_streams: int = 0
tool_starts: int = 0
tool_ends: int = 0
agent_actions: int = 0
agent_ends: int = 0
chat_model_starts: int = 0
retriever_starts: int = 0
retriever_ends: int = 0
retriever_errors: int = 0
retries: int = 0
| BaseFakeCallbackHandler |
python | plotly__plotly.py | plotly/graph_objs/splom/_marker.py | {
"start": 233,
"end": 36987
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "splom"
_path_str = "splom.marker"
_valid_props = {
"angle",
"anglesrc",
"autocolorscale",
"cauto",
"cmax",
"cmid",
"cmin",
"color",
"coloraxis",
"colorbar",
"colorscale",
"colorsrc",
"line",
"opacity",
"opacitysrc",
"reversescale",
"showscale",
"size",
"sizemin",
"sizemode",
"sizeref",
"sizesrc",
"symbol",
"symbolsrc",
}
@property
def angle(self):
"""
Sets the marker angle in respect to `angleref`.
The 'angle' property is a angle (in degrees) that may be
specified as a number between -180 and 180, or a list, numpy array or other iterable thereof.
Numeric values outside this range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float|numpy.ndarray
"""
return self["angle"]
@angle.setter
def angle(self, val):
self["angle"] = val
@property
def anglesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `angle`.
The 'anglesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["anglesrc"]
@anglesrc.setter
def anglesrc(self, val):
self["anglesrc"] = val
@property
def autocolorscale(self):
"""
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in `marker.color` is
set to a numerical array. In case `colorscale` is unspecified
or `autocolorscale` is true, the default palette will be chosen
according to whether numbers in the `color` array are all
positive, all negative or mixed.
The 'autocolorscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["autocolorscale"]
@autocolorscale.setter
def autocolorscale(self, val):
self["autocolorscale"] = val
@property
def cauto(self):
"""
Determines whether or not the color domain is computed with
respect to the input data (here in `marker.color`) or the
bounds set in `marker.cmin` and `marker.cmax` Has an effect
only if in `marker.color` is set to a numerical array. Defaults
to `false` when `marker.cmin` and `marker.cmax` are set by the
user.
The 'cauto' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["cauto"]
@cauto.setter
def cauto(self, val):
self["cauto"] = val
@property
def cmax(self):
"""
Sets the upper bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmin` must be set as well.
The 'cmax' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmax"]
@cmax.setter
def cmax(self, val):
self["cmax"] = val
@property
def cmid(self):
"""
Sets the mid-point of the color domain by scaling `marker.cmin`
and/or `marker.cmax` to be equidistant to this point. Has an
effect only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`. Has no
effect when `marker.cauto` is `false`.
The 'cmid' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmid"]
@cmid.setter
def cmid(self, val):
self["cmid"] = val
@property
def cmin(self):
"""
Sets the lower bound of the color domain. Has an effect only if
in `marker.color` is set to a numerical array. Value should
have the same units as in `marker.color` and if set,
`marker.cmax` must be set as well.
The 'cmin' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["cmin"]
@cmin.setter
def cmin(self, val):
self["cmin"] = val
@property
def color(self):
"""
Sets the marker color. It accepts either a specific color or an
array of numbers that are mapped to the colorscale relative to
the max and min values of the array or relative to
`marker.cmin` and `marker.cmax` if set.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A number that will be interpreted as a color
according to splom.marker.colorscale
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def coloraxis(self):
"""
Sets a reference to a shared color axis. References to these
shared color axes are "coloraxis", "coloraxis2", "coloraxis3",
etc. Settings for these shared color axes are set in the
layout, under `layout.coloraxis`, `layout.coloraxis2`, etc.
Note that multiple color scales can be linked to the same color
axis.
The 'coloraxis' property is an identifier of a particular
subplot, of type 'coloraxis', that may be specified as the string 'coloraxis'
optionally followed by an integer >= 1
(e.g. 'coloraxis', 'coloraxis1', 'coloraxis2', 'coloraxis3', etc.)
Returns
-------
str
"""
return self["coloraxis"]
@coloraxis.setter
def coloraxis(self, val):
self["coloraxis"] = val
@property
def colorbar(self):
"""
The 'colorbar' property is an instance of ColorBar
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.marker.ColorBar`
- A dict of string/value properties that will be passed
to the ColorBar constructor
Returns
-------
plotly.graph_objs.splom.marker.ColorBar
"""
return self["colorbar"]
@colorbar.setter
def colorbar(self, val):
self["colorbar"] = val
@property
def colorscale(self):
"""
Sets the colorscale. Has an effect only if in `marker.color` is
set to a numerical array. The colorscale must be an array
containing arrays mapping a normalized value to an rgb, rgba,
hex, hsl, hsv, or named color string. At minimum, a mapping for
the lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`. To
control the bounds of the colorscale in color space, use
`marker.cmin` and `marker.cmax`. Alternatively, `colorscale`
may be a palette name string of the following list: Blackbody,B
luered,Blues,Cividis,Earth,Electric,Greens,Greys,Hot,Jet,Picnic
,Portland,Rainbow,RdBu,Reds,Viridis,YlGnBu,YlOrRd.
The 'colorscale' property is a colorscale and may be
specified as:
- A list of colors that will be spaced evenly to create the colorscale.
Many predefined colorscale lists are included in the sequential, diverging,
and cyclical modules in the plotly.colors package.
- A list of 2-element lists where the first element is the
normalized color level value (starting at 0 and ending at 1),
and the second item is a valid color string.
(e.g. [[0, 'green'], [0.5, 'red'], [1.0, 'rgb(0, 0, 255)']])
- One of the following named colorscales:
['aggrnyl', 'agsunset', 'algae', 'amp', 'armyrose', 'balance',
'blackbody', 'bluered', 'blues', 'blugrn', 'bluyl', 'brbg',
'brwnyl', 'bugn', 'bupu', 'burg', 'burgyl', 'cividis', 'curl',
'darkmint', 'deep', 'delta', 'dense', 'earth', 'edge', 'electric',
'emrld', 'fall', 'geyser', 'gnbu', 'gray', 'greens', 'greys',
'haline', 'hot', 'hsv', 'ice', 'icefire', 'inferno', 'jet',
'magenta', 'magma', 'matter', 'mint', 'mrybm', 'mygbm', 'oranges',
'orrd', 'oryel', 'oxy', 'peach', 'phase', 'picnic', 'pinkyl',
'piyg', 'plasma', 'plotly3', 'portland', 'prgn', 'pubu', 'pubugn',
'puor', 'purd', 'purp', 'purples', 'purpor', 'rainbow', 'rdbu',
'rdgy', 'rdpu', 'rdylbu', 'rdylgn', 'redor', 'reds', 'solar',
'spectral', 'speed', 'sunset', 'sunsetdark', 'teal', 'tealgrn',
'tealrose', 'tempo', 'temps', 'thermal', 'tropic', 'turbid',
'turbo', 'twilight', 'viridis', 'ylgn', 'ylgnbu', 'ylorbr',
'ylorrd'].
Appending '_r' to a named colorscale reverses it.
Returns
-------
str
"""
return self["colorscale"]
@colorscale.setter
def colorscale(self, val):
self["colorscale"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def line(self):
"""
The 'line' property is an instance of Line
that may be specified as:
- An instance of :class:`plotly.graph_objs.splom.marker.Line`
- A dict of string/value properties that will be passed
to the Line constructor
Returns
-------
plotly.graph_objs.splom.marker.Line
"""
return self["line"]
@line.setter
def line(self, val):
self["line"] = val
@property
def opacity(self):
"""
Sets the marker opacity.
The 'opacity' property is a number and may be specified as:
- An int or float in the interval [0, 1]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["opacity"]
@opacity.setter
def opacity(self, val):
self["opacity"] = val
@property
def opacitysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `opacity`.
The 'opacitysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["opacitysrc"]
@opacitysrc.setter
def opacitysrc(self, val):
self["opacitysrc"] = val
@property
def reversescale(self):
"""
Reverses the color mapping if true. Has an effect only if in
`marker.color` is set to a numerical array. If true,
`marker.cmin` will correspond to the last color in the array
and `marker.cmax` will correspond to the first color.
The 'reversescale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["reversescale"]
@reversescale.setter
def reversescale(self, val):
self["reversescale"] = val
@property
def showscale(self):
"""
Determines whether or not a colorbar is displayed for this
trace. Has an effect only if in `marker.color` is set to a
numerical array.
The 'showscale' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showscale"]
@showscale.setter
def showscale(self, val):
self["showscale"] = val
@property
def size(self):
"""
Sets the marker size (in px).
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizemin(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the minimum size (in px) of the rendered marker
points.
The 'sizemin' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["sizemin"]
@sizemin.setter
def sizemin(self, val):
self["sizemin"] = val
@property
def sizemode(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the rule for which the data in `size` is converted
to pixels.
The 'sizemode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['diameter', 'area']
Returns
-------
Any
"""
return self["sizemode"]
@sizemode.setter
def sizemode(self, val):
self["sizemode"] = val
@property
def sizeref(self):
"""
Has an effect only if `marker.size` is set to a numerical
array. Sets the scale factor used to determine the rendered
size of marker points. Use with `sizemin` and `sizemode`.
The 'sizeref' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["sizeref"]
@sizeref.setter
def sizeref(self, val):
self["sizeref"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def symbol(self):
"""
Sets the marker symbol type. Adding 100 is equivalent to
appending "-open" to a symbol name. Adding 200 is equivalent to
appending "-dot" to a symbol name. Adding 300 is equivalent to
appending "-open-dot" or "dot-open" to a symbol name.
The 'symbol' property is an enumeration that may be specified as:
- One of the following enumeration values:
[0, '0', 'circle', 100, '100', 'circle-open', 200, '200',
'circle-dot', 300, '300', 'circle-open-dot', 1, '1',
'square', 101, '101', 'square-open', 201, '201',
'square-dot', 301, '301', 'square-open-dot', 2, '2',
'diamond', 102, '102', 'diamond-open', 202, '202',
'diamond-dot', 302, '302', 'diamond-open-dot', 3, '3',
'cross', 103, '103', 'cross-open', 203, '203',
'cross-dot', 303, '303', 'cross-open-dot', 4, '4', 'x',
104, '104', 'x-open', 204, '204', 'x-dot', 304, '304',
'x-open-dot', 5, '5', 'triangle-up', 105, '105',
'triangle-up-open', 205, '205', 'triangle-up-dot', 305,
'305', 'triangle-up-open-dot', 6, '6', 'triangle-down',
106, '106', 'triangle-down-open', 206, '206',
'triangle-down-dot', 306, '306', 'triangle-down-open-dot',
7, '7', 'triangle-left', 107, '107', 'triangle-left-open',
207, '207', 'triangle-left-dot', 307, '307',
'triangle-left-open-dot', 8, '8', 'triangle-right', 108,
'108', 'triangle-right-open', 208, '208',
'triangle-right-dot', 308, '308',
'triangle-right-open-dot', 9, '9', 'triangle-ne', 109,
'109', 'triangle-ne-open', 209, '209', 'triangle-ne-dot',
309, '309', 'triangle-ne-open-dot', 10, '10',
'triangle-se', 110, '110', 'triangle-se-open', 210, '210',
'triangle-se-dot', 310, '310', 'triangle-se-open-dot', 11,
'11', 'triangle-sw', 111, '111', 'triangle-sw-open', 211,
'211', 'triangle-sw-dot', 311, '311',
'triangle-sw-open-dot', 12, '12', 'triangle-nw', 112,
'112', 'triangle-nw-open', 212, '212', 'triangle-nw-dot',
312, '312', 'triangle-nw-open-dot', 13, '13', 'pentagon',
113, '113', 'pentagon-open', 213, '213', 'pentagon-dot',
313, '313', 'pentagon-open-dot', 14, '14', 'hexagon', 114,
'114', 'hexagon-open', 214, '214', 'hexagon-dot', 314,
'314', 'hexagon-open-dot', 15, '15', 'hexagon2', 115,
'115', 'hexagon2-open', 215, '215', 'hexagon2-dot', 315,
'315', 'hexagon2-open-dot', 16, '16', 'octagon', 116,
'116', 'octagon-open', 216, '216', 'octagon-dot', 316,
'316', 'octagon-open-dot', 17, '17', 'star', 117, '117',
'star-open', 217, '217', 'star-dot', 317, '317',
'star-open-dot', 18, '18', 'hexagram', 118, '118',
'hexagram-open', 218, '218', 'hexagram-dot', 318, '318',
'hexagram-open-dot', 19, '19', 'star-triangle-up', 119,
'119', 'star-triangle-up-open', 219, '219',
'star-triangle-up-dot', 319, '319',
'star-triangle-up-open-dot', 20, '20',
'star-triangle-down', 120, '120',
'star-triangle-down-open', 220, '220',
'star-triangle-down-dot', 320, '320',
'star-triangle-down-open-dot', 21, '21', 'star-square',
121, '121', 'star-square-open', 221, '221',
'star-square-dot', 321, '321', 'star-square-open-dot', 22,
'22', 'star-diamond', 122, '122', 'star-diamond-open',
222, '222', 'star-diamond-dot', 322, '322',
'star-diamond-open-dot', 23, '23', 'diamond-tall', 123,
'123', 'diamond-tall-open', 223, '223',
'diamond-tall-dot', 323, '323', 'diamond-tall-open-dot',
24, '24', 'diamond-wide', 124, '124', 'diamond-wide-open',
224, '224', 'diamond-wide-dot', 324, '324',
'diamond-wide-open-dot', 25, '25', 'hourglass', 125,
'125', 'hourglass-open', 26, '26', 'bowtie', 126, '126',
'bowtie-open', 27, '27', 'circle-cross', 127, '127',
'circle-cross-open', 28, '28', 'circle-x', 128, '128',
'circle-x-open', 29, '29', 'square-cross', 129, '129',
'square-cross-open', 30, '30', 'square-x', 130, '130',
'square-x-open', 31, '31', 'diamond-cross', 131, '131',
'diamond-cross-open', 32, '32', 'diamond-x', 132, '132',
'diamond-x-open', 33, '33', 'cross-thin', 133, '133',
'cross-thin-open', 34, '34', 'x-thin', 134, '134',
'x-thin-open', 35, '35', 'asterisk', 135, '135',
'asterisk-open', 36, '36', 'hash', 136, '136',
'hash-open', 236, '236', 'hash-dot', 336, '336',
'hash-open-dot', 37, '37', 'y-up', 137, '137',
'y-up-open', 38, '38', 'y-down', 138, '138',
'y-down-open', 39, '39', 'y-left', 139, '139',
'y-left-open', 40, '40', 'y-right', 140, '140',
'y-right-open', 41, '41', 'line-ew', 141, '141',
'line-ew-open', 42, '42', 'line-ns', 142, '142',
'line-ns-open', 43, '43', 'line-ne', 143, '143',
'line-ne-open', 44, '44', 'line-nw', 144, '144',
'line-nw-open', 45, '45', 'arrow-up', 145, '145',
'arrow-up-open', 46, '46', 'arrow-down', 146, '146',
'arrow-down-open', 47, '47', 'arrow-left', 147, '147',
'arrow-left-open', 48, '48', 'arrow-right', 148, '148',
'arrow-right-open', 49, '49', 'arrow-bar-up', 149, '149',
'arrow-bar-up-open', 50, '50', 'arrow-bar-down', 150,
'150', 'arrow-bar-down-open', 51, '51', 'arrow-bar-left',
151, '151', 'arrow-bar-left-open', 52, '52',
'arrow-bar-right', 152, '152', 'arrow-bar-right-open', 53,
'53', 'arrow', 153, '153', 'arrow-open', 54, '54',
'arrow-wide', 154, '154', 'arrow-wide-open']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["symbol"]
@symbol.setter
def symbol(self, val):
self["symbol"] = val
@property
def symbolsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `symbol`.
The 'symbolsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["symbolsrc"]
@symbolsrc.setter
def symbolsrc(self, val):
self["symbolsrc"] = val
@property
def _prop_descriptions(self):
return """\
angle
Sets the marker angle in respect to `angleref`.
anglesrc
Sets the source reference on Chart Studio Cloud for
`angle`.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.splom.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
line
:class:`plotly.graph_objects.splom.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
"""
def __init__(
self,
arg=None,
angle=None,
anglesrc=None,
autocolorscale=None,
cauto=None,
cmax=None,
cmid=None,
cmin=None,
color=None,
coloraxis=None,
colorbar=None,
colorscale=None,
colorsrc=None,
line=None,
opacity=None,
opacitysrc=None,
reversescale=None,
showscale=None,
size=None,
sizemin=None,
sizemode=None,
sizeref=None,
sizesrc=None,
symbol=None,
symbolsrc=None,
**kwargs,
):
"""
Construct a new Marker object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.splom.Marker`
angle
Sets the marker angle in respect to `angleref`.
anglesrc
Sets the source reference on Chart Studio Cloud for
`angle`.
autocolorscale
Determines whether the colorscale is a default palette
(`autocolorscale: true`) or the palette determined by
`marker.colorscale`. Has an effect only if in
`marker.color` is set to a numerical array. In case
`colorscale` is unspecified or `autocolorscale` is
true, the default palette will be chosen according to
whether numbers in the `color` array are all positive,
all negative or mixed.
cauto
Determines whether or not the color domain is computed
with respect to the input data (here in `marker.color`)
or the bounds set in `marker.cmin` and `marker.cmax`
Has an effect only if in `marker.color` is set to a
numerical array. Defaults to `false` when `marker.cmin`
and `marker.cmax` are set by the user.
cmax
Sets the upper bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmin` must be set as well.
cmid
Sets the mid-point of the color domain by scaling
`marker.cmin` and/or `marker.cmax` to be equidistant to
this point. Has an effect only if in `marker.color` is
set to a numerical array. Value should have the same
units as in `marker.color`. Has no effect when
`marker.cauto` is `false`.
cmin
Sets the lower bound of the color domain. Has an effect
only if in `marker.color` is set to a numerical array.
Value should have the same units as in `marker.color`
and if set, `marker.cmax` must be set as well.
color
Sets the marker color. It accepts either a specific
color or an array of numbers that are mapped to the
colorscale relative to the max and min values of the
array or relative to `marker.cmin` and `marker.cmax` if
set.
coloraxis
Sets a reference to a shared color axis. References to
these shared color axes are "coloraxis", "coloraxis2",
"coloraxis3", etc. Settings for these shared color axes
are set in the layout, under `layout.coloraxis`,
`layout.coloraxis2`, etc. Note that multiple color
scales can be linked to the same color axis.
colorbar
:class:`plotly.graph_objects.splom.marker.ColorBar`
instance or dict with compatible properties
colorscale
Sets the colorscale. Has an effect only if in
`marker.color` is set to a numerical array. The
colorscale must be an array containing arrays mapping a
normalized value to an rgb, rgba, hex, hsl, hsv, or
named color string. At minimum, a mapping for the
lowest (0) and highest (1) values are required. For
example, `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
To control the bounds of the colorscale in color space,
use `marker.cmin` and `marker.cmax`. Alternatively,
`colorscale` may be a palette name string of the
following list: Blackbody,Bluered,Blues,Cividis,Earth,E
lectric,Greens,Greys,Hot,Jet,Picnic,Portland,Rainbow,Rd
Bu,Reds,Viridis,YlGnBu,YlOrRd.
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
line
:class:`plotly.graph_objects.splom.marker.Line`
instance or dict with compatible properties
opacity
Sets the marker opacity.
opacitysrc
Sets the source reference on Chart Studio Cloud for
`opacity`.
reversescale
Reverses the color mapping if true. Has an effect only
if in `marker.color` is set to a numerical array. If
true, `marker.cmin` will correspond to the last color
in the array and `marker.cmax` will correspond to the
first color.
showscale
Determines whether or not a colorbar is displayed for
this trace. Has an effect only if in `marker.color` is
set to a numerical array.
size
Sets the marker size (in px).
sizemin
Has an effect only if `marker.size` is set to a
numerical array. Sets the minimum size (in px) of the
rendered marker points.
sizemode
Has an effect only if `marker.size` is set to a
numerical array. Sets the rule for which the data in
`size` is converted to pixels.
sizeref
Has an effect only if `marker.size` is set to a
numerical array. Sets the scale factor used to
determine the rendered size of marker points. Use with
`sizemin` and `sizemode`.
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
symbol
Sets the marker symbol type. Adding 100 is equivalent
to appending "-open" to a symbol name. Adding 200 is
equivalent to appending "-dot" to a symbol name. Adding
300 is equivalent to appending "-open-dot" or "dot-
open" to a symbol name.
symbolsrc
Sets the source reference on Chart Studio Cloud for
`symbol`.
Returns
-------
Marker
"""
super().__init__("marker")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.splom.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.splom.Marker`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("angle", arg, angle)
self._set_property("anglesrc", arg, anglesrc)
self._set_property("autocolorscale", arg, autocolorscale)
self._set_property("cauto", arg, cauto)
self._set_property("cmax", arg, cmax)
self._set_property("cmid", arg, cmid)
self._set_property("cmin", arg, cmin)
self._set_property("color", arg, color)
self._set_property("coloraxis", arg, coloraxis)
self._set_property("colorbar", arg, colorbar)
self._set_property("colorscale", arg, colorscale)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("line", arg, line)
self._set_property("opacity", arg, opacity)
self._set_property("opacitysrc", arg, opacitysrc)
self._set_property("reversescale", arg, reversescale)
self._set_property("showscale", arg, showscale)
self._set_property("size", arg, size)
self._set_property("sizemin", arg, sizemin)
self._set_property("sizemode", arg, sizemode)
self._set_property("sizeref", arg, sizeref)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("symbol", arg, symbol)
self._set_property("symbolsrc", arg, symbolsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Marker |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_assets.py | {
"start": 22550,
"end": 23860
} | class ____(TestAssets):
@pytest.mark.parametrize(
("url", "expected_asset_uris"),
[
# Limit test data
("/assets?limit=1", ["s3://bucket/key/1"]),
("/assets?limit=100", [f"s3://bucket/key/{i}" for i in range(1, 101)]),
# Offset test data
("/assets?offset=1", [f"s3://bucket/key/{i}" for i in range(2, 52)]),
("/assets?offset=3", [f"s3://bucket/key/{i}" for i in range(4, 54)]),
# Limit and offset test data
("/assets?offset=50&limit=50", [f"s3://bucket/key/{i}" for i in range(51, 101)]),
("/assets?offset=3&limit=3", [f"s3://bucket/key/{i}" for i in [4, 5, 6]]),
],
)
def test_limit_and_offset(self, test_client, url, expected_asset_uris):
self.create_assets(num=110)
response = test_client.get(url)
assert response.status_code == 200
asset_uris = [asset["uri"] for asset in response.json()["assets"]]
assert asset_uris == expected_asset_uris
def test_should_respect_page_size_limit_default(self, test_client):
self.create_assets(num=110)
response = test_client.get("/assets")
assert response.status_code == 200
assert len(response.json()["assets"]) == 50
| TestGetAssetsEndpointPagination |
python | readthedocs__readthedocs.org | readthedocs/projects/models.py | {
"start": 67358,
"end": 72334
} | class ____(models.Model):
"""
Project feature flags.
Features should generally be added here as choices, however features may
also be added dynamically from a signal in other packages. Features can be
added by external packages with the use of signals::
@receiver(pre_init, sender=Feature)
def add_features(sender, **kwargs):
sender.FEATURES += (('blah', 'BLAH'),)
The FeatureForm will grab the updated list on instantiation.
"""
# Feature constants - this is not a exhaustive list of features, features
# may be added by other packages
DISABLE_PAGEVIEWS = "disable_pageviews"
RESOLVE_PROJECT_FROM_HEADER = "resolve_project_from_header"
USE_PROXIED_APIS_WITH_PREFIX = "use_proxied_apis_with_prefix"
ALLOW_VERSION_WARNING_BANNER = "allow_version_warning_banner"
DONT_SYNC_WITH_REMOTE_REPO = "dont_sync_with_remote_repo"
# Versions sync related features
SKIP_SYNC_TAGS = "skip_sync_tags"
SKIP_SYNC_BRANCHES = "skip_sync_branches"
SKIP_SYNC_VERSIONS = "skip_sync_versions"
# Dependencies related features
PIP_ALWAYS_UPGRADE = "pip_always_upgrade"
# Search related features
DEFAULT_TO_FUZZY_SEARCH = "default_to_fuzzy_search"
# Build related features
SCALE_IN_PROTECTION = "scale_in_prtection"
USE_S3_SCOPED_CREDENTIALS_ON_BUILDERS = "use_s3_scoped_credentials_on_builders"
BUILD_FULL_CLEAN = "build_full_clean"
BUILD_HEALTHCHECK = "build_healthcheck"
BUILD_NO_ACKS_LATE = "build_no_acks_late"
FEATURES = (
(
DISABLE_PAGEVIEWS,
_("Proxito: Disable all page views"),
),
(
RESOLVE_PROJECT_FROM_HEADER,
_("Proxito: Allow usage of the X-RTD-Slug header"),
),
(
USE_PROXIED_APIS_WITH_PREFIX,
_(
"Proxito: Use proxied APIs (/_/*) with the custom prefix if the project has one (Project.custom_prefix)."
),
),
(
ALLOW_VERSION_WARNING_BANNER,
_("Dashboard: Allow project to use the version warning banner."),
),
(
DONT_SYNC_WITH_REMOTE_REPO,
_("Remote repository: Don't keep project in sync with remote repository."),
),
# Versions sync related features
(
SKIP_SYNC_BRANCHES,
_("Webhook: Skip syncing branches"),
),
(
SKIP_SYNC_TAGS,
_("Webhook: Skip syncing tags"),
),
(
SKIP_SYNC_VERSIONS,
_("Webhook: Skip sync versions task"),
),
# Dependencies related features
(PIP_ALWAYS_UPGRADE, _("Build: Always run pip install --upgrade")),
# Search related features.
(
DEFAULT_TO_FUZZY_SEARCH,
_("Search: Default to fuzzy search for simple search queries"),
),
# Build related features.
(
SCALE_IN_PROTECTION,
_("Build: Set scale-in protection before/after building."),
),
(
USE_S3_SCOPED_CREDENTIALS_ON_BUILDERS,
_("Build: Use S3 scoped credentials for uploading build artifacts."),
),
(
BUILD_FULL_CLEAN,
_("Build: Clean all build directories to avoid leftovers from other projects."),
),
(
BUILD_HEALTHCHECK,
_("Build: Use background cURL healthcheck."),
),
(
BUILD_NO_ACKS_LATE,
_("Build: Do not use Celery ASK_LATE config for this project."),
),
)
FEATURES = sorted(FEATURES, key=lambda x: x[1])
projects = models.ManyToManyField(
Project,
blank=True,
)
# Feature is not implemented as a ChoiceField, as we don't want validation
# at the database level on this field. Arbitrary values are allowed here.
feature_id = models.CharField(
_("Feature identifier"),
max_length=255,
unique=True,
)
add_date = models.DateTimeField(
_("Date feature was added"),
auto_now_add=True,
)
# TODO: rename this field to `past_default_true` and follow this steps when deploying
# https://github.com/readthedocs/readthedocs.org/pull/7524#issuecomment-703663724
default_true = models.BooleanField(
_("Default all past projects to True"),
default=False,
)
future_default_true = models.BooleanField(
_("Default all future projects to True"),
default=False,
)
objects = FeatureQuerySet.as_manager()
def __str__(self):
return self.get_feature_display()
def get_feature_display(self):
"""
Implement display name field for fake ChoiceField.
Because the field is not a ChoiceField here, we need to manually
implement this behavior.
"""
return str(dict(self.FEATURES).get(self.feature_id, self.feature_id))
| Feature |
python | huggingface__transformers | src/transformers/models/smolvlm/modeling_smolvlm.py | {
"start": 30195,
"end": 31851
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`tuple(torch.FloatTensor)`, *optional*):
Tuple of `torch.FloatTensor` (one for the output of the image embeddings, `(batch_size, num_images,
sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[tuple[torch.FloatTensor]] = None
@auto_docstring(
custom_intro="""
The SmolVLM Model with a language modeling head. It is made up a SigLIP vision encoder, with a language modeling head on top.
"""
)
| SmolVLMCausalLMOutputWithPast |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/function7.py | {
"start": 1028,
"end": 1118
} | class ____(Protocol):
def write(self, __a: str, b: str) -> object:
pass
| _Writer4 |
python | spack__spack | lib/spack/spack/solver/asp.py | {
"start": 47098,
"end": 50156
} | class ____(collections.abc.Mapping):
"""Mapping containing concrete specs keyed by DAG hash.
The mapping is ensured to be consistent, i.e. if a spec in the mapping has a dependency with
hash X, it is ensured to be the same object in memory as the spec keyed by X.
"""
def __init__(self) -> None:
self.data: Dict[str, spack.spec.Spec] = {}
self.explicit: Set[str] = set()
def __getitem__(self, dag_hash: str) -> spack.spec.Spec:
return self.data[dag_hash]
def explicit_items(self) -> Iterator[Tuple[str, spack.spec.Spec]]:
"""Iterate on items that have been added explicitly, and not just as a dependency
of other nodes.
"""
for h, s in self.items():
# We need to make an exception for gcc-runtime, until we can splice it.
if h in self.explicit or s.name == "gcc-runtime":
yield h, s
def add(self, spec: spack.spec.Spec) -> bool:
"""Adds a new concrete spec to the mapping. Returns True if the spec was just added,
False if the spec was already in the mapping.
Calling this function marks the spec as added explicitly.
Args:
spec: spec to be added
Raises:
ValueError: if the spec is not concrete
"""
if not spec.concrete:
msg = (
f"trying to store the non-concrete spec '{spec}' in a container "
f"that only accepts concrete"
)
raise ValueError(msg)
dag_hash = spec.dag_hash()
self.explicit.add(dag_hash)
if dag_hash in self.data:
return False
# Here we need to iterate on the input and rewire the copy.
self.data[spec.dag_hash()] = spec.copy(deps=False)
nodes_to_reconstruct = [spec]
while nodes_to_reconstruct:
input_parent = nodes_to_reconstruct.pop()
container_parent = self.data[input_parent.dag_hash()]
for edge in input_parent.edges_to_dependencies():
input_child = edge.spec
container_child = self.data.get(input_child.dag_hash())
# Copy children that don't exist yet
if container_child is None:
container_child = input_child.copy(deps=False)
self.data[input_child.dag_hash()] = container_child
nodes_to_reconstruct.append(input_child)
# Rewire edges
container_parent.add_dependency_edge(
dependency_spec=container_child, depflag=edge.depflag, virtuals=edge.virtuals
)
return True
def __len__(self) -> int:
return len(self.data)
def __iter__(self):
return iter(self.data)
# types for condition caching in solver setup
ConditionSpecKey = Tuple[str, Optional[TransformFunction]]
ConditionIdFunctionPair = Tuple[int, List[AspFunction]]
ConditionSpecCache = Dict[str, Dict[ConditionSpecKey, ConditionIdFunctionPair]]
| ConcreteSpecsByHash |
python | apache__airflow | dev/breeze/src/airflow_breeze/prepare_providers/provider_documentation.py | {
"start": 4333,
"end": 4927
} | class ____(NamedTuple):
"""Stores details about commits"""
full_hash: str
short_hash: str
date: str
version: str
message: str
message_without_backticks: str
pr: str | None
def get_most_impactful_change(changes: list[TypeOfChange]):
return max(changes, key=lambda change: precedence_order[change])
def format_message_for_classification(message):
find_pr = re.search(r"#(\d+)", message)
if find_pr:
num = find_pr.group(1)
message = re.sub(r"#(\d+)", f"https://github.com/apache/airflow/pull/{num}", message)
return message
| Change |
python | getsentry__sentry | tests/sentry/workflow_engine/endpoints/test_organization_workflow_details.py | {
"start": 1224,
"end": 1452
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-workflow-details"
def setUp(self) -> None:
super().setUp()
self.login_as(user=self.user)
@region_silo_test
| OrganizationWorkflowDetailsBaseTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/pipes/client.py | {
"start": 2002,
"end": 5225
} | class ____:
"""A wrapper for the results of a pipes client invocation, typically returned from `PipesClient.run`.
Args:
session (PipesSession): The Pipes session that was used to run the external process.
metadata (Optional[RawMetadataMapping]): Arbitrary metadata that will be attached to all
results generated by the invocation. Useful for attaching information to
asset materializations and checks that is available via the external process launch API
but not in the external process itself (e.g. a job_id param returned by the launch API call).
"""
def __init__(
self,
session: PipesSession,
metadata: Optional[RawMetadataMapping] = None,
):
self._session = session
self._metadata = normalize_metadata(metadata or {})
@property
def metadata(self) -> Mapping[str, MetadataValue]:
"""Arbitrary metadata attached to the invocation."""
return self._metadata
def get_results(
self,
*,
implicit_materializations=True,
) -> Sequence["PipesExecutionResult"]:
"""Get the stream of results as a Sequence of a completed pipes
client invocation.
Args:
implicit_materializations (bool): Create MaterializeResults for expected assets
even if nothing was reported from the external process.
Returns: Sequence[PipesExecutionResult]
"""
return self._session.get_results(
implicit_materializations=implicit_materializations, metadata=self.metadata
)
def get_materialize_result(
self,
*,
implicit_materialization=True,
) -> MaterializeResult:
"""Get a single materialize result for a pipes invocation. This coalesces
the materialization result and any separately reported asset check results from
the external process.
This does not work on invocations that materialize multiple assets and will fail
in that case. For multiple assets use `get_results` instead to get the result stream.
Args:
implicit_materializations (bool): Create MaterializeResults for expected asset
even if nothing was reported from the external process.
Returns: MaterializeResult
"""
return materialize_result_from_pipes_results(
self.get_results(implicit_materializations=implicit_materialization)
)
def get_asset_check_result(self) -> AssetCheckResult:
"""Get a single asset check result for a pipes invocation.
This does not work on invocations that have anything except a single asset check result.
Use `get_results` instead to get the result stream in those cases.
Returns: AssetCheckResult
"""
return _check_result_from_pipes_results(self.get_results())
def get_custom_messages(self) -> Sequence[Any]:
"""Get the sequence of deserialized JSON data that was reported from the external process using
`report_custom_message`.
Returns: Sequence[Any]
"""
return self._session.get_custom_messages()
@public
| PipesClientCompletedInvocation |
python | spack__spack | lib/spack/spack/cmd/create.py | {
"start": 6981,
"end": 7870
} | class ____(PackageTemplate):
"""Provides appropriate overrides for LuaRocks-based packages"""
base_class_name = "LuaPackage"
package_class_import = "from spack_repo.builtin.build_systems.lua import LuaPackage"
body_def = """\
def luarocks_args(self):
# FIXME: Add arguments to `luarocks make` other than rockspec path
# FIXME: If not needed delete this function
args = []
return args"""
def __init__(self, name, url, versions, languages: List[str]):
# If the user provided `--name lua-lpeg`, don't rename it lua-lua-lpeg
if not name.startswith("lua-"):
# Make it more obvious that we are renaming the package
tty.msg("Changing package name from {0} to lua-{0}".format(name))
name = "lua-{0}".format(name)
super().__init__(name, url, versions, languages)
| LuaPackageTemplate |
python | numba__llvmlite | llvmlite/binding/targets.py | {
"start": 4926,
"end": 6351
} | class ____(ffi.ObjectRef):
"""
A TargetData provides structured access to a data layout.
Use :func:`create_target_data` to create instances.
"""
def __str__(self):
if self._closed:
return "<dead TargetData>"
with ffi.OutputString() as out:
ffi.lib.LLVMPY_CopyStringRepOfTargetData(self, out)
return str(out)
def _dispose(self):
self._capi.LLVMPY_DisposeTargetData(self)
def get_abi_size(self, ty):
"""
Get ABI size of LLVM type *ty*.
"""
return ffi.lib.LLVMPY_ABISizeOfType(self, ty)
def get_element_offset(self, ty, position):
"""
Get byte offset of type's ty element at the given position
"""
offset = ffi.lib.LLVMPY_OffsetOfElement(self, ty, position)
if offset == -1:
raise ValueError("Could not determined offset of {}th "
"element of the type '{}'. Is it a struct"
"type?".format(position, str(ty)))
return offset
def get_abi_alignment(self, ty):
"""
Get minimum ABI alignment of LLVM type *ty*.
"""
return ffi.lib.LLVMPY_ABIAlignmentOfType(self, ty)
RELOC = frozenset(['default', 'static', 'pic', 'dynamicnopic'])
CODEMODEL = frozenset(['default', 'jitdefault', 'small', 'kernel', 'medium',
'large'])
| TargetData |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/class_interval.py | {
"start": 5969,
"end": 6074
} | class ____(B14, C14):
pass
def multi_inheritance_no_issue_one_hop(b: B14):
_test_sink(b.m0())
| D14 |
python | falconry__falcon | examples/things_advanced.py | {
"start": 555,
"end": 1026
} | class ____:
engines = {
'ddg': 'https://duckduckgo.com',
'y': 'https://search.yahoo.com/search',
}
def __call__(self, req, resp, engine):
url = self.engines[engine]
params = {'q': req.get_param('q', True)}
result = requests.get(url, params=params)
resp.status = falcon.code_to_http_status(result.status_code)
resp.content_type = result.headers['content-type']
resp.text = result.text
| SinkAdapter |
python | redis__redis-py | redis/_parsers/hiredis.py | {
"start": 952,
"end": 1147
} | class ____(TypedDict, total=False):
protocolError: Callable[[str], Exception]
replyError: Callable[[str], Exception]
encoding: Optional[str]
errors: Optional[str]
| _HiredisReaderArgs |
python | celery__celery | celery/worker/pidbox.py | {
"start": 435,
"end": 2155
} | class ____:
"""Worker mailbox."""
consumer = None
def __init__(self, c):
self.c = c
self.hostname = c.hostname
self.node = c.app.control.mailbox.Node(
safe_str(c.hostname),
handlers=control.Panel.data,
state=AttributeDict(
app=c.app,
hostname=c.hostname,
consumer=c,
tset=pass1 if c.controller.use_eventloop else set),
)
self._forward_clock = self.c.app.clock.forward
def on_message(self, body, message):
# just increase clock as clients usually don't
# have a valid clock to adjust with.
self._forward_clock()
try:
self.node.handle_message(body, message)
except KeyError as exc:
error('No such control command: %s', exc)
except Exception as exc:
error('Control command error: %r', exc, exc_info=True)
self.reset()
def start(self, c):
self.node.channel = c.connection.channel()
self.consumer = self.node.listen(callback=self.on_message)
self.consumer.on_decode_error = c.on_decode_error
def on_stop(self):
pass
def stop(self, c):
self.on_stop()
self.consumer = self._close_channel(c)
def reset(self):
self.stop(self.c)
self.start(self.c)
def _close_channel(self, c):
if self.node and self.node.channel:
ignore_errors(c, self.node.channel.close)
def shutdown(self, c):
self.on_stop()
if self.consumer:
debug('Canceling broadcast consumer...')
ignore_errors(c, self.consumer.cancel)
self.stop(self.c)
| Pidbox |
python | django__django | tests/file_storage/test_base.py | {
"start": 183,
"end": 390
} | class ____(Storage):
"""Simple Storage subclass implementing the bare minimum for testing."""
def exists(self, name):
return False
def _save(self, name):
return name
| CustomStorage |
python | keras-team__keras | keras/src/ops/core.py | {
"start": 2326,
"end": 6649
} | class ____(Operation):
def __init__(self, length=None, reverse=False, unroll=1, *, name=None):
super().__init__(name=name)
self.length = length
self.reverse = reverse
self.unroll = unroll
def call(self, f, init, xs=None):
return backend.core.scan(
f,
init,
xs,
length=self.length,
reverse=self.reverse,
unroll=self.unroll,
)
def compute_output_spec(self, f, init, xs=None):
if xs is None:
n = int(self.length)
x = None
else:
n = (
int(self.length)
if self.length is not None
else tree.flatten(xs)[0].shape[0]
)
x = xs[0]
carry, y = backend.compute_output_spec(f, init, x)
y = KerasTensor(shape=(n,) + y.shape, dtype=y.dtype, sparse=y.sparse)
return carry, y
@keras_export("keras.ops.scan")
def scan(f, init, xs=None, length=None, reverse=False, unroll=1):
"""Scan a function over leading array axes while carrying along state.
When the type of `xs` is an array type or `None`, and the type of `ys` is an
array type, the semantics of `scan()` are given roughly by this Python
implementation:
```python
def scan(f, init, xs, length=None):
if xs is None:
xs = [None] * length
carry = init
ys = []
for x in xs:
carry, y = f(carry, x)
ys.append(y)
return carry, np.stack(ys)
```
The loop-carried value `carry` (`init`) must hold a fixed shape and dtype
across all iterations.
In TensorFlow, `y` must match `carry` in shape and dtype. This is not
required in other backends.
Args:
f: Callable defines the logic for each loop iteration. This accepts two
arguments where the first is a value of the loop carry and the
second is a slice of `xs` along its leading axis.
This callable returns a pair where the first represents a new value
for the loop carry and the second represents a slice of the output.
init: The initial loop carry value. This can be a scalar, tensor, or any
nested structure. It must match the structure of the first element
returned by `f`.
xs: Optional value to scan along its leading axis. This can be a tensor
or any nested structure. If `xs` is not provided, you must specify
`length` to define the number of loop iterations.
Defaults to `None`.
length: Optional integer specifying the number of loop iterations.
If `length` is not provided, it defaults to the sizes of leading
axis of the arrays in `xs`. Defaults to `None`.
reverse: Optional boolean specifying whether to run the scan iteration
forward or in reverse, equivalent to reversing the leading axes of
the arrays in both `xs` and in `ys`.
unroll: Optional positive integer or boolean specifying how many scan
iterations to unroll within a single iteration of a loop. If an
integer is provided, it determines how many unrolled loop iterations
to run within a single rolled iteration of the loop. If a boolean is
provided, it will determine if the loop is completely unrolled
(`unroll=True`) or left completely unrolled (`unroll=False`).
Note that unrolling is only supported by JAX and TensorFlow
backends.
Returns:
A pair where the first element represents the final loop carry value and
the second element represents the stacked outputs of `f` when scanned
over the leading axis of the inputs.
Examples:
>>> sum_fn = lambda c, x: (c + x, c + x)
>>> init = keras.ops.array(0)
>>> xs = keras.ops.array([1, 2, 3, 4, 5])
>>> carry, result = keras.ops.scan(sum_fn, init, xs)
>>> carry
15
>>> result
[1, 3, 6, 10, 15]
"""
if any_symbolic_tensors((init, xs)):
return Scan(
length=length, reverse=reverse, unroll=unroll
).symbolic_call(f, init, xs)
return backend.core.scan(
f, init, xs, length, reverse=reverse, unroll=unroll
)
| Scan |
python | pydantic__pydantic | pydantic/networks.py | {
"start": 25232,
"end": 25740
} | class ____(AnyUrl):
"""A type that will accept any Cockroach DSN.
* User info required
* TLD not required
* Host required
"""
_constraints = UrlConstraints(
host_required=True,
allowed_schemes=[
'cockroachdb',
'cockroachdb+psycopg2',
'cockroachdb+asyncpg',
],
)
@property
def host(self) -> str:
"""The required URL host."""
return self._url.host # pyright: ignore[reportReturnType]
| CockroachDsn |
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 104634,
"end": 105555
} | class ____(Request):
"""
Get the tasks's latest scalar values
:param task: Task ID
:type task: str
"""
_service = "events"
_action = "get_task_latest_scalar_values"
_version = "2.20"
_schema = {
"definitions": {},
"properties": {"task": {"description": "Task ID", "type": "string"}},
"required": ["task"],
"type": "object",
}
def __init__(self, task: str, **kwargs: Any) -> None:
super(GetTaskLatestScalarValuesRequest, self).__init__(**kwargs)
self.task = task
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
| GetTaskLatestScalarValuesRequest |
python | pyodide__pyodide | src/py/pyodide/_package_loader.py | {
"start": 2561,
"end": 13703
} | class ____(Exception):
"""Unsupported wheel."""
def find_wheel_metadata_dir(source: ZipFile, suffix: str) -> str | None:
"""
Returns the name of the contained metadata directory inside the wheel file.
Parameters
----------
source
A ZipFile object representing the wheel file.
suffix
The suffix of the metadata directory. Usually ".dist-info" or ".data"
Returns
-------
The name of the metadata directory. If not found, returns None.
"""
# Zip file path separators must be /
subdirs = {p.split("/", 1)[0] for p in source.namelist()}
info_dirs = [s for s in subdirs if s.endswith(suffix)]
if not info_dirs:
return None
# Choose the first directory if there are multiple directories
info_dir = info_dirs[0]
return info_dir
def wheel_dist_info_dir(source: ZipFile, name: str) -> str:
"""
Returns the name of the contained .dist-info directory.
"""
dist_info_dir = find_wheel_metadata_dir(source, suffix=DIST_INFO_DIR_SUFFIX)
if dist_info_dir is None:
raise UnsupportedWheel(
f"{DIST_INFO_DIR_SUFFIX} directory not found in wheel {name!r}"
)
dist_info_dir_name = canonicalize_name(dist_info_dir)
canonical_name = canonicalize_name(name)
if not dist_info_dir_name.startswith(canonical_name):
raise UnsupportedWheel(
f"{DIST_INFO_DIR_SUFFIX} directory {dist_info_dir!r} does not start with {canonical_name!r}"
)
return dist_info_dir
def wheel_data_file_dir(source: ZipFile, name: str) -> str | None:
data_file_dir = find_wheel_metadata_dir(source, suffix=DATA_FILES_DIR_SUFFIX)
# data files are optional, so we return None if not found
if data_file_dir is None:
return None
data_file_dir_name = canonicalize_name(data_file_dir)
canonical_name = canonicalize_name(name)
if not data_file_dir_name.startswith(canonical_name):
return None
return data_file_dir
def make_whlfile(
*args: Any, owner: int | None = None, group: int | None = None, **kwargs: Any
) -> str:
return shutil._make_zipfile(*args, **kwargs) # type: ignore[attr-defined]
if IN_PYODIDE:
shutil.register_archive_format("whl", make_whlfile, description="Wheel file")
shutil.register_unpack_format(
"whl",
[".whl", ".wheel"],
shutil._unpack_zipfile, # type: ignore[attr-defined]
description="Wheel file",
)
def get_format(format: str) -> str:
for fmt, extensions, _ in shutil.get_unpack_formats():
if format == fmt:
return fmt
if format in extensions:
return fmt
if "." + format in extensions:
return fmt
raise ValueError(f"Unrecognized format {format}")
def get_install_dir(target: Literal["site", "dynlib"] | None = None) -> str:
"""
Get the installation directory for a target.
"""
if not target:
return str(SITE_PACKAGES)
return str(TARGETS.get(target, SITE_PACKAGES))
def unpack_buffer(
buffer: JsBuffer,
*,
filename: str = "",
format: str | None = None,
extract_dir: str | None = None,
calculate_dynlibs: bool = False,
metadata: dict[str, str] | None = None,
) -> JsArray[str] | None:
"""Used to install a package either into sitepackages or into the standard
library.
This is a helper method called from ``loadPackage``.
Parameters
----------
buffer
A Javascript ``Uint8Array`` with the binary data for the archive.
filename
The name of the file we are extracting. We only care about it to figure
out whether the buffer represents a tar file or a zip file. Ignored if
format argument is present.
format
Controls the format that we assume the archive has. Overrides the file
extension of filename. In particular we decide the file format as
follows:
1. If format is present, we use that.
2. If file name is present, it should have an extension, like a.zip,
a.tar, etc. Then we use that.
3. If neither is present or the file name has no extension, we throw an
error.
extract_dir
Controls which directory the file is unpacked into. Default is the
working directory.
calculate_dynlibs
If true, will return a Javascript Array of paths to dynamic libraries
('.so' files) that were in the archive. We need to precompile these Wasm
binaries in `load-pyodide.js`. These paths point to the unpacked
locations of the .so files.
metadata
A dictionary of metadata to be stored in the package's dist-info directory.
The keys are the names of the metadata files and the values are the contents
of the files.
Returns
-------
If calculate_dynlibs is True, a Javascript Array of dynamic libraries.
Otherwise, return None.
"""
if format:
format = get_format(format)
if not filename and format is None:
raise ValueError("At least one of filename and format must be provided")
extract_path = Path(extract_dir or ".")
filename = filename.rpartition("/")[-1]
extract_path.mkdir(parents=True, exist_ok=True)
with NamedTemporaryFile(suffix=filename) as f:
buffer._into_file(f)
shutil.unpack_archive(f.name, extract_path, format)
suffix = Path(f.name).suffix
if suffix == ".whl":
z = ZipFile(f)
if metadata:
set_wheel_metadata(filename, z, extract_path, metadata)
install_datafiles(filename, z, extract_path)
if calculate_dynlibs:
return to_js(get_dynlibs(f, suffix, extract_path))
return None
def should_load_dynlib(path: str | Path) -> bool:
path = Path(path)
if not SHAREDLIB_REGEX.search(path.name):
return False
suffixes = path.suffixes
try:
tag = suffixes[suffixes.index(".so") - 1]
except ValueError: # This should not happen, but just in case
return False
if tag in EXTENSION_TAGS:
return True
# Okay probably it's not compatible now. But it might be an unrelated .so
# file with a name with an extra dot: `some.name.so` vs
# `some.cpython-39-x86_64-linux-gnu.so` Let's make a best effort here to
# check.
return not PLATFORM_TAG_REGEX.match(tag)
def set_wheel_metadata(
filename: str,
archive: ZipFile,
target_dir: Path,
metadata: dict[str, str],
) -> None:
"""Record the metadata of a wheel into the target directory.
Common metadata includes the installer file according to the packaging spec:
packaging.python.org/en/latest/specifications/recording-installed-packages/#the-dist-info-directory
The packaging spec allows us to make custom files. It also allows wheels to
include custom files in their .dist-info directory. The spec has no attempt
to coordinate these so that installers don't trample files that wheels
include. We make a best effort with our PYODIDE prefix.
Parameters
----------
filename
The file name of the wheel.
archive
A ZipFile object representing the wheel file.
target_dir
The directory the wheel is being installed into. Probably site-packages.
installer
The name of the installer. Currently either `pyodide.unpackArchive`,
`pyodide.loadPackage` or `micropip`.
source
Where did the package come from? Either a url, `pyodide`, or `PyPI`.
"""
wheel_name = parse_wheel_name(filename)[0]
dist_info_name = wheel_dist_info_dir(archive, wheel_name)
dist_info = target_dir / dist_info_name
for key, value in metadata.items():
(dist_info / key).write_text(value)
def install_datafiles(
filename: str,
archive: ZipFile,
target_dir: Path,
) -> None:
"""
Install data files from a wheel into the target directory.
While data files are not standard in wheels, they are common in the wild and pip supports them.
"""
wheel_name = parse_wheel_name(filename)[0]
data_file_dir_name = wheel_data_file_dir(archive, wheel_name)
if data_file_dir_name is None:
return
data_file_dir = target_dir / data_file_dir_name / DATA_FILES_SCHEME
if not data_file_dir.exists():
return
install_files(data_file_dir, sys.prefix)
def get_dynlibs(archive: IO[bytes], suffix: str, target_dir: Path) -> list[str]:
"""List out the paths to .so files in a zip or tar archive.
Parameters
----------
archive
A binary representation of either a zip or a tar archive. We use the `.name`
field to determine which file type.
target_dir
The directory the archive is unpacked into. Paths will be adjusted to point
inside this directory.
Returns
-------
The list of paths to dynamic libraries ('.so' files) that were in the archive,
but adjusted to point to their unpacked locations.
"""
import tarfile
dynlib_paths_iter: Iterable[str]
if suffix in ZIP_TYPES:
dynlib_paths_iter = ZipFile(archive).namelist()
elif suffix in TAR_TYPES:
dynlib_paths_iter = (tinfo.name for tinfo in tarfile.open(archive.name))
else:
raise ValueError(f"Unexpected suffix {suffix}")
return [
str((target_dir / path).resolve())
for path in dynlib_paths_iter
if should_load_dynlib(path)
]
def get_dist_source(dist_path: Path) -> tuple[str, str]:
"""Get the package name and a description of the source of a package.
This is used in loadPackage to explain where the package came from. Purely
for informative purposes.
"""
with (dist_path / "METADATA").open() as f:
for line in f:
if line.startswith("Name:"):
dist_name = line[5:].strip()
break
else:
raise ValueError(f"Package name not found in {dist_path.name} METADATA")
source_path = dist_path / PYODIDE_SOURCE_METADATA_FILE
if source_path.exists():
source = source_path.read_text().strip()
if source == "pyodide":
return dist_name, "default channel"
elif source:
return dist_name, source
direct_url_path = dist_path / "direct_url.json"
if direct_url_path.exists():
import json
return dist_name, json.loads(direct_url_path.read_text())["url"]
installer_path = dist_path / "INSTALLER"
if installer_path.exists():
installer = installer_path.read_text().strip()
return dist_name, f"{installer} (index unknown)"
return dist_name, "Unknown"
def init_loaded_packages() -> None:
"""Initialize pyodide.loadedPackages with the packages that are already
present.
This ensures that `pyodide.loadPackage` knows that they are around and
doesn't install over them.
"""
for dist_path in SITE_PACKAGES.glob("*.dist-info"):
dist_name, dist_source = get_dist_source(dist_path)
setattr(loadedPackages, dist_name, dist_source)
| UnsupportedWheel |
python | walkccc__LeetCode | solutions/2799. Count Complete Subarrays in an Array/2799.py | {
"start": 0,
"end": 484
} | class ____:
def countCompleteSubarrays(self, nums: list[int]) -> int:
ans = 0
distinct = len(set(nums))
count = collections.Counter()
l = 0
for num in nums:
count[num] += 1
while len(count) == distinct:
count[nums[l]] -= 1
if count[nums[l]] == 0:
del count[nums[l]]
l += 1
# Assume nums[r] = num,
# nums[0..r], nums[1..r], ..., nums[l - 1..r] have k different values.
ans += l
return ans
| Solution |
python | pytorch__pytorch | test/distributed/elastic/rendezvous/static_rendezvous_test.py | {
"start": 520,
"end": 3188
} | class ____(unittest.TestCase):
def test_missing_port(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="localhost",
run_id="test_id",
min_nodes=1,
max_nodes=1,
)
with self.assertRaises(ValueError):
create_rdzv_handler(rdzv_params)
def test_empty_endpoint(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="",
run_id="test_id",
min_nodes=1,
max_nodes=1,
)
with self.assertRaises(ValueError):
create_rdzv_handler(rdzv_params)
def test_ipv6_addr(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="[2001:0db8:85a3:0000:0000:8a2e:0370:7334]:90",
run_id="test_id",
min_nodes=1,
max_nodes=1,
)
with self.assertRaises(ValueError):
create_rdzv_handler(rdzv_params)
def test_ipv6_addr_localhost(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="[::1]:90",
run_id="test_id",
min_nodes=1,
max_nodes=1,
)
with self.assertRaises(ValueError):
create_rdzv_handler(rdzv_params)
def test_get_backend(self):
rdzv_params = RendezvousParameters(
backend="static",
endpoint="localhost:123",
run_id="test",
min_nodes=1,
max_nodes=1,
timeout=60,
rank=0,
)
static_rdzv = create_rdzv_handler(rdzv_params)
self.assertEqual("static", static_rdzv.get_backend())
def test_static_rdzv_multiple_calls(self):
sock = get_socket_with_port()
with closing(sock):
master_port = sock.getsockname()[1]
master_addr = "localhost"
rdzv_params = RendezvousParameters(
backend="static",
endpoint=f"{master_addr}:{master_port}",
run_id="test_id",
min_nodes=1,
max_nodes=1,
rank=0,
)
rdzv_handler = create_rdzv_handler(rdzv_params)
# Call rendezvous two times
rdzv_info = rdzv_handler.next_rendezvous()
self.assertIsNotNone(rdzv_info.store)
self.assertEqual(0, rdzv_info.rank)
self.assertEqual(1, rdzv_info.world_size)
rdzv_info = rdzv_handler.next_rendezvous()
self.assertIsNotNone(rdzv_info.store)
self.assertEqual(0, rdzv_info.rank)
self.assertEqual(1, rdzv_info.world_size)
| StaticTCPRendezvousTest |
python | ZoranPandovski__al-go-rithms | data_structures/red_and_black_tree/Python/red_black_tree.py | {
"start": 243,
"end": 9353
} | class ____():
def __init__(self):
self.TNULL = Node(0)
self.TNULL.color = 0
self.TNULL.left = None
self.TNULL.right = None
self.root = self.TNULL
# Preorder
def pre_order_helper(self, node):
if node != self.TNULL:
sys.stdout.write(node.item + " ")
self.pre_order_helper(node.left)
self.pre_order_helper(node.right)
# Inorder
def in_order_helper(self, node):
if node != self.TNULL:
self.in_order_helper(node.left)
sys.stdout.write(node.item + " ")
self.in_order_helper(node.right)
# Postorder
def post_order_helper(self, node):
if node != self.TNULL:
self.post_order_helper(node.left)
self.post_order_helper(node.right)
sys.stdout.write(node.item + " ")
# Search the tree
def search_tree_helper(self, node, key):
if node == self.TNULL or key == node.item:
return node
if key < node.item:
return self.search_tree_helper(node.left, key)
return self.search_tree_helper(node.right, key)
# Balancing the tree after deletion
def delete_fix(self, x):
while x != self.root and x.color == 0:
if x == x.parent.left:
s = x.parent.right
if s.color == 1:
s.color = 0
x.parent.color = 1
self.left_rotate(x.parent)
s = x.parent.right
if s.left.color == 0 and s.right.color == 0:
s.color = 1
x = x.parent
else:
if s.right.color == 0:
s.left.color = 0
s.color = 1
self.right_rotate(s)
s = x.parent.right
s.color = x.parent.color
x.parent.color = 0
s.right.color = 0
self.left_rotate(x.parent)
x = self.root
else:
s = x.parent.left
if s.color == 1:
s.color = 0
x.parent.color = 1
self.right_rotate(x.parent)
s = x.parent.left
if s.right.color == 0 and s.right.color == 0:
s.color = 1
x = x.parent
else:
if s.left.color == 0:
s.right.color = 0
s.color = 1
self.left_rotate(s)
s = x.parent.left
s.color = x.parent.color
x.parent.color = 0
s.left.color = 0
self.right_rotate(x.parent)
x = self.root
x.color = 0
def __rb_transplant(self, u, v):
if u.parent == None:
self.root = v
elif u == u.parent.left:
u.parent.left = v
else:
u.parent.right = v
v.parent = u.parent
# Node deletion
def delete_node_helper(self, node, key):
z = self.TNULL
while node != self.TNULL:
if node.item == key:
z = node
if node.item <= key:
node = node.right
else:
node = node.left
if z == self.TNULL:
print("Cannot find key in the tree")
return
y = z
y_original_color = y.color
if z.left == self.TNULL:
x = z.right
self.__rb_transplant(z, z.right)
elif (z.right == self.TNULL):
x = z.left
self.__rb_transplant(z, z.left)
else:
y = self.minimum(z.right)
y_original_color = y.color
x = y.right
if y.parent == z:
x.parent = y
else:
self.__rb_transplant(y, y.right)
y.right = z.right
y.right.parent = y
self.__rb_transplant(z, y)
y.left = z.left
y.left.parent = y
y.color = z.color
if y_original_color == 0:
self.delete_fix(x)
# Balance the tree after insertion
def fix_insert(self, k):
while k.parent.color == 1:
if k.parent == k.parent.parent.right:
u = k.parent.parent.left
if u.color == 1:
u.color = 0
k.parent.color = 0
k.parent.parent.color = 1
k = k.parent.parent
else:
if k == k.parent.left:
k = k.parent
self.right_rotate(k)
k.parent.color = 0
k.parent.parent.color = 1
self.left_rotate(k.parent.parent)
else:
u = k.parent.parent.right
if u.color == 1:
u.color = 0
k.parent.color = 0
k.parent.parent.color = 1
k = k.parent.parent
else:
if k == k.parent.right:
k = k.parent
self.left_rotate(k)
k.parent.color = 0
k.parent.parent.color = 1
self.right_rotate(k.parent.parent)
if k == self.root:
break
self.root.color = 0
# Printing the tree
    def __print_helper(self, node, indent, last):
        """Recursively pretty-print the subtree rooted at ``node`` to stdout.

        ``indent`` accumulates the line prefix for the current depth;
        ``last`` is True when ``node`` is a right child (printed with the
        "R----" marker), False for a left child ("L----").
        """
        if node != self.TNULL:
            sys.stdout.write(indent)
            if last:
                sys.stdout.write("R----")
                indent += "     "
            else:
                sys.stdout.write("L----")
                indent += "|    "
            # Color convention: 1 == red, 0 == black.
            s_color = "RED" if node.color == 1 else "BLACK"
            print(str(node.item) + "(" + s_color + ")")
            self.__print_helper(node.left, indent, False)
            self.__print_helper(node.right, indent, True)
    def preorder(self):
        """Pre-order traversal of the whole tree (delegates to pre_order_helper)."""
        self.pre_order_helper(self.root)
    def inorder(self):
        """In-order traversal of the whole tree (delegates to in_order_helper)."""
        self.in_order_helper(self.root)
    def postorder(self):
        """Post-order traversal of the whole tree (delegates to post_order_helper)."""
        self.post_order_helper(self.root)
    def searchTree(self, k):
        """Search for ``k`` starting at the root (delegates to search_tree_helper)."""
        return self.search_tree_helper(self.root, k)
def minimum(self, node):
while node.left != self.TNULL:
node = node.left
return node
def maximum(self, node):
while node.right != self.TNULL:
node = node.right
return node
def successor(self, x):
if x.right != self.TNULL:
return self.minimum(x.right)
y = x.parent
while y != self.TNULL and x == y.right:
x = y
y = y.parent
return y
def predecessor(self, x):
if (x.left != self.TNULL):
return self.maximum(x.left)
y = x.parent
while y != self.TNULL and x == y.left:
x = y
y = y.parent
return y
def left_rotate(self, x):
y = x.right
x.right = y.left
if y.left != self.TNULL:
y.left.parent = x
y.parent = x.parent
if x.parent == None:
self.root = y
elif x == x.parent.left:
x.parent.left = y
else:
x.parent.right = y
y.left = x
x.parent = y
def right_rotate(self, x):
y = x.left
x.left = y.right
if y.right != self.TNULL:
y.right.parent = x
y.parent = x.parent
if x.parent == None:
self.root = y
elif x == x.parent.right:
x.parent.right = y
else:
x.parent.left = y
y.right = x
x.parent = y
def insert(self, key):
node = Node(key)
node.parent = None
node.item = key
node.left = self.TNULL
node.right = self.TNULL
node.color = 1
y = None
x = self.root
while x != self.TNULL:
y = x
if node.item < x.item:
x = x.left
else:
x = x.right
node.parent = y
if y == None:
self.root = node
elif node.item < y.item:
y.left = node
else:
y.right = node
if node.parent == None:
node.color = 0
return
if node.parent.parent == None:
return
self.fix_insert(node)
    def get_root(self):
        """Return the root node of the tree."""
        return self.root
    def delete_node(self, item):
        """Remove ``item`` from the tree (prints a message if it is absent)."""
        self.delete_node_helper(self.root, item)
    def print_tree(self):
        """Print an ASCII rendering of the whole tree to stdout."""
        self.__print_helper(self.root, "", True)
if __name__ == "__main__":
    # Demo: build a small red-black tree, show it, delete a key, show again.
    tree = RedBlackTree()
    for value in (55, 40, 65, 60, 75, 57):
        tree.insert(value)
    tree.print_tree()
    print("\nAfter deleting an element")
    tree.delete_node(40)
    tree.print_tree()
| RedBlackTree |
python | pytorch__pytorch | torch/autograd/graph.py | {
"start": 23485,
"end": 25039
} | class ____(saved_tensors_hooks):
def __init__(self, ctx: "_AllowMutationOnSavedContext") -> None:
def pack_hook(tensor: torch.Tensor) -> _Handle:
tid = _get_tid(tensor)
sid = _get_sid(tensor)
# Tensors saved for backward have an entry in _tid_to_weakhandle
handle: Optional[_Handle] = None
# Save aliasing information
ctx.sid_to_tid[sid].add(tid)
# NB: The same tensor (of the same version) can be saved multiple times
if tid not in ctx.tid_to_weakhandle:
handle = _Handle()
ctx.tid_to_weakhandle[tid] = handle
ctx.original[handle] = tensor
else:
# Store an additional strong reference to the handle
handle = ctx.tid_to_weakhandle[tid]
return handle
def unpack_hook(handle: _Handle) -> torch.Tensor:
error_msg = (
"Trying to backward outside of the 'allow_mutation_on_saved_tensors' context"
"in which the graph was originally recorded."
)
if not _allow_mutation_on_saved_tensors_enabled:
raise AssertionError(error_msg)
if handle in ctx.cloned:
res = ctx.cloned[handle]
else:
if handle not in ctx.original:
raise AssertionError(error_msg)
res = ctx.original[handle]
return res
super().__init__(pack_hook, unpack_hook)
| _swap_with_cloned |
python | automl__auto-sklearn | autosklearn/pipeline/components/classification/extra_trees.py | {
"start": 710,
"end": 7453
} | class ____(
IterativeComponentWithSampleWeight,
AutoSklearnClassificationAlgorithm,
):
def __init__(
self,
criterion,
min_samples_leaf,
min_samples_split,
max_features,
bootstrap,
max_leaf_nodes,
max_depth,
min_weight_fraction_leaf,
min_impurity_decrease,
oob_score=False,
n_jobs=1,
random_state=None,
verbose=0,
class_weight=None,
):
self.n_estimators = self.get_max_iter()
self.criterion = criterion
self.max_depth = max_depth
self.max_leaf_nodes = max_leaf_nodes
self.min_samples_leaf = min_samples_leaf
self.min_samples_split = min_samples_split
self.max_features = max_features
self.bootstrap = bootstrap
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.min_impurity_decrease = min_impurity_decrease
self.oob_score = oob_score
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
self.class_weight = class_weight
self.estimator = None
@staticmethod
def get_max_iter():
return 512
def get_current_iter(self):
return self.estimator.n_estimators
def iterative_fit(self, X, y, sample_weight=None, n_iter=1, refit=False):
from sklearn.ensemble import ExtraTreesClassifier as ETC
if refit:
self.estimator = None
if self.estimator is None:
max_features = int(X.shape[1] ** float(self.max_features))
if self.criterion not in ("gini", "entropy"):
raise ValueError(
"'criterion' is not in ('gini', 'entropy'): " "%s" % self.criterion
)
if check_none(self.max_depth):
self.max_depth = None
else:
self.max_depth = int(self.max_depth)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
self.min_samples_leaf = int(self.min_samples_leaf)
self.min_samples_split = int(self.min_samples_split)
self.max_features = float(self.max_features)
self.min_impurity_decrease = float(self.min_impurity_decrease)
self.min_weight_fraction_leaf = float(self.min_weight_fraction_leaf)
self.oob_score = check_for_bool(self.oob_score)
self.bootstrap = check_for_bool(self.bootstrap)
self.n_jobs = int(self.n_jobs)
self.verbose = int(self.verbose)
self.estimator = ETC(
n_estimators=n_iter,
criterion=self.criterion,
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
bootstrap=self.bootstrap,
max_features=max_features,
max_leaf_nodes=self.max_leaf_nodes,
min_weight_fraction_leaf=self.min_weight_fraction_leaf,
min_impurity_decrease=self.min_impurity_decrease,
oob_score=self.oob_score,
n_jobs=self.n_jobs,
verbose=self.verbose,
random_state=self.random_state,
class_weight=self.class_weight,
warm_start=True,
)
else:
self.estimator.n_estimators += n_iter
self.estimator.n_estimators = min(
self.estimator.n_estimators, self.n_estimators
)
self.estimator.fit(X, y, sample_weight=sample_weight)
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
return not len(self.estimator.estimators_) < self.n_estimators
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
probas = self.estimator.predict_proba(X)
probas = convert_multioutput_multiclass_to_multilabel(probas)
return probas
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "ET",
"name": "Extra Trees Classifier",
"handles_regression": False,
"handles_classification": True,
"handles_multiclass": True,
"handles_multilabel": True,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
criterion = CategoricalHyperparameter(
"criterion", ["gini", "entropy"], default_value="gini"
)
# The maximum number of features used in the forest is calculated as
# m^max_features, where m is the total number of features,
# and max_features is the hyperparameter specified below.
# The default is 0.5, which yields sqrt(m) features as max_features
# in the estimator. This corresponds with Geurts' heuristic.
max_features = UniformFloatHyperparameter(
"max_features", 0.0, 1.0, default_value=0.5
)
max_depth = UnParametrizedHyperparameter(name="max_depth", value="None")
min_samples_split = UniformIntegerHyperparameter(
"min_samples_split", 2, 20, default_value=2
)
min_samples_leaf = UniformIntegerHyperparameter(
"min_samples_leaf", 1, 20, default_value=1
)
min_weight_fraction_leaf = UnParametrizedHyperparameter(
"min_weight_fraction_leaf", 0.0
)
max_leaf_nodes = UnParametrizedHyperparameter("max_leaf_nodes", "None")
min_impurity_decrease = UnParametrizedHyperparameter(
"min_impurity_decrease", 0.0
)
bootstrap = CategoricalHyperparameter(
"bootstrap", ["True", "False"], default_value="False"
)
cs.add_hyperparameters(
[
criterion,
max_features,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_leaf_nodes,
min_impurity_decrease,
bootstrap,
]
)
return cs
| ExtraTreesClassifier |
python | pytorch__pytorch | torch/_dynamo/cache_size.py | {
"start": 3390,
"end": 7976
} | class ____:
"""
We track the number of cache entries that have same id_match objects as the
given frame.
TODO(janimesh) - Consider adding a map from tuple_of_match_ids to count -
https://github.com/pytorch/pytorch/pull/107496#discussion_r1304564682 - this
could be useful for debugging as well.
"""
# Total number of CacheEntry objects in the Dynamo linked list
num_cache_entries: int = 0
# Number of CacheEntry objects having same ID_MATCH'd objects as given frame.
num_cache_entries_with_same_id_matched_objs: int = 0
def will_compilation_exceed(self, limit: int) -> bool:
# Checks if a compilation will exceed the given limit (that's why >=).
return (
self.will_compilation_exceed_accumulated_limit()
or self.will_compilation_exceed_specific_limit(limit)
)
def will_compilation_exceed_accumulated_limit(self) -> bool:
return self.num_cache_entries >= config.accumulated_recompile_limit
def will_compilation_exceed_specific_limit(self, limit: int) -> bool:
return self.num_cache_entries_with_same_id_matched_objs >= limit
def _get_weakref_from_f_locals(
frame: DynamoFrameType, local_name: str
) -> Optional[weakref.ref[Any]]:
obj = frame.f_locals.get(local_name, None)
weak_id = None
try:
weak_id = weakref.ref(obj)
except TypeError:
pass # cannot weakref bool object
return weak_id
def _has_same_id_matched_objs(frame: DynamoFrameType, cache_entry: Any) -> bool:
"""
Checks if the ID_MATCH'd objects saved on cache_entry are same as the ones
in frame.f_locals.
"""
if not cache_entry:
return False
for (
local_name,
weakref_from_cache_entry,
) in cache_entry.guard_manager.id_matched_objs.items():
if weakref_from_cache_entry() is not None:
weakref_from_frame = _get_weakref_from_f_locals(frame, local_name)
if weakref_from_frame is not weakref_from_cache_entry:
return False
# Also covers the case where no ID_MATCH objects are saved in frame.f_locals
return True
def compute_cache_size(
frame: DynamoFrameType, cache_entry: Any
) -> CacheSizeRelevantForFrame:
# Walk the linked list to calculate the cache size
num_cache_entries = 0
num_cache_entries_with_same_id_matched_objs = 0
while cache_entry:
num_cache_entries += 1
# Track the number of cache entries having same ID_MATCH'd objects as
# that of frame.f_locals. This will be used later to compare against the
# recompile_limit.
if _has_same_id_matched_objs(frame, cache_entry):
num_cache_entries_with_same_id_matched_objs += 1
cache_entry = cache_entry.next
return CacheSizeRelevantForFrame(
num_cache_entries, num_cache_entries_with_same_id_matched_objs
)
def is_recompilation(cache_size: CacheSizeRelevantForFrame) -> bool:
"""
If the frame (earlier parsed by compute_cache_size) has more than 1 cache
entry with same ID_MATCH'd objects, then its a recompilation.
"""
# Note that you can have multiple entries in the cache but still not a
# recompile, e.g., you can have 64 nn module instances, each one having an
# ID_MATCH guard, and each one having just 1 cache entry in the cache. In
# this case, we can have 64 entries in the cache, but no recompilation
# because there is only one entry for each id_matched_obj.
return cache_size.will_compilation_exceed(1)
def exceeds_recompile_limit(
cache_size: CacheSizeRelevantForFrame, compile_id: CompileId
) -> tuple[bool, str]:
"""
Checks if we are exceeding the cache size limit.
"""
if cache_size.will_compilation_exceed_accumulated_limit():
return True, "accumulated_recompile_limit"
if cache_size.will_compilation_exceed_specific_limit(config.recompile_limit):
return True, "recompile_limit"
# NOTE this check is needed in the case that the frame's cache doesn't grow
# and we keep recompiling. This can happen if the guard guard_manager becomes invalidated,
# e.g. due to guarded objects being freed. This technically makes the
# will_compilation_exceed_accumulated_limit check unnecessary, but we will keep the
# check in case we have a better fix in the future.
assert compile_id.frame_compile_id is not None
if compile_id.frame_compile_id >= config.accumulated_recompile_limit:
return True, "accumulated_recompile_limit"
return False, ""
| CacheSizeRelevantForFrame |
python | geekcomputers__Python | BlackJack_game/blackjack_simulate.py | {
"start": 1170,
"end": 1807
} | class ____:
__slots__ = "suit", "rank", "is_face"
def __init__(self, suit, rank, face=True):
"""
:param suit: pattern in the card
:param rank: point in the card
:param face: show or cover the face(point & pattern on it)
"""
self.suit = suit
self.rank = rank
self.is_face = face
def __repr__(self):
fmt_card = "\t<rank: {rank:2}, suit: {suit:8}>"
if self.is_face:
return fmt_card.format(suit=self.suit, rank=self.rank)
return fmt_card.format(suit="*-Back-*", rank="*-Back-*")
def show(self):
print(str(self))
| Card |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/api.py | {
"start": 3766,
"end": 3839
} | class ____:
RemoteConsoles = "remote_consoles_menu"
| RemoteConsolesMenus |
python | neetcode-gh__leetcode | python/0084-largest-rectangle-in-histogram.py | {
"start": 0,
"end": 534
} | class ____:
def largestRectangleArea(self, heights: List[int]) -> int:
maxArea = 0
stack = [] # pair: (index, height)
for i, h in enumerate(heights):
start = i
while stack and stack[-1][1] > h:
index, height = stack.pop()
maxArea = max(maxArea, height * (i - index))
start = index
stack.append((start, h))
for i, h in stack:
maxArea = max(maxArea, h * (len(heights) - i))
return maxArea
| Solution |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_ordered_dict.py | {
"start": 38695,
"end": 38846
} | class ____(PurePythonOrderedDictTests):
module = py_coll
class OrderedDict(py_coll.OrderedDict):
pass
| PurePythonOrderedDictSubclassTests |
python | django__django | django/db/models/lookups.py | {
"start": 22913,
"end": 22995
} | class ____(EndsWith):
lookup_name = "iendswith"
@Field.register_lookup
| IEndsWith |
python | doocs__leetcode | solution/3600-3699/3687.Library Late Fee Calculator/Solution.py | {
"start": 0,
"end": 265
} | class ____:
def lateFee(self, daysLate: List[int]) -> int:
def f(x: int) -> int:
if x == 1:
return 1
if x > 5:
return 3 * x
return 2 * x
return sum(f(x) for x in daysLate)
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-faker/source_faker/airbyte_message_with_cached_json.py | {
"start": 136,
"end": 759
} | class ____(AirbyteMessage):
"""
I a monkeypatch to AirbyteMessage which pre-renders the JSON-representation of the object upon initialization.
This allows the JSON to be calculated in the process that builds the object rather than the main process.
Note: We can't use @cache here because the LRU cache is not serializable when passed to child workers.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._json = AirbyteMessageSerializer.dump(self)
self.json = self.get_json
def get_json(self, **kwargs):
return self._json
| AirbyteMessageWithCachedJSON |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 6477,
"end": 6643
} | class ____(NamedTuple):
font: FT2Font
fontsize: float
postscript_name: str
metrics: FontMetrics
num: int
glyph: Glyph
offset: float
| FontInfo |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/callback.py | {
"start": 5639,
"end": 6466
} | class ____(Callback):
"""
Synchronous callback that runs in the specified or default executor.
The `callback_callable` can be a Python callable type or a string containing the path to the callable that
can be used to import the callable. It must be a top-level callable in a module present on the executor.
It will be called with Airflow context and specified kwargs when a deadline is missed.
"""
executor: str | None
def __init__(
self, callback_callable: Callable | str, kwargs: dict | None = None, executor: str | None = None
):
super().__init__(callback_callable=callback_callable, kwargs=kwargs)
self.executor = executor
@classmethod
def serialized_fields(cls) -> tuple[str, ...]:
return super().serialized_fields() + ("executor",)
| SyncCallback |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/layers.py | {
"start": 3501,
"end": 4133
} | class ____(torch.nn.Module):
@abc.abstractproperty
def memory_size(self) -> int:
"""
Size of memory that is required at the start of a sequence.
"""
pass
@abc.abstractmethod
def forward(
self, input_tensor: torch.Tensor, memories: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Pass a sequence to the memory module.
:input_tensor: Tensor of shape (batch_size, seq_length, size) that represents the input.
:memories: Tensor of initial memories.
:return: Tuple of output, final memories.
"""
pass
| MemoryModule |
python | huggingface__transformers | src/transformers/models/grounding_dino/modeling_grounding_dino.py | {
"start": 36189,
"end": 44913
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
vision_dim = text_dim = config.d_model
embed_dim = config.encoder_ffn_dim // 2
num_heads = config.encoder_attention_heads // 2
dropout = config.fusion_dropout
self.embed_dim = embed_dim
self.num_heads = num_heads
self.head_dim = embed_dim // num_heads
self.vision_dim = vision_dim
self.text_dim = text_dim
if self.head_dim * self.num_heads != self.embed_dim:
raise ValueError(
f"`embed_dim` must be divisible by `num_heads` (got `embed_dim`: {self.embed_dim} and `num_heads`: {self.num_heads})."
)
self.scale = self.head_dim ** (-0.5)
self.dropout = dropout
self.vision_proj = nn.Linear(self.vision_dim, self.embed_dim)
self.text_proj = nn.Linear(self.text_dim, self.embed_dim)
self.values_vision_proj = nn.Linear(self.vision_dim, self.embed_dim)
self.values_text_proj = nn.Linear(self.text_dim, self.embed_dim)
self.out_vision_proj = nn.Linear(self.embed_dim, self.vision_dim)
self.out_text_proj = nn.Linear(self.embed_dim, self.text_dim)
def _reshape(self, tensor: torch.Tensor, seq_len: int, batch_size: int):
return tensor.view(batch_size, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
vision_features: torch.FloatTensor,
text_features: torch.FloatTensor,
vision_attention_mask: Optional[torch.BoolTensor] = None,
text_attention_mask: Optional[torch.BoolTensor] = None,
) -> tuple[tuple[torch.FloatTensor, torch.FloatTensor], tuple[torch.FloatTensor, torch.FloatTensor]]:
"""Image-to-text and text-to-image cross-attention
Args:
vision_features (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_dim)`):
Projected flattened image features generated by the vision backbone.
text_features (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`):
Projected text features generated by the text encoder.
vision_attention_mask (`torch.BoolTensor`, **optional**):
Attention mask for image-to-text cross-attention. False for real tokens and True for padding tokens.
text_attention_mask (`torch.BoolTensor`, **optional**):
Attention mask for text-to-image cross-attention. False for real tokens and True for padding tokens.
Returns:
`tuple(tuple(torch.FloatTensor), tuple(torch.FloatTensor))` where each inner tuple comprises an attention
output and weights:
- **vision_attn_output** (`torch.FloatTensor` of shape `(batch_size, vision_sequence_length, hidden_din)`)
--
Output of the image-to-text cross-attention layer.
- **vision_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, vision_sequence_length,
vision_sequence_length)`) --
Attention weights of the image-to-text cross-attention layer.
- **text_attn_output** (`torch.FloatTensor` of shape `(batch_size, text_sequence_length, hidden_dim)`) --
Output of the text-to-image cross-attention layer.
- **text_attn_weights** (`torch.FloatTensor` of shape `(batch_size, num_heads, text_sequence_length,
text_sequence_length)`) --
Attention weights of the text-to-image cross-attention layer.
"""
batch_size, tgt_len, _ = vision_features.size()
vision_query_states = self.vision_proj(vision_features) * self.scale
vision_query_states = self._reshape(vision_query_states, tgt_len, batch_size)
text_key_states = self.text_proj(text_features)
text_key_states = self._reshape(text_key_states, -1, batch_size)
vision_value_states = self.values_vision_proj(vision_features)
vision_value_states = self._reshape(vision_value_states, -1, batch_size)
text_value_states = self.values_text_proj(text_features)
text_value_states = self._reshape(text_value_states, -1, batch_size)
proj_shape = (batch_size * self.num_heads, -1, self.head_dim)
vision_query_states = vision_query_states.view(*proj_shape)
text_key_states = text_key_states.view(*proj_shape)
vision_value_states = vision_value_states.view(*proj_shape)
text_value_states = text_value_states.view(*proj_shape)
src_len = text_key_states.size(1)
attn_weights = torch.bmm(vision_query_states, text_key_states.transpose(1, 2)) # bs*nhead, nimg, ntxt
if attn_weights.size() != (batch_size * self.num_heads, tgt_len, src_len):
raise ValueError(
f"Attention weights should be of size {(batch_size * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
)
attn_weights = attn_weights - attn_weights.max()
# Do not increase -50000/50000, data type half has quite limited range
attn_weights = torch.clamp(attn_weights, min=-50000, max=50000)
attn_weights_transposed = attn_weights.transpose(1, 2)
text_attn_weights = attn_weights_transposed - torch.max(attn_weights_transposed, dim=-1, keepdim=True)[0]
# Do not increase -50000/50000, data type half has quite limited range
text_attn_weights = torch.clamp(text_attn_weights, min=-50000, max=50000)
# mask vision for language
if vision_attention_mask is not None:
vision_attention_mask = (
vision_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
)
text_attn_weights.masked_fill_(vision_attention_mask, float("-inf"))
text_attn_weights = text_attn_weights.softmax(dim=-1)
# mask language for vision
if text_attention_mask is not None:
text_attention_mask = text_attention_mask[:, None, None, :].repeat(1, self.num_heads, 1, 1).flatten(0, 1)
attn_weights.masked_fill_(text_attention_mask, float("-inf"))
vision_attn_weights = attn_weights.softmax(dim=-1)
vision_attn_probs = F.dropout(vision_attn_weights, p=self.dropout, training=self.training)
text_attn_probs = F.dropout(text_attn_weights, p=self.dropout, training=self.training)
vision_attn_output = torch.bmm(vision_attn_probs, text_value_states)
text_attn_output = torch.bmm(text_attn_probs, vision_value_states)
if vision_attn_output.size() != (batch_size * self.num_heads, tgt_len, self.head_dim):
raise ValueError(
f"`vision_attn_output` should be of size {(batch_size, self.num_heads, tgt_len, self.head_dim)}, but is {vision_attn_output.size()}"
)
if text_attn_output.size() != (batch_size * self.num_heads, src_len, self.head_dim):
raise ValueError(
f"`text_attn_output` should be of size {(batch_size, self.num_heads, src_len, self.head_dim)}, but is {text_attn_output.size()}"
)
vision_attn_output = vision_attn_output.view(batch_size, self.num_heads, tgt_len, self.head_dim)
vision_attn_output = vision_attn_output.transpose(1, 2)
vision_attn_output = vision_attn_output.reshape(batch_size, tgt_len, self.embed_dim)
text_attn_output = text_attn_output.view(batch_size, self.num_heads, src_len, self.head_dim)
text_attn_output = text_attn_output.transpose(1, 2)
text_attn_output = text_attn_output.reshape(batch_size, src_len, self.embed_dim)
vision_attn_output = self.out_vision_proj(vision_attn_output)
text_attn_output = self.out_text_proj(text_attn_output)
return (vision_attn_output, vision_attn_weights), (text_attn_output, text_attn_weights)
# Copied from transformers.models.beit.modeling_beit.drop_path
def drop_path(input: torch.Tensor, drop_prob: float = 0.0, training: bool = False) -> torch.Tensor:
"""
Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
"""
if drop_prob == 0.0 or not training:
return input
keep_prob = 1 - drop_prob
shape = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
random_tensor.floor_() # binarize
output = input.div(keep_prob) * random_tensor
return output
# Copied from transformers.models.beit.modeling_beit.BeitDropPath with Beit->GroundingDino
| GroundingDinoBiMultiHeadAttention |
python | pytorch__pytorch | test/test_vulkan.py | {
"start": 339,
"end": 6829
} | class ____(TestCase):
@staticmethod
def validate_transformed_module(
# To please flake
self,
pattern_count_map,
data_shape,
prepack_removal=False,
fuse_clamping_ops=False):
module_instance = self
scripted_model = torch.jit.script(module_instance)
scripted_model.eval()
input_data = torch.normal(1, 20, size=data_shape)
scripted_model(input_data)
torch._C._jit_pass_vulkan_insert_prepacked_ops(scripted_model._c)
if fuse_clamping_ops or prepack_removal:
scripted_model._c = torch._C._freeze_module(scripted_model._c)
if fuse_clamping_ops:
torch._C._jit_pass_vulkan_fuse_clamp_w_prepacked_conv(scripted_model._c)
if prepack_removal:
torch._C._jit_pass_vulkan_fold_prepacking_ops(scripted_model._c)
buffer = io.BytesIO()
torch.jit.save(scripted_model, buffer)
buffer.seek(0)
deserialized_scripted_model = torch.jit.load(buffer)
for pattern, v in pattern_count_map.items():
if (v == 0):
FileCheck().check(pattern).run(deserialized_scripted_model.graph)
elif (v == -1):
FileCheck().check_not(pattern).run(deserialized_scripted_model.graph)
else:
FileCheck().check_count(pattern, v, exactly=True).run(deserialized_scripted_model.graph)
def test_conv(self):
# Conv params
batch_size = 2
input_channels_per_group = 6
height = 16
width = 16
output_channels_per_group = 6
groups = 4
kernel_h = kernel_w = 3
stride_h = stride_w = 1
pad_h = pad_w = 1
dilation = 1
input_channels = input_channels_per_group * groups
output_channels = output_channels_per_group * groups
strides = (stride_h, stride_w)
paddings = (pad_h, pad_w)
dilations = (dilation, dilation)
conv_weight_shape = (output_channels, input_channels_per_group, kernel_h, kernel_w)
conv_bias_shape = (output_channels)
class Conv2D(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
return F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"vulkan_prepack::conv2d_clamp_prepack": 1,
"vulkan_prepack::conv2d_clamp_run": 1}
TestVulkanRewritePass.validate_transformed_module(Conv2D(), pattern_count_map, data_shape)
class Conv2DRelu(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.relu(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"vulkan_prepack::conv2d_clamp_prepack": 1,
"vulkan_prepack::conv2d_clamp_run": 1}
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(), pattern_count_map, data_shape)
pattern_count_map["aten::relu"] = 1
pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::relu"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
class Conv2DHardtanh(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(conv_weight_shape), requires_grad=False)
self.bias = torch.nn.Parameter(torch.rand(conv_bias_shape), requires_grad=False)
self.strides = strides
self.paddings = paddings
self.dilations = dilations
self.groups = groups
def forward(self, x):
o = F.conv2d(x, self.weight, self.bias,
self.strides, self.paddings, self.dilations, self.groups)
o = F.hardtanh(o)
return o
data_shape = (batch_size, input_channels, height, width)
pattern_count_map = {"Tensor = aten::conv2d": -1,
"vulkan_prepack::conv2d_clamp_prepack": 1,
"vulkan_prepack::conv2d_clamp_run": 1}
TestVulkanRewritePass.validate_transformed_module(Conv2DHardtanh(), pattern_count_map, data_shape)
pattern_count_map["aten::hardtanh"] = 1
pattern_count_map["vulkan_prepack::conv2d_clamp_prepack"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DHardtanh(),
pattern_count_map,
data_shape,
prepack_removal=True)
pattern_count_map["aten::hardtanh"] = -1
TestVulkanRewritePass.validate_transformed_module(
Conv2DRelu(),
pattern_count_map,
data_shape,
prepack_removal=True,
fuse_clamping_ops=True)
if __name__ == "__main__":
run_tests()
| TestVulkanRewritePass |
python | huggingface__transformers | src/transformers/models/gemma3n/modeling_gemma3n.py | {
"start": 42581,
"end": 45776
} | class ____(PreTrainedModel):
"""
An audio encoder based on the [Universal Speech Model](https://huggingface.co/papers/2303.01037) architecture.
"""
config: Gemma3nAudioConfig
main_input_name = "audio_mel"
input_modalities = "audio"
def __init__(self, config: Gemma3nAudioConfig):
super().__init__(config)
self.config = config
self.subsample_conv_projection = Gemma3nAudioSubSampleConvProjection(config)
self.conformer = nn.ModuleList(
[Gemma3nAudioConformerBlock(config) for _ in range(config.conf_num_hidden_layers)]
)
def forward(
self, audio_mel: torch.Tensor, audio_mel_mask: torch.BoolTensor
) -> tuple[torch.Tensor, torch.BoolTensor]:
"""Encodes a batch of MELs.
Args:
audio_mel: a torch.Tensor of shape [batch, num_frames, num_channels,
mel_bins].
Returns:
audio_encodings: a torch.Tensor of shape
`[batch_size, self.config.audio_soft_tokens_per_image,
self.config.audio_config.hidden_size]`
audio_mel_mask: a torch.BoolTensor of shape [batch, num_frames].
"""
audio_encodings = self.subsample_conv_projection(audio_mel) # audio_encodings: [B, T_sub, D]
# Subsample the input audio_mel_mask to match the time dimension of audio_encodings (T_sub)
t_sub = audio_encodings.shape[1]
time_stride_product = 1
for stride_pair_idx in range(len(self.config.sscp_conv_stride_size)):
time_stride_product *= self.config.sscp_conv_stride_size[stride_pair_idx][0]
# Create indices for gathering from the original mask.
# These indices map to original time steps corresponding to the start of each
# receptive field in the subsampled output.
indices = torch.arange(t_sub, device=audio_mel_mask.device) * time_stride_product
indices = torch.clamp(indices, max=audio_mel_mask.shape[1] - 1) # Ensure indices are valid
# Expand indices for batch compatibility if B > 1 and indices is 1D.
if audio_mel_mask.ndim > 1 and indices.ndim == 1:
indices = indices.unsqueeze(0).expand(audio_mel_mask.shape[0], -1) # [B, T_sub]
elif (
audio_mel_mask.ndim == indices.ndim
and audio_mel_mask.shape[0] == 1
and indices.shape[0] != 1
and t_sub == indices.shape[0]
):
# Handle case where B=1 but indices became [T_sub] instead of [1, T_sub]
indices = indices.unsqueeze(0)
current_mask = torch.gather(audio_mel_mask, 1, indices) # [B, T_sub]
for block in self.conformer:
audio_encodings = block(audio_encodings, current_mask) # Pass the processed mask
if self.config.conf_reduction_factor > 1:
audio_encodings = audio_encodings[:, :: self.config.conf_reduction_factor]
# Reduce the mask as well
current_mask = current_mask[:, :: self.config.conf_reduction_factor]
audio_encodings = audio_encodings.masked_fill(current_mask.unsqueeze(-1), 0.0)
return audio_encodings, current_mask
| Gemma3nAudioEncoder |
python | ray-project__ray | rllib/algorithms/sac/sac_tf_policy.py | {
"start": 25784,
"end": 30599
} | class ____:
def __init__(self, loss_fn):
@make_tf_callable(self.get_session(), dynamic_shape=True)
def compute_td_error(
obs_t, act_t, rew_t, obs_tp1, terminateds_mask, importance_weights
):
# Do forward pass on loss to update td errors attribute
# (one TD-error value per item in batch to update PR weights).
loss_fn(
self,
self.model,
None,
{
SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_t),
SampleBatch.ACTIONS: tf.convert_to_tensor(act_t),
SampleBatch.REWARDS: tf.convert_to_tensor(rew_t),
SampleBatch.NEXT_OBS: tf.convert_to_tensor(obs_tp1),
SampleBatch.TERMINATEDS: tf.convert_to_tensor(terminateds_mask),
PRIO_WEIGHTS: tf.convert_to_tensor(importance_weights),
},
)
# `self.td_error` is set in loss_fn.
return self.td_error
self.compute_td_error = compute_td_error
def setup_mid_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Call mixin classes' constructors before Policy's loss initialization.
Adds the `compute_td_error` method to the given policy.
Calling `compute_td_error` with batch data will re-calculate the loss
on that batch AND return the per-batch-item TD-error for prioritized
replay buffer record weight updating (in case a prioritized replay buffer
is used).
Args:
policy: The Policy object.
obs_space (gym.spaces.Space): The Policy's observation space.
action_space (gym.spaces.Space): The Policy's action space.
config: The Policy's config.
"""
ComputeTDErrorMixin.__init__(policy, sac_actor_critic_loss)
def setup_late_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Call mixin classes' constructors after Policy initialization.
Adds the `update_target` method to the given policy.
Calling `update_target` updates all target Q-networks' weights from their
respective "main" Q-metworks, based on tau (smooth, partial updating).
Args:
policy: The Policy object.
obs_space (gym.spaces.Space): The Policy's observation space.
action_space (gym.spaces.Space): The Policy's action space.
config: The Policy's config.
"""
TargetNetworkMixin.__init__(policy)
def validate_spaces(
policy: Policy,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
"""Validates the observation- and action spaces used for the Policy.
Args:
policy: The policy, whose spaces are being validated.
observation_space (gym.spaces.Space): The observation space to
validate.
action_space (gym.spaces.Space): The action space to validate.
config: The Policy's config dict.
Raises:
UnsupportedSpaceException: If one of the spaces is not supported.
"""
# Only support single Box or single Discrete spaces.
if not isinstance(action_space, (Box, Discrete, Simplex)):
raise UnsupportedSpaceException(
"Action space ({}) of {} is not supported for "
"SAC. Must be [Box|Discrete|Simplex].".format(action_space, policy)
)
# If Box, make sure it's a 1D vector space.
elif isinstance(action_space, (Box, Simplex)) and len(action_space.shape) > 1:
raise UnsupportedSpaceException(
"Action space ({}) of {} has multiple dimensions "
"{}. ".format(action_space, policy, action_space.shape)
+ "Consider reshaping this into a single dimension, "
"using a Tuple action space, or the multi-agent API."
)
# Build a child class of `DynamicTFPolicy`, given the custom functions defined
# above.
SACTFPolicy = build_tf_policy(
name="SACTFPolicy",
get_default_config=lambda: ray.rllib.algorithms.sac.sac.SACConfig(),
make_model=build_sac_model,
postprocess_fn=postprocess_trajectory,
action_distribution_fn=get_distribution_inputs_and_class,
loss_fn=sac_actor_critic_loss,
stats_fn=stats,
compute_gradients_fn=compute_and_clip_gradients,
apply_gradients_fn=apply_gradients,
extra_learn_fetches_fn=lambda policy: {"td_error": policy.td_error},
mixins=[TargetNetworkMixin, ActorCriticOptimizerMixin, ComputeTDErrorMixin],
validate_spaces=validate_spaces,
before_init=setup_early_mixins,
before_loss_init=setup_mid_mixins,
after_init=setup_late_mixins,
)
| ComputeTDErrorMixin |
python | getsentry__sentry | src/sentry/incidents/logic.py | {
"start": 11011,
"end": 11234
} | class ____(BaseMetricIssueQueryParams):
project_ids: list[int]
entity_subscription: EntitySubscription
start_arg: datetime | None = None
end_arg: datetime | None = None
@dataclass
| BuildMetricQueryBuilderParams |
python | gevent__gevent | src/gevent/tests/test__util.py | {
"start": 580,
"end": 1978
} | class ____(greentest.TestCase):
def test_basic(self):
lines = util.format_run_info()
value = '\n'.join(lines)
self.assertIn('Threads', value)
self.assertIn('Greenlets', value)
# because it's a raw greenlet, we have no data for it.
self.assertNotIn("Spawned at", value)
self.assertNotIn("Parent greenlet", value)
self.assertNotIn("Spawn Tree Locals", value)
def test_with_Greenlet(self):
rl = local.local()
rl.some_attr = 1
def root():
l = MyLocal(42)
assert l
# And an empty local.
l2 = local.local()
assert l2
gevent.getcurrent().spawn_tree_locals['a value'] = 42
io = NativeStrIO()
g = gevent.spawn(util.print_run_info, file=io)
g.join()
return io.getvalue()
g = gevent.spawn(root)
g.name = 'Printer'
g.join()
value = g.value
self.assertIn("Spawned at", value)
self.assertIn("Parent:", value)
self.assertIn("Spawn Tree Locals", value)
self.assertIn("Greenlet Locals:", value)
self.assertIn('MyLocal', value)
self.assertIn("Printer", value) # The name is printed
# Empty locals should not be printed
self.assertNotIn('{}', value)
@greentest.skipOnPyPy("See TestFormat")
| TestFormat |
python | pytorch__pytorch | test/test_foreach.py | {
"start": 5004,
"end": 61522
} | class ____(TestCase):
@property
def is_cuda(self):
return self.device_type == "cuda"
def _get_funcs(self, op):
return (
ForeachFuncWrapper(op.method_variant),
RegularFuncWrapper(op.ref),
ForeachFuncWrapper(op.inplace_variant),
RegularFuncWrapper(op.ref_inplace),
)
# note(crcrpar): Make sure 0-size tensors are appropriately ignored by `multi_tensor_apply`
# which is originally reported in https://github.com/pytorch/pytorch/issues/94865.
# rel:
# - https://github.com/pytorch/pytorch/pull/94655
# - https://github.com/pytorch/pytorch/issues/100701
# - https://github.com/pytorch/pytorch/pull/100811
@onlyCUDA
@ops(
foreach_unary_op_db
+ foreach_binary_op_db
+ foreach_pointwise_op_db
+ foreach_reduce_op_db
+ foreach_other_op_db,
dtypes=(torch.float32,),
)
def test_all_zero_size_tensors_do_not_launch_kernel(self, device, dtype, op):
wrapped_op, _, inplace_op, _ = self._get_funcs(op)
for sample in op.sample_zero_size_inputs(device, dtype):
if op.method_variant is not None:
wrapped_op(
(sample.input, *sample.args),
is_cuda=self.is_cuda,
expect_fastpath=True,
zero_size=True,
)
if op.inplace_variant is not None:
with InplaceForeachVersionBumpCheck(self, sample.input):
inplace_op(
(sample.input, *sample.args),
is_cuda=self.is_cuda,
expect_fastpath=True,
zero_size=True,
)
@ops(
foreach_unary_op_db
+ foreach_binary_op_db
+ foreach_pointwise_op_db
+ foreach_reduce_op_db
+ foreach_other_op_db,
)
@parametrize(
"noncontiguous,inplace",
[(False, False), (False, True), (True, False), (True, True)],
name_fn=lambda x, y: "{}_{}".format(
"fastpath" if not x else "slowpath", "inplace" if y else "outplace"
),
)
def test_parity(self, device, dtype, op, noncontiguous, inplace):
if inplace:
_, _, func, ref = self._get_funcs(op)
else:
func, ref, _, _ = self._get_funcs(op)
for sample in op.sample_inputs(
device, dtype, noncontiguous=noncontiguous, allow_higher_dtype_scalars=True
):
ref_kwargs = sample.kwargs
# div promotes ints to floats, so we cannot go on the fastpath there
div_slowpath = (
dtype in integral_types_and(torch.bool) and op.name == "_foreach_div"
)
expect_fastpath = not (
noncontiguous or sample.disable_fastpath or div_slowpath
)
ref_input, ctxmgr = sample.input, nullcontext()
if inplace:
with torch.no_grad():
ref_input = [t.detach().clone() for t in sample.input]
ctxmgr = InplaceForeachVersionBumpCheck(self, sample.input)
try:
with ctxmgr:
actual = func(
[sample.input, *sample.args],
self.is_cuda,
expect_fastpath,
**sample.kwargs,
)
except Exception as e:
with self.assertRaises(type(e)):
ref([ref_input, *sample.ref_args], **ref_kwargs)
else:
expected = ref([ref_input, *sample.ref_args], **ref_kwargs)
self.assertEqual(expected, actual)
def _binary_test(
self,
dtype,
op,
ref,
inputs,
is_fastpath,
is_inplace,
*,
alpha,
scalar_self_arg: bool,
):
ref_inputs = (
[[t.detach().clone() for t in inputs[0]], inputs[1]]
if is_inplace
else inputs
)
try:
with (
InplaceForeachVersionBumpCheck(self, inputs[0])
if op.is_inplace
else nullcontext()
):
actual = op(inputs, self.is_cuda, is_fastpath)
except RuntimeError as e:
with self.assertRaisesRegex(type(e), re.escape(str(e).splitlines()[0])):
if not scalar_self_arg:
ref(ref_inputs)
else:
[ref.func(ref_inputs[0], t) for t in ref_inputs[1]]
else:
expected = (
ref(ref_inputs)
if not scalar_self_arg
else [ref.func(ref_inputs[0], t) for t in ref_inputs[1]]
)
self.assertEqual(actual, expected)
if alpha is not None and not scalar_self_arg:
kwargs = {"alpha": alpha}
ref_inputs = inputs
try:
op_kwargs = {}
op_kwargs.update(kwargs)
with (
InplaceForeachVersionBumpCheck(self, inputs[0])
if op.is_inplace
else nullcontext()
):
actual = op(inputs, self.is_cuda, is_fastpath, **op_kwargs)
except RuntimeError as e:
with self.assertRaisesRegex(type(e), re.escape(str(e).splitlines()[0])):
ref(ref_inputs, **kwargs)
else:
expected = ref(ref_inputs, **kwargs)
if dtype in (torch.float16, torch.bfloat16) and TEST_WITH_ROCM:
self.assertEqual(
expected, actual, atol=1.0e-3, rtol=default_tolerances(dtype)[0]
)
else:
self.assertEqual(expected, actual)
@ops(filter(lambda op: op.supports_scalar_self_arg, foreach_binary_op_db))
@parametrize("is_fastpath", (True, False))
def test_binary_op_with_scalar_self_support(self, device, dtype, op, is_fastpath):
def clone(arg):
if isinstance(arg, (list, tuple)):
return [clone(a) for a in arg]
if torch.is_tensor(arg):
return arg.detach().clone().requires_grad_()
else:
return arg
scalar_self_arg_test_complete = False
for sample in op.sample_inputs(
device,
dtype,
noncontiguous=not is_fastpath,
allow_higher_dtype_scalars=True,
):
(rhs_arg,) = sample.args
kwargs = {} or sample.kwargs
alpha = kwargs.pop("alpha", None)
wrapped_op, ref, inplace_op, inplace_ref = self._get_funcs(op)
if isinstance(rhs_arg, Number) and not scalar_self_arg_test_complete:
scalar_self_arg_test_complete = True
self._binary_test(
dtype,
wrapped_op,
ref,
[rhs_arg, sample.input],
is_fastpath,
False,
alpha=alpha,
scalar_self_arg=True,
)
if op.supports_autograd and dtype == torch.float32:
transformed_sample = sample.transform(
get_transform_func(
len(sample.input), dtype, device, is_fastpath
)
)
tensors = transformed_sample.input
(rhs_arg,) = transformed_sample.args
ref_tensors, ref_rhs_arg = clone(tensors), clone(rhs_arg)
sum(
wrapped_op(
[rhs_arg, tensors], is_cuda=False, expect_fastpath=False
)
).mean().backward()
sum(ref.func(ref_rhs_arg, t) for t in ref_tensors).mean().backward()
self.assertEqual(
[t.grad for t in tensors], [t.grad for t in ref_tensors]
)
@ops(foreach_pointwise_op_db)
@parametrize("is_fastpath", (True, False))
def test_pointwise_op_with_tensor_of_scalarlist_overload(
self, device, dtype, op, is_fastpath
):
for sample in op.sample_inputs(
device,
dtype,
noncontiguous=not is_fastpath,
allow_higher_dtype_scalars=True,
):
assert isinstance(sample.args, tuple)
assert len(sample.args) == 2
inputs = [sample.input, *sample.args]
kwargs = sample.kwargs.copy()
disable_fastpath = sample.disable_fastpath and is_fastpath
wrapped_op, ref, inplace_op, inplace_ref = self._get_funcs(op)
scalars = kwargs.pop("scalars", None)
if is_fastpath and scalars:
sample = sample.transform(
lambda t: t.detach().clone() if torch.is_tensor(t) else t
)
inputs = [sample.input, *sample.args]
tensor_values = torch.tensor(scalars)
# 1D Tensor of scalars
for is_inplace, op_, ref_ in (
(False, wrapped_op, ref),
(True, inplace_op, inplace_ref),
):
self._pointwise_test(
op_,
ref_,
inputs,
is_fastpath and not disable_fastpath,
is_inplace,
scalars=tensor_values,
**kwargs,
)
self._pointwise_test(
op_,
ref_,
inputs,
is_fastpath and not disable_fastpath,
is_inplace,
scalars=tensor_values[0],
custom_values_err="Expected packed scalar Tensor to be of dimension 1. Got 0 instead.",
**kwargs,
)
if self.is_cuda:
self._pointwise_test(
op_,
ref_,
inputs,
is_fastpath and not disable_fastpath,
is_inplace,
scalars=tensor_values.cuda(),
custom_values_err="Expected scalars to be on CPU, got cuda:0 instead.",
**kwargs,
)
self._pointwise_test(
op_,
ref_,
inputs,
is_fastpath and not disable_fastpath,
is_inplace,
scalars=tensor_values[:2],
custom_values_err=f"Expected length of scalars to match input of length {len(scalars)} but got 2 instead.",
**kwargs,
)
self._pointwise_test(
op_,
ref_,
inputs,
is_fastpath and not disable_fastpath,
is_inplace,
scalars=torch.tensor([[0, 1], [2, 3]])[:, 1],
custom_values_err="Expected scalars to be contiguous.",
**kwargs,
)
# Tests of implicit broadcasting
N = len(sample.input)
inputs = [
[
make_tensor(
(N, N),
device=device,
dtype=dtype,
noncontiguous=not is_fastpath,
)
for _ in range(N)
],
[
make_tensor(
(N - i, 1),
device=device,
dtype=dtype,
noncontiguous=not is_fastpath,
)
for i in range(N)
],
[
make_tensor(
(1, N - i),
device=device,
dtype=dtype,
noncontiguous=not is_fastpath,
)
for i in range(N)
],
]
self._pointwise_test(
wrapped_op,
ref,
inputs,
is_fastpath and disable_fastpath,
is_inplace=False,
scalars=scalars,
**kwargs,
)
self._pointwise_test(
inplace_op,
inplace_ref,
inputs,
is_fastpath and disable_fastpath,
is_inplace=True,
scalars=scalars,
**kwargs,
)
def _pointwise_test(
self,
op,
ref,
inputs,
is_fastpath,
is_inplace,
*,
scalars=None,
custom_values_err=None,
**kwargs,
):
ref_inputs = (
[[t.detach().clone() for t in inputs[0]], inputs[1], inputs[2]]
if is_inplace
else inputs
)
try:
with (
InplaceForeachVersionBumpCheck(self, inputs[0])
if is_inplace
else nullcontext()
):
actual = op(inputs, self.is_cuda, is_fastpath, **kwargs)
except RuntimeError as e:
with self.assertRaisesRegex(type(e), re.escape(str(e).splitlines()[0])):
ref(ref_inputs, **kwargs)
else:
expected = ref(ref_inputs, **kwargs)
self.assertEqual(expected, actual)
if scalars is not None:
kwargs = kwargs.copy()
kwargs["scalars"] = scalars
try:
actual = op(inputs, self.is_cuda, is_fastpath, **kwargs)
except RuntimeError as e:
# Match with error messages from regular non-foreach reference if no
# custom error message was provided.
if custom_values_err is None:
with self.assertRaisesRegex(
type(e), re.escape(str(e).splitlines()[0])
):
ref(ref_inputs, **kwargs)
else:
self.assertEqual(re.escape(str(e)), re.escape(custom_values_err))
else:
expected = ref(ref_inputs, **kwargs)
self.assertEqual(expected, actual)
@dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
def test_add_scalar_with_empty_list_and_empty_tensor(self, device, dtype):
# TODO: enable empty list case
for tensors in [
[torch.randn([0], device=device, dtype=dtype)],
[torch.empty_strided((0, 1), (0, 0), dtype=dtype, device=device)],
]:
res = torch._foreach_add(tensors, 1)
self.assertEqual(res, tensors)
torch._foreach_add_(tensors, 1)
self.assertEqual(res, tensors)
# Regression test for https://github.com/pytorch/pytorch/issues/113156
torch._foreach_mul_(tensors, 1)
@onlyCUDA
@dtypes(torch.float32)
def test_foreach_check_stride_ignore_dims_of_one(self, device, dtype):
# default tensor stride is (9, 9, 3, 1).
tensor = torch.ones((2, 1, 3, 3), device=device, dtype=dtype)
strided_tensor = torch.ones(
(2, 1, 3, 3), device=device, dtype=dtype
).as_strided((2, 1, 3, 3), (9, 1, 3, 1))
left_inputs = [tensor, strided_tensor]
right_inputs = [strided_tensor, tensor]
compare_result = tensor + strided_tensor
foreach_add_check_ = ForeachFuncWrapper(torch._foreach_add)
out = foreach_add_check_(
(left_inputs, right_inputs), is_cuda=True, expect_fastpath=True
)
for res in out:
self.assertEqual(res, compare_result)
@ops(
filter(lambda op: op.supports_out, foreach_binary_op_db),
dtypes=OpDTypes.supported,
)
def test_binary_op_scalar_with_overlapping_tensors(self, device, dtype, op):
foreach_op, ref = op.method_variant, op.ref
tensors = [torch.ones(1, 1, device=device, dtype=dtype).expand(2, 1, 3)]
if ref == torch.sub and dtype == torch.bool:
with self.assertRaisesRegex(RuntimeError, re.escape(_BOOL_SUB_ERR_MSG)):
[ref(t, 1) for t in tensors]
with self.assertRaisesRegex(RuntimeError, re.escape(_BOOL_SUB_ERR_MSG)):
foreach_op(tensors, 1)
return
expected = [ref(t, 1) for t in tensors]
res = foreach_op(tensors, 1)
self.assertEqual(res, expected)
@ops(
filter(lambda op: op.supports_out, foreach_binary_op_db),
allowed_dtypes=[torch.float],
)
def test_binary_op_scalar_with_different_tensor_dtypes(self, device, dtype, op):
foreach_op = op.method_variant
tensors = [
torch.tensor([1.1], dtype=torch.float, device=device),
torch.tensor([1], dtype=torch.long, device=device),
]
runtime_error = None
try:
foreach_op(tensors, 1)
except RuntimeError as e:
runtime_error = e
self.assertIsNone(runtime_error)
@skipIfTorchDynamo("Different error msgs, TODO")
@ops(
filter(lambda op: op.supports_out, foreach_binary_op_db),
dtypes=OpDTypes.supported,
)
def test_binary_op_list_error_cases(self, device, dtype, op):
foreach_op, foreach_op_, ref, ref_ = (
op.method_variant,
op.inplace_variant,
op.ref,
op.ref_inplace,
)
tensors1 = []
tensors2 = []
ops_to_test = [foreach_op, foreach_op_]
# Empty lists
for fop in ops_to_test:
with self.assertRaisesRegex(
RuntimeError, "Tensor list must have at least one tensor."
):
fop(tensors1, tensors2)
# One empty list
tensors1.append(torch.tensor([1], device=device, dtype=dtype))
for fop in ops_to_test:
with self.assertRaisesRegex(
RuntimeError,
"Tensor list must have same number of elements as scalar list.",
):
fop(tensors1, tensors2)
# Lists have different amount of tensors
tensors2.append(torch.tensor([1], device=device))
tensors2.append(torch.tensor([1], device=device))
for fop in ops_to_test:
with self.assertRaisesRegex(
RuntimeError,
"Tensor lists must have the same number of tensors, got 1 and 2",
):
fop(tensors1, tensors2)
with self.assertRaisesRegex(
RuntimeError,
"Tensor lists must have the same number of tensors, got 2 and 1",
):
fop(tensors2, tensors1)
# Corresponding tensors with different sizes that aren't compatible with broadcast
# If sizes are different then foreach chooses slow path, thus error messages are expected
# to be the same as torch regular function.
tensors1 = [torch.zeros(10, 10, device=device, dtype=dtype) for _ in range(10)]
tensors2 = [torch.ones(11, 11, device=device, dtype=dtype) for _ in range(10)]
if dtype == torch.bool and foreach_op == torch._foreach_sub:
for fop in ops_to_test:
with self.assertRaisesRegex(RuntimeError, re.escape(_BOOL_SUB_ERR_MSG)):
fop(tensors1, tensors2)
return
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(10\) must match the size of tensor b \(11\) at non-singleton dimension 1",
):
foreach_op(tensors1, tensors2)
with self.assertRaisesRegex(
RuntimeError,
r"The size of tensor a \(10\) must match the size of tensor b \(11\) at non-singleton dimension 1",
):
foreach_op_(tensors1, tensors2)
# different devices
if self.device_type == "cuda" and torch.cuda.device_count() > 1:
tensor1 = torch.zeros(10, 10, device="cuda:0", dtype=dtype)
tensor2 = torch.ones(10, 10, device="cuda:1", dtype=dtype)
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
foreach_op([tensor1], [tensor2])
if (
dtype in integral_types_and(torch.bool)
and foreach_op == torch._foreach_div
):
with self.assertRaisesRegex(RuntimeError, "result type"):
foreach_op_([tensor1], [tensor2])
else:
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
foreach_op_([tensor1], [tensor2])
@unittest.skipIf(not torch.cuda.is_available(), "CUDA not found")
@ops(
filter(lambda op: op.supports_out, foreach_binary_op_db),
dtypes=OpDTypes.supported,
)
def test_binary_op_list_slow_path(self, device, dtype, op):
foreach_op, native_op, foreach_op_, native_op_ = self._get_funcs(op)
# 0-strides
tensor1 = make_tensor((10, 10), dtype=dtype, device=device)
tensor2 = make_tensor((1,), device=device, dtype=dtype).expand_as(tensor1)
inputs = ([tensor1], [tensor2])
self._binary_test(
dtype,
foreach_op,
native_op,
inputs,
is_fastpath=False,
is_inplace=False,
alpha=None,
scalar_self_arg=False,
)
self._binary_test(
dtype,
foreach_op_,
native_op_,
inputs,
is_fastpath=False,
is_inplace=True,
alpha=None,
scalar_self_arg=False,
)
# different strides
tensor1 = torch.zeros(10, 10, device=device, dtype=dtype)
tensor2 = torch.ones(10, 10, device=device, dtype=dtype)
inputs = ([tensor1], [tensor2.t()])
self._binary_test(
dtype,
foreach_op,
native_op,
inputs,
is_fastpath=False,
is_inplace=False,
alpha=None,
scalar_self_arg=False,
)
self._binary_test(
dtype,
foreach_op_,
native_op_,
inputs,
is_fastpath=False,
is_inplace=True,
alpha=None,
scalar_self_arg=False,
)
# non contiguous
tensor1 = make_tensor(
(5, 2, 1, 3), device=device, dtype=dtype, noncontiguous=True
)
tensor2 = make_tensor(
(5, 2, 1, 3), device=device, dtype=dtype, noncontiguous=True
)
self.assertFalse(tensor1.is_contiguous())
self.assertFalse(tensor2.is_contiguous())
inputs = ([tensor1], [tensor2])
self._binary_test(
dtype,
foreach_op,
native_op,
inputs,
is_fastpath=False,
is_inplace=False,
alpha=None,
scalar_self_arg=False,
)
self._binary_test(
dtype,
foreach_op_,
native_op_,
inputs,
is_fastpath=False,
is_inplace=True,
alpha=None,
scalar_self_arg=False,
)
# sliced tensor
tensor1 = make_tensor((5, 2, 1, 3), device=device, dtype=dtype)
tensor2 = make_tensor((5, 2, 1, 3 * 7), device=device, dtype=dtype)[
:, :, :, ::7
]
inputs = ([tensor1], [tensor2])
self._binary_test(
dtype,
foreach_op,
native_op,
inputs,
is_fastpath=False,
is_inplace=False,
alpha=None,
scalar_self_arg=False,
)
self._binary_test(
dtype,
foreach_op_,
native_op_,
inputs,
is_fastpath=False,
is_inplace=True,
alpha=None,
scalar_self_arg=False,
)
@ops(
filter(lambda op: op.supports_out, foreach_binary_op_db),
dtypes=floating_types_and(torch.half, torch.bfloat16),
)
def test_binary_op_float_inf_nan(self, device, dtype, op):
inputs = (
[
torch.tensor([float("inf")], device=device, dtype=dtype),
torch.tensor([-float("inf")], device=device, dtype=dtype),
torch.tensor([float("nan")], device=device, dtype=dtype),
torch.tensor([float("nan")], device=device, dtype=dtype),
],
[
torch.tensor([-float("inf")], device=device, dtype=dtype),
torch.tensor([float("inf")], device=device, dtype=dtype),
torch.tensor([float("inf")], device=device, dtype=dtype),
torch.tensor([float("nan")], device=device, dtype=dtype),
],
)
op, ref, inplace_op, inplace_ref = self._get_funcs(op)
self._binary_test(
dtype, op, ref, inputs, True, False, alpha=None, scalar_self_arg=False
)
self._binary_test(
dtype,
inplace_op,
inplace_ref,
inputs,
True,
True,
alpha=None,
scalar_self_arg=False,
)
# note: Below three tests (postfixed with `_tensors_on_different_devices`)
# checks whether foreach works with lists of tensors on different devices
# but tensors of the same index are on the same device, e.g., ['cuda', 'cpu].
@onlyCUDA
@ops(foreach_unary_op_db)
def test_unary_op_tensors_on_different_devices(self, device, dtype, op):
method, ref, inplace_method, ref_inplace = self._get_funcs(op)
# tensors: ['cuda', 'cpu]
tensors = next(
iter(
op.sample_inputs(
device,
dtype,
num_input_tensors=[2],
allow_higher_dtype_scalars=True,
)
)
).input
tensors[1] = tensors[1].to("cpu")
if not op.supports_out:
try:
actual = method((tensors,), False, False, zero_size=False)
except RuntimeError as e:
with self.assertRaisesRegex(type(e), str(e).splitlines()[0]):
ref((tensors,))
else:
expected = ref((tensors,))
self.assertEqual(expected, actual)
try:
inplace_method((tensors,), False, False, zero_size=False)
except RuntimeError as e:
with self.assertRaisesRegex(type(e), str(e).splitlines()[0]):
ref_inplace((tensors,))
else:
if not op.supports_out:
self.assertEqual(expected, tensors)
else:
self.assertEqual([torch.zeros_like(t) for t in tensors], tensors)
@onlyCUDA
@ops(filter(lambda op: op.supports_out, foreach_binary_op_db))
def test_binary_op_tensors_on_different_devices(self, device, dtype, op):
_cuda_tensors = next(
iter(
op.sample_inputs(
device,
dtype,
num_input_tensors=[2],
same_size=True,
allow_higher_dtype_scalars=True,
)
)
).input
_cpu_tensors = next(
iter(
op.sample_inputs(
"cpu",
dtype,
num_input_tensors=[2],
same_size=True,
allow_higher_dtype_scalars=True,
)
)
).input
tensors1, tensors2 = list(zip(_cuda_tensors, _cpu_tensors))
foreach_op, foreach_op_ = op.method_variant, op.inplace_variant
native_op, native_op_ = op.ref, op.ref_inplace
try:
actual = foreach_op(tensors1, tensors2)
except RuntimeError as e:
with self.assertRaisesRegex(type(e), re.escape(str(e).splitlines()[0])):
[native_op(t1, t2) for t1, t2 in zip(tensors1, tensors2)]
else:
expected = [native_op(t1, t2) for t1, t2 in zip(tensors1, tensors2)]
self.assertEqual(expected, actual)
try:
foreach_op_(tensors1, tensors2)
except RuntimeError as e:
with self.assertRaisesRegex(type(e), re.escape(str(e).splitlines()[0])):
[native_op_(t1, t2) for t1, t2 in zip(tensors1, tensors2)]
else:
self.assertEqual(actual, tensors1)
@onlyCUDA
@ops(foreach_pointwise_op_db, allowed_dtypes=floating_types())
def test_pointwise_op_tensors_on_different_devices(self, device, dtype, op):
# tensors1: ['cuda', 'cpu]
# tensors2: ['cuda', 'cpu]
# tensors3: ['cuda', 'cpu]
# first tensorlist is zero-size when float32
_cuda_tensors = list(
op.sample_inputs(
device,
dtype,
num_input_tensors=[3],
same_size=True,
allow_higher_dtype_scalars=True,
)
)[int(dtype == torch.float32)].input
_cpu_tensors = next(
iter(
op.sample_inputs(
"cpu",
dtype,
num_input_tensors=[3],
same_size=True,
allow_higher_dtype_scalars=True,
)
)
).input
tensors1, tensors2, tensors3 = list(zip(_cuda_tensors, _cpu_tensors))
foreach_op, foreach_op_, native_op = (
op.method_variant,
op.inplace_variant,
op.ref,
)
actual = foreach_op(tensors1, tensors2, tensors3)
expected = [native_op(*_cuda_tensors), native_op(*_cpu_tensors)]
self.assertEqual(expected, actual)
# note(mkozuki): Limiting dtypes to FP32&FP64, we can safely run inplace ops.
foreach_op_(tensors1, tensors2, tensors3)
self.assertEqual(expected, tensors1)
# note: BFloat16 has the same number of exponent bits as FP32
# so if squared L2 norm overflows in BF16, then it also overflows in FP32.
@onlyCUDA
@ops(
[o for o in foreach_reduce_op_db if "norm" in o.name],
allowed_dtypes=(torch.half, torch.bfloat16),
)
def test_foreach_l2_large_value_input(self, device, dtype, op):
ord, N = 2, 10
max_value = torch.finfo(dtype).max
scaler = torch.tensor([max_value]).sqrt().to(device=device, dtype=dtype)
inputs = (
[
t * scaler
for t in next(
iter(
op.sample_inputs(
device,
dtype,
requries_grad=True,
num_input_tensors=[N],
low=1,
)
)
).input
][:-1],
)
# make sure that the min. of squared L2 norm value per tensor is greater than the max value of `dtype`.
self.assertTrue(scaler * scaler * N > max_value)
fn, ref_fn, *_ = self._get_funcs(op)
actual = fn(
inputs, is_cuda=True, expect_fastpath=True, ord=ord, zero_size=False
)
expect = ref_fn(inputs, ord=ord)
if dtype == torch.float16:
# making sure the reference L2 norm values are in the range of FP16.
self.assertFalse(any(torch.isinf(e) for e in expect))
else:
self.assertTrue(
all(
inputs[0][i].numel() == 0 or torch.isinf(e)
for i, e in enumerate(expect)
)
)
self.assertEqual(expect, actual, equal_nan=False)
@onlyCUDA
@ops(foreach_reduce_op_db, allowed_dtypes=floating_types())
@parametrize("use_cuda_graph", (False, True))
@parametrize("w_empty", (False, True))
def test_big_num_tensors(self, device, dtype, op, use_cuda_graph, w_empty):
# foreach_max cannot handle empty tensors as max requires an identity
intersperse_empty_tensors = w_empty and op.name != "_foreach_max"
N = 600
indices_with_empty_tensors = (
set()
if not intersperse_empty_tensors
else {200, 300, 301, 400, 401, 402, 404, 598}
)
tensorlist = [
make_tensor((2, 3), dtype=dtype, device=device, noncontiguous=False)
if i not in indices_with_empty_tensors
else torch.empty(0, dtype=dtype, device=device)
for i in range(N)
]
fn, ref_fn, *_ = self._get_funcs(op)
import math
if op.name == "_foreach_norm":
ords = [1, 2]
if not intersperse_empty_tensors:
# inf norm over an empty tensor is not defined by vector norm as it expects an identity
ords.append(math.inf)
else:
ords = [None]
for ord in ords:
kwargs = {"ord": ord} if ord else {}
if not use_cuda_graph:
actual = fn(
inputs=[tensorlist],
is_cuda=True,
expect_fastpath=True,
zero_size=False,
**kwargs,
)
else:
# When using CUDA graphs and the tensor metadata doesn't fit in
# the static kernel argument space, multi_tensor_apply creates
# the launch arguments once, uses cudaUserObject_t to tie its
# lifetime to the graph, and reuses it throughout replays. This
# test verifies multi_tensor_apply's behavior in the scenario.
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
actual = fn.func(tensorlist, **kwargs)
g.replay()
expect = ref_fn(inputs=[tensorlist], **kwargs)
self.assertEqual(expect, actual, equal_nan=True)
@onlyCUDA
@ops(foreach_reduce_op_db)
@parametrize("w_empty", (False, True))
def test_foreach_reduce_large_input(self, device, dtype, op, w_empty):
# test inputs larger than kChunkSize (65536) * max_num_blocks (320)
N = 65536 * 320 * 2
disable_fastpath = False
kwargs = {}
if op.name == "_foreach_norm":
kwargs["ord"] = 2
disable_fastpath = dtype not in floating_types_and(
torch.half, torch.bfloat16
)
tensorlist = [
make_tensor((N,), dtype=dtype, device=device, noncontiguous=False)
]
# foreach_max cannot handle empty tensors as max over empty is undefined
if w_empty and op.name != "_foreach_max":
tensorlist += [
torch.empty(0, dtype=dtype, device=device),
make_tensor((N,), dtype=dtype, device=device, noncontiguous=False),
]
inputs = (tensorlist,)
wrapped_op, ref, _, _ = self._get_funcs(op)
self.assertEqual(
ref(inputs, **kwargs),
wrapped_op(
inputs, self.is_cuda, not disable_fastpath, zero_size=False, **kwargs
),
)
@onlyCUDA
@ops(
foreach_unary_op_db
+ foreach_binary_op_db
+ foreach_pointwise_op_db
+ foreach_other_op_db,
dtypes=(torch.float,),
)
def test_inplace_foreach_leaf_check_and_grad_fn(self, device, dtype, op):
inplace_op = op.inplace_variant
if inplace_op is None:
self.skipTest("no in-place op available")
sample = next(
iter(
op.sample_inputs(
dtype=dtype, device=device, num_input_tensors=[2], same_size=True
)
)
)
sample.input[0].requires_grad_(True)
with self.assertRaisesRegex(RuntimeError, "a leaf Variable that requires grad"):
inplace_op(sample.input, *sample.args)
sample.input[1].requires_grad_(True)
with self.assertRaisesRegex(RuntimeError, "a leaf Variable that requires grad"):
inplace_op(sample.input, *sample.args)
_tensors = [
t.detach().clone().requires_grad_(i == 0)
for i, t in enumerate(sample.input)
]
tensors = [t.clone() for t in _tensors]
inplace_op(tensors, *sample.args)
self.assertIsNotNone(tensors[0].grad_fn)
self.assertIsNone(tensors[1].grad_fn)
@onlyCUDA
@ops(
filter(
lambda op: op.supports_out,
foreach_unary_op_db
+ foreach_binary_op_db
+ foreach_pointwise_op_db
+ foreach_other_op_db,
),
dtypes=(torch.float,),
)
def test_outplace_with_invalid_grads(self, device, dtype, op):
func, *_ = self._get_funcs(op)
sample = next(
iter(
op.sample_inputs(
dtype=dtype,
device=device,
requires_grad=True,
num_input_tensors=[2],
same_size=True,
)
)
)
self.assertTrue(all(t.requires_grad for t in sample.input))
(out1, out2) = func(
[sample.input, *sample.args],
is_cuda=False,
expect_fastpath=False,
**sample.kwargs,
)
out1.backward(torch.ones_like(out1))
self.assertIsNotNone(sample.input[0].grad)
self.assertIsNone(sample.input[1].grad)
@ops(
filter(
lambda op: op.backward_requires_result,
foreach_unary_op_db
+ foreach_binary_op_db
+ foreach_pointwise_op_db
+ foreach_other_op_db,
),
dtypes=(torch.float32,),
)
def test_lifetime_of_grad_fn_when_result_is_saved(self, device, dtype, op):
def get_ref(func, sample):
class Foo:
pass
out = func(
(sample.input, *sample.args),
is_cuda=False,
expect_fastpath=False,
**sample.kwargs,
)
foo = Foo()
meta_dict = out[0].grad_fn.metadata
meta_dict[0] = foo
ref = weakref.ref(foo)
return out, ref
def _test(func, sample):
out, ref = get_ref(func, sample)
self.assertIsNotNone(ref())
del out
self.assertIsNone(ref())
func = self._get_funcs(op)[0]
for sample in op.sample_inputs(
device, dtype, requires_grad=True, num_input_tensors=[1]
):
for key in ("is_fastpath", "disable_fastpath"):
if key in sample.kwargs:
del sample.kwargs[key]
# note: `_foreach_pow.Scalar` and `_foreach_pow.ScalarList` don't depend on `result`
# see: https://github.com/pytorch/pytorch/blob/5403c777/tools/autograd/derivatives.yaml#L3048-L3049
if op.name == "_foreach_pow":
if (
isinstance(sample.args[0], list)
and isinstance(sample.args[0][0], Number)
) or (
isinstance(sample.args[0], Number)
and not isinstance(sample.args[0], float)
):
continue
if isinstance(sample.args[0], float):
new_args = (sample.input,)
sample.input = sample.args[0]
sample.args = new_args
_test(func, sample)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensors_grouping(self):
num_tensors_per_list = 10
num_devices = torch.cuda.device_count()
dtypes = (torch.float16, torch.float32, torch.float64)
list1 = [
torch.tensor(
i,
device=torch.device("cuda", random.randint(0, num_devices - 1)),
dtype=dtypes[random.randint(0, 2)],
)
for i in range(num_tensors_per_list)
]
list2 = [None for _ in list1]
list3 = [torch.rand_like(t) for t in list1]
nested_tensorlists = [list1, list2, list3]
grouped_tensors = torch.utils._foreach_utils._group_tensors_by_device_and_dtype(
nested_tensorlists, with_indices=True
)
num_tensors_seen = 0
for (device, dtype), ([l1, l2, l3], indices) in grouped_tensors.items():
for t in itertools.chain(l1, l3):
self.assertEqual(t.device, device)
self.assertEqual(t.dtype, dtype)
num_tensors_seen += 1
self.assertEqual(len(l1), len(l2))
self.assertTrue(all(p is None for p in l2))
for i, index in enumerate(indices):
self.assertEqual(l1[i], list1[index])
self.assertEqual(l2[i], list2[index])
self.assertEqual(l3[i], list3[index])
self.assertEqual(num_tensors_seen, 2 * num_tensors_per_list)
@onlyCUDA
def test_0dim_tensor_overload_cpu_ok(self):
tensors = [torch.ones((), device="cuda", dtype=torch.float32) for _ in range(2)]
scalar_cpu_tensor = torch.tensor(4.0, device="cpu")
# For mul and div, the scalar is allowed to be on CPU too
actual = torch._foreach_mul(tensors, scalar_cpu_tensor)
self.assertEqual(actual, [t.mul(scalar_cpu_tensor) for t in tensors])
actual = torch._foreach_div(tensors, scalar_cpu_tensor)
self.assertEqual(actual, [t.div(scalar_cpu_tensor) for t in tensors])
@onlyCUDA
def test_div_reciprocal(self):
expect_m, expect_e = torch.frexp(
torch.div(torch.tensor(0.1, device="cuda"), 10.0)
)
actual_m, actual_e = torch.frexp(
torch._foreach_div([torch.tensor(0.1, device="cuda")], [10.0])[0]
)
self.assertEqual(expect_m, actual_m)
self.assertEqual(expect_e, actual_e)
@onlyCUDA
def test_0dim_tensor_overload_exception(self):
# check exceptions of fast path
tensors = [
make_tensor((2, 2), dtype=torch.float, device="cuda") for _ in range(2)
]
with self.assertRaisesRegex(RuntimeError, "scalar tensor expected to be on"):
torch._foreach_add(tensors, torch.tensor(1.0, device="cpu"), alpha=1.0)
tensors = [
make_tensor((2, 2), dtype=torch.float, device=d) for d in ("cpu", "cuda")
]
with self.assertRaisesRegex(
RuntimeError, "scalar tensor expected to be 0 dim but"
):
torch._foreach_mul(tensors, torch.tensor([1.0, 1.0], device="cuda"))
with self.assertRaisesRegex(
RuntimeError, "scalar tensor expected to be 0 dim but"
):
torch._foreach_add(tensors, torch.tensor([1.0, 1.0], device="cuda"))
@onlyCUDA
@ops(filter(lambda op: op.name == "_foreach_copy", foreach_binary_op_db))
def test_foreach_copy_with_multi_device_inputs(self, device, dtype, op):
foreach_copy_ = op.inplace_variant
copy_ = op.ref_inplace
for non_blocking in (False, True):
for sample in op.sample_inputs(
device, dtype, noncontiguous=False, allow_higher_dtype_scalars=True
):
with torch.no_grad():
ref_input = [t.detach().clone() for t in sample.input]
foreach_copy_(sample.input, sample.args[0], non_blocking)
for t, s in zip(ref_input, sample.args[0]):
copy_(t, s, non_blocking)
self.assertEqual(sample.input, ref_input)
if torch.cuda.device_count() > 1:
device = torch.device("cuda", 1)
rhs_tensors = [t.to(device) for t in sample.args[0]]
foreach_copy_(sample.input, rhs_tensors, non_blocking)
for t, s in zip(ref_input, rhs_tensors):
copy_(t, s, non_blocking)
self.assertEqual(ref_input, sample.input)
@onlyCUDA
@ops(filter(lambda op: op.name == "_foreach_copy", foreach_binary_op_db))
def test_foreach_copy_with_multi_dtypes(self, device, dtype, op):
# check (a) multi_tensor_apply is called and (b) numerical parity with for-loop and Tensor.copy_
foreach_copy_ = ForeachFuncWrapper(op.inplace_variant)
for sample in op.sample_inputs(
device, dtype, noncontiguous=False, allow_higher_dtype_scalars=True
):
for src_dtype in floating_types_and(torch.half, torch.bfloat16):
if src_dtype == dtype:
continue
self_tensors = [t.clone() for t in sample.input]
src_tensors = [t.to(src_dtype) for t in self_tensors]
out = foreach_copy_(
(self_tensors, src_tensors), is_cuda=True, expect_fastpath=True
)
ref_out = [
torch.empty_like(t).copy_(s)
for t, s in zip(self_tensors, src_tensors)
]
for t, ref_t in zip(out, ref_out):
self.assertTrue(torch.equal(t, ref_t))
@onlyCUDA
@largeTensorTest("40GB", device="cuda")
def test_foreach_copy_with_multi_dtypes_large_input(self):
# see https://github.com/pytorch/pytorch/issues/156261
self_tensor = torch.empty(2**31 + 1, device="cuda", dtype=torch.float32)
src_tensor = torch.ones(2**31 + 1, device="cuda", dtype=torch.bfloat16)
torch._foreach_copy_([self_tensor], [src_tensor])
ref_out = torch.empty_like(self_tensor).copy_(src_tensor)
self.assertEqual(self_tensor, ref_out)
@requires_cuda_and_triton
@ops(filter(lambda op: op.name == "_foreach_copy", foreach_binary_op_db))
def test_foreach_copy_with_different_device_inputs(self, device, dtype, op):
if dtype in (torch.complex128, torch.complex64):
self.skipTest("Complex dtype not supported")
# check foreach_copy when self and src tensorList have different device
foreach_copy = op.method_variant
copy_ = op.ref_inplace
def fn(self_tensor, src_tensor, non_blocking):
return foreach_copy(self_tensor, src_tensor, non_blocking)
fn = torch.compile(fn)
for non_blocking in (False,):
for sample in op.sample_inputs(
device, dtype, noncontiguous=False, allow_higher_dtype_scalars=True
):
with torch.no_grad():
ref_input = [t.detach().clone() for t in sample.input]
ref_input_cpu = [t.detach().clone().to("cpu") for t in sample.input]
rhs_tensors = [t.detach().clone().to("cpu") for t in sample.args[0]]
self_tensors = [t.detach().clone().to("cpu") for t in sample.input]
output1 = fn(sample.input, rhs_tensors, non_blocking)
for t, s in zip(ref_input, rhs_tensors):
copy_(t, s, non_blocking)
self.assertEqual(output1, ref_input)
output2 = fn(self_tensors, sample.args[0], non_blocking)
for t, s in zip(ref_input_cpu, sample.args[0]):
copy_(t, s, non_blocking)
self.assertEqual(output2, ref_input_cpu)
# Test reverse-mode & forward-mode AD if supported.
@onlyCUDA
@ops(
foreach_unary_op_db
+ foreach_binary_op_db
+ foreach_pointwise_op_db
+ foreach_reduce_op_db
+ foreach_other_op_db,
dtypes=OpDTypes.supported,
allowed_dtypes=(torch.float64, torch.complex128),
)
@parametrize(
"inplace", (False, True), name_fn=lambda x: "inplace" if x else "outplace"
)
def test_autodiff(self, device, dtype, op, inplace):
if (not inplace) and not op.supports_out:
self.skipTest("out-of-place not implemented")
if inplace and op.has_no_in_place:
self.skipTest("in-place not implemented")
if not (
op.supports_autograd
or op.supports_inplace_autograd
or op.supports_forward_ad
):
self.skipTest("neither reverse mode nor forward mode supported")
# note(crcrpar): without this, some unary functions fail, unlike inplace and/or complex.
if (
(not inplace)
and dtype == torch.float64
and op.name
in (
"_foreach_acos",
"_foreach_asin",
"_foreach_log10",
"_foreach_log1p",
"_foreach_log2",
"_foreach_log",
"_foreach_pow",
"_foreach_sqrt",
"_foreach_rsqrt",
)
):
value_range = {"low": 0.5, "high": 1.0}
else:
value_range = {}
for sample in op.sample_inputs(
device,
dtype,
requires_grad=True,
num_input_tensors=[5],
allow_higher_dtype_scalars=True,
**value_range,
):
# Skip `_foreach_pow.ScalarAndTensor(Scalar, Tensor[])`
if op.name == "_foreach_pow" and isinstance(sample.input, Number):
continue
func = None
if inplace:
# Call `clone` to avoid inplace modifications likewise
# `torch.testing._internal.common_utils.TestGradients._get_safe_inplace`
def inplace_func(*tensorlist):
kwargs = (
{"alpha": sample.kwargs["alpha"]}
if "alpha" in sample.kwargs
else {}
)
op.inplace_variant(
tuple(t.clone() for t in tensorlist), *sample.args, **kwargs
)
return tensorlist
func = inplace_func
else:
def outplace_func(*tensorlist):
kwargs = (
{"alpha": sample.kwargs["alpha"]}
if "alpha" in sample.kwargs
else {}
)
return op.method_variant(tensorlist, *sample.args, **kwargs)
func = outplace_func
working_sample, err_msg_pattern = check_autodiff_sample(
op, sample, dtype, inplace
)
def call_gradcheck():
gradcheck(
func,
sample.input,
raise_exception=True,
check_forward_ad=op.supports_forward_ad,
check_batched_forward_grad=False,
check_backward_ad=op.supports_autograd,
check_batched_grad=False,
)
if not working_sample:
if not err_msg_pattern:
# lhs of float64 and rhs of complex.
continue
with self.assertRaisesRegex(RuntimeError, re.escape(err_msg_pattern)):
call_gradcheck()
continue
call_gradcheck()
# Test per-tensor `grad_fn` behavior.
if inplace and op.supports_inplace_autograd:
# per-tensor `grad_fn` check.
hook_buffer = []
def get_grad_fn_hook(i):
def hook(grad_inputs, grad_outputs) -> None:
hook_buffer.append(i)
return hook
_inputs = [t.detach().clone().requires_grad_() for t in sample.input]
inputs = [t.clone() for t in _inputs]
kwargs = (
{"alpha": sample.kwargs["alpha"]}
if "alpha" in sample.kwargs
else {}
)
op.inplace_variant(inputs, *sample.args, **kwargs)
self.assertEqual(len({t.grad_fn for t in inputs}), len(inputs))
for i, t in enumerate(inputs):
t.grad_fn.register_hook(get_grad_fn_hook(i))
torch.autograd.grad(
inputs[0],
inputs=(_inputs[0],),
grad_outputs=(torch.rand_like(inputs[0]),),
retain_graph=True,
)
self.assertEqual(hook_buffer, [0])
hook_buffer.clear()
# tensors have different shapes.
sum_of_cloned_tensors = torch.cat([t.view(-1) for t in inputs]).sum()
grad_output = torch.rand_like(sum_of_cloned_tensors)
torch.autograd.grad(
sum_of_cloned_tensors,
inputs=tuple(_inputs),
grad_outputs=(grad_output,),
retain_graph=False,
)
self.assertEqual(hook_buffer, list(reversed(range(len(inputs)))))
# TODO(crcrpar): Hide this inside torch/testing/_internal.
# would end up adding another layer to `foreach_inputs_sample_func.__call__`
# so that we can use this function as something like the first argument of `filter` function.
# Even after moving this function to testing, I personally think it'd be better to check the error message.
def check_autodiff_sample(op, sample, dtype, is_inplace):
if op.name == "_foreach_abs" and is_inplace and dtype == torch.complex128:
return False, "In-place abs is not supported for complex tensors."
if op.name == "_foreach_sub" and (
(
isinstance(sample.args[-1], list)
and any(isinstance(a, bool) for a in sample.args[-1])
)
or isinstance(sample.args[-1], bool)
):
return False, _BOOL_SUB_ERR_MSG
if op.name == "_foreach_norm" and (not is_inplace):
return (
False,
"Trying to set a forward gradient that has a different size than that of the original Tensor, "
"this is not supported. Tensor is of size [] while the given forward gradient is of size [1",
)
rhs_arg_has_complex_number = sample.args and (
(
isinstance(sample.args[-1], list)
and any(isinstance(a, complex) for a in sample.args[-1])
)
or (isinstance(sample.args[-1], complex))
)
if rhs_arg_has_complex_number and dtype == torch.float64:
if op.name == "_foreach_lerp":
return False, "value cannot be converted to type double without overflow"
if op.name in (
"_foreach_clamp_max",
"_foreach_clamp_min",
"_foreach_maximum",
"_foreach_minimum",
):
return False, "clamp is not supported for complex types"
if op.name == "_foreach_lerp" and is_inplace:
return False, "value cannot be converted to type double without overflow"
if not is_inplace:
return False, ""
elif op.name in (
"_foreach_add",
"_foreach_sub",
"_foreach_mul",
"_foreach_div",
"_foreach_pow",
):
return (
False,
"result type ComplexDouble can't be cast to the desired output type Double",
)
return True, ""
instantiate_device_type_tests(TestForeach, globals())
if __name__ == "__main__":
run_tests()
| TestForeach |
python | apache__thrift | lib/py/src/transport/TTwisted.py | {
"start": 3516,
"end": 7496
} | class ____(ThriftClientProtocol):
START = 1
OK = 2
BAD = 3
ERROR = 4
COMPLETE = 5
MAX_LENGTH = 2 ** 31 - 1
def __init__(self, client_class, iprot_factory, oprot_factory=None,
host=None, service=None, mechanism='GSSAPI', **sasl_kwargs):
"""
host: the name of the server, from a SASL perspective
service: the name of the server's service, from a SASL perspective
mechanism: the name of the preferred mechanism to use
All other kwargs will be passed to the puresasl.client.SASLClient
constructor.
"""
from puresasl.client import SASLClient
self.SASLCLient = SASLClient
ThriftClientProtocol.__init__(self, client_class, iprot_factory, oprot_factory)
self._sasl_negotiation_deferred = None
self._sasl_negotiation_status = None
self.client = None
if host is not None:
self.createSASLClient(host, service, mechanism, **sasl_kwargs)
def createSASLClient(self, host, service, mechanism, **kwargs):
self.sasl = self.SASLClient(host, service, mechanism, **kwargs)
def dispatch(self, msg):
encoded = self.sasl.wrap(msg)
len_and_encoded = ''.join((struct.pack('!i', len(encoded)), encoded))
ThriftClientProtocol.dispatch(self, len_and_encoded)
@defer.inlineCallbacks
def connectionMade(self):
self._sendSASLMessage(self.START, self.sasl.mechanism)
initial_message = yield deferToThread(self.sasl.process)
self._sendSASLMessage(self.OK, initial_message)
while True:
status, challenge = yield self._receiveSASLMessage()
if status == self.OK:
response = yield deferToThread(self.sasl.process, challenge)
self._sendSASLMessage(self.OK, response)
elif status == self.COMPLETE:
if not self.sasl.complete:
msg = "The server erroneously indicated that SASL " \
"negotiation was complete"
raise TTransport.TTransportException(msg, message=msg)
else:
break
else:
msg = "Bad SASL negotiation status: %d (%s)" % (status, challenge)
raise TTransport.TTransportException(msg, message=msg)
self._sasl_negotiation_deferred = None
ThriftClientProtocol.connectionMade(self)
def _sendSASLMessage(self, status, body):
if body is None:
body = ""
header = struct.pack(">BI", status, len(body))
self.transport.write(header + body)
def _receiveSASLMessage(self):
self._sasl_negotiation_deferred = defer.Deferred()
self._sasl_negotiation_status = None
return self._sasl_negotiation_deferred
def connectionLost(self, reason=connectionDone):
if self.client:
ThriftClientProtocol.connectionLost(self, reason)
def dataReceived(self, data):
if self._sasl_negotiation_deferred:
# we got a sasl challenge in the format (status, length, challenge)
# save the status, let IntNStringReceiver piece the challenge data together
self._sasl_negotiation_status, = struct.unpack("B", data[0])
ThriftClientProtocol.dataReceived(self, data[1:])
else:
# normal frame, let IntNStringReceiver piece it together
ThriftClientProtocol.dataReceived(self, data)
def stringReceived(self, frame):
if self._sasl_negotiation_deferred:
# the frame is just a SASL challenge
response = (self._sasl_negotiation_status, frame)
self._sasl_negotiation_deferred.callback(response)
else:
# there's a second 4 byte length prefix inside the frame
decoded_frame = self.sasl.unwrap(frame[4:])
ThriftClientProtocol.stringReceived(self, decoded_frame)
| ThriftSASLClientProtocol |
python | huggingface__transformers | src/transformers/models/conditional_detr/modeling_conditional_detr.py | {
"start": 88252,
"end": 91702
} | class ____(nn.Module):
"""
Simple convolutional head, using group norm. Upsampling is done using a FPN approach
"""
def __init__(self, dim, fpn_dims, context_dim):
super().__init__()
if dim % 8 != 0:
raise ValueError(
"The hidden_size + number of attention heads must be divisible by 8 as the number of groups in"
" GroupNorm is set to 8"
)
inter_dims = [dim, context_dim // 2, context_dim // 4, context_dim // 8, context_dim // 16, context_dim // 64]
self.lay1 = nn.Conv2d(dim, dim, 3, padding=1)
self.gn1 = nn.GroupNorm(8, dim)
self.lay2 = nn.Conv2d(dim, inter_dims[1], 3, padding=1)
self.gn2 = nn.GroupNorm(min(8, inter_dims[1]), inter_dims[1])
self.lay3 = nn.Conv2d(inter_dims[1], inter_dims[2], 3, padding=1)
self.gn3 = nn.GroupNorm(min(8, inter_dims[2]), inter_dims[2])
self.lay4 = nn.Conv2d(inter_dims[2], inter_dims[3], 3, padding=1)
self.gn4 = nn.GroupNorm(min(8, inter_dims[3]), inter_dims[3])
self.lay5 = nn.Conv2d(inter_dims[3], inter_dims[4], 3, padding=1)
self.gn5 = nn.GroupNorm(min(8, inter_dims[4]), inter_dims[4])
self.out_lay = nn.Conv2d(inter_dims[4], 1, 3, padding=1)
self.dim = dim
self.adapter1 = nn.Conv2d(fpn_dims[0], inter_dims[1], 1)
self.adapter2 = nn.Conv2d(fpn_dims[1], inter_dims[2], 1)
self.adapter3 = nn.Conv2d(fpn_dims[2], inter_dims[3], 1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.kaiming_uniform_(m.weight, a=1)
init.constant_(m.bias, 0)
def forward(self, x: Tensor, bbox_mask: Tensor, fpns: list[Tensor]):
# here we concatenate x, the projected feature map, of shape (batch_size, d_model, height/32, width/32) with
# the bbox_mask = the attention maps of shape (batch_size, n_queries, n_heads, height/32, width/32).
# We expand the projected feature map to match the number of heads.
x = torch.cat([_expand(x, bbox_mask.shape[1]), bbox_mask.flatten(0, 1)], 1)
x = self.lay1(x)
x = self.gn1(x)
x = nn.functional.relu(x)
x = self.lay2(x)
x = self.gn2(x)
x = nn.functional.relu(x)
cur_fpn = self.adapter1(fpns[0])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay3(x)
x = self.gn3(x)
x = nn.functional.relu(x)
cur_fpn = self.adapter2(fpns[1])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay4(x)
x = self.gn4(x)
x = nn.functional.relu(x)
cur_fpn = self.adapter3(fpns[2])
if cur_fpn.size(0) != x.size(0):
cur_fpn = _expand(cur_fpn, x.size(0) // cur_fpn.size(0))
x = cur_fpn + nn.functional.interpolate(x, size=cur_fpn.shape[-2:], mode="nearest")
x = self.lay5(x)
x = self.gn5(x)
x = nn.functional.relu(x)
x = self.out_lay(x)
return x
# Copied from transformers.models.detr.modeling_detr.DetrMHAttentionMap with Detr->ConditionalDetr
| ConditionalDetrMaskHeadSmallConv |
python | Pylons__pyramid | tests/test_decorator.py | {
"start": 33,
"end": 1372
} | class ____(unittest.TestCase):
def _makeOne(self, wrapped):
from pyramid.decorator import reify
return reify(wrapped)
def test___get__withinst(self):
def wrapped(inst):
return 'a'
decorator = self._makeOne(wrapped)
inst = Dummy()
result = decorator.__get__(inst)
self.assertEqual(result, 'a')
self.assertEqual(inst.__dict__['wrapped'], 'a')
def test___get__noinst(self):
def wrapped(inst):
return 'a' # pragma: no cover
decorator = self._makeOne(wrapped)
result = decorator.__get__(None)
self.assertEqual(result, decorator)
def test_copy_docstring(self):
def wrapped(inst):
"""Test doc"""
return 'a' # pragma: no cover
decorator = self._makeOne(wrapped)
assert decorator.__doc__ == 'Test doc'
def test_not_function(self):
"""
Because reify'd methods act as attributes, it's important that they
aren't recognized as a function. Otherwise tools like Sphinx may
misbehave, like in https://github.com/Pylons/pyramid/issues/3655
"""
def wrapped(inst):
return 'a' # pragma: no cover
decorator = self._makeOne(wrapped)
assert not inspect.isfunction(decorator)
| TestReify |
python | scipy__scipy | scipy/optimize/tests/test_linprog.py | {
"start": 81354,
"end": 82988
} | class ____(LinprogSimplexTests):
def setup_method(self):
self.options = {'presolve': False}
is_32_bit = np.intp(0).itemsize < 8
is_linux = sys.platform.startswith('linux')
@pytest.mark.xfail(
condition=is_32_bit and is_linux,
reason='Fails with warning on 32-bit linux')
def test_bug_5400(self):
super().test_bug_5400()
def test_bug_6139_low_tol(self):
# Linprog(method='simplex') fails to find a basic feasible solution
# if phase 1 pseudo-objective function is outside the provided tol.
# https://github.com/scipy/scipy/issues/6139
# Without ``presolve`` eliminating such rows the result is incorrect.
self.options.update({'tol': 1e-12})
with pytest.raises(AssertionError, match='linprog status 4'):
return super().test_bug_6139()
def test_bug_7237_low_tol(self):
pytest.skip("Simplex fails on this problem.")
def test_bug_8174_low_tol(self):
# Fails if the tolerance is too strict. Here, we test that
# even if the solution is wrong, the appropriate warning is issued.
self.options.update({'tol': 1e-12})
with pytest.warns(OptimizeWarning):
super().test_bug_8174()
def test_unbounded_no_nontrivial_constraints_1(self):
pytest.skip("Tests behavior specific to presolve")
def test_unbounded_no_nontrivial_constraints_2(self):
pytest.skip("Tests behavior specific to presolve")
#######################################
# Interior-Point Option-Specific Tests#
#######################################
| TestLinprogSimplexNoPresolve |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed1.py | {
"start": 1052,
"end": 1163
} | class ____(TypedDict, extra_items=ReadOnly[str | None]):
name: str
# This should generate an error.
| MovieBase |
python | ray-project__ray | python/ray/util/collective/types.py | {
"start": 3042,
"end": 3144
} | class ____(CommunicatorMetadata):
"""Metadata for the NIXL communicator."""
| NixlCommunicatorMetadata |
python | realpython__materials | python-protocol/shapes_v2.py | {
"start": 50,
"end": 155
} | class ____(Protocol):
def get_area(self) -> float: ...
def get_perimeter(self) -> float: ...
| Shape |
python | langchain-ai__langchain | libs/core/langchain_core/language_models/llms.py | {
"start": 8998,
"end": 49226
} | class ____(BaseLanguageModel[str], ABC):
"""Base LLM abstract interface.
It should take in a prompt and return a string.
"""
model_config = ConfigDict(
arbitrary_types_allowed=True,
)
@functools.cached_property
def _serialized(self) -> dict[str, Any]:
return dumpd(self)
# --- Runnable methods ---
@property
@override
def OutputType(self) -> type[str]:
"""Get the input type for this `Runnable`."""
return str
def _convert_input(self, model_input: LanguageModelInput) -> PromptValue:
if isinstance(model_input, PromptValue):
return model_input
if isinstance(model_input, str):
return StringPromptValue(text=model_input)
if isinstance(model_input, Sequence):
return ChatPromptValue(messages=convert_to_messages(model_input))
msg = (
f"Invalid input type {type(model_input)}. "
"Must be a PromptValue, str, or list of BaseMessages."
)
raise ValueError(msg)
def _get_ls_params(
self,
stop: list[str] | None = None,
**kwargs: Any,
) -> LangSmithParams:
"""Get standard params for tracing."""
# get default provider from class name
default_provider = self.__class__.__name__
default_provider = default_provider.removesuffix("LLM")
default_provider = default_provider.lower()
ls_params = LangSmithParams(ls_provider=default_provider, ls_model_type="llm")
if stop:
ls_params["ls_stop"] = stop
# model
if "model" in kwargs and isinstance(kwargs["model"], str):
ls_params["ls_model_name"] = kwargs["model"]
elif hasattr(self, "model") and isinstance(self.model, str):
ls_params["ls_model_name"] = self.model
elif hasattr(self, "model_name") and isinstance(self.model_name, str):
ls_params["ls_model_name"] = self.model_name
# temperature
if "temperature" in kwargs and isinstance(kwargs["temperature"], float):
ls_params["ls_temperature"] = kwargs["temperature"]
elif hasattr(self, "temperature") and isinstance(self.temperature, float):
ls_params["ls_temperature"] = self.temperature
# max_tokens
if "max_tokens" in kwargs and isinstance(kwargs["max_tokens"], int):
ls_params["ls_max_tokens"] = kwargs["max_tokens"]
elif hasattr(self, "max_tokens") and isinstance(self.max_tokens, int):
ls_params["ls_max_tokens"] = self.max_tokens
return ls_params
@override
def invoke(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
*,
stop: list[str] | None = None,
**kwargs: Any,
) -> str:
config = ensure_config(config)
return (
self.generate_prompt(
[self._convert_input(input)],
stop=stop,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
run_id=config.pop("run_id", None),
**kwargs,
)
.generations[0][0]
.text
)
@override
async def ainvoke(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
*,
stop: list[str] | None = None,
**kwargs: Any,
) -> str:
config = ensure_config(config)
llm_result = await self.agenerate_prompt(
[self._convert_input(input)],
stop=stop,
callbacks=config.get("callbacks"),
tags=config.get("tags"),
metadata=config.get("metadata"),
run_name=config.get("run_name"),
run_id=config.pop("run_id", None),
**kwargs,
)
return llm_result.generations[0][0].text
@override
def batch(
self,
inputs: list[LanguageModelInput],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> list[str]:
if not inputs:
return []
config = get_config_list(config, len(inputs))
max_concurrency = config[0].get("max_concurrency")
if max_concurrency is None:
try:
llm_result = self.generate_prompt(
[self._convert_input(input_) for input_ in inputs],
callbacks=[c.get("callbacks") for c in config],
tags=[c.get("tags") for c in config],
metadata=[c.get("metadata") for c in config],
run_name=[c.get("run_name") for c in config],
**kwargs,
)
return [g[0].text for g in llm_result.generations]
except Exception as e:
if return_exceptions:
return cast("list[str]", [e for _ in inputs])
raise
else:
batches = [
inputs[i : i + max_concurrency]
for i in range(0, len(inputs), max_concurrency)
]
config = [{**c, "max_concurrency": None} for c in config]
return [
output
for i, batch in enumerate(batches)
for output in self.batch(
batch,
config=config[i * max_concurrency : (i + 1) * max_concurrency],
return_exceptions=return_exceptions,
**kwargs,
)
]
@override
async def abatch(
self,
inputs: list[LanguageModelInput],
config: RunnableConfig | list[RunnableConfig] | None = None,
*,
return_exceptions: bool = False,
**kwargs: Any,
) -> list[str]:
if not inputs:
return []
config = get_config_list(config, len(inputs))
max_concurrency = config[0].get("max_concurrency")
if max_concurrency is None:
try:
llm_result = await self.agenerate_prompt(
[self._convert_input(input_) for input_ in inputs],
callbacks=[c.get("callbacks") for c in config],
tags=[c.get("tags") for c in config],
metadata=[c.get("metadata") for c in config],
run_name=[c.get("run_name") for c in config],
**kwargs,
)
return [g[0].text for g in llm_result.generations]
except Exception as e:
if return_exceptions:
return cast("list[str]", [e for _ in inputs])
raise
else:
batches = [
inputs[i : i + max_concurrency]
for i in range(0, len(inputs), max_concurrency)
]
config = [{**c, "max_concurrency": None} for c in config]
return [
output
for i, batch in enumerate(batches)
for output in await self.abatch(
batch,
config=config[i * max_concurrency : (i + 1) * max_concurrency],
return_exceptions=return_exceptions,
**kwargs,
)
]
@override
def stream(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
*,
stop: list[str] | None = None,
**kwargs: Any,
) -> Iterator[str]:
if type(self)._stream == BaseLLM._stream: # noqa: SLF001
# model doesn't implement streaming, so use default implementation
yield self.invoke(input, config=config, stop=stop, **kwargs)
else:
prompt = self._convert_input(input).to_string()
config = ensure_config(config)
params = self.dict()
params["stop"] = stop
params = {**params, **kwargs}
options = {"stop": stop}
inheritable_metadata = {
**(config.get("metadata") or {}),
**self._get_ls_params(stop=stop, **kwargs),
}
callback_manager = CallbackManager.configure(
config.get("callbacks"),
self.callbacks,
self.verbose,
config.get("tags"),
self.tags,
inheritable_metadata,
self.metadata,
)
(run_manager,) = callback_manager.on_llm_start(
self._serialized,
[prompt],
invocation_params=params,
options=options,
name=config.get("run_name"),
run_id=config.pop("run_id", None),
batch_size=1,
)
generation: GenerationChunk | None = None
try:
for chunk in self._stream(
prompt, stop=stop, run_manager=run_manager, **kwargs
):
yield chunk.text
if generation is None:
generation = chunk
else:
generation += chunk
except BaseException as e:
run_manager.on_llm_error(
e,
response=LLMResult(
generations=[[generation]] if generation else []
),
)
raise
if generation is None:
err = ValueError("No generation chunks were returned")
run_manager.on_llm_error(err, response=LLMResult(generations=[]))
raise err
run_manager.on_llm_end(LLMResult(generations=[[generation]]))
@override
async def astream(
self,
input: LanguageModelInput,
config: RunnableConfig | None = None,
*,
stop: list[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[str]:
if (
type(self)._astream is BaseLLM._astream # noqa: SLF001
and type(self)._stream is BaseLLM._stream # noqa: SLF001
):
yield await self.ainvoke(input, config=config, stop=stop, **kwargs)
return
prompt = self._convert_input(input).to_string()
config = ensure_config(config)
params = self.dict()
params["stop"] = stop
params = {**params, **kwargs}
options = {"stop": stop}
inheritable_metadata = {
**(config.get("metadata") or {}),
**self._get_ls_params(stop=stop, **kwargs),
}
callback_manager = AsyncCallbackManager.configure(
config.get("callbacks"),
self.callbacks,
self.verbose,
config.get("tags"),
self.tags,
inheritable_metadata,
self.metadata,
)
(run_manager,) = await callback_manager.on_llm_start(
self._serialized,
[prompt],
invocation_params=params,
options=options,
name=config.get("run_name"),
run_id=config.pop("run_id", None),
batch_size=1,
)
generation: GenerationChunk | None = None
try:
async for chunk in self._astream(
prompt,
stop=stop,
run_manager=run_manager,
**kwargs,
):
yield chunk.text
if generation is None:
generation = chunk
else:
generation += chunk
except BaseException as e:
await run_manager.on_llm_error(
e,
response=LLMResult(generations=[[generation]] if generation else []),
)
raise
if generation is None:
err = ValueError("No generation chunks were returned")
await run_manager.on_llm_error(err, response=LLMResult(generations=[]))
raise err
await run_manager.on_llm_end(LLMResult(generations=[[generation]]))
# --- Custom methods ---
@abstractmethod
def _generate(
self,
prompts: list[str],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> LLMResult:
"""Run the LLM on the given prompts.
Args:
prompts: The prompts to generate from.
stop: Stop words to use when generating.
Model output is cut off at the first occurrence of any of these
substrings.
If stop tokens are not supported consider raising `NotImplementedError`.
run_manager: Callback manager for the run.
Returns:
The LLM result.
"""
async def _agenerate(
    self,
    prompts: list[str],
    stop: list[str] | None = None,
    run_manager: AsyncCallbackManagerForLLMRun | None = None,
    **kwargs: Any,
) -> LLMResult:
    """Run the LLM on the given prompts.

    Default async implementation: delegates to the synchronous `_generate`
    in an executor so the event loop is not blocked. Subclasses with a
    native async API should override this.

    Args:
        prompts: The prompts to generate from.
        stop: Stop words to use when generating.
            Model output is cut off at the first occurrence of any of these
            substrings.
            If stop tokens are not supported consider raising `NotImplementedError`.
        run_manager: Callback manager for the run.
        **kwargs: Arbitrary additional keyword arguments, forwarded to
            `_generate`.

    Returns:
        The LLM result.
    """
    return await run_in_executor(
        None,
        self._generate,
        prompts,
        stop,
        # `_generate` expects a sync run manager, so unwrap the async one.
        run_manager.get_sync() if run_manager else None,
        **kwargs,
    )
def _stream(
    self,
    prompt: str,
    stop: list[str] | None = None,
    run_manager: CallbackManagerForLLMRun | None = None,
    **kwargs: Any,
) -> Iterator[GenerationChunk]:
    """Stream the LLM on the given prompt.

    This method should be overridden by subclasses that support streaming.

    If not implemented, the default behavior of calls to stream will be to
    fallback to the non-streaming version of the model and return
    the output as a single chunk.

    Args:
        prompt: The prompt to generate from.
        stop: Stop words to use when generating.
            Model output is cut off at the first occurrence of any of these
            substrings.
        run_manager: Callback manager for the run.
        **kwargs: Arbitrary additional keyword arguments.
            These are usually passed to the model provider API call.

    Raises:
        NotImplementedError: Always, in this base implementation; callers
            detect this and fall back to non-streaming generation.

    Yields:
        Generation chunks.
    """
    raise NotImplementedError
async def _astream(
    self,
    prompt: str,
    stop: list[str] | None = None,
    run_manager: AsyncCallbackManagerForLLMRun | None = None,
    **kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
    """An async version of the _stream method.

    By default this wraps the synchronous `_stream`: the iterator is created
    in an executor, and each `next()` call is likewise dispatched to the
    executor so the event loop never blocks on the sync generator.
    Subclasses with a true async streaming implementation should override
    this method.

    Args:
        prompt: The prompt to generate from.
        stop: Stop words to use when generating.
            Model output is cut off at the first occurrence of any of these
            substrings.
        run_manager: Callback manager for the run.
        **kwargs: Arbitrary additional keyword arguments.
            These are usually passed to the model provider API call.

    Yields:
        Generation chunks.
    """
    sync_manager = run_manager.get_sync() if run_manager else None
    chunk_iterator = await run_in_executor(
        None,
        self._stream,
        prompt,
        stop,
        sync_manager,
        **kwargs,
    )
    # Sentinel marks iterator exhaustion without raising StopIteration
    # across the executor boundary.
    exhausted = object()
    while (
        chunk := await run_in_executor(None, next, chunk_iterator, exhausted)
    ) is not exhausted:
        yield chunk  # type: ignore[misc]
@override
def generate_prompt(
    self,
    prompts: list[PromptValue],
    stop: list[str] | None = None,
    callbacks: Callbacks | list[Callbacks] | None = None,
    **kwargs: Any,
) -> LLMResult:
    """Render each prompt value to plain text and delegate to `generate`."""
    rendered = [prompt_value.to_string() for prompt_value in prompts]
    return self.generate(rendered, stop=stop, callbacks=callbacks, **kwargs)
@override
async def agenerate_prompt(
    self,
    prompts: list[PromptValue],
    stop: list[str] | None = None,
    callbacks: Callbacks | list[Callbacks] | None = None,
    **kwargs: Any,
) -> LLMResult:
    """Render each prompt value to plain text and delegate to `agenerate`."""
    rendered = [prompt_value.to_string() for prompt_value in prompts]
    return await self.agenerate(
        rendered, stop=stop, callbacks=callbacks, **kwargs
    )
def _generate_helper(
    self,
    prompts: list[str],
    stop: list[str] | None,
    run_managers: list[CallbackManagerForLLMRun],
    *,
    new_arg_supported: bool,
    **kwargs: Any,
) -> LLMResult:
    """Invoke `_generate`, wiring start/end/error callbacks around the call.

    Args:
        prompts: The prompts to generate from.
        stop: Stop words to use when generating.
        run_managers: One callback run manager per started run.
        new_arg_supported: Whether `_generate` accepts a `run_manager`
            keyword (checked by the caller via `inspect.signature`; older
            implementations may not declare it).
        **kwargs: Forwarded to `_generate`.

    Returns:
        The `LLMResult` from `_generate`, with `run` info attached when run
        managers were provided.
    """
    try:
        output = (
            self._generate(
                prompts,
                stop=stop,
                # TODO: support multiple run managers
                run_manager=run_managers[0] if run_managers else None,
                **kwargs,
            )
            if new_arg_supported
            else self._generate(prompts, stop=stop)
        )
    except BaseException as e:
        # Notify every run manager of the failure before re-raising.
        for run_manager in run_managers:
            run_manager.on_llm_error(e, response=LLMResult(generations=[]))
        raise
    # Flatten splits the batched result so each manager is ended with its
    # own prompt's output (presumably one LLMResult per prompt — see
    # LLMResult.flatten).
    flattened_outputs = output.flatten()
    for manager, flattened_output in zip(
        run_managers, flattened_outputs, strict=False
    ):
        manager.on_llm_end(flattened_output)
    if run_managers:
        output.run = [
            RunInfo(run_id=run_manager.run_id) for run_manager in run_managers
        ]
    return output
def generate(
    self,
    prompts: list[str],
    stop: list[str] | None = None,
    callbacks: Callbacks | list[Callbacks] | None = None,
    *,
    tags: list[str] | list[list[str]] | None = None,
    metadata: dict[str, Any] | list[dict[str, Any]] | None = None,
    run_name: str | list[str] | None = None,
    run_id: uuid.UUID | list[uuid.UUID | None] | None = None,
    **kwargs: Any,
) -> LLMResult:
    """Pass a sequence of prompts to a model and return generations.

    This method should make use of batched calls for models that expose a batched
    API.

    Use this method when you want to:

    1. Take advantage of batched calls,
    2. Need more output from the model than just the top generated value,
    3. Are building chains that are agnostic to the underlying language model
        type (e.g., pure text completion models vs chat models).

    Args:
        prompts: List of string prompts.
        stop: Stop words to use when generating.
            Model output is cut off at the first occurrence of any of these
            substrings.
        callbacks: `Callbacks` to pass through.
            Used for executing additional functionality, such as logging or
            streaming, throughout generation.
        tags: List of tags to associate with each prompt. If provided, the length
            of the list must match the length of the prompts list.
        metadata: List of metadata dictionaries to associate with each prompt. If
            provided, the length of the list must match the length of the prompts
            list.
        run_name: List of run names to associate with each prompt. If provided, the
            length of the list must match the length of the prompts list.
        run_id: List of run IDs to associate with each prompt. If provided, the
            length of the list must match the length of the prompts list.
        **kwargs: Arbitrary additional keyword arguments.
            These are usually passed to the model provider API call.

    Raises:
        ValueError: If prompts is not a list.
        ValueError: If the length of `callbacks`, `tags`, `metadata`, or
            `run_name` (if provided) does not match the length of prompts.

    Returns:
        An `LLMResult`, which contains a list of candidate `Generations` for each
        input prompt and additional model provider-specific output.
    """
    if not isinstance(prompts, list):
        msg = (
            "Argument 'prompts' is expected to be of type list[str], received"
            f" argument of type {type(prompts)}."
        )
        raise ValueError(msg)  # noqa: TRY004
    # Create callback managers
    # Fold tracing params (from `_get_ls_params`) into the user-supplied
    # metadata — per-prompt when a list was given, shared when a dict.
    if isinstance(metadata, list):
        metadata = [
            {
                **(meta or {}),
                **self._get_ls_params(stop=stop, **kwargs),
            }
            for meta in metadata
        ]
    elif isinstance(metadata, dict):
        metadata = {
            **(metadata or {}),
            **self._get_ls_params(stop=stop, **kwargs),
        }
    if (
        isinstance(callbacks, list)
        and callbacks
        and (
            isinstance(callbacks[0], (list, BaseCallbackManager))
            or callbacks[0] is None
        )
    ):
        # We've received a list of callbacks args to apply to each input
        if len(callbacks) != len(prompts):
            msg = "callbacks must be the same length as prompts"
            raise ValueError(msg)
        if tags is not None and not (
            isinstance(tags, list) and len(tags) == len(prompts)
        ):
            msg = "tags must be a list of the same length as prompts"
            raise ValueError(msg)
        if metadata is not None and not (
            isinstance(metadata, list) and len(metadata) == len(prompts)
        ):
            msg = "metadata must be a list of the same length as prompts"
            raise ValueError(msg)
        if run_name is not None and not (
            isinstance(run_name, list) and len(run_name) == len(prompts)
        ):
            msg = "run_name must be a list of the same length as prompts"
            raise ValueError(msg)
        callbacks = cast("list[Callbacks]", callbacks)
        tags_list = cast("list[list[str] | None]", tags or ([None] * len(prompts)))
        metadata_list = cast(
            "list[dict[str, Any] | None]", metadata or ([{}] * len(prompts))
        )
        run_name_list = run_name or cast(
            "list[str | None]", ([None] * len(prompts))
        )
        # One callback manager per prompt.
        callback_managers = [
            CallbackManager.configure(
                callback,
                self.callbacks,
                self.verbose,
                tag,
                self.tags,
                meta,
                self.metadata,
            )
            for callback, tag, meta in zip(
                callbacks, tags_list, metadata_list, strict=False
            )
        ]
    else:
        # We've received a single callbacks arg to apply to all inputs
        callback_managers = [
            CallbackManager.configure(
                cast("Callbacks", callbacks),
                self.callbacks,
                self.verbose,
                cast("list[str]", tags),
                self.tags,
                cast("dict[str, Any]", metadata),
                self.metadata,
            )
        ] * len(prompts)
        run_name_list = [cast("str | None", run_name)] * len(prompts)
    run_ids_list = self._get_run_ids_list(run_id, prompts)
    params = self.dict()
    params["stop"] = stop
    options = {"stop": stop}
    # Split prompts into cache hits (`existing_prompts`) and misses.
    (
        existing_prompts,
        llm_string,
        missing_prompt_idxs,
        missing_prompts,
    ) = get_prompts(params, prompts, self.cache)
    # Older `_generate` implementations may not accept `run_manager`.
    new_arg_supported = inspect.signature(self._generate).parameters.get(
        "run_manager"
    )
    if (self.cache is None and get_llm_cache() is None) or self.cache is False:
        # Caching disabled: generate everything in one pass.
        run_managers = [
            callback_manager.on_llm_start(
                self._serialized,
                [prompt],
                invocation_params=params,
                options=options,
                name=run_name,
                batch_size=len(prompts),
                run_id=run_id_,
            )[0]
            for callback_manager, prompt, run_name, run_id_ in zip(
                callback_managers,
                prompts,
                run_name_list,
                run_ids_list,
                strict=False,
            )
        ]
        return self._generate_helper(
            prompts,
            stop,
            run_managers,
            new_arg_supported=bool(new_arg_supported),
            **kwargs,
        )
    if len(missing_prompts) > 0:
        # Generate only the cache misses, then write them back to the cache.
        run_managers = [
            callback_managers[idx].on_llm_start(
                self._serialized,
                [prompts[idx]],
                invocation_params=params,
                options=options,
                name=run_name_list[idx],
                batch_size=len(missing_prompts),
            )[0]
            for idx in missing_prompt_idxs
        ]
        new_results = self._generate_helper(
            missing_prompts,
            stop,
            run_managers,
            new_arg_supported=bool(new_arg_supported),
            **kwargs,
        )
        llm_output = update_cache(
            self.cache,
            existing_prompts,
            llm_string,
            missing_prompt_idxs,
            new_results,
            prompts,
        )
        run_info = (
            [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers]
            if run_managers
            else None
        )
    else:
        # Everything was cached: nothing ran, so no run info / llm output.
        llm_output = {}
        run_info = None
    # Reassemble results in the original prompt order.
    generations = [existing_prompts[i] for i in range(len(prompts))]
    return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
@staticmethod
def _get_run_ids_list(
    run_id: uuid.UUID | list[uuid.UUID | None] | None, prompts: list
) -> list:
    """Normalize `run_id` into a list with one entry per prompt.

    A single ID is assigned to the first prompt (the rest get `None`); a
    list is validated against the batch length and returned as-is.
    """
    batch_len = len(prompts)
    if run_id is None:
        return [None] * batch_len
    if not isinstance(run_id, list):
        # Single ID: attach it to the first prompt only.
        return [run_id, *([None] * (batch_len - 1))]
    if len(run_id) != batch_len:
        msg = (
            "Number of manually provided run_id's does not match batch length."
            f" {len(run_id)} != {batch_len}"
        )
        raise ValueError(msg)
    return run_id
async def _agenerate_helper(
    self,
    prompts: list[str],
    stop: list[str] | None,
    run_managers: list[AsyncCallbackManagerForLLMRun],
    *,
    new_arg_supported: bool,
    **kwargs: Any,
) -> LLMResult:
    """Async counterpart of `_generate_helper`.

    Invokes `_agenerate` and fans callback notifications out to all run
    managers concurrently via `asyncio.gather`.

    Args:
        prompts: The prompts to generate from.
        stop: Stop words to use when generating.
        run_managers: One async callback run manager per started run.
        new_arg_supported: Whether `_agenerate` accepts a `run_manager`
            keyword (checked by the caller via `inspect.signature`).
        **kwargs: Forwarded to `_agenerate`.

    Returns:
        The `LLMResult` from `_agenerate`, with `run` info attached when run
        managers were provided.
    """
    try:
        output = (
            await self._agenerate(
                prompts,
                stop=stop,
                # TODO: support multiple run managers (same as the sync path).
                run_manager=run_managers[0] if run_managers else None,
                **kwargs,
            )
            if new_arg_supported
            else await self._agenerate(prompts, stop=stop)
        )
    except BaseException as e:
        # Notify every run manager of the failure before re-raising.
        await asyncio.gather(
            *[
                run_manager.on_llm_error(e, response=LLMResult(generations=[]))
                for run_manager in run_managers
            ]
        )
        raise
    # Flatten so each manager is ended with its own prompt's output.
    flattened_outputs = output.flatten()
    await asyncio.gather(
        *[
            run_manager.on_llm_end(flattened_output)
            for run_manager, flattened_output in zip(
                run_managers, flattened_outputs, strict=False
            )
        ]
    )
    if run_managers:
        output.run = [
            RunInfo(run_id=run_manager.run_id) for run_manager in run_managers
        ]
    return output
async def agenerate(
    self,
    prompts: list[str],
    stop: list[str] | None = None,
    callbacks: Callbacks | list[Callbacks] | None = None,
    *,
    tags: list[str] | list[list[str]] | None = None,
    metadata: dict[str, Any] | list[dict[str, Any]] | None = None,
    run_name: str | list[str] | None = None,
    run_id: uuid.UUID | list[uuid.UUID | None] | None = None,
    **kwargs: Any,
) -> LLMResult:
    """Asynchronously pass a sequence of prompts to a model and return generations.

    This method should make use of batched calls for models that expose a batched
    API.

    Use this method when you want to:

    1. Take advantage of batched calls,
    2. Need more output from the model than just the top generated value,
    3. Are building chains that are agnostic to the underlying language model
        type (e.g., pure text completion models vs chat models).

    Args:
        prompts: List of string prompts.
        stop: Stop words to use when generating.
            Model output is cut off at the first occurrence of any of these
            substrings.
        callbacks: `Callbacks` to pass through.
            Used for executing additional functionality, such as logging or
            streaming, throughout generation.
        tags: List of tags to associate with each prompt. If provided, the length
            of the list must match the length of the prompts list.
        metadata: List of metadata dictionaries to associate with each prompt. If
            provided, the length of the list must match the length of the prompts
            list.
        run_name: List of run names to associate with each prompt. If provided, the
            length of the list must match the length of the prompts list.
        run_id: List of run IDs to associate with each prompt. If provided, the
            length of the list must match the length of the prompts list.
        **kwargs: Arbitrary additional keyword arguments.
            These are usually passed to the model provider API call.

    Raises:
        ValueError: If the length of `callbacks`, `tags`, `metadata`, or
            `run_name` (if provided) does not match the length of prompts.

    Returns:
        An `LLMResult`, which contains a list of candidate `Generations` for each
        input prompt and additional model provider-specific output.
    """
    # Fold tracing params into the user-supplied metadata — per-prompt when
    # a list was given, shared when a dict.
    if isinstance(metadata, list):
        metadata = [
            {
                **(meta or {}),
                **self._get_ls_params(stop=stop, **kwargs),
            }
            for meta in metadata
        ]
    elif isinstance(metadata, dict):
        metadata = {
            **(metadata or {}),
            **self._get_ls_params(stop=stop, **kwargs),
        }
    # Create callback managers
    # FIX: guard against an empty callbacks list before indexing
    # `callbacks[0]` — parity with the sync `generate`, which checks
    # `and callbacks`; previously `callbacks=[]` raised IndexError here.
    if (
        isinstance(callbacks, list)
        and callbacks
        and (
            isinstance(callbacks[0], (list, BaseCallbackManager))
            or callbacks[0] is None
        )
    ):
        # We've received a list of callbacks args to apply to each input
        if len(callbacks) != len(prompts):
            msg = "callbacks must be the same length as prompts"
            raise ValueError(msg)
        if tags is not None and not (
            isinstance(tags, list) and len(tags) == len(prompts)
        ):
            msg = "tags must be a list of the same length as prompts"
            raise ValueError(msg)
        if metadata is not None and not (
            isinstance(metadata, list) and len(metadata) == len(prompts)
        ):
            msg = "metadata must be a list of the same length as prompts"
            raise ValueError(msg)
        if run_name is not None and not (
            isinstance(run_name, list) and len(run_name) == len(prompts)
        ):
            msg = "run_name must be a list of the same length as prompts"
            raise ValueError(msg)
        callbacks = cast("list[Callbacks]", callbacks)
        tags_list = cast("list[list[str] | None]", tags or ([None] * len(prompts)))
        metadata_list = cast(
            "list[dict[str, Any] | None]", metadata or ([{}] * len(prompts))
        )
        run_name_list = run_name or cast(
            "list[str | None]", ([None] * len(prompts))
        )
        # One async callback manager per prompt.
        callback_managers = [
            AsyncCallbackManager.configure(
                callback,
                self.callbacks,
                self.verbose,
                tag,
                self.tags,
                meta,
                self.metadata,
            )
            for callback, tag, meta in zip(
                callbacks, tags_list, metadata_list, strict=False
            )
        ]
    else:
        # We've received a single callbacks arg to apply to all inputs
        callback_managers = [
            AsyncCallbackManager.configure(
                cast("Callbacks", callbacks),
                self.callbacks,
                self.verbose,
                cast("list[str]", tags),
                self.tags,
                cast("dict[str, Any]", metadata),
                self.metadata,
            )
        ] * len(prompts)
        run_name_list = [cast("str | None", run_name)] * len(prompts)
    run_ids_list = self._get_run_ids_list(run_id, prompts)
    params = self.dict()
    params["stop"] = stop
    options = {"stop": stop}
    # Split prompts into cache hits (`existing_prompts`) and misses.
    (
        existing_prompts,
        llm_string,
        missing_prompt_idxs,
        missing_prompts,
    ) = await aget_prompts(params, prompts, self.cache)
    # Verify whether the cache is set, and if the cache is set,
    # verify whether the cache is available.
    new_arg_supported = inspect.signature(self._agenerate).parameters.get(
        "run_manager"
    )
    if (self.cache is None and get_llm_cache() is None) or self.cache is False:
        # Caching disabled: generate everything in one pass.
        run_managers = await asyncio.gather(
            *[
                callback_manager.on_llm_start(
                    self._serialized,
                    [prompt],
                    invocation_params=params,
                    options=options,
                    name=run_name,
                    batch_size=len(prompts),
                    run_id=run_id_,
                )
                for callback_manager, prompt, run_name, run_id_ in zip(
                    callback_managers,
                    prompts,
                    run_name_list,
                    run_ids_list,
                    strict=False,
                )
            ]
        )
        run_managers = [r[0] for r in run_managers]  # type: ignore[misc]
        return await self._agenerate_helper(
            prompts,
            stop,
            run_managers,  # type: ignore[arg-type]
            new_arg_supported=bool(new_arg_supported),
            **kwargs,
        )
    if len(missing_prompts) > 0:
        # Generate only the cache misses, then write them back to the cache.
        run_managers = await asyncio.gather(
            *[
                callback_managers[idx].on_llm_start(
                    self._serialized,
                    [prompts[idx]],
                    invocation_params=params,
                    options=options,
                    name=run_name_list[idx],
                    batch_size=len(missing_prompts),
                )
                for idx in missing_prompt_idxs
            ]
        )
        run_managers = [r[0] for r in run_managers]  # type: ignore[misc]
        new_results = await self._agenerate_helper(
            missing_prompts,
            stop,
            run_managers,  # type: ignore[arg-type]
            new_arg_supported=bool(new_arg_supported),
            **kwargs,
        )
        llm_output = await aupdate_cache(
            self.cache,
            existing_prompts,
            llm_string,
            missing_prompt_idxs,
            new_results,
            prompts,
        )
        run_info = (
            [RunInfo(run_id=run_manager.run_id) for run_manager in run_managers]  # type: ignore[attr-defined]
            if run_managers
            else None
        )
    else:
        # Everything was cached: nothing ran, so no run info / llm output.
        llm_output = {}
        run_info = None
    # Reassemble results in the original prompt order.
    generations = [existing_prompts[i] for i in range(len(prompts))]
    return LLMResult(generations=generations, llm_output=llm_output, run=run_info)
async def _call_async(
    self,
    prompt: str,
    stop: list[str] | None = None,
    callbacks: Callbacks = None,
    *,
    tags: list[str] | None = None,
    metadata: dict[str, Any] | None = None,
    **kwargs: Any,
) -> str:
    """Check Cache and run the LLM on the given prompt and input."""
    llm_result = await self.agenerate(
        [prompt],
        stop=stop,
        callbacks=callbacks,
        tags=tags,
        metadata=metadata,
        **kwargs,
    )
    # One prompt in, so the answer is the top generation of the first
    # (only) prompt.
    top_generation = llm_result.generations[0][0]
    return top_generation.text
def __str__(self) -> str:
    """Return a string representation of the object for printing."""
    bold, reset = "\033[1m", "\033[0m"
    class_name = type(self).__name__
    return f"{bold}{class_name}{reset}\nParams: {self._identifying_params}"
@property
@abstractmethod
def _llm_type(self) -> str:
    """Return type of llm.

    Used as the `"_type"` key when serializing the model (see `dict`).
    """
@override
def dict(self, **kwargs: Any) -> dict:
    """Return a dictionary of the LLM.

    The identifying parameters plus a `"_type"` discriminator.
    """
    serialized: dict = {**self._identifying_params, "_type": self._llm_type}
    return serialized
def save(self, file_path: Path | str) -> None:
"""Save the LLM.
Args:
file_path: Path to file to save the LLM to.
Raises:
ValueError: If the file path is not a string or Path object.
Example:
```python
llm.save(file_path="path/llm.yaml")
```
"""
# Convert file to Path object.
save_path = Path(file_path)
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
# Fetch dictionary to save
prompt_dict = self.dict()
if save_path.suffix == ".json":
with save_path.open("w", encoding="utf-8") as f:
json.dump(prompt_dict, f, indent=4)
elif save_path.suffix.endswith((".yaml", ".yml")):
with save_path.open("w", encoding="utf-8") as f:
yaml.dump(prompt_dict, f, default_flow_style=False)
else:
msg = f"{save_path} must be json or yaml"
raise ValueError(msg)
| BaseLLM |
python | walkccc__LeetCode | solutions/2661. First Completely Painted Row or Column/2661.py | {
"start": 0,
"end": 782
} | class ____:
def firstCompleteIndex(self, arr: list[int], mat: list[list[int]]) -> int:
    """Return the smallest index i such that painting arr[0..i] onto `mat`
    completely fills some row or some column.

    `arr` and `mat` contain the same values, so each value maps to exactly
    one (row, col) cell.
    """
    m, n = len(mat), len(mat[0])
    # position[num] = (row, col) of `num` inside `mat`.
    position: dict[int, tuple[int, int]] = {}
    for r, row in enumerate(mat):
        for c, num in enumerate(row):
            position[num] = (r, c)
    painted_in_row = [0] * m
    painted_in_col = [0] * n
    for i, num in enumerate(arr):
        r, c = position[num]
        painted_in_row[r] += 1
        painted_in_col[c] += 1
        # A row is complete after n paints; a column after m paints.
        if painted_in_row[r] == n or painted_in_col[c] == m:
            return i
| Solution |
python | ray-project__ray | python/ray/tune/logger/csv.py | {
"start": 2087,
"end": 4149
} | class ____(LoggerCallback):
"""Logs results to progress.csv under the trial directory.
Automatically flattens nested dicts in the result dict before writing
to csv:
{"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
"""
_SAVED_FILE_TEMPLATES = [EXPR_PROGRESS_FILE]
def __init__(self):
    """Initialize empty per-trial bookkeeping maps."""
    # Whether the trial's CSV file already has content (header written),
    # i.e. we are appending rather than starting fresh.
    self._trial_continue: Dict["Trial", bool] = {}
    # Open text file handle per trial.
    self._trial_files: Dict["Trial", TextIO] = {}
    # Lazily-created DictWriter per trial (None until first result).
    self._trial_csv: Dict["Trial", csv.DictWriter] = {}
def _setup_trial(self, trial: "Trial"):
    """Open (or reopen) the trial's progress.csv for appending.

    Restores an existing file from remote storage first, and records
    whether we are continuing a previous file (so the header is not
    rewritten).
    """
    if trial in self._trial_files:
        # Reopening: close the stale handle first.
        self._trial_files[trial].close()
    # Make sure logdir exists
    trial.init_local_path()
    local_file_path = Path(trial.local_path, EXPR_PROGRESS_FILE)
    # Resume the file from remote storage.
    self._restore_from_remote(EXPR_PROGRESS_FILE, trial)
    # Non-empty existing file means we append without rewriting the header.
    self._trial_continue[trial] = (
        local_file_path.exists() and local_file_path.stat().st_size > 0
    )
    self._trial_files[trial] = local_file_path.open("at")
    # Writer is created lazily on the first result (its fieldnames come
    # from that result's keys).
    self._trial_csv[trial] = None
def log_trial_result(self, iteration: int, trial: "Trial", result: Dict):
    """Append one flattened result row to the trial's progress.csv."""
    if trial not in self._trial_files:
        self._setup_trial(trial)

    # Drop the config and flatten nested dicts: {"a": {"b": 1}} -> {"a/b": 1}.
    trimmed = result.copy()
    trimmed.pop("config", None)
    flat_result = flatten_dict(trimmed, delimiter="/")

    writer = self._trial_csv[trial]
    if not writer:
        # First result fixes the column set; write the header only when
        # starting a fresh file.
        writer = csv.DictWriter(self._trial_files[trial], flat_result.keys())
        self._trial_csv[trial] = writer
        if not self._trial_continue[trial]:
            writer.writeheader()

    # Later results may contain extra keys — keep only the known columns.
    known = writer.fieldnames
    writer.writerow({k: v for k, v in flat_result.items() if k in known})
    self._trial_files[trial].flush()
def log_trial_end(self, trial: "Trial", failed: bool = False):
    """Close and forget the trial's CSV state; no-op for unknown trials."""
    if trial not in self._trial_files:
        return
    self._trial_csv.pop(trial)
    file_handle = self._trial_files.pop(trial)
    file_handle.close()
| CSVLoggerCallback |
python | pandas-dev__pandas | pandas/tests/frame/test_stack_unstack.py | {
"start": 481,
"end": 60001
} | class ____:
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_stack_unstack(self, float_frame, future_stack):
    # stack then unstack must round-trip the frame, both along the default
    # (last) level and along level 0.
    df = float_frame.copy()
    df[:] = np.arange(np.prod(df.shape)).reshape(df.shape)

    stacked = df.stack(future_stack=future_stack)
    stacked_df = DataFrame({"foo": stacked, "bar": stacked})

    unstacked = stacked.unstack()
    unstacked_df = stacked_df.unstack()

    tm.assert_frame_equal(unstacked, df)
    tm.assert_frame_equal(unstacked_df["bar"], df)

    # Unstacking level 0 produces the transpose of the original.
    unstacked_cols = stacked.unstack(0)
    unstacked_cols_df = stacked_df.unstack(0)
    tm.assert_frame_equal(unstacked_cols.T, df)
    tm.assert_frame_equal(unstacked_cols_df["bar"].T, df)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_stack_mixed_level(self, future_stack):
    # GH 18310: stacking must cope with levels whose labels mix types
    # (here ints and strings).
    levels = [range(3), [3, "a", "b"], [1, 2]]

    # flat columns:
    df = DataFrame(1, index=levels[0], columns=levels[1])
    result = df.stack(future_stack=future_stack)
    expected = Series(1, index=MultiIndex.from_product(levels[:2]))
    tm.assert_series_equal(result, expected)

    # MultiIndex columns:
    df = DataFrame(1, index=levels[0], columns=MultiIndex.from_product(levels[1:]))
    result = df.stack(1, future_stack=future_stack)
    expected = DataFrame(
        1, index=MultiIndex.from_product([levels[0], levels[2]]), columns=levels[1]
    )
    tm.assert_frame_equal(result, expected)

    # as above, but used labels in level are actually of homogeneous type
    result = df[["a", "b"]].stack(1, future_stack=future_stack)
    expected = expected[["a", "b"]]
    tm.assert_frame_equal(result, expected)
def test_unstack_not_consolidated(self):
# Gh#34708
df = DataFrame({"x": [1, 2, np.nan], "y": [3.0, 4, np.nan]})
df2 = df[["x"]]
df2["y"] = df["y"]
assert len(df2._mgr.blocks) == 2
res = df2.unstack()
expected = df.unstack()
tm.assert_series_equal(res, expected)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_unstack_fill(self, future_stack):
    # GH #9746: fill_value keyword argument for Series
    # and DataFrame unstack

    # From a series
    data = Series([1, 2, 4, 5], dtype=np.int16)
    data.index = MultiIndex.from_tuples(
        [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
    )

    # fill_value that fits the dtype keeps the dtype (int16 here).
    result = data.unstack(fill_value=-1)
    expected = DataFrame(
        {"a": [1, -1, 5], "b": [2, 4, -1]}, index=["x", "y", "z"], dtype=np.int16
    )
    tm.assert_frame_equal(result, expected)

    msg = (
        "Using a fill_value that cannot be held in the existing dtype is deprecated"
    )
    with tm.assert_produces_warning(Pandas4Warning, match=msg):
        # From a series with incorrect data type for fill_value
        result = data.unstack(fill_value=0.5)
    # The incompatible fill_value upcasts the result to float.
    expected = DataFrame(
        {"a": [1, 0.5, 5], "b": [2, 4, 0.5]}, index=["x", "y", "z"], dtype=float
    )
    tm.assert_frame_equal(result, expected)

    # GH #13971: fill_value when unstacking multiple levels:
    df = DataFrame(
        {"x": ["a", "a", "b"], "y": ["j", "k", "j"], "z": [0, 1, 2], "w": [0, 1, 2]}
    ).set_index(["x", "y", "z"])
    unstacked = df.unstack(["x", "y"], fill_value=0)
    key = ("w", "b", "j")
    expected = unstacked[key]
    result = Series([0, 0, 2], index=unstacked.index, name=key)
    tm.assert_series_equal(result, expected)

    # Round-trip: stacking the unstacked frame recovers the original rows.
    stacked = unstacked.stack(["x", "y"], future_stack=future_stack)
    stacked.index = stacked.index.reorder_levels(df.index.names)
    # Workaround for GH #17886 (unnecessarily casts to float):
    stacked = stacked.astype(np.int64)
    result = stacked.loc[df.index]
    tm.assert_frame_equal(result, df)

    # From a series
    s = df["w"]
    result = s.unstack(["x", "y"], fill_value=0)
    expected = unstacked["w"]
    tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame(self):
    # fill_value behavior when unstacking a whole DataFrame.

    # From a dataframe
    rows = [[1, 2], [3, 4], [5, 6], [7, 8]]
    df = DataFrame(rows, columns=list("AB"), dtype=np.int32)
    df.index = MultiIndex.from_tuples(
        [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
    )

    result = df.unstack(fill_value=-1)

    rows = [[1, 3, 2, 4], [-1, 5, -1, 6], [7, -1, 8, -1]]
    expected = DataFrame(rows, index=list("xyz"), dtype=np.int32)
    expected.columns = MultiIndex.from_tuples(
        [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
    )
    tm.assert_frame_equal(result, expected)

    # From a mixed type dataframe
    # Each column keeps its own dtype when the fill_value fits.
    df["A"] = df["A"].astype(np.int16)
    df["B"] = df["B"].astype(np.float64)

    result = df.unstack(fill_value=-1)
    expected["A"] = expected["A"].astype(np.int16)
    expected["B"] = expected["B"].astype(np.float64)
    tm.assert_frame_equal(result, expected)

    msg = (
        "Using a fill_value that cannot be held in the existing dtype is deprecated"
    )
    with tm.assert_produces_warning(Pandas4Warning, match=msg):
        # From a dataframe with incorrect data type for fill_value
        result = df.unstack(fill_value=0.5)

    # The incompatible fill_value upcasts everything to float.
    rows = [[1, 3, 2, 4], [0.5, 5, 0.5, 6], [7, 0.5, 8, 0.5]]
    expected = DataFrame(rows, index=list("xyz"), dtype=float)
    expected.columns = MultiIndex.from_tuples(
        [("A", "a"), ("A", "b"), ("B", "a"), ("B", "b")]
    )
    tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_datetime(self):
# Test unstacking with date times
dv = date_range("2012-01-01", periods=4).values
data = Series(dv)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [dv[0], pd.NaT, dv[3]], "b": [dv[1], dv[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=dv[0])
expected = DataFrame(
{"a": [dv[0], dv[0], dv[3]], "b": [dv[1], dv[2], dv[0]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_timedelta(self):
# Test unstacking with time deltas
td = [Timedelta(days=i) for i in range(4)]
data = Series(td)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [td[0], pd.NaT, td[3]], "b": [td[1], td[2], pd.NaT]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=td[1])
expected = DataFrame(
{"a": [td[0], td[1], td[3]], "b": [td[1], td[2], td[1]]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_period(self):
# Test unstacking with period
periods = [
Period("2012-01"),
Period("2012-02"),
Period("2012-03"),
Period("2012-04"),
]
data = Series(periods)
data.index = MultiIndex.from_tuples(
[("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
)
result = data.unstack()
expected = DataFrame(
{"a": [periods[0], None, periods[3]], "b": [periods[1], periods[2], None]},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
result = data.unstack(fill_value=periods[1])
expected = DataFrame(
{
"a": [periods[0], periods[1], periods[3]],
"b": [periods[1], periods[2], periods[1]],
},
index=["x", "y", "z"],
)
tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_categorical(self):
    # Unstacking categorical values: fill_value must be an existing
    # category, otherwise a TypeError is raised.
    data = Series(["a", "b", "c", "a"], dtype="category")
    data.index = MultiIndex.from_tuples(
        [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
    )

    # By default missing values will be NaN
    result = data.unstack()
    expected = DataFrame(
        {
            "a": pd.Categorical(["a", None, "a"], categories=list("abc")),
            "b": pd.Categorical(["b", "c", None], categories=list("abc")),
        },
        index=list("xyz"),
    )
    tm.assert_frame_equal(result, expected)

    # Fill with non-category results in a ValueError
    msg = r"Cannot setitem on a Categorical with a new category \(d\)"
    with pytest.raises(TypeError, match=msg):
        data.unstack(fill_value="d")

    # Fill with category value replaces missing values as expected
    result = data.unstack(fill_value="c")
    expected = DataFrame(
        {
            "a": pd.Categorical(list("aca"), categories=list("abc")),
            "b": pd.Categorical(list("bcc"), categories=list("abc")),
        },
        index=list("xyz"),
    )
    tm.assert_frame_equal(result, expected)
def test_unstack_tuplename_in_multiindex(self):
# GH 19966
idx = MultiIndex.from_product(
[["a", "b", "c"], [1, 2, 3]], names=[("A", "a"), ("B", "b")]
)
df = DataFrame({"d": [1] * 9, "e": [2] * 9}, index=idx)
result = df.unstack(("A", "a"))
expected = DataFrame(
[[1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2], [1, 1, 1, 2, 2, 2]],
columns=MultiIndex.from_tuples(
[
("d", "a"),
("d", "b"),
("d", "c"),
("e", "a"),
("e", "b"),
("e", "c"),
],
names=[None, ("A", "a")],
),
index=Index([1, 2, 3], name=("B", "b")),
)
tm.assert_frame_equal(result, expected)
# Two cases: unstacking by a single tuple-named level, and by a mix of a
# tuple-named level and a plain-named level.
@pytest.mark.parametrize(
    "unstack_idx, expected_values, expected_index, expected_columns",
    [
        (
            ("A", "a"),
            [[1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2], [1, 1, 2, 2]],
            MultiIndex.from_tuples(
                [(1, 3), (1, 4), (2, 3), (2, 4)], names=["B", "C"]
            ),
            MultiIndex.from_tuples(
                [("d", "a"), ("d", "b"), ("e", "a"), ("e", "b")],
                names=[None, ("A", "a")],
            ),
        ),
        (
            (("A", "a"), "B"),
            [[1, 1, 1, 1, 2, 2, 2, 2], [1, 1, 1, 1, 2, 2, 2, 2]],
            Index([3, 4], name="C"),
            MultiIndex.from_tuples(
                [
                    ("d", "a", 1),
                    ("d", "a", 2),
                    ("d", "b", 1),
                    ("d", "b", 2),
                    ("e", "a", 1),
                    ("e", "a", 2),
                    ("e", "b", 1),
                    ("e", "b", 2),
                ],
                names=[None, ("A", "a"), "B"],
            ),
        ),
    ],
)
def test_unstack_mixed_type_name_in_multiindex(
    self, unstack_idx, expected_values, expected_index, expected_columns
):
    # GH 19966: unstacking must accept tuple level names mixed with
    # ordinary level names.
    idx = MultiIndex.from_product(
        [["a", "b"], [1, 2], [3, 4]], names=[("A", "a"), "B", "C"]
    )
    df = DataFrame({"d": [1] * 8, "e": [2] * 8}, index=idx)

    result = df.unstack(unstack_idx)

    expected = DataFrame(
        expected_values, columns=expected_columns, index=expected_index
    )
    tm.assert_frame_equal(result, expected)
def test_unstack_preserve_dtypes(self):
# Checks fix for #11847
df = DataFrame(
{
"state": ["IL", "MI", "NC"],
"index": ["a", "b", "c"],
"some_categories": Series(["a", "b", "c"]).astype("category"),
"A": np.random.default_rng(2).random(3),
"B": 1,
"C": "foo",
"D": pd.Timestamp("20010102"),
"E": Series([1.0, 50.0, 100.0]).astype("float32"),
"F": Series([3.0, 4.0, 5.0]).astype("float64"),
"G": False,
"H": Series([1, 200, 923442]).astype("int8"),
}
)
def unstack_and_compare(df, column_name):
unstacked1 = df.unstack([column_name])
unstacked2 = df.unstack(column_name)
tm.assert_frame_equal(unstacked1, unstacked2)
df1 = df.set_index(["state", "index"])
unstack_and_compare(df1, "index")
df1 = df.set_index(["state", "some_categories"])
unstack_and_compare(df1, "some_categories")
df1 = df.set_index(["F", "C"])
unstack_and_compare(df1, "F")
df1 = df.set_index(["G", "B", "state"])
unstack_and_compare(df1, "B")
df1 = df.set_index(["E", "A"])
unstack_and_compare(df1, "E")
df1 = df.set_index(["state", "index"])
s = df1["A"]
unstack_and_compare(s, "index")
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_stack_ints(self, future_stack):
    # Stacking two levels at once must equal stacking them one at a time,
    # for integer level positions (positive and negative) and named levels.
    columns = MultiIndex.from_tuples(list(itertools.product(range(3), repeat=3)))
    df = DataFrame(
        np.random.default_rng(2).standard_normal((30, 27)), columns=columns
    )

    tm.assert_frame_equal(
        df.stack(level=[1, 2], future_stack=future_stack),
        df.stack(level=1, future_stack=future_stack).stack(
            level=1, future_stack=future_stack
        ),
    )
    # Negative level positions resolve to the same levels.
    tm.assert_frame_equal(
        df.stack(level=[-2, -1], future_stack=future_stack),
        df.stack(level=1, future_stack=future_stack).stack(
            level=1, future_stack=future_stack
        ),
    )

    df_named = df.copy()
    # set_names with inplace=True returns None.
    return_value = df_named.columns.set_names(range(3), inplace=True)
    assert return_value is None

    tm.assert_frame_equal(
        df_named.stack(level=[1, 2], future_stack=future_stack),
        df_named.stack(level=1, future_stack=future_stack).stack(
            level=1, future_stack=future_stack
        ),
    )
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_stack_mixed_levels(self, future_stack):
    """Stacking by a level whose *name* is an int must work when that int is
    also a valid level number, and mixing names with plain numbers must raise."""
    columns = MultiIndex.from_tuples(
        [
            ("A", "cat", "long"),
            ("B", "cat", "long"),
            ("A", "dog", "short"),
            ("B", "dog", "short"),
        ],
        names=["exp", "animal", "hair_length"],
    )
    df = DataFrame(
        np.random.default_rng(2).standard_normal((4, 4)), columns=columns
    )
    animal_hair_stacked = df.stack(
        level=["animal", "hair_length"], future_stack=future_stack
    )
    exp_hair_stacked = df.stack(
        level=["exp", "hair_length"], future_stack=future_stack
    )
    # GH #8584: Need to check that stacking works when a number
    # is passed that is both a level name and in the range of
    # the level numbers
    df2 = df.copy()
    df2.columns.names = ["exp", "animal", 1]
    tm.assert_frame_equal(
        df2.stack(level=["animal", 1], future_stack=future_stack),
        animal_hair_stacked,
        check_names=False,
    )
    tm.assert_frame_equal(
        df2.stack(level=["exp", 1], future_stack=future_stack),
        exp_hair_stacked,
        check_names=False,
    )
    # When mixed types are passed and the ints are not level
    # names, raise
    msg = (
        "level should contain all level names or all level numbers, not "
        "a mixture of the two"
    )
    with pytest.raises(ValueError, match=msg):
        df2.stack(level=["animal", 0], future_stack=future_stack)
    # GH #8584: Having 0 in the level names could raise a
    # strange error about lexsort depth
    df3 = df.copy()
    df3.columns.names = ["exp", "animal", 0]
    tm.assert_frame_equal(
        df3.stack(level=["animal", 0], future_stack=future_stack),
        animal_hair_stacked,
        check_names=False,
    )
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_stack_int_level_names(self, future_stack):
    """When all column-level names are ints, stacking by those ints must
    select levels by *name*, including when the names are out of order."""
    columns = MultiIndex.from_tuples(
        [
            ("A", "cat", "long"),
            ("B", "cat", "long"),
            ("A", "dog", "short"),
            ("B", "dog", "short"),
        ],
        names=["exp", "animal", "hair_length"],
    )
    df = DataFrame(
        np.random.default_rng(2).standard_normal((4, 4)), columns=columns
    )
    exp_animal_stacked = df.stack(
        level=["exp", "animal"], future_stack=future_stack
    )
    animal_hair_stacked = df.stack(
        level=["animal", "hair_length"], future_stack=future_stack
    )
    exp_hair_stacked = df.stack(
        level=["exp", "hair_length"], future_stack=future_stack
    )
    df2 = df.copy()
    df2.columns.names = [0, 1, 2]
    tm.assert_frame_equal(
        df2.stack(level=[1, 2], future_stack=future_stack),
        animal_hair_stacked,
        check_names=False,
    )
    tm.assert_frame_equal(
        df2.stack(level=[0, 1], future_stack=future_stack),
        exp_animal_stacked,
        check_names=False,
    )
    tm.assert_frame_equal(
        df2.stack(level=[0, 2], future_stack=future_stack),
        exp_hair_stacked,
        check_names=False,
    )
    # Out-of-order int column names
    df3 = df.copy()
    df3.columns.names = [2, 0, 1]
    tm.assert_frame_equal(
        df3.stack(level=[0, 1], future_stack=future_stack),
        animal_hair_stacked,
        check_names=False,
    )
    tm.assert_frame_equal(
        df3.stack(level=[2, 0], future_stack=future_stack),
        exp_animal_stacked,
        check_names=False,
    )
    tm.assert_frame_equal(
        df3.stack(level=[2, 1], future_stack=future_stack),
        exp_hair_stacked,
        check_names=False,
    )
def test_unstack_bool(self):
df = DataFrame(
[False, False],
index=MultiIndex.from_arrays([["a", "b"], ["c", "l"]]),
columns=["col"],
)
rs = df.unstack()
xp = DataFrame(
np.array([[False, np.nan], [np.nan, False]], dtype=object),
index=["a", "b"],
columns=MultiIndex.from_arrays([["col", "col"], ["c", "l"]]),
)
tm.assert_frame_equal(rs, xp)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_unstack_level_binding(self, future_stack):
    """Round-tripping unstack([1, 2]) then stack(0) must bind levels by
    position, yielding the expected 'third'-level columns."""
    # GH9856
    mi = MultiIndex(
        levels=[["foo", "bar"], ["one", "two"], ["a", "b"]],
        codes=[[0, 0, 1, 1], [0, 1, 0, 1], [1, 0, 1, 0]],
        names=["first", "second", "third"],
    )
    s = Series(0, index=mi)
    result = s.unstack([1, 2]).stack(0, future_stack=future_stack)
    expected_mi = MultiIndex(
        levels=[["foo", "bar"], ["one", "two"]],
        codes=[[0, 0, 1, 1], [0, 1, 0, 1]],
        names=["first", "second"],
    )
    expected = DataFrame(
        np.array(
            [[0, np.nan], [np.nan, 0], [0, np.nan], [np.nan, 0]], dtype=np.float64
        ),
        index=expected_mi,
        columns=Index(["b", "a"], name="third"),
    )
    tm.assert_frame_equal(result, expected)
def test_unstack_to_series(self, float_frame):
    """unstack() of a frame yields a Series and is reversible; repeated
    unstacking composes back to the original frame and preserves NaNs."""
    # check reversibility
    data = float_frame.unstack()
    assert isinstance(data, Series)
    undo = data.unstack().T
    tm.assert_frame_equal(undo, float_frame)
    # check NA handling
    data = DataFrame({"x": [1, 2, np.nan], "y": [3.0, 4, np.nan]})
    data.index = Index(["a", "b", "c"])
    result = data.unstack()
    midx = MultiIndex(
        levels=[["x", "y"], ["a", "b", "c"]],
        codes=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]],
    )
    expected = Series([1, 2, np.nan, 3, 4, np.nan], index=midx)
    tm.assert_series_equal(result, expected)
    # check composability of unstack
    old_data = data.copy()
    for _ in range(4):
        # four unstacks cycle back to a frame equal to the original
        data = data.unstack()
    tm.assert_frame_equal(old_data, data)
def test_unstack_dtypes(self, using_infer_string):
    """Unstacking preserves dtypes: all-int stays int64, a float column stays
    float64, and a string column becomes StringDtype or object depending on
    the infer-string setting."""
    # GH 2929
    rows = [[1, 1, 3, 4], [1, 2, 3, 4], [2, 1, 3, 4], [2, 2, 3, 4]]
    df = DataFrame(rows, columns=list("ABCD"))
    result = df.dtypes
    expected = Series([np.dtype("int64")] * 4, index=list("ABCD"))
    tm.assert_series_equal(result, expected)
    # single dtype
    df2 = df.set_index(["A", "B"])
    df3 = df2.unstack("B")
    result = df3.dtypes
    expected = Series(
        [np.dtype("int64")] * 4,
        index=MultiIndex.from_arrays(
            [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
        ),
    )
    tm.assert_series_equal(result, expected)
    # mixed
    df2 = df.set_index(["A", "B"])
    df2["C"] = 3.0
    df3 = df2.unstack("B")
    result = df3.dtypes
    expected = Series(
        [np.dtype("float64")] * 2 + [np.dtype("int64")] * 2,
        index=MultiIndex.from_arrays(
            [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
        ),
    )
    tm.assert_series_equal(result, expected)
    df2["D"] = "foo"
    df3 = df2.unstack("B")
    result = df3.dtypes
    # string dtype depends on the infer-string option
    dtype = (
        pd.StringDtype(na_value=np.nan)
        if using_infer_string
        else np.dtype("object")
    )
    expected = Series(
        [np.dtype("float64")] * 2 + [dtype] * 2,
        index=MultiIndex.from_arrays(
            [["C", "C", "D", "D"], [1, 2, 1, 2]], names=(None, "B")
        ),
    )
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
    "c, d",
    (
        (np.zeros(5), np.zeros(5)),
        (np.arange(5, dtype="f8"), np.arange(5, 10, dtype="f8")),
    ),
)
def test_unstack_dtypes_mixed_date(self, c, d):
    """Unstacking a frame with a datetime index level and an int64-cast
    column must match unstacking an equivalent plain frame."""
    # GH7405
    df = DataFrame(
        {
            "A": ["a"] * 5,
            "C": c,
            "D": d,
            "B": date_range("2012-01-01", periods=5),
        }
    )
    right = df.iloc[:3].copy(deep=True)
    df = df.set_index(["A", "B"])
    df["D"] = df["D"].astype("int64")
    left = df.iloc[:3].unstack(0)
    right = right.set_index(["A", "B"]).unstack(0)
    # match the int64 cast applied to the left-hand frame above
    right[("D", "a")] = right[("D", "a")].astype("int64")
    assert left.shape == (3, 2)
    tm.assert_frame_equal(left, right)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_unstack_non_unique_index_names(self, future_stack):
    """Unstacking/stacking by a level name that appears more than once in the
    index must raise ValueError rather than pick one arbitrarily."""
    idx = MultiIndex.from_tuples([("a", "b"), ("c", "d")], names=["c1", "c1"])
    df = DataFrame([1, 2], index=idx)
    msg = "The name c1 occurs multiple times, use a level number"
    with pytest.raises(ValueError, match=msg):
        df.unstack("c1")
    with pytest.raises(ValueError, match=msg):
        df.T.stack("c1", future_stack=future_stack)
def test_unstack_unused_levels(self):
# GH 17845: unused codes in index make unstack() cast int to float
idx = MultiIndex.from_product([["a"], ["A", "B", "C", "D"]])[:-1]
df = DataFrame([[1, 0]] * 3, index=idx)
result = df.unstack()
exp_col = MultiIndex.from_product([range(2), ["A", "B", "C"]])
expected = DataFrame([[1, 1, 1, 0, 0, 0]], index=["a"], columns=exp_col)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
# Unused items on both levels
levels = [range(3), range(4)]
codes = [[0, 0, 1, 1], [0, 2, 0, 2]]
idx = MultiIndex(levels, codes)
block = np.arange(4).reshape(2, 2)
df = DataFrame(np.concatenate([block, block + 4]), index=idx)
result = df.unstack()
expected = DataFrame(
np.concatenate([block * 2, block * 2 + 1], axis=1), columns=idx
)
tm.assert_frame_equal(result, expected)
assert (result.columns.levels[1] == idx.levels[1]).all()
@pytest.mark.parametrize(
    "level, idces, col_level, idx_level",
    (
        (0, [13, 16, 6, 9, 2, 5, 8, 11], [np.nan, "a", 2], [np.nan, 5, 1]),
        (1, [8, 11, 1, 4, 12, 15, 13, 16], [np.nan, 5, 1], [np.nan, "a", 2]),
    ),
)
def test_unstack_unused_levels_mixed_with_nan(
    self, level, idces, col_level, idx_level
):
    """Unstacking either level of a mixed-dtype MultiIndex containing -1
    (NaN) codes must scatter the data into the expected NaN-padded grid."""
    # With mixed dtype and NaN
    levels = [["a", 2, "c"], [1, 3, 5, 7]]
    codes = [[0, -1, 1, 1], [0, 2, -1, 2]]
    idx = MultiIndex(levels, codes)
    data = np.arange(8)
    df = DataFrame(data.reshape(4, 2), index=idx)
    result = df.unstack(level=level)
    # start from an all-NaN grid, then place data at the parametrized slots
    exp_data = np.zeros(18) * np.nan
    exp_data[idces] = data
    cols = MultiIndex.from_product([range(2), col_level])
    expected = DataFrame(exp_data.reshape(3, 6), index=idx_level, columns=cols)
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("cols", [["A", "C"], slice(None)])
def test_unstack_unused_level(self, cols):
# GH 18562 : unused codes on the unstacked level
df = DataFrame([[2010, "a", "I"], [2011, "b", "II"]], columns=["A", "B", "C"])
ind = df.set_index(["A", "B", "C"], drop=False)
selection = ind.loc[(slice(None), slice(None), "I"), cols]
result = selection.unstack()
expected = ind.iloc[[0]][cols]
expected.columns = MultiIndex.from_product(
[expected.columns, ["I"]], names=[None, "C"]
)
expected.index = expected.index.droplevel("C")
tm.assert_frame_equal(result, expected)
def test_unstack_long_index(self):
    """Unstacking many index levels at once must not raise (regression for
    an error that only occurred with a large number of levels)."""
    # PH 32624: Error when using a lot of indices to unstack.
    # The error occurred only, if a lot of indices are used.
    df = DataFrame(
        [[1]],
        columns=MultiIndex.from_tuples([[0]], names=["c1"]),
        index=MultiIndex.from_tuples(
            [[0, 0, 1, 0, 0, 0, 1]],
            names=["i1", "i2", "i3", "i4", "i5", "i6", "i7"],
        ),
    )
    result = df.unstack(["i2", "i3", "i4", "i5", "i6", "i7"])
    expected = DataFrame(
        [[1]],
        columns=MultiIndex.from_tuples(
            [[0, 0, 1, 0, 0, 0, 1]],
            names=["c1", "i2", "i3", "i4", "i5", "i6", "i7"],
        ),
        index=Index([0], name="i1"),
    )
    tm.assert_frame_equal(result, expected)
def test_unstack_multi_level_cols(self):
# PH 24729: Unstack a df with multi level columns
df = DataFrame(
[[0.0, 0.0], [0.0, 0.0]],
columns=MultiIndex.from_tuples(
[["B", "C"], ["B", "D"]], names=["c1", "c2"]
),
index=MultiIndex.from_tuples(
[[10, 20, 30], [10, 20, 40]], names=["i1", "i2", "i3"]
),
)
assert df.unstack(["i2", "i1"]).columns.names[-2:] == ["i2", "i1"]
def test_unstack_multi_level_rows_and_cols(self):
    """With MultiIndex on both axes, unstacking two levels at once must
    equal unstacking them one at a time."""
    # PH 28306: Unstack df with multi level cols and rows
    df = DataFrame(
        [[1, 2], [3, 4], [-1, -2], [-3, -4]],
        columns=MultiIndex.from_tuples([["a", "b", "c"], ["d", "e", "f"]]),
        index=MultiIndex.from_tuples(
            [
                ["m1", "P3", 222],
                ["m1", "A5", 111],
                ["m2", "P3", 222],
                ["m2", "A5", 111],
            ],
            names=["i1", "i2", "i3"],
        ),
    )
    result = df.unstack(["i3", "i2"])
    expected = df.unstack(["i3"]).unstack(["i2"])
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("idx", [("jim", "joe"), ("joe", "jim")])
@pytest.mark.parametrize("lev", list(range(2)))
def test_unstack_nan_index1(self, idx, lev):
# GH7466
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
df = DataFrame(
{
"jim": ["a", "b", np.nan, "d"],
"joe": ["w", "x", "y", "z"],
"jolie": ["a.w", "b.x", " .y", "d.z"],
}
)
left = df.set_index(["jim", "joe"]).unstack()["jolie"]
right = df.set_index(["joe", "jim"]).unstack()["jolie"].T
tm.assert_frame_equal(left, right)
mi = df.set_index(list(idx))
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == len(df)
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = udf["jolie"].notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(udf["jolie"].iloc[i, j].split("."))
right = mk_list(udf["jolie"].index[i]) + mk_list(udf["jolie"].columns[j])
right = sorted(map(cast, right))
assert left == right
@pytest.mark.parametrize("idx", itertools.permutations(["1st", "2nd", "3rd"]))
@pytest.mark.parametrize("lev", list(range(3)))
@pytest.mark.parametrize("col", ["4th", "5th"])
def test_unstack_nan_index_repeats(self, idx, lev, col):
def cast(val):
val_str = "" if val != val else val
return f"{val_str:1}"
df = DataFrame(
{
"1st": ["d"] * 3
+ [np.nan] * 5
+ ["a"] * 2
+ ["c"] * 3
+ ["e"] * 2
+ ["b"] * 5,
"2nd": ["y"] * 2
+ ["w"] * 3
+ [np.nan] * 3
+ ["z"] * 4
+ [np.nan] * 3
+ ["x"] * 3
+ [np.nan] * 2,
"3rd": [
67,
39,
53,
72,
57,
80,
31,
18,
11,
30,
59,
50,
62,
59,
76,
52,
14,
53,
60,
51,
],
}
)
df["4th"], df["5th"] = (
df.apply(lambda r: ".".join(map(cast, r)), axis=1),
df.apply(lambda r: ".".join(map(cast, r.iloc[::-1])), axis=1),
)
mi = df.set_index(list(idx))
udf = mi.unstack(level=lev)
assert udf.notna().values.sum() == 2 * len(df)
mk_list = lambda a: list(a) if isinstance(a, tuple) else [a]
rows, cols = udf[col].notna().values.nonzero()
for i, j in zip(rows, cols):
left = sorted(udf[col].iloc[i, j].split("."))
right = mk_list(udf[col].index[i]) + mk_list(udf[col].columns[j])
right = sorted(map(cast, right))
assert left == right
def test_unstack_nan_index2(self):
    """A NaN in the remaining (non-unstacked) level must become its own
    row label, sorted first, for several placements of the NaN."""
    # GH7403
    df = DataFrame({"A": list("aaaabbbb"), "B": range(8), "C": range(8)})
    # Explicit cast to avoid implicit cast when setting to np.nan
    df = df.astype({"B": "float"})
    df.iloc[3, 1] = np.nan
    left = df.set_index(["A", "B"]).unstack(0)
    vals = [
        [3, 0, 1, 2, np.nan, np.nan, np.nan, np.nan],
        [np.nan, np.nan, np.nan, np.nan, 4, 5, 6, 7],
    ]
    # transpose the per-column value lists into rows
    vals = list(map(list, zip(*vals)))
    idx = Index([np.nan, 0, 1, 2, 4, 5, 6, 7], name="B")
    cols = MultiIndex(
        levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
    )
    right = DataFrame(vals, columns=cols, index=idx)
    tm.assert_frame_equal(left, right)
    df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
    # Explicit cast to avoid implicit cast when setting to np.nan
    df = df.astype({"B": "float"})
    df.iloc[2, 1] = np.nan
    left = df.set_index(["A", "B"]).unstack(0)
    vals = [[2, np.nan], [0, 4], [1, 5], [np.nan, 6], [3, 7]]
    cols = MultiIndex(
        levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
    )
    idx = Index([np.nan, 0, 1, 2, 3], name="B")
    right = DataFrame(vals, columns=cols, index=idx)
    tm.assert_frame_equal(left, right)
    df = DataFrame({"A": list("aaaabbbb"), "B": list(range(4)) * 2, "C": range(8)})
    # Explicit cast to avoid implicit cast when setting to np.nan
    df = df.astype({"B": "float"})
    df.iloc[3, 1] = np.nan
    left = df.set_index(["A", "B"]).unstack(0)
    vals = [[3, np.nan], [0, 4], [1, 5], [2, 6], [np.nan, 7]]
    cols = MultiIndex(
        levels=[["C"], ["a", "b"]], codes=[[0, 0], [0, 1]], names=[None, "A"]
    )
    idx = Index([np.nan, 0, 1, 2, 3], name="B")
    right = DataFrame(vals, columns=cols, index=idx)
    tm.assert_frame_equal(left, right)
def test_unstack_nan_index3(self):
    """A NaT/NaN in an unstacked datetime level must become a -1-coded
    (missing) column label, not drop the data."""
    # GH7401
    df = DataFrame(
        {
            "A": list("aaaaabbbbb"),
            "B": (date_range("2012-01-01", periods=5).tolist() * 2),
            "C": np.arange(10),
        }
    )
    df.iloc[3, 1] = np.nan
    left = df.set_index(["A", "B"]).unstack()
    vals = np.array([[3, 0, 1, 2, np.nan, 4], [np.nan, 5, 6, 7, 8, 9]])
    idx = Index(["a", "b"], name="A")
    cols = MultiIndex(
        levels=[["C"], date_range("2012-01-01", periods=5)],
        codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
        names=[None, "B"],
    )
    right = DataFrame(vals, columns=cols, index=idx)
    tm.assert_frame_equal(left, right)
def test_unstack_nan_index4(self):
    """Unstacking with NaN in a middle index level ('dosage') must yield a
    -1-coded row label, both for the full frame and for a slice of it."""
    # GH4862
    vals = [
        ["Hg", np.nan, np.nan, 680585148],
        ["U", 0.0, np.nan, 680585148],
        ["Pb", 7.07e-06, np.nan, 680585148],
        ["Sn", 2.3614e-05, 0.0133, 680607017],
        ["Ag", 0.0, 0.0133, 680607017],
        ["Hg", -0.00015, 0.0133, 680607017],
    ]
    df = DataFrame(
        vals,
        columns=["agent", "change", "dosage", "s_id"],
        index=[17263, 17264, 17265, 17266, 17267, 17268],
    )
    left = df.copy().set_index(["s_id", "dosage", "agent"]).unstack()
    vals = [
        [np.nan, np.nan, 7.07e-06, np.nan, 0.0],
        [0.0, -0.00015, np.nan, 2.3614e-05, np.nan],
    ]
    idx = MultiIndex(
        levels=[[680585148, 680607017], [0.0133]],
        codes=[[0, 1], [-1, 0]],
        names=["s_id", "dosage"],
    )
    cols = MultiIndex(
        levels=[["change"], ["Ag", "Hg", "Pb", "Sn", "U"]],
        codes=[[0, 0, 0, 0, 0], [0, 1, 2, 3, 4]],
        names=[None, "agent"],
    )
    right = DataFrame(vals, columns=cols, index=idx)
    tm.assert_frame_equal(left, right)
    # slicing off the first row must still unstack to the same result
    left = df.loc[17264:].copy().set_index(["s_id", "dosage", "agent"])
    tm.assert_frame_equal(left.unstack(), right)
def test_unstack_nan_index5(self):
    """Unstacking two levels at once, with NaNs/NaTs scattered in both, must
    keep every value addressable at its (row, column) coordinates."""
    # GH9497 - multiple unstack with nulls
    df = DataFrame(
        {
            "1st": [1, 2, 1, 2, 1, 2],
            "2nd": date_range("2014-02-01", periods=6, freq="D"),
            "jim": 100 + np.arange(6),
            "joe": (np.random.default_rng(2).standard_normal(6) * 10).round(2),
        }
    )
    df["3rd"] = df["2nd"] - pd.Timestamp("2014-02-02")
    df.loc[1, "2nd"] = df.loc[3, "2nd"] = np.nan
    df.loc[1, "3rd"] = df.loc[4, "3rd"] = np.nan
    left = df.set_index(["1st", "2nd", "3rd"]).unstack(["2nd", "3rd"])
    # both value columns survive: 2 non-null cells per source row
    assert left.notna().values.sum() == 2 * len(df)
    for col in ["jim", "joe"]:
        for _, r in df.iterrows():
            # every original value must be retrievable by its labels
            key = r["1st"], (col, r["2nd"], r["3rd"])
            assert r[col] == left.loc[key]
def test_stack_datetime_column_multiIndex(self, future_stack):
    """Stacking with a datetime value in a MultiIndex column tuple must work,
    warning about the legacy stack implementation unless future_stack is on."""
    # GH 8039
    t = datetime(2014, 1, 1)
    df = DataFrame([1, 2, 3, 4], columns=MultiIndex.from_tuples([(t, "A", "B")]))
    # legacy stack emits a deprecation warning; future_stack does not
    warn = None if future_stack else Pandas4Warning
    msg = "The previous implementation of stack is deprecated"
    with tm.assert_produces_warning(warn, match=msg):
        result = df.stack(future_stack=future_stack)
    eidx = MultiIndex.from_product([range(4), ("B",)])
    ecols = MultiIndex.from_tuples([(t, "A")])
    expected = DataFrame([1, 2, 3, 4], index=eidx, columns=ecols)
    tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
@pytest.mark.parametrize(
    "multiindex_columns",
    [
        [0, 1, 2, 3, 4],
        [0, 1, 2, 3],
        [0, 1, 2, 4],
        [0, 1, 2],
        [1, 2, 3],
        [2, 3, 4],
        [0, 1],
        [0, 2],
        [0, 3],
        [0],
        [2],
        [4],
        [4, 3, 2, 1, 0],
        [3, 2, 1, 0],
        [4, 2, 1, 0],
        [2, 1, 0],
        [3, 2, 1],
        [4, 3, 2],
        [1, 0],
        [2, 0],
        [3, 0],
    ],
)
@pytest.mark.parametrize("level", (-1, 0, 1, [0, 1], [1, 0]))
def test_stack_partial_multiIndex(self, multiindex_columns, level, future_stack):
    """Stacking arbitrary column subsets/levels must be insensitive to
    dropna for single levels, and to rebuilding the column MultiIndex."""
    # GH 8844
    dropna = False if not future_stack else lib.no_default
    full_multiindex = MultiIndex.from_tuples(
        [("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
        names=["Upper", "Lower"],
    )
    multiindex = full_multiindex[multiindex_columns]
    df = DataFrame(
        np.arange(3 * len(multiindex)).reshape(3, len(multiindex)),
        columns=multiindex,
    )
    result = df.stack(level=level, dropna=dropna, future_stack=future_stack)
    if isinstance(level, int) and not future_stack:
        # Stacking a single level should not make any all-NaN rows,
        # so df.stack(level=level, dropna=False) should be the same
        # as df.stack(level=level, dropna=True).
        expected = df.stack(level=level, dropna=True, future_stack=future_stack)
        if isinstance(expected, Series):
            tm.assert_series_equal(result, expected)
        else:
            tm.assert_frame_equal(result, expected)
    # rebuilding the columns from tuples must not change the stack result
    df.columns = MultiIndex.from_tuples(
        df.columns.to_numpy(), names=df.columns.names
    )
    expected = df.stack(level=level, dropna=dropna, future_stack=future_stack)
    if isinstance(expected, Series):
        tm.assert_series_equal(result, expected)
    else:
        tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_stack_full_multiIndex(self, future_stack):
    """Stacking a frame whose columns use only part of the full Lower level
    must produce NaN cells and keep column B's original dtype."""
    # GH 8844
    full_multiindex = MultiIndex.from_tuples(
        [("B", "x"), ("B", "z"), ("A", "y"), ("C", "x"), ("C", "u")],
        names=["Upper", "Lower"],
    )
    df = DataFrame(np.arange(6).reshape(2, 3), columns=full_multiindex[[0, 1, 3]])
    dropna = False if not future_stack else lib.no_default
    result = df.stack(dropna=dropna, future_stack=future_stack)
    expected = DataFrame(
        [[0, 2], [1, np.nan], [3, 5], [4, np.nan]],
        index=MultiIndex(
            levels=[range(2), ["u", "x", "y", "z"]],
            codes=[[0, 0, 1, 1], [1, 3, 1, 3]],
            names=[None, "Lower"],
        ),
        columns=Index(["B", "C"], name="Upper"),
    )
    # column B has no NaNs, so it keeps the original integer dtype
    expected["B"] = expected["B"].astype(df.dtypes.iloc[0])
    tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
@pytest.mark.parametrize("ordered", [False, True])
def test_stack_preserve_categorical_dtype(self, ordered, future_stack):
    """Stacking a frame with CategoricalIndex columns must keep the
    categorical dtype (ordered or not) in the resulting index level."""
    # GH13854
    cidx = pd.CategoricalIndex(list("yxz"), categories=list("xyz"), ordered=ordered)
    df = DataFrame([[10, 11, 12]], columns=cidx)
    result = df.stack(future_stack=future_stack)
    # `MultiIndex.from_product` preserves categorical dtype -
    # it's tested elsewhere.
    midx = MultiIndex.from_product([df.index, cidx])
    expected = Series([10, 11, 12], index=midx)
    tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
@pytest.mark.parametrize("ordered", [False, True])
@pytest.mark.parametrize(
    "labels,data",
    [
        (list("xyz"), [10, 11, 12, 13, 14, 15]),
        (list("zyx"), [14, 15, 12, 13, 10, 11]),
    ],
)
def test_stack_multi_preserve_categorical_dtype(
    self, ordered, labels, data, future_stack
):
    """Stacking both levels of categorical MultiIndex columns must preserve
    the categorical dtypes; label/data order depends on future_stack."""
    # GH-36991
    cidx = pd.CategoricalIndex(labels, categories=sorted(labels), ordered=ordered)
    cidx2 = pd.CategoricalIndex(["u", "v"], ordered=ordered)
    midx = MultiIndex.from_product([cidx, cidx2])
    df = DataFrame([sorted(data)], columns=midx)
    result = df.stack([0, 1], future_stack=future_stack)
    # future_stack keeps appearance order; legacy stack sorts the labels
    labels = labels if future_stack else sorted(labels)
    s_cidx = pd.CategoricalIndex(labels, ordered=ordered)
    expected_data = sorted(data) if future_stack else data
    expected = Series(
        expected_data, index=MultiIndex.from_product([range(1), s_cidx, cidx2])
    )
    tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
def test_stack_preserve_categorical_dtype_values(self, future_stack):
    """Stacking columns holding Categorical *values* must keep the values
    categorical in the resulting Series."""
    # GH-23077
    cat = pd.Categorical(["a", "a", "b", "c"])
    df = DataFrame({"A": cat, "B": cat})
    result = df.stack(future_stack=future_stack)
    index = MultiIndex.from_product([range(4), ["A", "B"]])
    expected = Series(
        pd.Categorical(["a", "a", "a", "a", "b", "b", "c", "c"]), index=index
    )
    tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
@pytest.mark.parametrize(
    "index",
    [
        [0, 0, 1, 1],
        [0, 0, 2, 3],
        [0, 1, 2, 3],
    ],
)
def test_stack_multi_columns_non_unique_index(self, index, future_stack):
    """Stacking with duplicate index labels must produce the same values and
    the same MultiIndex codes as rebuilding the index from tuples."""
    # GH-28301
    columns = MultiIndex.from_product([[1, 2], ["a", "b"]])
    df = DataFrame(index=index, columns=columns).fillna(1)
    stacked = df.stack(future_stack=future_stack)
    new_index = MultiIndex.from_tuples(stacked.index.to_numpy())
    expected = DataFrame(
        stacked.to_numpy(), index=new_index, columns=stacked.columns
    )
    tm.assert_frame_equal(stacked, expected)
    # the internal codes must also match, not just the label values
    stacked_codes = np.asarray(stacked.index.codes)
    expected_codes = np.asarray(new_index.codes)
    tm.assert_numpy_array_equal(stacked_codes, expected_codes)
@pytest.mark.filterwarnings(
    "ignore:The previous implementation of stack is deprecated"
)
@pytest.mark.parametrize(
    "vals1, vals2, dtype1, dtype2, expected_dtype",
    [
        ([1, 2], [3.0, 4.0], "Int64", "Float64", "Float64"),
        ([1, 2], ["foo", "bar"], "Int64", "string", "object"),
    ],
)
def test_stack_multi_columns_mixed_extension_types(
    self, vals1, vals2, dtype1, dtype2, expected_dtype, future_stack
):
    """Stacking columns of different extension dtypes must coerce to the
    expected common dtype, matching a stack via object and a final cast."""
    # GH45740
    df = DataFrame(
        {
            ("A", 1): Series(vals1, dtype=dtype1),
            ("A", 2): Series(vals2, dtype=dtype2),
        }
    )
    result = df.stack(future_stack=future_stack)
    expected = (
        df.astype(object).stack(future_stack=future_stack).astype(expected_dtype)
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("level", [0, 1])
def test_unstack_mixed_extension_types(self, level):
index = MultiIndex.from_tuples([("A", 0), ("A", 1), ("B", 1)], names=["a", "b"])
df = DataFrame(
{
"A": pd.array([0, 1, None], dtype="Int64"),
"B": pd.Categorical(["a", "a", "b"]),
},
index=index,
)
result = df.unstack(level=level)
expected = df.astype(object).unstack(level=level)
if level == 0:
expected[("A", "B")] = expected[("A", "B")].fillna(pd.NA)
else:
expected[("A", 0)] = expected[("A", 0)].fillna(pd.NA)
expected_dtypes = Series(
[df.A.dtype] * 2 + [df.B.dtype] * 2, index=result.columns
)
tm.assert_series_equal(result.dtypes, expected_dtypes)
tm.assert_frame_equal(result.astype(object), expected)
@pytest.mark.parametrize("level", [0, "baz"])
def test_unstack_swaplevel_sortlevel(self, level):
# GH 20994
mi = MultiIndex.from_product([range(1), ["d", "c"]], names=["bar", "baz"])
df = DataFrame([[0, 2], [1, 3]], index=mi, columns=["B", "A"])
df.columns.name = "foo"
expected = DataFrame(
[[3, 1, 2, 0]],
columns=MultiIndex.from_tuples(
[("c", "A"), ("c", "B"), ("d", "A"), ("d", "B")], names=["baz", "foo"]
),
)
expected.index.name = "bar"
result = df.unstack().swaplevel(axis=1).sort_index(axis=1, level=level)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
def test_unstack_sort_false(frame_or_series, dtype):
# GH 15105
index = MultiIndex.from_tuples(
[("two", "z", "b"), ("two", "y", "a"), ("one", "z", "b"), ("one", "y", "a")]
)
obj = frame_or_series(np.arange(1.0, 5.0), index=index, dtype=dtype)
result = obj.unstack(level=0, sort=False)
if frame_or_series is DataFrame:
expected_columns = MultiIndex.from_tuples([(0, "two"), (0, "one")])
else:
expected_columns = ["two", "one"]
expected = DataFrame(
[[1.0, 3.0], [2.0, 4.0]],
index=MultiIndex.from_tuples([("z", "b"), ("y", "a")]),
columns=expected_columns,
dtype=dtype,
)
tm.assert_frame_equal(result, expected)
result = obj.unstack(level=-1, sort=False)
if frame_or_series is DataFrame:
expected_columns = MultiIndex(
levels=[range(1), ["b", "a"]], codes=[[0, 0], [0, 1]]
)
else:
expected_columns = ["b", "a"]
item = pd.NA if dtype == "Float64" else np.nan
expected = DataFrame(
[[1.0, item], [item, 2.0], [3.0, item], [item, 4.0]],
columns=expected_columns,
index=MultiIndex.from_tuples(
[("two", "z"), ("two", "y"), ("one", "z"), ("one", "y")]
),
dtype=dtype,
)
tm.assert_frame_equal(result, expected)
result = obj.unstack(level=[1, 2], sort=False)
if frame_or_series is DataFrame:
expected_columns = MultiIndex(
levels=[range(1), ["z", "y"], ["b", "a"]], codes=[[0, 0], [0, 1], [0, 1]]
)
else:
expected_columns = MultiIndex.from_tuples([("z", "b"), ("y", "a")])
expected = DataFrame(
[[1.0, 2.0], [3.0, 4.0]],
index=["two", "one"],
columns=expected_columns,
dtype=dtype,
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "levels2, expected_columns",
    [
        (
            [None, 1, 2, 3],
            [("value", np.nan), ("value", 1), ("value", 2), ("value", 3)],
        ),
        (
            [1, None, 2, 3],
            [("value", 1), ("value", np.nan), ("value", 2), ("value", 3)],
        ),
        (
            [1, 2, None, 3],
            [("value", 1), ("value", 2), ("value", np.nan), ("value", 3)],
        ),
        (
            [1, 2, 3, None],
            [("value", 1), ("value", 2), ("value", 3), ("value", np.nan)],
        ),
    ],
    ids=["nan=first", "nan=second", "nan=third", "nan=last"],
)
def test_unstack_sort_false_nan(levels2, expected_columns):
    """With sort=False, a None/NaN label in the unstacked level must stay at
    its original position among the resulting columns."""
    # GH#61221
    levels1 = ["b", "a"]
    index = MultiIndex.from_product([levels1, levels2], names=["level1", "level2"])
    df = DataFrame({"value": [0, 1, 2, 3, 4, 5, 6, 7]}, index=index)
    result = df.unstack(level="level2", sort=False)
    expected_data = [[0, 4], [1, 5], [2, 6], [3, 7]]
    expected = DataFrame(
        dict(zip(expected_columns, expected_data)),
        index=Index(["b", "a"], name="level1"),
        columns=MultiIndex.from_tuples(expected_columns, names=[None, "level2"]),
    )
    tm.assert_frame_equal(result, expected)
def test_unstack_fill_frame_object():
    """Object-dtype Series unstacks with NaN holes by default; fill_value
    substitutes for the missing cells instead."""
    # GH12815 Test unstacking with object.
    ser = Series(["a", "b", "c", "a"], dtype="object")
    ser.index = MultiIndex.from_tuples(
        [("x", "a"), ("x", "b"), ("y", "b"), ("z", "a")]
    )
    # By default missing values will be NaN
    result = ser.unstack()
    expected = DataFrame(
        {"a": ["a", np.nan, "a"], "b": ["b", "c", np.nan]},
        index=list("xyz"),
        dtype=object,
    )
    tm.assert_frame_equal(result, expected)
    # Fill with any value replaces missing values as expected
    result = ser.unstack(fill_value="d")
    expected = DataFrame(
        {"a": ["a", "d", "a"], "b": ["b", "c", "d"]}, index=list("xyz"), dtype=object
    )
    tm.assert_frame_equal(result, expected)
def test_unstack_timezone_aware_values():
    """tz-aware timestamps must survive unstacking with their timezone."""
    # GH 18338
    ts = pd.Timestamp("2017-08-27 01:00:00.709949+0000", tz="UTC")
    frame = DataFrame(
        {"timestamp": [ts], "a": ["a"], "b": ["b"], "c": ["c"]},
        columns=["timestamp", "a", "b", "c"],
    )
    result = frame.set_index(["a", "b"]).unstack()
    expected_columns = MultiIndex(
        levels=[["timestamp", "c"], ["b"]],
        codes=[[0, 1], [0, 0]],
        names=[None, "b"],
    )
    expected = DataFrame(
        [[ts, "c"]],
        index=Index(["a"], name="a"),
        columns=expected_columns,
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
def test_stack_timezone_aware_values(future_stack):
    """Stacking a tz-aware datetime column must preserve the timezone."""
    # GH 19420
    ts = date_range(freq="D", start="20180101", end="20180103", tz="America/New_York")
    df = DataFrame({"A": ts}, index=["a", "b", "c"])
    result = df.stack(future_stack=future_stack)
    expected = Series(
        ts,
        index=MultiIndex(levels=[["a", "b", "c"], ["A"]], codes=[[0, 1, 2], [0, 0, 0]]),
    )
    tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
@pytest.mark.parametrize("dropna", [True, False, lib.no_default])
def test_stack_empty_frame(dropna, future_stack):
    """Stacking an empty frame yields an empty float64 Series with an empty
    MultiIndex; an explicit dropna with future_stack must raise."""
    # GH 36113
    levels = [pd.RangeIndex(0), pd.RangeIndex(0)]
    expected = Series(dtype=np.float64, index=MultiIndex(levels=levels, codes=[[], []]))
    if future_stack and dropna is not lib.no_default:
        # future_stack forbids passing dropna at all
        with pytest.raises(ValueError, match="dropna must be unspecified"):
            DataFrame(dtype=np.float64).stack(dropna=dropna, future_stack=future_stack)
    else:
        result = DataFrame(dtype=np.float64).stack(
            dropna=dropna, future_stack=future_stack
        )
        tm.assert_series_equal(result, expected)
@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
@pytest.mark.parametrize("dropna", [True, False, lib.no_default])
def test_stack_empty_level(dropna, future_stack, int_frame):
    """Stacking with level=[] is a no-op for both a populated and an empty
    frame; an explicit dropna with future_stack must raise."""
    # GH 60740
    if future_stack and dropna is not lib.no_default:
        # future_stack forbids passing dropna at all
        with pytest.raises(ValueError, match="dropna must be unspecified"):
            DataFrame(dtype=np.int64).stack(dropna=dropna, future_stack=future_stack)
    else:
        expected = int_frame
        result = int_frame.copy().stack(
            level=[], dropna=dropna, future_stack=future_stack
        )
        tm.assert_frame_equal(result, expected)
        expected = DataFrame()
        result = DataFrame().stack(level=[], dropna=dropna, future_stack=future_stack)
        tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
@pytest.mark.parametrize("dropna", [True, False, lib.no_default])
@pytest.mark.parametrize("fill_value", [None, 0])
def test_stack_unstack_empty_frame(dropna, fill_value, future_stack):
    """stack then unstack on an empty frame round-trips to an empty int64
    frame; an explicit dropna with future_stack must raise."""
    # GH 36113
    if future_stack and dropna is not lib.no_default:
        # future_stack forbids passing dropna at all
        with pytest.raises(ValueError, match="dropna must be unspecified"):
            DataFrame(dtype=np.int64).stack(
                dropna=dropna, future_stack=future_stack
            ).unstack(fill_value=fill_value)
    else:
        result = (
            DataFrame(dtype=np.int64)
            .stack(dropna=dropna, future_stack=future_stack)
            .unstack(fill_value=fill_value)
        )
        expected = DataFrame(dtype=np.int64)
        tm.assert_frame_equal(result, expected)
def test_unstack_single_index_series():
    """Unstacking a Series with a flat (non-Multi) index must raise."""
    # GH 36113
    flat = Series(dtype=np.int64)
    with pytest.raises(ValueError, match=r"index must be a MultiIndex to unstack.*"):
        flat.unstack()
def test_unstacking_multi_index_df():
# see gh-30740
df = DataFrame(
{
"name": ["Alice", "Bob"],
"score": [9.5, 8],
"employed": [False, True],
"kids": [0, 0],
"gender": ["female", "male"],
}
)
df = df.set_index(["name", "employed", "kids", "gender"])
df = df.unstack(["gender"], fill_value=0)
expected = df.unstack("employed", fill_value=0).unstack("kids", fill_value=0)
result = df.unstack(["employed", "kids"], fill_value=0)
expected = DataFrame(
[[9.5, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 8.0]],
index=Index(["Alice", "Bob"], name="name"),
columns=MultiIndex.from_tuples(
[
("score", "female", False, 0),
("score", "female", True, 0),
("score", "male", False, 0),
("score", "male", True, 0),
],
names=[None, "gender", "employed", "kids"],
),
)
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
def test_stack_positional_level_duplicate_column_names(future_stack):
# https://github.com/pandas-dev/pandas/issues/36353
columns = MultiIndex.from_product([("x", "y"), ("y", "z")], names=["a", "a"])
df = DataFrame([[1, 1, 1, 1]], columns=columns)
result = df.stack(0, future_stack=future_stack)
new_columns = Index(["y", "z"], name="a")
new_index = MultiIndex(
levels=[range(1), ["x", "y"]], codes=[[0, 0], [0, 1]], names=[None, "a"]
)
expected = DataFrame([[1, 1], [1, 1]], index=new_index, columns=new_columns)
tm.assert_frame_equal(result, expected)
def test_unstack_non_slice_like_blocks():
# Case where the mgr_locs of a DataFrame's underlying blocks are not slice-like
mi = MultiIndex.from_product([range(5), ["A", "B", "C"]])
df = DataFrame(
{
0: np.random.default_rng(2).standard_normal(15),
1: np.random.default_rng(2).standard_normal(15).astype(np.int64),
2: np.random.default_rng(2).standard_normal(15),
3: np.random.default_rng(2).standard_normal(15),
},
index=mi,
)
assert any(not x.mgr_locs.is_slice_like for x in df._mgr.blocks)
res = df.unstack()
expected = pd.concat([df[n].unstack() for n in range(4)], keys=range(4), axis=1)
tm.assert_frame_equal(res, expected)
@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
def test_stack_sort_false(future_stack):
# GH 15105
data = [[1, 2, 3.0, 4.0], [2, 3, 4.0, 5.0], [3, 4, np.nan, np.nan]]
df = DataFrame(
data,
columns=MultiIndex(
levels=[["B", "A"], ["x", "y"]], codes=[[0, 0, 1, 1], [0, 1, 0, 1]]
),
)
kwargs = {} if future_stack else {"sort": False}
result = df.stack(level=0, future_stack=future_stack, **kwargs)
if future_stack:
expected = DataFrame(
{
"x": [1.0, 3.0, 2.0, 4.0, 3.0, np.nan],
"y": [2.0, 4.0, 3.0, 5.0, 4.0, np.nan],
},
index=MultiIndex.from_arrays(
[[0, 0, 1, 1, 2, 2], ["B", "A", "B", "A", "B", "A"]]
),
)
else:
expected = DataFrame(
{"x": [1.0, 3.0, 2.0, 4.0, 3.0], "y": [2.0, 4.0, 3.0, 5.0, 4.0]},
index=MultiIndex.from_arrays([[0, 0, 1, 1, 2], ["B", "A", "B", "A", "B"]]),
)
tm.assert_frame_equal(result, expected)
# Codes sorted in this call
df = DataFrame(
data,
columns=MultiIndex.from_arrays([["B", "B", "A", "A"], ["x", "y", "x", "y"]]),
)
kwargs = {} if future_stack else {"sort": False}
result = df.stack(level=0, future_stack=future_stack, **kwargs)
tm.assert_frame_equal(result, expected)
@pytest.mark.filterwarnings("ignore:The previous implementation of stack is deprecated")
def test_stack_sort_false_multi_level(future_stack):
# GH 15105
idx = MultiIndex.from_tuples([("weight", "kg"), ("height", "m")])
df = DataFrame([[1.0, 2.0], [3.0, 4.0]], index=["cat", "dog"], columns=idx)
kwargs = {} if future_stack else {"sort": False}
result = df.stack([0, 1], future_stack=future_stack, **kwargs)
expected_index = MultiIndex.from_tuples(
[
("cat", "weight", "kg"),
("cat", "height", "m"),
("dog", "weight", "kg"),
("dog", "height", "m"),
]
)
expected = Series([1.0, 2.0, 3.0, 4.0], index=expected_index)
tm.assert_series_equal(result, expected)
| TestDataFrameReshape |
python | apache__airflow | airflow-ctl/src/airflowctl/ctl/cli_config.py | {
"start": 3773,
"end": 4337
} | class ____(argparse.ArgumentParser):
"""CustomParser to display help message."""
def _check_value(self, action, value):
"""Override _check_value and check conditionally added command."""
super()._check_value(action, value)
def error(self, message):
"""Override error and use print_help instead of print_usage."""
self.print_help()
self.exit(2, f"\n{self.prog} command error: {message}, see help above.\n")
# Used in Arg to enable `None` as a distinct value from "not passed"
_UNSET = object()
| DefaultHelpParser |
python | ray-project__ray | python/ray/tune/execution/tune_controller.py | {
"start": 83415,
"end": 84556
} | class ____:
"""The TuneController does not use a RayTrialExecutor anymore.
Instead, we pass this fake executor for searchers/schedulers to use
as an interface.
In the future, we should have the searchers/schedulers either interact with
the tune controller, or define a different API for more fine-grained scheduler
control.
"""
def __init__(self, tune_controller: TuneController):
self._tune_controller = tune_controller
def pause_trial(self, trial: Trial, should_checkpoint: bool = True):
return self._tune_controller._schedule_trial_pause(
trial, should_checkpoint=should_checkpoint
)
def save(
self,
trial: Trial,
result: Optional[Dict] = None,
) -> Optional[_FutureTrainingResult]:
return self._tune_controller._schedule_trial_save(trial=trial, result=result)
def has_resources_for_trial(self, trial: Trial):
return True
@property
def _resource_updater(self):
return self._tune_controller._resource_updater
def force_reconcilation_on_next_step_end(self):
pass
| _FakeRayTrialExecutor |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-pandas-ai/llama_index/readers/pandas_ai/base.py | {
"start": 440,
"end": 3540
} | class ____(BaseReader):
r"""
Pandas AI reader.
Light wrapper around https://github.com/gventuri/pandas-ai.
Args:
llm (Optional[pandas.llm]): LLM to use. Defaults to None.
concat_rows (bool): whether to concatenate all rows into one document.
If set to False, a Document will be created for each row.
True by default.
col_joiner (str): Separator to use for joining cols per row.
Set to ", " by default.
row_joiner (str): Separator to use for joining each row.
Only used when `concat_rows=True`.
Set to "\n" by default.
pandas_config (dict): Options for the `pandas.read_csv` function call.
Refer to https://pandas.pydata.org/docs/reference/api/pandas.read_csv.html
for more information.
Set to empty dict by default, this means pandas will try to figure
out the separators, table head, etc. on its own.
"""
def __init__(
self,
pandas_llm: Optional[PandasLLM] = None,
concat_rows: bool = True,
col_joiner: str = ", ",
row_joiner: str = "\n",
pandas_config: dict = {},
) -> None:
"""Init params."""
self._llm = pandas_llm or OpenAI()
self._pandasai_config = {"llm": self._llm}
self._concat_rows = concat_rows
self._col_joiner = col_joiner
self._row_joiner = row_joiner
self._pandas_config = pandas_config
def run_pandas_ai(
self,
initial_df: pd.DataFrame,
query: str,
is_conversational_answer: bool = False,
) -> Any:
"""Load dataframe."""
smart_df = SmartDataframe(initial_df, config=self._pandasai_config)
return smart_df.chat(query=query)
def load_data(
self,
initial_df: pd.DataFrame,
query: str,
is_conversational_answer: bool = False,
) -> List[Document]:
"""Parse file."""
result = self.run_pandas_ai(
initial_df, query, is_conversational_answer=is_conversational_answer
)
if is_conversational_answer:
return [Document(text=result)]
else:
if isinstance(result, (np.generic)):
result = pd.Series(result)
elif isinstance(result, (pd.Series, pd.DataFrame)):
pass
else:
raise ValueError(f"Unexpected type for result: {type(result)}")
# if not conversational answer, use Pandas CSV Reader
reader = PandasCSVReader(
concat_rows=self._concat_rows,
col_joiner=self._col_joiner,
row_joiner=self._row_joiner,
pandas_config=self._pandas_config,
)
with TemporaryDirectory() as tmpdir:
outpath = Path(tmpdir) / "out.csv"
with outpath.open("w") as f:
# TODO: add option to specify index=False
result.to_csv(f, index=False)
return reader.load_data(outpath)
| PandasAIReader |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-jina-ai-reader/source_jina_ai_reader/components.py | {
"start": 459,
"end": 1497
} | class ____(HttpRequester):
request_headers: Optional[Union[str, Mapping[str, str]]] = None
def __post_init__(self, parameters: Mapping[str, Any]) -> None:
super().__post_init__(parameters)
self._headers_interpolator = InterpolatedRequestInputProvider(
config=self.config, request_inputs=self.request_headers, parameters=parameters
)
# For appending bearer token only if api_key is present
def get_request_headers(
self,
*,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
next_page_token: Optional[Mapping[str, Any]] = None,
) -> Mapping[str, Any]:
headers = self._headers_interpolator.eval_request_inputs(stream_state, stream_slice, next_page_token)
if isinstance(headers, dict):
api_key = self.config.get("api_key")
if api_key:
headers.update({"Authorization": f"Bearer {api_key}"})
return headers
return {}
| JinaAiHttpRequester |
python | kamyu104__LeetCode-Solutions | Python/maximum-area-of-a-piece-of-cake-after-horizontal-and-vertical-cuts.py | {
"start": 41,
"end": 593
} | class ____(object):
def maxArea(self, h, w, horizontalCuts, verticalCuts):
"""
:type h: int
:type w: int
:type horizontalCuts: List[int]
:type verticalCuts: List[int]
:rtype: int
"""
def max_len(l, cuts):
cuts.sort()
l = max(cuts[0]-0, l-cuts[-1])
for i in xrange(1, len(cuts)):
l = max(l, cuts[i]-cuts[i-1])
return l
MOD = 10**9+7
return max_len(h, horizontalCuts) * max_len(w, verticalCuts) % MOD
| Solution |
python | scipy__scipy | scipy/sparse/_dok.py | {
"start": 19799,
"end": 21162
} | class ____(_dok_base, sparray):
"""
Dictionary Of Keys based sparse array.
This is an efficient structure for constructing sparse
arrays incrementally.
This can be instantiated in several ways:
dok_array(D)
where D is a 2-D ndarray
dok_array(S)
with another sparse array or matrix S (equivalent to S.todok())
dok_array((M,N), [dtype])
create the array with initial shape (M,N)
dtype is optional, defaulting to dtype='d'
Attributes
----------
dtype : dtype
Data type of the array
shape : 2-tuple
Shape of the array
ndim : int
Number of dimensions (this is always 2)
nnz
Number of nonzero elements
size
T
Notes
-----
Sparse arrays can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
- Allows for efficient O(1) access of individual elements.
- Duplicates are not allowed.
- Can be efficiently converted to a coo_array once constructed.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import dok_array
>>> S = dok_array((5, 5), dtype=np.float32)
>>> for i in range(5):
... for j in range(5):
... S[i, j] = i + j # Update element
"""
| dok_array |
python | matplotlib__matplotlib | galleries/examples/user_interfaces/embedding_in_wx3_sgskip.py | {
"start": 1052,
"end": 2837
} | class ____(wx.Panel):
def __init__(self, parent):
super().__init__(parent, -1)
self.fig = Figure((5, 4), 75)
self.canvas = FigureCanvas(self, -1, self.fig)
self.toolbar = NavigationToolbar(self.canvas) # matplotlib toolbar
self.toolbar.Realize()
# Now put all into a sizer
sizer = wx.BoxSizer(wx.VERTICAL)
# This way of adding to sizer allows resizing
sizer.Add(self.canvas, 1, wx.LEFT | wx.TOP | wx.GROW)
# Best to allow the toolbar to resize!
sizer.Add(self.toolbar, 0, wx.GROW)
self.SetSizer(sizer)
self.Fit()
def init_plot_data(self):
ax = self.fig.add_subplot()
x = np.arange(120.0) * 2 * np.pi / 60.0
y = np.arange(100.0) * 2 * np.pi / 50.0
self.x, self.y = np.meshgrid(x, y)
z = np.sin(self.x) + np.cos(self.y)
self.im = ax.imshow(z, cmap="RdBu", origin='lower')
zmax = np.max(z) - ERR_TOL
ymax_i, xmax_i = np.nonzero(z >= zmax)
if self.im.origin == 'upper':
ymax_i = z.shape[0] - ymax_i
self.lines = ax.plot(xmax_i, ymax_i, 'ko')
self.toolbar.update() # Not sure why this is needed - ADS
def GetToolBar(self):
# You will need to override GetToolBar if you are using an
# unmanaged toolbar in your frame
return self.toolbar
def OnWhiz(self, event):
self.x += np.pi / 15
self.y += np.pi / 20
z = np.sin(self.x) + np.cos(self.y)
self.im.set_array(z)
zmax = np.max(z) - ERR_TOL
ymax_i, xmax_i = np.nonzero(z >= zmax)
if self.im.origin == 'upper':
ymax_i = z.shape[0] - ymax_i
self.lines[0].set_data(xmax_i, ymax_i)
self.canvas.draw()
| PlotPanel |
python | miyuchina__mistletoe | test/test_markdown_renderer.py | {
"start": 165,
"end": 13451
} | class ____(unittest.TestCase):
@staticmethod
def roundtrip(input, **rendererArgs):
"""Parses the given markdown input and renders it back to markdown again."""
with MarkdownRenderer(**rendererArgs) as renderer:
return renderer.render(Document(input))
def test_empty_document(self):
input = []
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_paragraphs_and_blank_lines(self):
input = [
"Paragraph 1. Single line. Followed by two white-space-only lines.\n",
"\n",
"\n",
"Paragraph 2. Two\n",
"lines, no final line break.",
]
output = self.roundtrip(input)
# note: a line break is always added at the end of a paragraph.
self.assertEqual(output, "".join(input) + "\n")
def test_line_breaks(self):
input = [
"soft line break\n",
"hard line break (backslash)\\\n",
"another hard line break (double spaces) \n",
"yet another hard line break \n",
"that's all.\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_emphasized_and_strong(self):
input = ["*emphasized* __strong__ _**emphasized and strong**_\n"]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_strikethrough(self):
input = ["~~strikethrough~~\n"]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_escaped_chars(self):
input = ["\\*escaped, not emphasized\\*\n"]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_html_span(self):
input = ["so <p>hear ye</p><h1>\n"]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_code_span(self):
input = [
"a) `code span` b) ``trailing space, double apostrophes `` c) ` leading and trailing space `\n"
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_code_span_with_embedded_line_breaks(self):
input = [
"a `multi-line\n",
"code\n",
"span`.\n"
]
output = self.roundtrip(input)
expected = [
"a `multi-line code span`.\n"
]
self.assertEqual(output, "".join(expected))
def test_images_and_links(self):
input = [
"[a link](#url (title))\n",
"[another link](<url-in-angle-brackets> '*emphasized\n",
"title*')\n",
'![an \\[*image*\\], escapes and emphasis](#url "title")\n',
"<http://auto.link>\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_multiline_fragment(self):
input = [
"[a link](<url-in-angle-brackets> '*emphasized\n",
"title\n",
"spanning\n",
"many\n",
"lines*')\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_thematic_break(self):
input = [
" ** * ** * ** * **\n",
"followed by a paragraph of text\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_atx_headings(self):
input = [
"## atx *heading* ##\n",
"# another atx heading, without trailing hashes\n",
"###\n",
"^ empty atx heading\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_setext_headings(self):
input = [
"*setext*\n",
"heading!\n",
"===============\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_numbered_list(self):
input = [
" 22) *emphasized list item*\n",
" 96) \n",
" 128) here begins a nested list.\n",
" + apples\n",
" + bananas\n",
]
output = self.roundtrip(input)
expected = [
" 22) *emphasized list item*\n",
" 96) \n",
" 128) here begins a nested list.\n",
" + apples\n",
" + bananas\n",
]
self.assertEqual(output, "".join(expected))
def test_bulleted_list(self):
input = [
"* **test case**:\n",
" testing a link as the first item on a continuation line\n",
" [links must be indented][properly].\n",
"\n",
"[properly]: uri\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_list_item_margin_indentation_preserved(self):
# 0 to 4 spaces of indentation from the margin
input = [
"- 0 space: ok.\n",
" subsequent line.\n",
" - 1 space: ok.\n",
" subsequent line.\n",
" - 2 spaces: ok.\n",
" subsequent line.\n",
" - 3 spaces: ok.\n",
" subsequent line.\n",
" - 4 spaces: in the paragraph of the above list item.\n",
" subsequent line.\n",
]
output = self.roundtrip(input)
expected = [
"- 0 space: ok.\n",
" subsequent line.\n",
" - 1 space: ok.\n",
" subsequent line.\n",
" - 2 spaces: ok.\n",
" subsequent line.\n",
# note: we still always normalize the indentation of all list item lines:
" - 3 spaces: ok.\n",
" subsequent line.\n",
" - 4 spaces: in the paragraph of the above list item.\n",
" subsequent line.\n",
]
self.assertEqual(output, "".join(expected))
def test_list_item_margin_indentation_normalized(self):
# 0 to 4 spaces of indentation from the margin
input = [
"- 0 space: ok.\n",
" subsequent line.\n",
" - 1 space: ok.\n",
" subsequent line.\n",
" - 2 spaces: ok.\n",
" subsequent line.\n",
" - 3 spaces: ok.\n",
" subsequent line.\n",
" - 4 spaces: in the paragraph of the above list item.\n",
" subsequent line.\n",
]
output = self.roundtrip(input, normalize_whitespace=True)
expected = [
"- 0 space: ok.\n",
" subsequent line.\n",
"- 1 space: ok.\n",
" subsequent line.\n",
"- 2 spaces: ok.\n",
" subsequent line.\n",
"- 3 spaces: ok.\n",
" subsequent line.\n",
" - 4 spaces: in the paragraph of the above list item.\n",
" subsequent line.\n",
]
self.assertEqual(output, "".join(expected))
def test_list_item_indentation_after_leader_preserved(self):
# leaders followed by 1 to 5 spaces
input = [
"- 1 space: ok.\n",
" subsequent line.\n",
"- 2 spaces: ok.\n",
" subsequent line.\n",
"- 3 spaces: ok.\n",
" subsequent line.\n",
"- 4 spaces: ok.\n",
" subsequent line.\n",
"- 5 spaces: list item starting with indented code.\n",
" subsequent line.\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_list_item_indentation_after_leader_normalized(self):
# leaders followed by 1 to 5 spaces
input = [
"- 1 space: ok.\n",
" subsequent line.\n",
"- 2 spaces: ok.\n",
" subsequent line.\n",
"- 3 spaces: ok.\n",
" subsequent line.\n",
"- 4 spaces: ok.\n",
" subsequent line.\n",
"- 5 spaces: list item starting with indented code.\n",
" subsequent line.\n",
]
output = self.roundtrip(input, normalize_whitespace=True)
expected = [
"- 1 space: ok.\n",
" subsequent line.\n",
"- 2 spaces: ok.\n",
" subsequent line.\n",
"- 3 spaces: ok.\n",
" subsequent line.\n",
"- 4 spaces: ok.\n",
" subsequent line.\n",
"- 5 spaces: list item starting with indented code.\n",
" subsequent line.\n",
]
self.assertEqual(output, "".join(expected))
def test_code_blocks(self):
input = [
" this is an indented code block\n",
" on two lines \n",
" with some extra whitespace here and there, to be preserved \n",
" just as it is.\n",
"```\n",
"now for a fenced code block \n",
" where indentation is also preserved. as are the double spaces at the end of this line: \n",
"```\n",
" ~~~this is an info string: behold the fenced code block with tildes!\n",
" *tildes are great*\n",
" ~~~\n",
"1. a list item with an embedded\n",
"\n",
" indented code block.\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_blank_lines_following_code_block(self):
input = [
" code block\n",
"\n",
"paragraph.\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_html_block(self):
input = [
"<h1>some text <img src='https://cdn.rawgit.com/' align='right'></h1>\n",
"<br>\n",
"\n",
"+ <h1>html block embedded in list <img src='https://cdn.rawgit.com/' align='right'></h1>\n",
" <br>\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_block_quote(self):
input = [
"> a block quote\n",
"> > and a nested block quote\n",
"> 1. > and finally, a list with a nested block quote\n",
"> > which continues on a second line.\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_link_reference_definition(self):
input = [
"[label]: https://domain.com\n",
"\n",
"paragraph [with a link][label-2], etc, etc.\n",
"and [a *second* link][label] as well\n",
"shortcut [label] & collapsed [label][]\n",
"\n",
"[label-2]: <https://libraries.io/> 'title\n",
"with line break'\n",
"[label-not-referred-to]: https://foo (title)\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_table(self):
input = [
"| Emoji | Description |\n",
"| :---: | ------------------------- |\n",
"| 📚 | Update documentation. |\n",
"| 🐎 | Performance improvements. |\n",
"etc, etc\n",
]
output = self.roundtrip(input)
self.assertEqual(output, "".join(input))
def test_table_with_varying_column_counts(self):
input = [
" | header | x | \n",
" | --- | ---: | \n",
" | . | Performance improvements. | an extra column | \n",
"etc, etc\n",
]
output = self.roundtrip(input)
expected = [
"| header | x | |\n",
"| ------ | ------------------------: | --------------- |\n",
"| . | Performance improvements. | an extra column |\n",
"etc, etc\n",
]
self.assertEqual(output, "".join(expected))
def test_table_with_narrow_column(self):
input = [
"| xyz | ? |\n",
"| --- | - |\n",
"| a | p |\n",
"| b | q |\n",
]
output = self.roundtrip(input)
expected = [
"| xyz | ? |\n",
"| --- | --- |\n",
"| a | p |\n",
"| b | q |\n",
]
self.assertEqual(output, "".join(expected))
def test_direct_rendering_of_block_token(self):
input = [
"Line 1\n",
"Line 2\n",
]
paragraph = block_token.Paragraph(input)
with MarkdownRenderer() as renderer:
lines = renderer.render(paragraph)
assert lines == "".join(input)
def test_direct_rendering_of_span_token(self):
input = "some text"
raw_text = span_token.RawText(input)
with MarkdownRenderer() as renderer:
lines = renderer.render(raw_text)
assert lines == input + "\n"
| TestMarkdownRenderer |
python | pennersr__django-allauth | allauth/socialaccount/providers/saml/provider.py | {
"start": 242,
"end": 5074
} | class ____(Provider):
id = "saml"
name = "SAML"
supports_redirect = True
account_class = SAMLAccount
default_attribute_mapping = {
"uid": [
"urn:oasis:names:tc:SAML:attribute:subject-id",
],
"email": [
"urn:oid:0.9.2342.19200300.100.1.3",
"http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress",
],
"email_verified": [
"http://schemas.auth0.com/email_verified",
],
"first_name": [
"http://schemas.xmlsoap.org/ws/2005/05/identity/claims/givenname",
"urn:oid:2.5.4.42",
],
"last_name": [
"urn:oid:2.5.4.4",
],
"username": [
"http://schemas.auth0.com/nickname",
],
}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.name = self.app.name or self.app.client_id or self.name
def get_login_url(self, request, **kwargs):
url = reverse("saml_login", kwargs={"organization_slug": self.app.client_id})
if kwargs:
url = url + "?" + urlencode(kwargs)
return url
def extract_extra_data(self, data):
return data.get_attributes()
def extract_uid(self, data):
"""https://docs.oasis-open.org/security/saml-subject-id-attr/v1.0/csprd01/saml-subject-id-attr-v1.0-csprd01.html
Quotes:
"While the Attributes defined in this profile have as a goal the
explicit replacement of the <saml:NameID> element as a means of subject
identification, it is certainly possible to compose them with existing
NameID usage provided the same subject is being identified. This can
also serve as a migration strategy for existing applications."
"SAML does not define an identifier that meets all of these
requirements well. It does standardize a kind of NameID termed
“persistent” that meets some of them in the particular case of so-called
“pairwise” identification, where an identifier varies by relying
party. It has seen minimal adoption outside of a few contexts, and fails
at the “compact” and “simple to handle” criteria above, on top of the
disadvantages inherent with all NameID usage."
Overall, our strategy is to prefer a uid resulting from explicit
attribute mappings, and only if there is no such uid fallback to the
NameID.
"""
uid = self._extract(data).get("uid")
if uid is None:
uid = data.get_nameid()
return uid
def extract_common_fields(self, data):
ret = self._extract(data)
ret.pop("uid", None)
return ret
def _extract(self, data):
provider_config = self.app.settings
raw_attributes = data.get_attributes()
attributes = {}
attribute_mapping = provider_config.get(
"attribute_mapping", self.default_attribute_mapping
)
# map configured provider attributes
for key, provider_keys in attribute_mapping.items():
if isinstance(provider_keys, str):
provider_keys = [provider_keys]
for provider_key in provider_keys:
attribute_list = raw_attributes.get(provider_key, None)
if attribute_list is not None and len(attribute_list) > 0:
attributes[key] = attribute_list[0]
break
email_verified = attributes.get("email_verified")
if email_verified:
email_verified = email_verified.lower() in ["true", "1", "t", "y", "yes"]
attributes["email_verified"] = email_verified
# If we did not find an email, check if the NameID contains the email.
if not attributes.get("email") and (
data.get_nameid_format()
== "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress"
# Alternatively, if `use_id_for_email` is true, then we always interpret the nameID as email
or provider_config.get("use_nameid_for_email", False)
):
attributes["email"] = data.get_nameid()
return attributes
def redirect(self, request, process, next_url=None, data=None, **kwargs):
from allauth.socialaccount.providers.saml.utils import build_auth
auth = build_auth(request, self)
# If we pass `return_to=None` `auth.login` will use the URL of the
# current view.
redirect = auth.login(return_to="")
self.stash_redirect_state(
request,
process,
next_url,
data,
state_id=auth.get_last_request_id(),
**kwargs,
)
return HttpResponseRedirect(redirect)
provider_classes = [SAMLProvider]
| SAMLProvider |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_table_row_count_to_equal_other_table.py | {
"start": 2533,
"end": 15657
} | class ____(BatchExpectation):
__doc__ = f"""{EXPECTATION_SHORT_DESCRIPTION}
ExpectTableRowCountToEqualOtherTable is a \
Batch Expectation.
BatchExpectations are one of the most common types of Expectation.
They are evaluated for an entire Batch, and answer a semantic question about the Batch itself.
Args:
other_table_name (str): {OTHER_TABLE_NAME_DESCRIPTION}
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
See Also:
[ExpectTableRowCountToBeBetween](https://greatexpectations.io/expectations/expect_table_row_count_to_be_between)
[ExpectTableRowCountToEqual](https://greatexpectations.io/expectations/expect_table_row_count_to_equal)
Supported Data Sources:
[{SUPPORTED_DATA_SOURCES[0]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[1]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[2]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[3]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[4]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[5]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[6]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[7]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[8]}](https://docs.greatexpectations.io/docs/application_integration_support/)
[{SUPPORTED_DATA_SOURCES[9]}](https://docs.greatexpectations.io/docs/application_integration_support/)
Data Quality Issues:
{DATA_QUALITY_ISSUES[0]}
Example Data:
test_table
test test2
0 1.00 2
1 2.30 5
2 4.33 0
test_table_two
test test2
0 1.00 2
1 2.30 5
2 4.33 0
test_table_three
test test2
0 1.00 2
1 2.30 5
Code Examples:
Passing Case:
Input:
ExpectTableRowCountToEqualOtherTable(
other_table_name=test_table_two
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 3
}},
"meta": {{}},
"success": true
}}
Failing Case:
Input:
ExpectTableRowCountToEqualOtherTable(
other_table_name=test_table_three
)
Output:
{{
"exception_info": {{
"raised_exception": false,
"exception_traceback": null,
"exception_message": null
}},
"result": {{
"observed_value": 2
}},
"meta": {{}},
"success": false
}}
""" # noqa: E501 # FIXME CoP
other_table_name: Union[str, SuiteParameterDict] = pydantic.Field(
description=OTHER_TABLE_NAME_DESCRIPTION
)
row_condition: RowConditionType = None
condition_parser: Union[ConditionParser, None] = None
library_metadata: ClassVar[Dict[str, Union[str, list, bool]]] = {
"maturity": "production",
"tags": ["core expectation", "table expectation", "multi-table expectation"],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
_library_metadata = library_metadata
metric_dependencies = ("table.row_count",)
domain_keys: ClassVar[Tuple[str, ...]] = ("row_condition", "condition_parser")
success_keys = ("other_table_name",)
args_keys = ("other_table_name",)
class Config:
title = "Expect table row count to equal other table"
@staticmethod
def schema_extra(
schema: Dict[str, Any], model: Type[ExpectTableRowCountToEqualOtherTable]
) -> None:
BatchExpectation.Config.schema_extra(schema, model)
schema["properties"]["metadata"]["properties"].update(
{
"data_quality_issues": {
"title": "Data Quality Issues",
"type": "array",
"const": DATA_QUALITY_ISSUES,
},
"library_metadata": {
"title": "Library Metadata",
"type": "object",
"const": model._library_metadata,
},
"short_description": {
"title": "Short Description",
"type": "string",
"const": EXPECTATION_SHORT_DESCRIPTION,
},
"supported_data_sources": {
"title": "Supported Data Sources",
"type": "array",
"const": SUPPORTED_DATA_SOURCES,
},
}
)
@override
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
renderer_configuration.add_param(
name="other_table_name", param_type=RendererValueType.STRING
)
renderer_configuration.template_str = (
"Row count must equal the row count of table $other_table_name."
)
return renderer_configuration
@override
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
styling = runtime_configuration.get("styling")
if not configuration:
raise ValueError("configuration is required for prescriptive renderer") # noqa: TRY003 # FIXME CoP
params = substitute_none_for_missing(configuration.kwargs, ["other_table_name"])
template_str = "Row count must equal the row count of table $other_table_name."
return [
RenderedStringTemplateContent(
content_block_type="string_template",
string_template={
"template": template_str,
"params": params,
"styling": styling,
},
)
]
@override
@classmethod
@renderer(renderer_type=LegacyDiagnosticRendererType.OBSERVED_VALUE)
def _diagnostic_observed_value_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
if not result or result.result.get("observed_value"):
return "--"
self_table_row_count = num_to_str(result.result["observed_value"]["self"])
other_table_row_count = num_to_str(result.result["observed_value"]["other"])
return RenderedStringTemplateContent(
content_block_type="string_template",
string_template={
"template": "Row Count: $self_table_row_count<br>Other Table Row Count: $other_table_row_count", # noqa: E501 # FIXME CoP
"params": {
"self_table_row_count": self_table_row_count,
"other_table_row_count": other_table_row_count,
},
"styling": {"classes": ["mb-2"]},
},
)
@override
def get_validation_dependencies(
self,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
) -> ValidationDependencies:
validation_dependencies: ValidationDependencies = super().get_validation_dependencies(
execution_engine, runtime_configuration
)
configuration = self.configuration
kwargs = configuration.kwargs if configuration else {}
other_table_name = kwargs.get("other_table_name")
# At this time, this is the only Expectation that
# computes the same metric over more than one domain
# ValidationDependencies does not allow duplicate metric names
# and the registry is checked to ensure the metric name is registered
# as a side effect of the super().get_validation_dependencies() call above
# As a work-around, after the registry check
# we create a second table.row_count metric for the other table manually
# and rename the metrics defined in ValidationDependencies
table_row_count_metric_config_self: Optional[MetricConfiguration] = (
validation_dependencies.get_metric_configuration(metric_name="table.row_count")
)
assert table_row_count_metric_config_self, "table_row_count_metric should not be None"
copy_table_row_count_metric_config_self = deepcopy(table_row_count_metric_config_self)
copy_table_row_count_metric_config_self.metric_domain_kwargs["table"] = other_table_name
# Remove row_condition from other table - it should only apply to the main table
copy_table_row_count_metric_config_self.metric_domain_kwargs.pop("row_condition", None)
copy_table_row_count_metric_config_self.metric_domain_kwargs.pop("condition_parser", None)
# instantiating a new MetricConfiguration gives us a new id
table_row_count_metric_config_other = MetricConfiguration(
metric_name="table.row_count",
metric_domain_kwargs=copy_table_row_count_metric_config_self.metric_domain_kwargs,
metric_value_kwargs=copy_table_row_count_metric_config_self.metric_value_kwargs,
)
# rename original "table.row_count" metric to "table.row_count.self"
validation_dependencies.set_metric_configuration(
metric_name="table.row_count.self",
metric_configuration=table_row_count_metric_config_self,
)
validation_dependencies.remove_metric_configuration(metric_name="table.row_count")
# add a new metric dependency named "table.row_count.other" with modified metric config
validation_dependencies.set_metric_configuration(
"table.row_count.other", table_row_count_metric_config_other
)
return validation_dependencies
@override
def _validate(
self,
metrics: Dict,
runtime_configuration: Optional[dict] = None,
execution_engine: Optional[ExecutionEngine] = None,
):
table_row_count_self = metrics["table.row_count.self"]
table_row_count_other = metrics["table.row_count.other"]
return {
"success": table_row_count_self == table_row_count_other,
"result": {
"observed_value": {
"self": table_row_count_self,
"other": table_row_count_other,
}
},
}
| ExpectTableRowCountToEqualOtherTable |
python | skorch-dev__skorch | examples/translation/data.py | {
"start": 75,
"end": 4031
} | class ____:
def __init__(self, name):
self.name = name
self.word2index = {}
self.word2count = {}
self.index2word = {0: "SOS", 1: "EOS"}
self.n_words = 2 # Count SOS and EOS
def addSentence(self, sentence):
for word in sentence.split(' '):
self.addWord(word)
def addWord(self, word):
if word not in self.word2index:
self.word2index[word] = self.n_words
self.word2count[word] = 1
self.index2word[self.n_words] = word
self.n_words += 1
else:
self.word2count[word] += 1
######################################################################
# The files are all in Unicode, to simplify we will turn Unicode
# characters to ASCII, make everything lowercase, and trim most
# punctuation.
#
# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
)
# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
s = unicodeToAscii(s.lower().strip())
s = re.sub(r"([.!?])", r" \1", s)
s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
return s
######################################################################
# To read the data file we will split the file into lines, and then split
# lines into pairs. The files are all English → Other Language, so if we
# want to translate from Other Language → English I added the ``reverse``
# flag to reverse the pairs.
#
def readLangs(lang1, lang2, reverse=False):
print("Reading lines...")
# Read the file and split into lines
lines = open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8').\
read().strip().split('\n')
# Split every line into pairs and normalize
pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
# Reverse pairs, make Lang instances
if reverse:
pairs = [list(reversed(p)) for p in pairs]
input_lang = Lang(lang2)
output_lang = Lang(lang1)
else:
input_lang = Lang(lang1)
output_lang = Lang(lang2)
return input_lang, output_lang, pairs
######################################################################
# Since there are a *lot* of example sentences and we want to train
# something quickly, we'll trim the data set to only relatively short and
# simple sentences. Here the maximum length is 10 words (that includes
# ending punctuation) and we're filtering to sentences that translate to
# the form "I am" or "He is" etc. (accounting for apostrophes replaced
# earlier).
#
MAX_LENGTH = 10
eng_prefixes = (
"i am ", "i m ",
"he is", "he s ",
"she is", "she s",
"you are", "you re ",
"we are", "we re ",
"they are", "they re "
)
def filterPair(p):
return len(p[0].split(' ')) < MAX_LENGTH and \
len(p[1].split(' ')) < MAX_LENGTH and \
p[1].startswith(eng_prefixes)
def filterPairs(pairs):
return [pair for pair in pairs if filterPair(pair)]
######################################################################
# The full process for preparing the data is:
#
# - Read text file and split into lines, split lines into pairs
# - Normalize text, filter by length and content
# - Make word lists from sentences in pairs
#
def prepareData(lang1, lang2, reverse=False):
input_lang, output_lang, pairs = readLangs(lang1, lang2, reverse)
print("Read %s sentence pairs" % len(pairs))
pairs = filterPairs(pairs)
print("Trimmed to %s sentence pairs" % len(pairs))
print("Counting words...")
for pair in pairs:
input_lang.addSentence(pair[0])
output_lang.addSentence(pair[1])
print("Counted words:")
print(input_lang.name, input_lang.n_words)
print(output_lang.name, output_lang.n_words)
return input_lang, output_lang, pairs
| Lang |
python | google__jax | jax/experimental/mosaic/gpu/tcgen05.py | {
"start": 39009,
"end": 65372
} | class ____:
address: ir.Value
shape: tuple[int, int]
dtype: ir.Type
layout: TMEMLayout
@property
def packing(self) -> int:
return self.layout.vector_length
def __post_init__(self):
packed_bitwidth = utils.bitwidth(self.dtype) * self.packing
if not packed_bitwidth <= 32:
raise ValueError("Expected packed packed bitwidth to be <= 32, but got: "
f"{packed_bitwidth=}")
@classmethod
def from_alloc(
cls,
tmem_addr_ref: ir.Value,
shape: tuple[int, int],
dtype,
collective: bool | None = None,
layout: TMEMLayout | None = None,
) -> TMEMRef:
i32 = ir.IntegerType.get_signless(32)
if not ir.MemRefType.isinstance(tmem_addr_ref.type):
raise ValueError(f"tmem_addr_ref must be a memref or a pointer, got: {tmem_addr_ref.type}")
addr_ref_ty = ir.MemRefType(tmem_addr_ref.type)
if not utils.is_smem_ref(addr_ref_ty):
raise ValueError(f"tmem_addr_ref must be in shared memory, got: {addr_ref_ty}")
if addr_ref_ty.element_type != i32:
raise ValueError(f"tmem_addr_ref must be an i32 memref, got: {addr_ref_ty}")
if math.prod(addr_ref_ty.shape) != 1:
raise ValueError(f"tmem_addr_ref must contain a single element, got: {addr_ref_ty}")
i0 = arith.ConstantOp.create_index(0)
tmem_addr = memref.load(tmem_addr_ref, [i0] * addr_ref_ty.rank)
if shape[0] < 32:
raise ValueError(f"TMEM refs must have at least 32 rows, got: {shape[0]}")
if layout is None:
if collective is None:
raise ValueError(
"collective argument must be provided when TMEM layout is inferred"
)
layout = _infer_tmem_layout(shape, collective, packing=1)
else:
layout.check_type(shape, utils.bitwidth(dtype))
# TODO: Do we have to do this??
# warp_idx = utils.warp_idx(sync=False)
# tmem_addr = arith.ori(tmem_addr, arith.shli(warp_idx, utils.c(21, i32)))
return cls(tmem_addr, shape, dtype, layout)
def slice(self, *idxs) -> TMEMRef:
i32 = ir.IntegerType.get_signless(32)
base_idx, slice_shape, is_squeezed = utils.parse_indices(idxs, self.shape)
if any(is_squeezed):
raise ValueError("TMEM can only be sliced, not indexed")
if base_idx == [0] * len(base_idx) and slice_shape == list(self.shape):
return self # Trival slice
if self.layout != tmem_default_layout(packing=self.packing):
raise NotImplementedError(
"Slicing only implemented for refs with standard layout, got:"
f" {self.layout}"
)
if base_idx[0] != 0 or slice_shape[0] != TMEM_ROWS:
raise NotImplementedError("TMEM cannot be sliced along rows")
if slice_shape[1] % 8:
raise NotImplementedError(
"TMEM column slice length must be a multiple of 8. "
f"Got {slice_shape[1]}."
)
col_idx = base_idx[1]
if not isinstance(col_idx, ir.Value):
col_idx = arith.constant(i32, col_idx)
if col_idx.type == ir.IndexType.get():
col_idx = arith.index_cast(i32, col_idx)
if self.packing != 1:
col_idx = arith.divui(col_idx, arith.constant(i32, self.packing))
return TMEMRef(
address=arith.addi(self.address, col_idx),
shape=cast(tuple[int, int], tuple(slice_shape)),
layout=self.layout,
dtype=self.dtype,
)
def load(self, layout: fa.TiledLayout | None = None, is_signed: bool | None = None) -> fa.FragmentedArray:
packing = self.packing
if layout is None:
layout = _infer_tmem_load_registers_layout(
self.layout, self.shape[1], packing
)
bitwidth = utils.bitwidth(self.dtype)
has_default_layout = self.layout == tmem_default_layout(packing=packing)
regs_shape = layout.registers_shape(self.shape)
if regs_shape[0] != 1: # We'll need to issue multiple loads below.
raise NotImplementedError("Loading multiple row tiles")
if layout == LAYOUT and self.layout == tmem_default_layout(packing=packing):
registers = _load_32xcols(
self.address, self.shape[1], self.dtype, packing
).T.reshape(regs_shape)
elif layout == self.layout.as_tiled_layout() and packing * bitwidth == 32:
assert len(layout.base_tile_shape) == 2
# We could allow replicated dims in the input, but we'd need to divide the
# split factor computed below by the replication factor of the input.
assert not any(isinstance(d, fa.Replicated) for d in layout.warp_dims)
assert not any(isinstance(d, fa.Replicated) for d in layout.lane_dims)
warp_split_factor = math.prod(
d.times if isinstance(d, fa.Replicated) else 1
for d in layout.remove_dimension(1).warp_dims
)
lane_split_factor = math.prod(
d.times if isinstance(d, fa.Replicated) else 1
for d in layout.remove_dimension(1).lane_dims
)
split_factor = warp_split_factor * lane_split_factor
registers = _load_32xcols_native(
self.address, self.shape[1] // split_factor, self.dtype, packing, packing
).reshape(regs_shape)
# TODO(apaszke): Support the case where we have a long vector length in the
# FA more generally, not just for 2x32b.
# 16-bit types are special, because the store instruction can unpack them.
elif layout == TMEM_NATIVE_LAYOUT and has_default_layout and (
(bitwidth == 16 and packing == 1)
or (bitwidth == 32 and layout.vector_length == 2)
):
registers = _load_32xcols_native(
self.address, self.shape[1], self.dtype, packing, TMEM_NATIVE_LAYOUT.vector_length
).reshape(regs_shape)
elif layout == fa.WGMMA_LAYOUT and self.layout == tmem_half_lane_layout(self.shape[1], packing=packing):
# Load half the columns, since they are folded over lanes.
raw_registers = _load_32xcols(
self.address, self.shape[1] // 2, self.dtype, packing
)
assert raw_registers.shape[0] == 4
registers = np.concatenate([raw_registers[:2], raw_registers[2:]], axis=1)
registers = registers.T.reshape(regs_shape)
elif layout == fa_m64_collective_layout(self.shape[1]) and self.layout == tmem_m64_collective_layout(self.shape[1], packing=packing):
regs_shape = layout.registers_shape(self.shape)
# We take half the columns, because they are split over halves of TMEM.
registers = _load_32xcols(
self.address, self.shape[1] // 2, self.dtype, packing
).reshape(regs_shape)
else:
raise ValueError(
f"Loads from TMEM layout {self.layout} to register layout"
f" {layout} are not supported"
)
return fa.FragmentedArray(
_registers=registers, _layout=layout, _is_signed=is_signed
)
def store(self, value: fa.FragmentedArray):
if not isinstance(value, fa.FragmentedArray):
raise TypeError(f"TMEM stores expect a FragmentedArray, got: {value}")
if value.shape != self.shape:
raise ValueError(
f"Stored array has shape {value.shape}, but TMEM has shape"
f" {self.shape}"
)
if value.mlir_dtype != self.dtype:
raise ValueError(
f"Stored array has dtype {value.mlir_dtype}, but TMEM has dtype"
f" {self.dtype}"
)
if not isinstance(value.layout, fa.TiledLayout):
raise TypeError(f"Stored array has layout {value.layout}, but TMEM stores expect a TiledLayout")
packing = self.packing
has_default_layout = self.layout == tmem_default_layout(packing=packing)
bitwidth = utils.bitwidth(self.dtype)
if value.layout == LAYOUT and has_default_layout:
_store_32xcols(
self.address, value.registers.T.reshape((4, -1)), packing
)
elif value.layout == self.layout.as_tiled_layout() and packing * bitwidth == 32:
_store_32xcols_native(self.address, value.registers.reshape(-1), packing)
# TODO(apaszke): Support the case where we have a long vector length in the
# FA more generally, not just for 2x32b.
# TODO(apaszke): Support a wider range of layouts when dealing with unpacking.
# 16-bit types are special, because the store instruction can unpack them.
elif value.layout == TMEM_NATIVE_LAYOUT and has_default_layout and (
(bitwidth == 16 and packing == 1)
or (bitwidth == 32 and value.layout.vector_length == 2)
):
_store_32xcols_native(self.address, value.registers.reshape(-1), packing)
elif (
value.layout == fa.WGMMA_LAYOUT
and self.layout == tmem_half_lane_layout(self.shape[1], packing=packing)
):
registers = value.registers.T.reshape(2, -1)
registers = np.concatenate(np.split(registers, 2, axis=1), axis=0)
_store_32xcols(self.address, registers, packing)
elif value.layout == fa_m64_collective_layout(
self.shape[1]
) and self.layout == tmem_m64_collective_layout(
self.shape[1], packing=packing
):
_store_32xcols(self.address, value.registers.reshape(4, -1), packing)
else:
raise ValueError(
f"Storing from register layout {value.layout} to TMEM layout"
f" {self.layout} is not supported"
)
def _debug_print(self) -> None:
i32 = ir.IntegerType.get_signless(32)
num_cols = self.layout.cols_in_shape(self.shape, utils.bitwidth(self.dtype))
lane = arith.remui(utils.thread_idx(), arith.constant(i32, utils.WARPGROUP_SIZE))
for c in range(num_cols):
ptr = _tmem_addr_to_ptr(arith.addi(self.address, arith.constant(i32, c)))
val = nvvm.tcgen05_ld(i32, nvvm.Tcgen05LdStShape.SHAPE_32X32B, ptr)
dtype_bitwidth = utils.bitwidth(self.dtype)
full_packing = 32 // dtype_bitwidth
if self.packing == 1:
if dtype_bitwidth < 32:
val = arith.trunci(ir.IntegerType.get_signless(dtype_bitwidth), val)
val = utils.bitcast(val, self.dtype)
elif self.packing == full_packing:
val = utils.bitcast(val, ir.VectorType.get((full_packing,), self.dtype))
else:
raise NotImplementedError(f"Unsupported packing: {self.packing}")
# TODO(apaszke): Make this print logical, not physical location.
utils.debug_print(f"[{{}}, {c}]: {{}}", lane, val, uniform=False)
def _transfer_32xcols(
base_addr: ir.Value,
cols: int,
atom_shape: tuple[int, int],
tmem_packing: int,
reg_packing: int,
) -> Iterator[tuple[ir.Value, int, int, slice]]:
"""Generates a sequence of parameters for a given TMEM read or write.
Arguments:
base_addr: The base address of the TMEM region.
cols: The number of logical columns to transfer.
atom_shape: The logical shape of the tile written by the warp in a single
TMEM transfer.
tmem_packing: Packing degree in TMEM. When packing is 1, but the data is
16-bit, we expect that each transfer actually involves double the number
of physical columns.
reg_packing: The number of elements that fit in a single 32-bit register.
"""
i32 = ir.IntegerType.get_signless(32)
atom_rows, atom_cols = atom_shape
assert cols % atom_cols == 0
total_num = cols // atom_cols
regs_per_instr = atom_shape[0] * atom_shape[1] // (utils.WARP_SIZE * reg_packing)
assert 32 % atom_rows == 0
num_row_steps = 32 // atom_rows
# We artificially lower the instr_num compared to its limits, because higher
# values can lead to register spills..
max_num = 1 << (total_num.bit_length() - 1) # power of 2 <= than total_num
max_num = min(max_num, 32 // regs_per_instr)
for lane_step in range(num_row_steps):
addr_row = arith.addi(base_addr, utils.c((lane_step * atom_rows) << 16, i32))
num_processed = 0
instr_num = max_num
while (remaining := total_num - num_processed) > 0:
while instr_num > remaining:
instr_num //= 2
num_slice = slice(num_processed, num_processed + instr_num)
addr_row_col = arith.addi(
addr_row, utils.c(num_processed * atom_cols // tmem_packing, i32)
)
yield addr_row_col, instr_num, lane_step, num_slice
num_processed += instr_num
assert num_processed == total_num
def _store_32xcols(base_addr, vector_regs, tmem_packing) -> None:
i32 = ir.IntegerType.get_signless(32)
assert vector_regs.ndim == 2 and vector_regs.shape[0] == 4
cols = vector_regs.shape[1] * 8
reg_packing = 64 // utils.bitwidth(vector_regs.flat[0].type)
if reg_packing == 1:
store_shape = "16x256b" # 4 threads * 64 bits per vreg = 256 bits
regs = np.empty((4, vector_regs.shape[1], 2), dtype=object)
c0 = arith.constant(i32, 0)
c1 = arith.constant(i32, 1)
for idx, vreg in np.ndenumerate(vector_regs):
regs[(*idx, 0)] = llvm.extractelement(vreg, c0)
regs[(*idx, 1)] = llvm.extractelement(vreg, c1)
regs = regs.reshape(2, 2, vector_regs.shape[1], 2).swapaxes(1, 2)
# From a single lane perspective a num tile consists of a 2x2, with the
# minor dim traversing columns and major being 8 rows apart.
# See https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-matrix-fragments-shape-16256b
assert regs.shape[-2:] == (2, 2)
assert tmem_packing == 1
unpack = False
elif reg_packing == 2:
store_shape = "16x128b" # 4 threads * 32 bits per vreg = 128 bits
# From a single lane perspective a num tile has 2 registers, 8 rows apart.
# See https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-matrix-fragments-shape-16128b
regs = vector_regs.reshape(2, 2, vector_regs.shape[1]).swapaxes(1, 2)
assert 1 <= tmem_packing <= 2
unpack = tmem_packing == 1
else:
raise NotImplementedError(reg_packing)
it = _transfer_32xcols(base_addr, cols, (16, 8), tmem_packing, reg_packing)
for addr_row_col, instr_num, lane_step, num_slice in it:
regs_slice = regs[lane_step, num_slice].flat
_tmem_store(addr_row_col, store_shape, instr_num, regs_slice, unpack)
def _store_32xcols_native(base_addr, vector_regs, tmem_packing) -> None:
i32 = ir.IntegerType.get_signless(32)
assert vector_regs.ndim == 1
vec_ty = ir.VectorType(vector_regs.flat[0].type)
[vector_length] = vec_ty.shape
elt_bitwidth = utils.bitwidth(vec_ty.element_type)
reg_packing = 32 // elt_bitwidth
store_atom_shape = (32, reg_packing)
# TODO(apaszke): More general register splitting code, not just 2x32b.
if reg_packing == 1:
if vector_length == 2:
# Transform data such that each reg is 32 bits wide.
regs = [None] * (len(vector_regs) * 2)
c0 = arith.constant(i32, 0)
c1 = arith.constant(i32, 1)
for idx, vreg in enumerate(vector_regs):
regs[2 * idx] = llvm.extractelement(vreg, c0)
regs[2 * idx + 1] = llvm.extractelement(vreg, c1)
else:
regs = [utils.bitcast(r, i32) for r in vector_regs]
assert tmem_packing == 1
unpack = False
elif reg_packing == 2:
assert vector_length == 2
# In this case, registers are already packed into 32-bit registers.
regs = [utils.bitcast(r, i32) for r in vector_regs]
if elt_bitwidth == 16:
assert 1 <= tmem_packing <= 2
unpack = tmem_packing == 1
else:
if tmem_packing == 1 and elt_bitwidth != 32:
raise NotImplementedError(
f"Unsupported packing: {tmem_packing} for element type {elt_bitwidth}"
)
assert tmem_packing == 32 // elt_bitwidth
unpack = False
else:
if tmem_packing != reg_packing:
raise NotImplementedError(
f"Only {reg_packing} packing supported for bitwidth {elt_bitwidth},"
f" but got TMEM packing of {tmem_packing}"
)
assert utils.bitwidth(vec_ty) == 32
regs = [utils.bitcast(r, i32) for r in vector_regs]
unpack = False
cols = len(regs) * reg_packing
it = _transfer_32xcols(base_addr, cols, store_atom_shape, tmem_packing, reg_packing)
for addr_row_col, instr_num, lane_step, num_slice in it:
assert lane_step == 0
regs_slice = regs[num_slice]
_tmem_store(addr_row_col, "32x32b", instr_num, regs_slice, unpack)
def _load_32xcols(base_addr, cols, dtype, tmem_packing) -> np.ndarray:
i32 = ir.IntegerType.get_signless(32)
vec_ty = ir.VectorType.get((2,), dtype)
reg_packing = 32 // utils.bitwidth(dtype)
if reg_packing == 1:
load_shape = "16x256b" # 4 threads * 64 bits per vreg = 256 bits
assert tmem_packing == 1
pack = False
elif reg_packing == 2:
load_shape = "16x128b" # 4 threads * 32 bits per vreg = 128 bits
assert 1 <= tmem_packing <= 2
pack = tmem_packing == 1
else:
raise NotImplementedError(reg_packing)
vector_regs = np.ndarray((4, cols // 8), dtype=object)
it = _transfer_32xcols(base_addr, cols, (16, 8), tmem_packing, reg_packing)
c0 = arith.constant(i32, 0)
c1 = arith.constant(i32, 1)
for addr_row_col, instr_num, lane_step, num_slice in it:
regs = _tmem_load(addr_row_col, load_shape, instr_num, pack)
row_slice = slice(lane_step * 2, (lane_step + 1) * 2)
# This aliases the original array, so updates will be reflected there.
vector_regs_update = vector_regs[row_slice, num_slice]
assert vector_regs_update.shape == (2, instr_num), (vector_regs_update.shape, instr_num)
if reg_packing == 1:
regs = [llvm.bitcast(dtype, r) for r in regs]
# From a single lane perspective a num tile consists of a 2x2, with the
# minor dim traversing columns and major being 8 rows apart.
# See https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-matrix-fragments-shape-16256b
regs = np.asarray(regs, dtype=object).reshape(instr_num, 2, 2).swapaxes(0, 1)
undef = llvm.mlir_undef(vec_ty)
assert regs.shape == (*vector_regs_update.shape, 2)
for idx in np.ndindex(vector_regs_update.shape):
high_undef = llvm.insertelement(undef, regs[(*idx, 0)], c0)
vreg = llvm.insertelement(high_undef, regs[(*idx, 1)], c1)
vector_regs_update[idx] = vreg
else:
assert reg_packing == 2
regs = [llvm.bitcast(vec_ty, r) for r in regs]
# From a single lane perspective a num tile has 2 registers, 8 rows apart.
# See https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-matrix-fragments-shape-16128b
regs = np.asarray(regs, dtype=object).reshape(instr_num, 2).swapaxes(0, 1)
vector_regs_update[...] = regs
return vector_regs
def _load_32xcols_native(base_addr, cols, dtype, tmem_packing, vector_length) -> np.ndarray:
i32 = ir.IntegerType.get_signless(32)
vec_ty = ir.VectorType.get((vector_length,), dtype)
reg_packing = 32 // utils.bitwidth(dtype)
assert vector_length % reg_packing == 0
load_shape = "32x32b"
load_atom_shape = (32, reg_packing)
if reg_packing == 2:
assert 1 <= tmem_packing <= 2
pack = tmem_packing == 1
else:
if tmem_packing != reg_packing:
raise NotImplementedError(
f"Only {reg_packing} supported for element type {dtype}, but got"
f" TMEM packing of {tmem_packing}"
)
pack = False
it = _transfer_32xcols(base_addr, cols, load_atom_shape, tmem_packing, reg_packing)
c0 = arith.constant(i32, 0)
c1 = arith.constant(i32, 1)
regs = [None] * (cols // reg_packing)
for addr_row_col, instr_num, lane_step, num_slice in it:
assert lane_step == 0, lane_step
instr_regs = _tmem_load(addr_row_col, load_shape, instr_num, pack)
if reg_packing == 1 and vector_length == 2:
regs[num_slice] = [llvm.bitcast(dtype, r) for r in instr_regs]
else:
regs[num_slice] = [utils.bitcast(r, vec_ty) for r in instr_regs]
if reg_packing == 1 and vector_length == 2:
vector_regs = np.ndarray((cols // 2,), dtype=object)
undef = llvm.mlir_undef(vec_ty)
for idx in range(vector_regs.size):
high_undef = llvm.insertelement(undef, regs[2 * idx], c0)
vreg = llvm.insertelement(high_undef, regs[2 * idx + 1], c1)
vector_regs[idx] = vreg
else:
assert vector_length == reg_packing
vector_regs = np.asarray(regs, dtype=object)
return vector_regs
def commit_tmem() -> None:
nvvm.tcgen05_wait(nvvm.Tcgen05WaitKind.STORE)
utils.warpgroup_barrier()
def wait_load_tmem() -> None:
nvvm.tcgen05_wait(nvvm.Tcgen05WaitKind.LOAD)
utils.warpgroup_barrier()
def async_copy_scales_smem_to_tmem(
smem_ref: ir.Value, tmem_ref: TMEMRef, collective: bool = False
) -> None:
"""Asynchronously copies the scale data from SMEM to TMEM.
The result of the copy can be awaited by calling ``commit_arrive`` and waiting
on the chosen ``Barrier``. However, if TMEM reference is to be consumed by a
MMA issued in the same thread, no additional synchronization is needed.
At the moment the function requires ``smem_ref`` to be contiguous and have a
shape of ``(MN // 128, K // 128, 32, 16)`` for 8-bit scales (here MN stands
for the size of the non-contracting dimension which is M or N), matching the
scale layout for .scale_vec::1X. See https://docs.nvidia.com/cuda/parallel-thread-execution/#tcgen05-mma-scale-factor-a-layout-1x
for more details. Note that we always put the non-contracting dimension first.
If you have a (MN, K // 32) array of scales in JAX (where MN and K are
divisible by 128), you can prepare it for use in the kernel this way::
scales.reshape(mn // 128, 4, 32, k // 4, 4)
.transpose(0, 3, 2, 1, 4)
.reshape(mn // 128, k // 4, 32, 16)
The TMEM ref is expected to have the logical shape of the scales
``(MN, K // 32)``, and the layout created by ``scales_layout()``.
"""
i32 = ir.IntegerType.get_signless(32)
smem_ty = ir.MemRefType(smem_ref.type)
if (dtype := smem_ty.element_type) != tmem_ref.dtype:
raise ValueError(f"Incompatible dtypes: SMEM has {dtype}, TMEM has {tmem_ref.dtype}")
if dtype not in {ir.Float8E8M0FNUType.get(), ir.Float8E4M3FNType.get()}:
raise NotImplementedError(f"Unsupported dtype: {dtype}, only f8e8m0fnu and f8e4m3fn are supported")
if tmem_ref.shape[0] % TMEM_ROWS:
raise ValueError(f"TMEM reference must have a multiple of {TMEM_ROWS} rows, but got {tmem_ref.shape[0]}")
if tmem_ref.shape[1] % 4:
raise ValueError(f"TMEM reference must have a multiple of 4 columns, but got {tmem_ref.shape[1]}")
if tmem_ref.layout != scales_layout():
raise ValueError(f"TMEM layout {tmem_ref.layout} is not supported")
smem_shape = tuple(smem_ty.shape)
expected_smem_shape = (tmem_ref.shape[0] // TMEM_ROWS, tmem_ref.shape[1] // 4, 32, 16)
if smem_shape != expected_smem_shape:
raise NotImplementedError(
f"SMEM has {smem_shape}, but expected {expected_smem_shape} for TMEM"
f" ref shape {tmem_ref.shape}"
)
strides, _ = smem_ty.get_strides_and_offset()
# TODO(apaszke): This should only matter for the two minor dims.
if strides != utils.get_contiguous_strides(smem_shape):
raise ValueError("Only copies from contiguous SMEM references are supported")
mn_tile_stride, k_tile_stride = strides[:2]
# One tile of scales has 128 bytes.
if mn_tile_stride % 128 or k_tile_stride % 128:
raise ValueError("Scale tile strides must be a multiple of 128")
mn_tile_stride_i32 = mn_tile_stride // 4
k_tile_stride_i32 = k_tile_stride // 4
smem_base_ptr = utils.memref_ptr(smem_ref, 3)
# TODO(apaszke): Need to figure out the TMEM layout otherwise and MMA doesn't
# support it anyway.
if smem_shape[0] > 2:
raise NotImplementedError("Only M/N up to 256 supported")
for mn_tile, k_tile in np.ndindex(smem_shape[:2]):
load_ptr = utils.getelementptr(
smem_base_ptr,
[mn_tile * mn_tile_stride_i32 + k_tile * k_tile_stride_i32],
i32,
)
# NOTE: The tiles are MN-minor in TMEM, but MN-major (logically) in SMEM.
store_addr = arith.addi(
tmem_ref.address,
arith.constant(i32, 4 * smem_shape[0] * k_tile + 4 * mn_tile),
)
# The "core matrix" here is the same as in MMA: 8x(16 bytes).
desc = mma_utils.encode_descriptor(load_ptr, 0, 8 * 16, swizzle=None)
nvvm.tcgen05_cp(
nvvm.Tcgen05CpShape.SHAPE_32x128b,
_tmem_addr_to_ptr(store_addr),
desc,
multicast=nvvm.Tcgen05CpMulticast.WARPX4,
group=nvvm.CTAGroupKind.CTA_2 if collective else nvvm.CTAGroupKind.CTA_1
)
def async_copy_sparse_metadata_smem_to_tmem(
smem_ref: ir.Value, tmem_ref: TMEMRef, collective: bool = False
) -> None:
i8 = ir.IntegerType.get_signless(8)
i32 = ir.IntegerType.get_signless(32)
smem_ty = ir.MemRefType(smem_ref.type)
if (dtype := smem_ty.element_type) != tmem_ref.dtype:
raise ValueError(f"Incompatible dtypes: SMEM has {dtype}, TMEM has {tmem_ref.dtype}")
if dtype != ir.IntegerType.get_signless(2):
raise NotImplementedError(f"Unsupported dtype: {dtype}, only i2 supported")
if tmem_ref.shape[0] % 128:
raise ValueError(f"TMEM reference must have a multiple of 128 rows, but got {tmem_ref.shape[0]}")
if tmem_ref.shape[1] % 64:
raise ValueError(f"TMEM reference must have a multiple of 64 colums, but got {tmem_ref.shape[1]}")
if tmem_ref.layout != sparse_meta_layout():
raise ValueError(f"TMEM layout {tmem_ref.layout} is not supported")
smem_shape = tuple(smem_ty.shape)
expected_smem_shape = (tmem_ref.shape[0] // 128, tmem_ref.shape[1] // 64, 128, 64)
if smem_shape != expected_smem_shape:
raise NotImplementedError(
f"SMEM has {smem_shape}, but expected {expected_smem_shape} for TMEM"
f" ref shape {tmem_ref.shape}"
)
strides, _ = smem_ty.get_strides_and_offset()
if strides != utils.get_contiguous_strides(smem_shape):
raise ValueError("Only copies from contiguous SMEM references are supported")
if expected_smem_shape[0] != 1:
raise NotImplementedError("Only M=128 supported")
k_tile_stride = strides[1]
if k_tile_stride % 16:
raise ValueError("K tile stride must be a multiple of 16")
k_tile_byte_stride = k_tile_stride // 4
smem_base_ptr = utils.memref_ptr(smem_ref, 3)
for k_tile in range(expected_smem_shape[1]):
load_ptr = utils.getelementptr(
smem_base_ptr, [k_tile * k_tile_byte_stride], i8
)
store_ptr = arith.addi(tmem_ref.address, arith.constant(i32, 4 * k_tile))
# The "core matrix" here is the same as in MMA: 8x(16 bytes).
desc = mma_utils.encode_descriptor(load_ptr, 0, 8 * 16, swizzle=None)
ptr = _tmem_addr_to_ptr(store_ptr)
nvvm.tcgen05_cp(
nvvm.Tcgen05CpShape.SHAPE_128x128b, ptr, desc,
group=nvvm.CTAGroupKind.CTA_2 if collective else nvvm.CTAGroupKind.CTA_1
)
| TMEMRef |
python | django__django | tests/many_to_one/models.py | {
"start": 127,
"end": 371
} | class ____(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
| Reporter |
python | numba__numba | numba/core/typing/collections.py | {
"start": 342,
"end": 596
} | class ____(AbstractTemplate):
key = operator.contains
def generic(self, args, kws):
cont, item = args
if isinstance(cont, types.Container):
return signature(types.boolean, cont, cont.dtype)
@infer_global(len)
| InContainer |
python | scipy__scipy | scipy/differentiate/tests/test_differentiate.py | {
"start": 18221,
"end": 19434
} | class ____:
def test_iv(self, xp):
jh_func = self.jh_func.__func__
# Test input validation
message = "Argument `x` must be at least 1-D."
with pytest.raises(ValueError, match=message):
jh_func(xp.sin, 1, tolerances=dict(atol=-1))
# Confirm that other parameters are being passed to `derivative`,
# which raises an appropriate error message.
x = xp.ones(3)
func = optimize.rosen
message = 'Tolerances and step parameters must be non-negative scalars.'
with pytest.raises(ValueError, match=message):
jh_func(func, x, tolerances=dict(atol=-1))
with pytest.raises(ValueError, match=message):
jh_func(func, x, tolerances=dict(rtol=-1))
with pytest.raises(ValueError, match=message):
jh_func(func, x, step_factor=-1)
message = '`order` must be a positive integer.'
with pytest.raises(ValueError, match=message):
jh_func(func, x, order=-1)
message = '`maxiter` must be a positive integer.'
with pytest.raises(ValueError, match=message):
jh_func(func, x, maxiter=-1)
@make_xp_test_case(jacobian)
| JacobianHessianTest |
python | palantir__python-language-server | test/plugins/test_rope_rename.py | {
"start": 189,
"end": 1277
} | class ____(Test1):
pass
"""
@pytest.fixture
def tmp_workspace(temp_workspace_factory):
return temp_workspace_factory({DOC_NAME: DOC})
def test_rope_rename(tmp_workspace, config): # pylint: disable=redefined-outer-name
position = {"line": 0, "character": 6}
DOC_URI = uris.from_fs_path(os.path.join(tmp_workspace.root_path, DOC_NAME))
doc = Document(DOC_URI, tmp_workspace)
result = pyls_rename(config, tmp_workspace, doc, position, "ShouldBeRenamed")
assert len(result.keys()) == 1
changes = result.get("documentChanges")
assert len(changes) == 1
changes = changes[0]
# Note that this test differs from test_jedi_rename, because rope does not
# seem to modify files that haven't been opened with textDocument/didOpen.
assert changes.get("edits") == [
{
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 5, "character": 0},
},
"newText": "class ShouldBeRenamed():\n pass\n\nclass Test2(ShouldBeRenamed):\n pass\n",
}
]
| Test2 |
python | wandb__wandb | tests/unit_tests/test_step_upload.py | {
"start": 2549,
"end": 3935
} | class ____(Mock):
def __init__(self, *args, **kwargs):
kwargs = {
**dict(
upload_urls=Mock(wraps=mock_upload_urls),
upload_file_retry=Mock(wraps=self._mock_upload),
),
**kwargs,
}
super().__init__(
*args,
**kwargs,
)
self.mock_upload_file_waiters: MutableSequence[Callable[[], None]] = []
self.mock_upload_started = threading.Condition()
def wait_for_upload(self, timeout: float) -> Optional[Callable[[], None]]:
with self.mock_upload_started:
if not self.mock_upload_started.wait_for(
lambda: len(self.mock_upload_file_waiters) > 0,
timeout=timeout,
):
return None
return self.mock_upload_file_waiters.pop()
def _mock_upload(self, *args, **kwargs):
ev = threading.Event()
with self.mock_upload_started:
self.mock_upload_file_waiters.append(ev.set)
self.mock_upload_started.notify_all()
ev.wait()
def run_step_upload(
commands: Iterable[Event],
**step_upload_kwargs: Any,
):
q = queue.Queue()
for cmd in commands:
q.put(cmd)
step_upload = make_step_upload(event_queue=q, **step_upload_kwargs)
step_upload.start()
finish_and_wait(q)
| UploadBlockingMockApi |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/types/dagster_type.py | {
"start": 37213,
"end": 37530
} | class ____:
def __getitem__(self, inner_type: t.Union[t.Type, DagsterType]) -> OptionalType:
inner_type = resolve_dagster_type(
check.not_none_param(inner_type, "inner_type")
)
return OptionalType(inner_type)
Optional: DagsterOptionalApi = DagsterOptionalApi()
| DagsterOptionalApi |
python | scipy__scipy | scipy/stats/_distribution_infrastructure.py | {
"start": 10480,
"end": 12230
} | class ____(ABC):
r""" Representation of the applicable domain of a parameter or variable.
A `_Domain` object is responsible for storing information about the
domain of a parameter or variable, determining whether a value is within
the domain (`contains`), and providing a text/mathematical representation
of itself (`__str__`). Because the domain of a parameter/variable can have
a complicated relationship with other parameters and variables of a
distribution, `_Domain` itself does not try to represent all possibilities;
in fact, it has no implementation and is meant for subclassing.
Attributes
----------
symbols : dict
A map from special numerical values to symbols for use in `__str__`
Methods
-------
contains(x)
Determine whether the argument is contained within the domain (True)
or not (False). Used for input validation.
get_numerical_endpoints()
Gets the numerical values of the domain endpoints, which may have been
defined symbolically or through a callable.
__str__()
Returns a text representation of the domain (e.g. ``[0, b)``).
Used for generating documentation.
"""
symbols = {np.inf: r"\infty", -np.inf: r"-\infty", np.pi: r"\pi", -np.pi: r"-\pi"}
# generic type compatibility with scipy-stubs
__class_getitem__ = classmethod(GenericAlias)
@abstractmethod
def contains(self, x):
raise NotImplementedError()
@abstractmethod
def draw(self, n):
raise NotImplementedError()
@abstractmethod
def get_numerical_endpoints(self, x):
raise NotImplementedError()
@abstractmethod
def __str__(self):
raise NotImplementedError()
| _Domain |
python | getsentry__sentry | src/sentry/users/api/serializers/user.py | {
"start": 9583,
"end": 12223
} | class ____(UserSerializer):
"""
Used in situations like when a member admin (on behalf of an organization) looks up memberships.
"""
def get_attrs(
self, item_list: Sequence[User], user: User | RpcUser | AnonymousUser, **kwargs: Any
) -> MutableMapping[User, Any]:
attrs = super().get_attrs(item_list, user)
# ignore things that aren't user controlled (like recovery codes)
authenticators = manytoone_to_dict(
Authenticator.objects.filter(user__in=item_list),
"user_id",
lambda x: not x.interface.is_backup_interface,
)
memberships = OrganizationMemberMapping.objects.filter(
user_id__in={u.id for u in item_list}
).values_list("user_id", "organization_id", named=True)
active_organizations = OrganizationMapping.objects.filter(
organization_id__in={m.organization_id for m in memberships},
status=OrganizationStatus.ACTIVE,
).values_list("organization_id", flat=True)
active_memberships: DefaultDict[int, int] = defaultdict(int)
for membership in memberships:
if membership.organization_id in active_organizations:
active_memberships[membership.user_id] += 1
for item in item_list:
attrs[item]["authenticators"] = authenticators[item.id]
# org can reset 2FA if the user is only in one org
attrs[item]["canReset2fa"] = active_memberships[item.id] == 1
return attrs
def serialize(
self,
obj: User,
attrs: Mapping[str, Any],
user: User | AnonymousUser | RpcUser,
**kwargs: Any,
) -> UserSerializerResponse:
d = super().serialize(obj, attrs, user)
# TODO(schew2381): Remove mention of superuser below once the staff feature flag is removed
# XXX(dcramer): we don't check for active superuser/staff here as we simply
# want to tell the UI that we're an authenticated superuser/staff, and
# for requests that require an *active* session, they should prompt
# on-demand. This ensures things like links to the Sentry admin can
# still easily be rendered.
d["authenticators"] = [
{
"id": str(a.id),
"type": a.interface.interface_id,
"name": str(a.interface.name),
"dateCreated": a.created_at,
"dateUsed": a.last_used_at,
}
for a in attrs["authenticators"]
]
d["canReset2fa"] = attrs["canReset2fa"]
return d
| DetailedUserSerializer |
python | pypa__setuptools | setuptools/_vendor/packaging/markers.py | {
"start": 1141,
"end": 7721
} | class ____(TypedDict):
implementation_name: str
"""The implementation's identifier, e.g. ``'cpython'``."""
implementation_version: str
"""
The implementation's version, e.g. ``'3.13.0a2'`` for CPython 3.13.0a2, or
``'7.3.13'`` for PyPy3.10 v7.3.13.
"""
os_name: str
"""
The value of :py:data:`os.name`. The name of the operating system dependent module
imported, e.g. ``'posix'``.
"""
platform_machine: str
"""
Returns the machine type, e.g. ``'i386'``.
An empty string if the value cannot be determined.
"""
platform_release: str
"""
The system's release, e.g. ``'2.2.0'`` or ``'NT'``.
An empty string if the value cannot be determined.
"""
platform_system: str
"""
The system/OS name, e.g. ``'Linux'``, ``'Windows'`` or ``'Java'``.
An empty string if the value cannot be determined.
"""
platform_version: str
"""
The system's release version, e.g. ``'#3 on degas'``.
An empty string if the value cannot be determined.
"""
python_full_version: str
"""
The Python version as string ``'major.minor.patchlevel'``.
Note that unlike the Python :py:data:`sys.version`, this value will always include
the patchlevel (it defaults to 0).
"""
platform_python_implementation: str
"""
A string identifying the Python implementation, e.g. ``'CPython'``.
"""
python_version: str
"""The Python version as string ``'major.minor'``."""
sys_platform: str
"""
This string contains a platform identifier that can be used to append
platform-specific components to :py:data:`sys.path`, for instance.
For Unix systems, except on Linux and AIX, this is the lowercased OS name as
returned by ``uname -s`` with the first part of the version as returned by
``uname -r`` appended, e.g. ``'sunos5'`` or ``'freebsd8'``, at the time when Python
was built.
"""
def _normalize_extra_values(results: Any) -> Any:
"""
Normalize extra values.
"""
if isinstance(results[0], tuple):
lhs, op, rhs = results[0]
if isinstance(lhs, Variable) and lhs.value == "extra":
normalized_extra = canonicalize_name(rhs.value)
rhs = Value(normalized_extra)
elif isinstance(rhs, Variable) and rhs.value == "extra":
normalized_extra = canonicalize_name(lhs.value)
lhs = Value(normalized_extra)
results[0] = lhs, op, rhs
return results
def _format_marker(
marker: list[str] | MarkerAtom | str, first: bool | None = True
) -> str:
assert isinstance(marker, (list, tuple, str))
# Sometimes we have a structure like [[...]] which is a single item list
# where the single item is itself it's own list. In that case we want skip
# the rest of this function so that we don't get extraneous () on the
# outside.
if (
isinstance(marker, list)
and len(marker) == 1
and isinstance(marker[0], (list, tuple))
):
return _format_marker(marker[0])
if isinstance(marker, list):
inner = (_format_marker(m, first=False) for m in marker)
if first:
return " ".join(inner)
else:
return "(" + " ".join(inner) + ")"
elif isinstance(marker, tuple):
return " ".join([m.serialize() for m in marker])
else:
return marker
_operators: dict[str, Operator] = {
"in": lambda lhs, rhs: lhs in rhs,
"not in": lambda lhs, rhs: lhs not in rhs,
"<": operator.lt,
"<=": operator.le,
"==": operator.eq,
"!=": operator.ne,
">=": operator.ge,
">": operator.gt,
}
def _eval_op(lhs: str, op: Op, rhs: str) -> bool:
try:
spec = Specifier("".join([op.serialize(), rhs]))
except InvalidSpecifier:
pass
else:
return spec.contains(lhs, prereleases=True)
oper: Operator | None = _operators.get(op.serialize())
if oper is None:
raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.")
return oper(lhs, rhs)
def _normalize(*values: str, key: str) -> tuple[str, ...]:
# PEP 685 – Comparison of extra names for optional distribution dependencies
# https://peps.python.org/pep-0685/
# > When comparing extra names, tools MUST normalize the names being
# > compared using the semantics outlined in PEP 503 for names
if key == "extra":
return tuple(canonicalize_name(v) for v in values)
# other environment markers don't have such standards
return values
def _evaluate_markers(markers: MarkerList, environment: dict[str, str]) -> bool:
groups: list[list[bool]] = [[]]
for marker in markers:
assert isinstance(marker, (list, tuple, str))
if isinstance(marker, list):
groups[-1].append(_evaluate_markers(marker, environment))
elif isinstance(marker, tuple):
lhs, op, rhs = marker
if isinstance(lhs, Variable):
environment_key = lhs.value
lhs_value = environment[environment_key]
rhs_value = rhs.value
else:
lhs_value = lhs.value
environment_key = rhs.value
rhs_value = environment[environment_key]
lhs_value, rhs_value = _normalize(lhs_value, rhs_value, key=environment_key)
groups[-1].append(_eval_op(lhs_value, op, rhs_value))
else:
assert marker in ["and", "or"]
if marker == "or":
groups.append([])
return any(all(item) for item in groups)
def format_full_version(info: sys._version_info) -> str:
version = f"{info.major}.{info.minor}.{info.micro}"
kind = info.releaselevel
if kind != "final":
version += kind[0] + str(info.serial)
return version
def default_environment() -> Environment:
iver = format_full_version(sys.implementation.version)
implementation_name = sys.implementation.name
return {
"implementation_name": implementation_name,
"implementation_version": iver,
"os_name": os.name,
"platform_machine": platform.machine(),
"platform_release": platform.release(),
"platform_system": platform.system(),
"platform_version": platform.version(),
"python_full_version": platform.python_version(),
"platform_python_implementation": platform.python_implementation(),
"python_version": ".".join(platform.python_version_tuple()[:2]),
"sys_platform": sys.platform,
}
| Environment |
python | bokeh__bokeh | src/bokeh/colors/groups.py | {
"start": 7767,
"end": 8641
} | class ____(ColorGroup):
''' CSS "White" Color Group as defined by https://www.w3schools.com/colors/colors_groups.asp
.. bokeh-color:: white
.. bokeh-color:: snow
.. bokeh-color:: honeydew
.. bokeh-color:: mintcream
.. bokeh-color:: azure
.. bokeh-color:: aliceblue
.. bokeh-color:: ghostwhite
.. bokeh-color:: whitesmoke
.. bokeh-color:: seashell
.. bokeh-color:: beige
.. bokeh-color:: oldlace
.. bokeh-color:: floralwhite
.. bokeh-color:: ivory
.. bokeh-color:: antiquewhite
.. bokeh-color:: linen
.. bokeh-color:: lavenderblush
.. bokeh-color:: mistyrose
'''
_colors = ('White', 'Snow', 'Honeydew', 'MintCream', 'Azure', 'AliceBlue', 'GhostWhite', 'WhiteSmoke', 'Seashell',
'Beige', 'OldLace', 'FloralWhite', 'Ivory', 'AntiqueWhite', 'Linen', 'LavenderBlush', 'MistyRose')
| white |
python | python-poetry__poetry | src/poetry/repositories/link_sources/base.py | {
"start": 745,
"end": 3869
} | class ____:
VERSION_REGEX = re.compile(r"(?i)([a-z0-9_\-.]+?)-(?=\d)([a-z0-9_.!+-]+)")
CLEAN_REGEX = re.compile(r"[^a-z0-9$&+,/:;=?@.#%_\\|-]", re.I)
SUPPORTED_FORMATS: ClassVar[list[str]] = [
".tar.gz",
".whl",
".zip",
".tar.bz2",
".tar.xz",
".tar.Z",
".tar",
]
def __init__(self, url: str) -> None:
self._url = url
@property
def url(self) -> str:
return self._url
def versions(self, name: NormalizedName) -> Iterator[Version]:
yield from self._link_cache[name]
@property
def packages(self) -> Iterator[Package]:
for link in self.links:
pkg = self.link_package_data(link)
if pkg:
yield pkg
@property
def links(self) -> Iterator[Link]:
for links_per_version in self._link_cache.values():
for links in links_per_version.values():
yield from links
@classmethod
def link_package_data(cls, link: Link) -> Package | None:
name: str | None = None
version_string: str | None = None
version: Version | None = None
m = wheel_file_re.match(link.filename) or sdist_file_re.match(link.filename)
if m:
name = m.group("name")
version_string = m.group("ver")
else:
info, _ext = link.splitext()
match = cls.VERSION_REGEX.match(info)
if match:
name = match.group(1)
version_string = match.group(2)
if version_string:
try:
version = Version.parse(version_string)
except InvalidVersionError:
logger.debug(
"Skipping url (%s) due to invalid version (%s)", link.url, version
)
return None
pkg = None
if name and version:
pkg = Package(name, version, source_url=link.url)
return pkg
def links_for_version(
self, name: NormalizedName, version: Version
) -> Iterator[Link]:
yield from self._link_cache[name][version]
def clean_link(self, url: str) -> str:
"""Makes sure a link is fully encoded. That is, if a ' ' shows up in
the link, it will be rewritten to %20 (while not over-quoting
% or other characters)."""
return self.CLEAN_REGEX.sub(lambda match: f"%{ord(match.group(0)):02x}", url)
def yanked(self, name: NormalizedName, version: Version) -> str | bool:
reasons = set()
for link in self.links_for_version(name, version):
if link.yanked:
if link.yanked_reason:
reasons.add(link.yanked_reason)
else:
# release is not yanked if at least one file is not yanked
return False
# if all files are yanked (or there are no files) the release is yanked
if reasons:
return "\n".join(sorted(reasons))
return True
@cached_property
def _link_cache(self) -> LinkCache:
raise NotImplementedError()
| LinkSource |
python | doocs__leetcode | solution/2200-2299/2213.Longest Substring of One Repeating Character/Solution.py | {
"start": 63,
"end": 243
} | class ____:
__slots__ = "l", "r", "lmx", "rmx", "mx"
def __init__(self, l: int, r: int):
self.l = l
self.r = r
self.lmx = self.rmx = self.mx = 1
| Node |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.