language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | great-expectations__great_expectations | great_expectations/core/expectation_diagnostics/supporting_types.py | {
"start": 645,
"end": 2441
} | class ____(SerializableDictDot):
"""An augmented version of the Expectation.library_metadata object, used within ExpectationDiagnostics""" # noqa: E501 # FIXME CoP
maturity: Maturity
tags: List[str]
contributors: List[str]
requirements: List[str]
library_metadata_passed_checks: bool
has_full_test_suite: bool
manually_reviewed_code: bool
problems: List[str] = field(default_factory=list)
legacy_maturity_level_substitutions = {
"experimental": "EXPERIMENTAL",
"beta": "BETA",
"production": "PRODUCTION",
}
@classmethod
def from_legacy_dict(cls, dict):
"""This method is a temporary adapter to allow typing of legacy library_metadata objects, without needing to immediately clean up every object.""" # noqa: E501 # FIXME CoP
temp_dict = {}
for k, v in dict.items():
# Ignore parameters that don't match the type definition
if k in inspect.signature(cls).parameters:
temp_dict[k] = v
else:
logging.warning(
f"WARNING: Got extra parameter: {k} while instantiating AugmentedLibraryMetadata." # noqa: E501 # FIXME CoP
"This parameter will be ignored."
"You probably need to clean up a library_metadata object."
)
# If necessary, substitute strings for precise Enum values.
if (
"maturity" in temp_dict
and temp_dict["maturity"] in cls.legacy_maturity_level_substitutions
):
temp_dict["maturity"] = cls.legacy_maturity_level_substitutions[
temp_dict["maturity"]
]
return cls(**temp_dict)
@dataclass
| AugmentedLibraryMetadata |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/tests/test_gradle.py | {
"start": 306,
"end": 1239
} | class ____:
class DummyStep(gradle.GradleTask):
gradle_task_name = "dummyTask"
title = "Dummy Step"
async def _run(self) -> steps.StepResult:
return steps.StepResult(step=self, status=steps.StepStatus.SUCCESS)
@pytest.fixture
def test_context(self, mocker, dagger_client):
return mocker.Mock(
secrets_to_mask=[],
dagger_client=dagger_client,
connector=pipelines.helpers.connectors.modifed.ConnectorWithModifiedFiles(
"source-postgres", frozenset({Path("airbyte-integrations/connectors/source-postgres/metadata.yaml")})
),
)
def test_params(self, test_context):
step = self.DummyStep(test_context)
step.extra_params = {"-x": ["dummyTask", "dummyTask2"]}
assert set(step.params_as_cli_options) == {
"-x=dummyTask",
"-x=dummyTask2",
}
| TestGradleTask |
python | doocs__leetcode | solution/2300-2399/2303.Calculate Amount Paid in Taxes/Solution.py | {
"start": 0,
"end": 269
} | class ____:
def calculateTax(self, brackets: List[List[int]], income: int) -> float:
ans = prev = 0
for upper, percent in brackets:
ans += max(0, min(income, upper) - prev) * percent
prev = upper
return ans / 100
| Solution |
python | rapidsai__cudf | python/cudf/cudf/core/groupby/groupby.py | {
"start": 11028,
"end": 101846
} | class ____(Serializable, Reducible, Scannable):
obj: Series | DataFrame
_VALID_REDUCTIONS = {
"sum",
"prod",
"idxmin",
"idxmax",
"min",
"max",
"mean",
"median",
"nunique",
"first",
"last",
"var",
"std",
}
_VALID_SCANS = {
"cumsum",
"cumprod",
"cummin",
"cummax",
}
# Necessary because the function names don't directly map to the docs.
_SCAN_DOCSTRINGS = {
"cumsum": {"op_name": "Cumulative sum"},
"cumprod": {"op_name": "Cumulative product"},
"cummin": {"op_name": "Cumulative min"},
"cummax": {"op_name": "Cumulative max"},
}
_MAX_GROUPS_BEFORE_WARN = 100
def __init__(
self,
obj,
by=None,
level=None,
sort=False,
as_index=True,
dropna=True,
group_keys=True,
):
"""
Group a DataFrame or Series by a set of columns.
Parameters
----------
by : optional
Specifies the grouping columns. Can be any of the following:
- A Python function called on each value of the object's index
- A dict or Series that maps index labels to group names
- A Index object
- A str indicating a column name
- An array of the same length as the object
- A Grouper object
- A list of the above
level : int, level_name or list, optional
For objects with a MultiIndex, `level` can be used to specify
grouping by one or more levels of the MultiIndex.
sort : bool, default False
Sort the result by group keys. Differ from Pandas, cudf defaults
to False for better performance.
as_index : bool, optional
If as_index=True (default), the group names appear
as the keys of the resulting DataFrame.
If as_index=False, the groups are returned as ordinary
columns of the resulting DataFrame, *if they are named columns*.
dropna : bool, optional
If True (default), do not include the "null" group.
"""
if get_option("mode.pandas_compatible"):
obj = obj.nans_to_nulls()
self.obj = obj
self._as_index = as_index
self._by = by.copy(deep=True) if isinstance(by, _Grouping) else by
self._level = level
self._sort = sort
self._dropna = dropna
self._group_keys = group_keys
if isinstance(self._by, _Grouping):
self._by._obj = self.obj
self.grouping = self._by
else:
self.grouping = _Grouping(obj, self._by, level)
def __iter__(self):
group_names, offsets, _, grouped_values = self._grouped()
if isinstance(group_names, Index):
group_names = group_names.to_pandas()
for i, name in enumerate(group_names):
yield (
(name,)
if isinstance(self._by, list) and len(self._by) == 1
else name,
grouped_values[offsets[i] : offsets[i + 1]],
)
def __len__(self) -> int:
return self.ngroups
@property
def ngroups(self) -> int:
_, offsets, _, _ = self._grouped()
return len(offsets) - 1
@property
def ndim(self) -> int:
return self.obj.ndim
@property
def dtypes(self):
"""
Return the dtypes in this group.
.. deprecated:: 24.04
Use `.dtypes` on base object instead.
Returns
-------
pandas.DataFrame
The data type of each column of the group.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({'a': [1, 2, 3, 3], 'b': ['x', 'y', 'z', 'a'],
... 'c':[10, 11, 12, 12]})
>>> df.groupby("a").dtypes
a b c
a
1 int64 object int64
2 int64 object int64
3 int64 object int64
"""
warnings.warn(
f"{type(self).__name__}.dtypes is deprecated and will be "
"removed in a future version. Check the dtypes on the "
"base object instead",
FutureWarning,
)
index = self.grouping.keys.unique().sort_values().to_pandas()
return pd.DataFrame(
{name: [dtype] * len(index) for name, dtype in self.obj._dtypes},
index=index,
)
@cached_property
def groups(self):
"""
Returns a dictionary mapping group keys to row labels.
"""
group_names, offsets, _, grouped_values = self._grouped()
grouped_index = grouped_values.index
if len(group_names) > self._MAX_GROUPS_BEFORE_WARN:
warnings.warn(
f"GroupBy.groups() performance scales poorly with "
f"number of groups. Got {len(group_names)} groups."
)
return dict(
zip(
group_names.to_pandas(),
grouped_index._split(offsets[1:-1]),
strict=True,
)
)
@cached_property
def indices(self) -> dict[ScalarLike, cp.ndarray]:
"""
Dict {group name -> group indices}.
Examples
--------
>>> import cudf
>>> data = [[10, 20, 30], [10, 30, 40], [40, 50, 30]]
>>> df = cudf.DataFrame(data, columns=["a", "b", "c"])
>>> df
a b c
0 10 20 30
1 10 30 40
2 40 50 30
>>> df.groupby(by=["a"]).indices # doctest: +SKIP
{10: array([0, 1]), 40: array([2])}
"""
offsets, group_keys, (indices,) = self._groups(
[as_column(range(len(self.obj)), dtype=SIZE_TYPE_DTYPE)]
)
group_keys = [
ColumnBase.from_pylibcudf(col)
for col in stream_compaction.drop_duplicates(group_keys)
]
if len(group_keys) > 1:
index = MultiIndex.from_arrays(group_keys)
else:
index = Index._from_column(group_keys[0])
return dict(
zip(
index.to_pandas(),
cp.split(indices.values, offsets[1:-1]),
strict=True,
)
)
@_performance_tracking
def get_group(self, name, obj=None):
"""
Construct DataFrame from group with provided name.
Parameters
----------
name : object
The name of the group to get as a DataFrame.
obj : DataFrame, default None
The DataFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used.
Returns
-------
group : same type as obj
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"X": ["A", "B", "A", "B"], "Y": [1, 4, 3, 2]})
>>> df
X Y
0 A 1
1 B 4
2 A 3
3 B 2
>>> df.groupby("X").get_group("A")
X Y
0 A 1
2 A 3
"""
if obj is None:
obj = self.obj
else:
warnings.warn(
"obj is deprecated and will be removed in a future version. "
"Use ``df.iloc[gb.indices.get(name)]`` "
"instead of ``gb.get_group(name, obj=df)``.",
FutureWarning,
)
if is_list_like(self._by) and len(self._by) == 1:
if isinstance(name, tuple) and len(name) == 1:
name = name[0]
else:
raise KeyError(name)
return obj.iloc[self.indices[name]]
@_performance_tracking
def size(self) -> Series:
"""
Return the size of each group.
"""
from cudf.core.series import Series
col = column_empty(len(self.obj), np.dtype(np.int8))
result = (
Series._from_column(col, name=getattr(self.obj, "name", None))
.groupby(self.grouping, sort=self._sort, dropna=self._dropna)
.agg("size")
)
if not self._as_index:
result = result.rename("size").reset_index()
return result
@_performance_tracking
def cumcount(self, ascending: bool = True):
"""
Return the cumulative count of keys in each group.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Currently not supported
"""
if ascending is not True:
raise NotImplementedError(
"ascending is currently not implemented."
)
from cudf.core.series import Series
return (
Series._from_column(
column_empty(len(self.obj), np.dtype(np.int8)),
index=self.obj.index,
)
.groupby(self.grouping, sort=self._sort)
.agg("cumcount")
)
@_performance_tracking
def rank(
self,
method="average",
ascending=True,
na_option="keep",
pct=False,
axis=0,
):
"""
Return the rank of values within each group.
"""
if not axis == 0:
raise NotImplementedError("Only axis=0 is supported.")
if na_option not in {"keep", "top", "bottom"}:
raise ValueError(
f"na_option must be one of 'keep', 'top', or 'bottom', "
f"but got {na_option}"
)
# TODO: in pandas compatibility mode, we should convert any
# NaNs to nulls in any float value columns, as Pandas
# treats NaNs the way we treat nulls.
if get_option("mode.pandas_compatible"):
if any(
col.dtype.kind == "f" for col in self.grouping.values._columns
):
raise NotImplementedError(
"NaNs are not supported in groupby.rank."
)
def rank(x):
return getattr(x, "rank")(
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
result = self.agg(rank)
if get_option("mode.pandas_compatible"):
# pandas always returns floats:
return result.astype(np.dtype(np.float64))
return result
@cached_property
def _groupby(self) -> types.SimpleNamespace:
with acquire_spill_lock() as spill_lock:
plc_groupby = plc.groupby.GroupBy(
plc.Table(
[
col.to_pylibcudf(mode="read")
for col in self.grouping._key_columns
]
),
plc.types.NullPolicy.EXCLUDE
if self._dropna
else plc.types.NullPolicy.INCLUDE,
)
# Do we need this because we just check _spill_locks in test_spillable_df_groupby?
return types.SimpleNamespace(
plc_groupby=plc_groupby, _spill_locks=spill_lock
)
def _groups(
self, values: Iterable[ColumnBase]
) -> tuple[list[int], list[ColumnBase], list[ColumnBase]]:
plc_columns = [col.to_pylibcudf(mode="read") for col in values]
if not plc_columns:
plc_table = None
else:
plc_table = plc.Table(plc_columns)
offsets, grouped_keys, grouped_values = (
self._groupby.plc_groupby.get_groups(plc_table)
)
return (
offsets,
[ColumnBase.from_pylibcudf(col) for col in grouped_keys.columns()],
(
[
ColumnBase.from_pylibcudf(col)
for col in grouped_values.columns()
]
if grouped_values is not None
else []
),
)
def _aggregate(
self, values: tuple[ColumnBase, ...], aggregations
) -> tuple[
list[list[ColumnBase]],
list[ColumnBase],
list[list[tuple[str, str]]],
]:
included_aggregations = []
column_included = []
requests = []
# For any post-processing needed after pylibcudf aggregations
adjustments = []
result_columns: list[list[ColumnBase]] = []
for i, (col, aggs) in enumerate(
zip(values, aggregations, strict=True)
):
valid_aggregations = get_valid_aggregation(col.dtype)
included_aggregations_i = []
col_aggregations = []
adjustments_i = []
for agg in aggs:
str_agg = str(agg)
if _is_unsupported_agg_for_type(col.dtype, str_agg):
raise TypeError(
f"{col.dtype} type does not support {agg} operations"
)
agg_obj = aggregation.make_aggregation(agg)
if (
valid_aggregations == "ALL"
or agg_obj.kind in valid_aggregations
):
included_aggregations_i.append((agg, agg_obj.kind))
col_aggregations.append(agg_obj.plc_obj)
if str_agg == "cumcount":
# pandas 0-indexes cumulative count, see
# https://github.com/rapidsai/cudf/issues/10237
adjustments_i.append(lambda col: (col - 1))
else:
adjustments_i.append(lambda col: col)
included_aggregations.append(included_aggregations_i)
result_columns.append([])
if col_aggregations:
requests.append(
plc.groupby.GroupByRequest(
col.to_pylibcudf(mode="read"), col_aggregations
)
)
column_included.append(i)
adjustments.append(adjustments_i)
if not requests and any(len(v) > 0 for v in aggregations):
raise pd.errors.DataError(
"All requested aggregations are unsupported."
)
keys, results = (
self._groupby.plc_groupby.scan(requests)
if _is_all_scan_aggregate(aggregations)
else self._groupby.plc_groupby.aggregate(requests)
)
for i, result, adjustments_i in zip(
column_included, results, adjustments, strict=True
):
result_columns[i] = [
adj(ColumnBase.from_pylibcudf(col))
for col, adj in zip(
result.columns(), adjustments_i, strict=True
)
]
return (
result_columns,
[ColumnBase.from_pylibcudf(key) for key in keys.columns()],
included_aggregations,
)
def _shift(
self, values: tuple[ColumnBase, ...], periods: int, fill_values: list
) -> Generator[ColumnBase]:
_, shifts = self._groupby.plc_groupby.shift(
plc.table.Table([col.to_pylibcudf(mode="read") for col in values]),
[periods] * len(values),
[
pa_scalar_to_plc_scalar(
pa.scalar(val, type=cudf_dtype_to_pa_type(col.dtype))
)
for val, col in zip(fill_values, values, strict=True)
],
)
return (ColumnBase.from_pylibcudf(col) for col in shifts.columns())
def _replace_nulls(
self, values: tuple[ColumnBase, ...], method: str
) -> Generator[ColumnBase]:
_, replaced = self._groupby.plc_groupby.replace_nulls(
plc.Table([col.to_pylibcudf(mode="read") for col in values]),
[
plc.replace.ReplacePolicy.PRECEDING
if method == "ffill"
else plc.replace.ReplacePolicy.FOLLOWING
]
* len(values),
)
return (ColumnBase.from_pylibcudf(col) for col in replaced.columns())
@_performance_tracking
def agg(self, func=None, *args, engine=None, engine_kwargs=None, **kwargs):
"""
Apply aggregation(s) to the groups.
Parameters
----------
func : str, callable, list or dict
Argument specifying the aggregation(s) to perform on the
groups. `func` can be any of the following:
- string: the name of a supported aggregation
- callable: a function that accepts a Series/DataFrame and
performs a supported operation on it.
- list: a list of strings/callables specifying the
aggregations to perform on every column.
- dict: a mapping of column names to string/callable
specifying the aggregations to perform on those
columns.
See :ref:`the user guide <basics.groupby>` for supported
aggregations.
Returns
-------
A Series or DataFrame containing the combined results of the
aggregation(s).
Examples
--------
>>> import cudf
>>> a = cudf.DataFrame({
... 'a': [1, 1, 2],
... 'b': [1, 2, 3],
... 'c': [2, 2, 1]
... })
>>> a.groupby('a', sort=True).agg('sum')
b c
a
1 3 4
2 3 1
Specifying a list of aggregations to perform on each column.
>>> import cudf
>>> a = cudf.DataFrame({
... 'a': [1, 1, 2],
... 'b': [1, 2, 3],
... 'c': [2, 2, 1]
... })
>>> a.groupby('a', sort=True).agg(['sum', 'min'])
b c
sum min sum min
a
1 3 1 4 2
2 3 3 1 1
Using a dict to specify aggregations to perform per column.
>>> a = cudf.DataFrame({
... 'a': [1, 1, 2],
... 'b': [1, 2, 3],
... 'c': [2, 2, 1]
... })
>>> a.groupby('a', sort=True).agg({'a': 'max', 'b': ['min', 'mean']})
a b
max min mean
a
1 1 1 1.5
2 2 3 3.0
Using lambdas/callables to specify aggregations taking parameters.
>>> a = cudf.DataFrame({
... 'a': [1, 1, 2],
... 'b': [1, 2, 3],
... 'c': [2, 2, 1]
... })
>>> f1 = lambda x: x.quantile(0.5); f1.__name__ = "q0.5"
>>> f2 = lambda x: x.quantile(0.75); f2.__name__ = "q0.75"
>>> a.groupby('a').agg([f1, f2])
b c
q0.5 q0.75 q0.5 q0.75
a
1 1.5 1.75 2.0 2.0
2 3.0 3.00 1.0 1.0
"""
if engine is not None:
raise NotImplementedError(
"engine is non-functional and added for compatibility with pandas"
)
if engine_kwargs is not None:
raise NotImplementedError(
"engine_kwargs is non-functional added for compatibility with pandas"
)
if args:
raise NotImplementedError(
"Passing args to func is currently not supported."
)
column_names, columns, normalized_aggs = self._normalize_aggs(
func, **kwargs
)
orig_dtypes = tuple(c.dtype for c in columns)
# Note: When there are no key columns, the below produces
# an Index with float64 dtype, while Pandas returns
# an Index with int64 dtype.
# (GH: 6945)
(
result_columns,
grouped_key_cols,
included_aggregations,
) = self._aggregate(columns, normalized_aggs)
result_index = self.grouping.keys._from_columns_like_self(
grouped_key_cols,
)
multilevel = _is_multi_agg(func)
data = {}
for col_name, aggs, cols, orig_dtype in zip(
column_names,
included_aggregations,
result_columns,
orig_dtypes,
strict=True,
):
for agg_tuple, col in zip(aggs, cols, strict=True):
agg, agg_kind = agg_tuple
agg_name = agg.__name__ if callable(agg) else agg
if multilevel:
key = (col_name, agg_name)
else:
key = col_name
if (
agg in {list, "collect"}
and orig_dtype != col.dtype.element_type
):
# Structs lose their labels which we reconstruct here
col = col._with_type_metadata(
get_dtype_of_same_kind(
orig_dtype, ListDtype(orig_dtype)
)
)
if agg_kind in {"COUNT", "SIZE", "ARGMIN", "ARGMAX"}:
data[key] = col.astype(
get_dtype_of_same_kind(orig_dtype, np.dtype(np.int64))
)
elif (
self.obj.empty
and (
isinstance(agg_name, str)
and agg_name in Reducible._SUPPORTED_REDUCTIONS
)
and len(col) == 0
and not isinstance(
col.dtype,
(ListDtype, StructDtype, DecimalDtype),
)
):
data[key] = col.astype(orig_dtype)
else:
if isinstance(orig_dtype, DecimalDtype):
# `col` has a different precision than `orig_dtype`
# hence we only preserve the kind of the dtype
# and not the precision.
data[key] = col._with_type_metadata(
get_dtype_of_same_kind(orig_dtype, col.dtype)
)
else:
data[key] = col._with_type_metadata(orig_dtype)
data = ColumnAccessor(data, multiindex=multilevel)
if not multilevel:
data = data.rename_levels({np.nan: None}, level=0)
from cudf.core.dataframe import DataFrame
result = DataFrame._from_data(data, index=result_index)
if self._sort:
result = result.sort_index()
else:
if get_option(
"mode.pandas_compatible"
) and not _is_all_scan_aggregate(normalized_aggs):
# Even with `sort=False`, pandas guarantees that
# groupby preserves the order of rows within each group.
left_cols = self.grouping.keys.drop_duplicates()._columns
right_cols = result_index._columns
join_keys = [
_match_join_keys(lcol, rcol, "inner")
for lcol, rcol in zip(left_cols, right_cols, strict=True)
]
# TODO: In future, see if we can centralize
# logic else where that has similar patterns.
join_keys = map(list, zip(*join_keys, strict=True))
# By construction, left and right keys are related by
# a permutation, so we can use an inner join.
with acquire_spill_lock():
plc_tables = [
plc.Table(
[col.to_pylibcudf(mode="read") for col in cols]
)
for cols in join_keys
]
left_plc, right_plc = plc.join.inner_join(
plc_tables[0],
plc_tables[1],
plc.types.NullEquality.EQUAL,
)
left_order = ColumnBase.from_pylibcudf(left_plc)
right_order = ColumnBase.from_pylibcudf(right_plc)
# left order is some permutation of the ordering we
# want, and right order is a matching gather map for
# the result table. Get the correct order by sorting
# the right gather map.
right_order = sorting.sort_by_key(
[right_order],
[left_order],
[True],
["first"],
stable=False,
)[0]
result = result._gather(
GatherMap.from_column_unchecked(
ColumnBase.from_pylibcudf(right_order),
len(result),
nullify=False,
)
)
if not self._as_index:
result = result.reset_index()
if _is_all_scan_aggregate(normalized_aggs):
# Scan aggregations return rows in original index order
return self._mimic_pandas_order(result)
return result
def _reduce_numeric_only(self, op: str):
raise NotImplementedError(
f"numeric_only is not implemented for {type(self)}"
)
def _reduce(
self,
op: str,
numeric_only: bool = False,
min_count: int = 0,
*args,
**kwargs,
):
"""Compute {op} of group values.
Parameters
----------
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to
use everything, then use only numeric data.
min_count : int, default 0
The required number of valid values to perform the operation. If
fewer than ``min_count`` non-NA values are present the result will
be NA.
Returns
-------
Series or DataFrame
Computed {op} of values within each group.
.. pandas-compat::
:meth:`pandas.core.groupby.DataFrameGroupBy.{op}`,
:meth:`pandas.core.groupby.SeriesGroupBy.{op}`
The numeric_only, min_count
"""
if min_count != 0:
raise NotImplementedError(
"min_count parameter is not implemented yet"
)
if numeric_only:
return self._reduce_numeric_only(op)
return self.agg(op)
def _scan(self, op: str, *args, **kwargs):
"""{op_name} for each group."""
return self.agg(op)
aggregate = agg
def _head_tail(self, n, *, take_head: bool, preserve_order: bool):
"""Return the head or tail of each group
Parameters
----------
n
Number of entries to include (if negative, number of
entries to exclude)
take_head
Do we want the head or the tail of the group
preserve_order
If True, return the n rows from each group in original
dataframe order (this mimics pandas behavior though is
more expensive).
Returns
-------
New DataFrame or Series
Notes
-----
Unlike pandas, this returns an object in group order, not
original order, unless ``preserve_order`` is ``True``.
"""
# A more memory-efficient implementation would merge the take
# into the grouping, but that probably requires a new
# aggregation scheme in libcudf. This is probably "fast
# enough" for most reasonable input sizes.
_, offsets, _, group_values = self._grouped()
group_offsets = np.asarray(offsets, dtype=SIZE_TYPE_DTYPE)
size_per_group = np.diff(group_offsets)
# "Out of bounds" n for the group size either means no entries
# (negative) or all the entries (positive)
if n < 0:
size_per_group = np.maximum(
size_per_group + n, 0, out=size_per_group
)
else:
size_per_group = np.minimum(size_per_group, n, out=size_per_group)
if take_head:
group_offsets = group_offsets[:-1]
else:
group_offsets = group_offsets[1:] - size_per_group
to_take_indices = np.arange(
size_per_group.sum(), dtype=SIZE_TYPE_DTYPE
)
fixup = np.empty_like(size_per_group)
fixup[0] = 0
np.cumsum(size_per_group[:-1], out=fixup[1:])
to_take_indices += np.repeat(group_offsets - fixup, size_per_group)
to_take = as_column(to_take_indices)
result = group_values.iloc[to_take]
if preserve_order:
# Can't use _mimic_pandas_order because we need to
# subsample the gather map from the full input ordering,
# rather than permuting the gather map of the output.
_, _, (ordering,) = self._groups(
[as_column(range(0, len(self.obj)))]
)
# Invert permutation from original order to groups on the
# subset of entries we want.
gather_map = ordering.take(to_take).argsort()
return result.take(gather_map)
else:
return result
@_performance_tracking
def head(self, n: int = 5, *, preserve_order: bool = True):
"""Return first n rows of each group
Parameters
----------
n
If positive: number of entries to include from start of group
If negative: number of entries to exclude from end of group
preserve_order
If True (default), return the n rows from each group in
original dataframe order (this mimics pandas behavior
though is more expensive). If you don't need rows in
original dataframe order you will see a performance
improvement by setting ``preserve_order=False``. In both
cases, the original index is preserved, so ``.loc``-based
indexing will work identically.
Returns
-------
Series or DataFrame
Subset of the original grouped object as determined by n
See Also
--------
.tail
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame(
... {
... "a": [1, 0, 1, 2, 2, 1, 3, 2, 3, 3, 3],
... "b": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
... }
... )
>>> df.groupby("a").head(1)
a b
0 1 0
1 0 1
3 2 3
6 3 6
>>> df.groupby("a").head(-2)
a b
0 1 0
3 2 3
6 3 6
8 3 8
"""
return self._head_tail(
n, take_head=True, preserve_order=preserve_order
)
@_performance_tracking
def tail(self, n: int = 5, *, preserve_order: bool = True):
"""Return last n rows of each group
Parameters
----------
n
If positive: number of entries to include from end of group
If negative: number of entries to exclude from start of group
preserve_order
If True (default), return the n rows from each group in
original dataframe order (this mimics pandas behavior
though is more expensive). If you don't need rows in
original dataframe order you will see a performance
improvement by setting ``preserve_order=False``. In both
cases, the original index is preserved, so ``.loc``-based
indexing will work identically.
Returns
-------
Series or DataFrame
Subset of the original grouped object as determined by n
See Also
--------
.head
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame(
... {
... "a": [1, 0, 1, 2, 2, 1, 3, 2, 3, 3, 3],
... "b": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
... }
... )
>>> df.groupby("a").tail(1)
a b
1 0 1
5 1 5
7 2 7
10 3 10
>>> df.groupby("a").tail(-2)
a b
5 1 5
7 2 7
9 3 9
10 3 10
"""
return self._head_tail(
n, take_head=False, preserve_order=preserve_order
)
@_performance_tracking
def nth(self, n, dropna: Literal["any", "all", None] = None):
"""
Return the nth row from each group.
"""
if dropna is not None:
raise NotImplementedError("dropna is not currently supported.")
self.obj["__groupbynth_order__"] = range(0, len(self.obj))
# We perform another groupby here to have the grouping columns
# be a part of dataframe columns.
result = self.obj.groupby(self.grouping.keys).agg(lambda x: x.nth(n))
sizes = self.size().reindex(result.index)
result = result[sizes > n]
result.index = self.obj.index.take(
result._data["__groupbynth_order__"]
)
del result._data["__groupbynth_order__"]
del self.obj._data["__groupbynth_order__"]
return result
@_performance_tracking
def ngroup(self, ascending=True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
Parameters
----------
ascending : bool, default True
If False, number in reverse, from number of group - 1 to 0.
Returns
-------
Series
Unique numbers for each group.
See Also
--------
.cumcount : Number the rows in each group.
Examples
--------
>>> import cudf
>>> df = cudf.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
"""
from cudf.core.series import Series
index = self.grouping.keys.unique().sort_values()
num_groups = len(index)
has_null_group = any(col.has_nulls() for col in index._columns)
if ascending:
# Count ascending from 0 to num_groups - 1
groups = range(num_groups)
elif has_null_group:
# Count descending from num_groups - 1 to 0, but subtract one more
# for the null group making it num_groups - 2 to -1.
groups = range(num_groups - 2, -2, -1)
else:
# Count descending from num_groups - 1 to 0
groups = range(num_groups - 1, -1, -1)
group_ids = Series._from_column(as_column(groups))
if has_null_group:
group_ids.iloc[-1] = pd.NA
group_ids.index = index
return self._broadcast(group_ids)
def sample(
self,
n: int | None = None,
frac: float | None = None,
replace: bool = False,
weights: Sequence | Series | None = None,
random_state: np.random.RandomState | int | None = None,
):
"""Return a random sample of items in each group.
Parameters
----------
n
Number of items to return for each group, if sampling
without replacement must be at most the size of the
smallest group. Cannot be used with frac. Default is
``n=1`` if frac is None.
frac
Fraction of items to return. Cannot be used with n.
replace
Should sampling occur with or without replacement?
weights
Sampling probability for each element. Must be the same
length as the grouped frame. Not currently supported.
random_state
Seed for random number generation.
Returns
-------
New dataframe or series with samples of appropriate size drawn
from each group.
"""
if weights is not None:
# To implement this case again needs different algorithms
# in both cases.
#
# Without replacement, use the weighted reservoir sampling
# approach of Efraimidas and Spirakis (2006)
# https://doi.org/10.1016/j.ipl.2005.11.003, essentially,
# do a segmented argsort sorting on weight-scaled
# logarithmic deviates. See
# https://timvieira.github.io/blog/post/
# 2019/09/16/algorithms-for-sampling-without-replacement/
#
# With replacement is trickier, one might be able to use
# the alias method, otherwise we're back to bucketed
# rejection sampling.
raise NotImplementedError("Sampling with weights is not supported")
if frac is not None and n is not None:
raise ValueError("Cannot supply both of frac and n")
elif n is None and frac is None:
n = 1
elif frac is not None and not (0 <= frac <= 1):
raise ValueError(
"Sampling with fraction must provide fraction in "
f"[0, 1], got {frac=}"
)
# TODO: handle random states properly.
if random_state is not None and not isinstance(random_state, int):
raise NotImplementedError(
"Only integer seeds are supported for random_state "
"in this case"
)
# Get the groups
# TODO: convince Cython to convert the std::vector offsets
# into a numpy array directly, rather than a list.
# TODO: this uses the sort-based groupby, could one use hash-based?
_, offsets, _, group_values = self._grouped()
group_offsets = np.asarray(offsets, dtype=SIZE_TYPE_DTYPE)
size_per_group = np.diff(group_offsets)
if n is not None:
samples_per_group = np.broadcast_to(
SIZE_TYPE_DTYPE.type(n), size_per_group.shape
)
if not replace and (minsize := size_per_group.min()) < n:
raise ValueError(
f"Cannot sample {n=} without replacement, "
f"smallest group is {minsize}"
)
else:
# Pandas uses round-to-nearest, ties to even to
# pick sample sizes for the fractional case (unlike IEEE
# which is round-to-nearest, ties to sgn(x) * inf).
samples_per_group = np.round(
size_per_group * frac, decimals=0
).astype(SIZE_TYPE_DTYPE)
if replace:
# We would prefer to use cupy here, but their rng.integers
# interface doesn't take array-based low and high
# arguments.
low = 0
high: np.ndarray = np.repeat(size_per_group, samples_per_group)
rng = np.random.default_rng(seed=random_state)
indices = rng.integers(low, high, dtype=SIZE_TYPE_DTYPE)
indices += np.repeat(group_offsets[:-1], samples_per_group)
else:
# Approach: do a segmented argsort of the index array and take
# the first samples_per_group entries from sorted array.
# We will shuffle the group indices and then pick them out
# from the grouped dataframe index.
nrows = len(group_values)
indices = cp.arange(nrows, dtype=SIZE_TYPE_DTYPE)
if len(size_per_group) < 500:
# Empirically shuffling with cupy is faster at this scale
rs = cp.random.get_random_state()
rs.seed(seed=random_state)
for off, size in zip(
group_offsets[:-1], size_per_group, strict=True
):
rs.shuffle(indices[off : off + size])
else:
keys = cp.random.default_rng(seed=random_state).random(
size=nrows
)
with acquire_spill_lock():
plc_table = plc.sorting.stable_segmented_sort_by_key(
plc.Table(
[as_column(indices).to_pylibcudf(mode="read")]
),
plc.Table([as_column(keys).to_pylibcudf(mode="read")]),
as_column(group_offsets).to_pylibcudf(mode="read"),
[plc.types.Order.ASCENDING],
[plc.types.NullOrder.AFTER],
)
indices = ColumnBase.from_pylibcudf(plc_table.columns()[0])
indices = indices.values
# Which indices are we going to want?
want = np.arange(samples_per_group.sum(), dtype=SIZE_TYPE_DTYPE)
scan = np.empty_like(samples_per_group)
scan[0] = 0
np.cumsum(samples_per_group[:-1], out=scan[1:])
want += np.repeat(group_offsets[:-1] - scan, samples_per_group)
indices = indices[want]
return group_values.iloc[indices]
def serialize(self):
header = {}
frames = []
header["kwargs"] = {
"sort": self._sort,
"dropna": self._dropna,
"as_index": self._as_index,
}
obj_header, obj_frames = self.obj.serialize()
header["obj"] = obj_header
header["obj_type_name"] = type(self.obj).__name__
header["num_obj_frames"] = len(obj_frames)
frames.extend(obj_frames)
grouping_header, grouping_frames = self.grouping.serialize()
header["grouping"] = grouping_header
header["num_grouping_frames"] = len(grouping_frames)
frames.extend(grouping_frames)
return header, frames
@classmethod
def deserialize(cls, header, frames):
kwargs = header["kwargs"]
obj_type = Serializable._name_type_map[header["obj_type_name"]]
obj = obj_type.deserialize(
header["obj"], frames[: header["num_obj_frames"]]
)
grouping = _Grouping.deserialize(
header["grouping"], frames[header["num_obj_frames"] :]
)
return cls(obj, grouping, **kwargs)
    def _grouped(self, *, include_groups: bool = True):
        """Materialize the segmented (grouped) representation of ``self.obj``.

        Groups the index columns together with the data columns so keys and
        values stay row-aligned after the groupby reorders rows. Returns a
        4-tuple ``(group_names, offsets, grouped_keys, grouped_values)``
        where ``offsets`` delimits each group's row span.
        """
        offsets, grouped_key_cols, grouped_value_cols = self._groups(
            itertools.chain(self.obj.index._columns, self.obj._columns)
        )
        grouped_keys = _index_from_data(dict(enumerate(grouped_key_cols)))
        if isinstance(self.grouping.keys, MultiIndex):
            grouped_keys.names = self.grouping.keys.names
            to_drop = self.grouping.keys.names
        else:
            grouped_keys.name = self.grouping.keys.name
            to_drop = (self.grouping.keys.name,)
        grouped_values = self.obj._from_columns_like_self(
            grouped_value_cols,
            column_names=self.obj._column_names,
            index_names=self.obj._index_names,  # type: ignore[arg-type]
        )
        if not include_groups:
            # Drop the grouping key columns from the value frame when the
            # caller does not want them handed to the applied function.
            for col_name in to_drop:
                del grouped_values[col_name]
        group_names = grouped_keys.unique().sort_values()
        return (group_names, offsets, grouped_keys, grouped_values)
    def _normalize_aggs(
        self, aggs: MultiColumnAggType, **kwargs
    ) -> tuple[Iterable[Any], tuple[ColumnBase, ...], list[list[AggType]]]:
        """
        Normalize aggs to a list of list of aggregations, where `out[i]`
        is a list of aggregations for column `self.obj[i]`. We support four
        different form of `aggs` input here:
        - A single agg, such as "sum". This agg is applied to all value
        columns.
        - A list of aggs, such as ["sum", "mean"]. All aggs are applied to all
        value columns.
        - A mapping of column name to aggs, such as
        {"a": ["sum"], "b": ["mean"]}, the aggs are applied to specified
        column.
        - Pairs of column name and agg tuples passed as kwargs
        eg. col1=("a", "sum"), col2=("b", "prod"). The output column names are
        the keys. The aggs are applied to the corresponding column in the tuple.
        Each agg can be string or lambda functions.

        Returns a triple of (output column names, the value columns to
        aggregate, and a per-column list of aggregations).
        """
        aggs_per_column: Iterable[AggType | Iterable[AggType]]
        # TODO: Remove isinstance condition when the legacy dask_cudf API is removed.
        # See https://github.com/rapidsai/cudf/pull/16528#discussion_r1715482302 for information.
        if aggs or isinstance(aggs, dict):
            if isinstance(aggs, dict):
                # Mapping form: keys pick columns, values are the agg(s).
                # Duplicate aggs per column are either rejected (pandas
                # compat mode) or warned about, since the duplicate output
                # columns would be dropped.
                if any(
                    is_list_like(values) and len(set(values)) != len(values)  # type: ignore[arg-type]
                    for values in aggs.values()
                ):
                    if get_option("mode.pandas_compatible"):
                        raise NotImplementedError(
                            "Duplicate aggregations per column are currently not supported."
                        )
                    else:
                        warnings.warn(
                            "Duplicate aggregations per column found. "
                            "The resulting duplicate columns will be dropped.",
                            UserWarning,
                        )
                column_names, aggs_per_column = aggs.keys(), aggs.values()
                columns = tuple(self.obj._data[col] for col in column_names)
            else:
                # Single-agg or list-of-aggs form: applied to every value
                # column of the grouping.
                if isinstance(aggs, list) and len(aggs) != len(set(aggs)):
                    raise pd.errors.SpecificationError(
                        "Function names must be unique if there is no new column names assigned"
                    )
                values = self.grouping.values
                column_names = values._column_names
                columns = values._columns
                aggs_per_column = (aggs,) * len(columns)
        elif not aggs and kwargs:
            # Named-aggregation form: kwargs map output names to
            # (column, agg) tuples.
            column_names = kwargs.keys()

            def _raise_invalid_type(x):
                raise TypeError(
                    f"Invalid keyword argument {x} of type {type(x)} was passed to agg"
                )

            columns, aggs_per_column = zip(
                *(
                    (self.obj._data[x[0]], x[1])
                    if isinstance(x, tuple)
                    else _raise_invalid_type(x)
                    for x in kwargs.values()
                ),
                strict=True,
            )
        else:
            raise TypeError("Must provide at least one aggregation function.")

        # is_list_like performs type narrowing but type-checkers don't
        # know it. One could add a TypeGuard annotation to
        # is_list_like (see PEP647), but that is less useful than it
        # seems because unlike the builtin narrowings it only performs
        # narrowing in the positive case.
        normalized_aggs = [
            list(agg) if is_list_like(agg) else [agg]  # type: ignore[arg-type]
            for agg in aggs_per_column
        ]
        return column_names, columns, normalized_aggs
    @_performance_tracking
    def pipe(self, func, *args, **kwargs):
        """
        Apply a function `func` with arguments to this GroupBy
        object and return the function's result.

        Parameters
        ----------
        func : function
            Function to apply to this GroupBy object or,
            alternatively, a ``(callable, data_keyword)`` tuple where
            ``data_keyword`` is a string indicating the keyword of
            ``callable`` that expects the GroupBy object.
        args : iterable, optional
            Positional arguments passed into ``func``.
        kwargs : mapping, optional
            A dictionary of keyword arguments passed into ``func``.

        Returns
        -------
        object : the return type of ``func``.

        See Also
        --------
        cudf.Series.pipe
            Apply a function with arguments to a series.

        cudf.DataFrame.pipe
            Apply a function with arguments to a dataframe.

        apply
            Apply function to each group instead of to the full GroupBy object.

        Examples
        --------
        >>> import cudf
        >>> df = cudf.DataFrame({'A': ['a', 'b', 'a', 'b'], 'B': [1, 2, 3, 4]})
        >>> df
           A  B
        0  a  1
        1  b  2
        2  a  3
        3  b  4

        To get the difference between each groups maximum and minimum value
        in one pass, you can do

        >>> df.groupby('A', sort=True).pipe(lambda x: x.max() - x.min())
           B
        A
        a  2
        b  2
        """
        # Delegate to the shared ``pipe`` utility, which also handles the
        # ``(callable, data_keyword)`` tuple form.
        return pipe(self, func, *args, **kwargs)
@_performance_tracking
def _jit_groupby_apply(
self, function, group_names, offsets, group_keys, grouped_values, *args
):
chunk_results = jit_groupby_apply(
offsets, grouped_values, function, *args
)
return self._post_process_chunk_results(
chunk_results, group_names, group_keys, grouped_values
)
@_performance_tracking
def _iterative_groupby_apply(
self, function, group_names, offsets, group_keys, grouped_values, *args
):
ngroups = len(offsets) - 1
if ngroups > self._MAX_GROUPS_BEFORE_WARN:
warnings.warn(
f"GroupBy.apply() performance scales poorly with "
f"number of groups. Got {ngroups} groups. Some functions "
"may perform better by passing engine='jit'",
RuntimeWarning,
)
chunks = [grouped_values[s:e] for s, e in itertools.pairwise(offsets)]
chunk_results = [function(chk, *args) for chk in chunks]
return self._post_process_chunk_results(
chunk_results, group_names, group_keys, grouped_values
)
    def _post_process_chunk_results(
        self, chunk_results, group_names, group_keys, grouped_values
    ):
        """Assemble per-group UDF results into a single Series/DataFrame.

        The shape of the output depends on what the UDF returned per group:
        scalars, row-like Series, group-length Series, or frames. Each case
        is dispatched separately below to mimic pandas semantics.
        """
        from cudf.core.dataframe import DataFrame
        from cudf.core.series import Series

        if not len(chunk_results):
            # No groups at all: return an empty frame shaped like the input.
            return self.obj.head(0)

        if isinstance(chunk_results, ColumnBase) or is_scalar(
            chunk_results[0]
        ):
            # One scalar per group -> one row per group.
            data = ColumnAccessor(
                {None: as_column(chunk_results)}, verify=False
            )
            ty = Series if self._as_index else DataFrame
            result = ty._from_data(data, index=group_names)
            result.index.names = self.grouping.names
            return result
        elif isinstance(chunk_results[0], Series) and isinstance(
            self.obj, DataFrame
        ):
            # When the UDF is like df.sum(), the result for each
            # group is a row-like "Series" where the index labels
            # are the same as the original calling DataFrame
            if _is_row_of(chunk_results[0], self.obj):
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", FutureWarning)
                    result = concat(chunk_results, axis=1).T
                result.index = group_names
                result.index.names = self.grouping.names
            # When the UDF is like df.x + df.y, the result for each
            # group is the same length as the original group
            elif (total_rows := sum(len(chk) for chk in chunk_results)) in {
                len(self.obj),
                len(group_names),
            }:
                with warnings.catch_warnings():
                    warnings.simplefilter("ignore", FutureWarning)
                    result = concat(chunk_results)
                if total_rows == len(group_names):
                    result.index = group_names
                    # TODO: Is there a better way to determine what
                    # the column name should be, especially if we applied
                    # a nameless UDF.
                    result = result.to_frame(
                        name=grouped_values._column_names[0]
                    )
                else:
                    index_data = group_keys._data.copy(deep=True)
                    index_data[None] = grouped_values.index._column
                    result.index = MultiIndex._from_data(index_data)
            elif len(chunk_results) == len(group_names):
                # One arbitrary-shaped Series per group: stack as rows.
                result = concat(chunk_results, axis=1).T
                result.index = group_names
                result.index.names = self.grouping.names
            else:
                raise TypeError(
                    "Error handling Groupby apply output with input of "
                    f"type {type(self.obj)} and output of "
                    f"type {type(chunk_results[0])}"
                )
        else:
            # Frame-per-group (or Series-per-group on a Series input):
            # concatenate, optionally prefixing the group keys to the index.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", FutureWarning)
                result = concat(chunk_results)
            if self._group_keys:
                index_data = group_keys._data.copy(deep=True)
                index_data[None] = grouped_values.index._column
                result.index = MultiIndex._from_data(index_data)
        return result
    @_performance_tracking
    def apply(
        self, func, *args, engine="auto", include_groups: bool = True, **kwargs
    ):
        """Apply a python transformation function over the grouped chunk.

        Parameters
        ----------
        func : callable
            The python transformation function that will be applied
            on the grouped chunk.
        args : tuple
            Optional positional arguments to pass to the function.
        engine: 'auto', 'cudf', or 'jit', default 'auto'
            Selects the GroupBy.apply implementation. Use `jit` to
            select the numba JIT pipeline. Only certain operations are allowed
            within the function when using this option: min, max, sum, mean, var,
            std, idxmax, and idxmin and any arithmetic formula involving them are
            allowed. Binary operations are not yet supported, so syntax like
            `df['x'] * 2` is not yet allowed.
            For more information, see the `cuDF guide to user defined functions
            <https://docs.rapids.ai/api/cudf/stable/user_guide/guide-to-udfs.html>`__.
            Use `cudf` to select the iterative groupby apply algorithm which aims
            to provide maximum flexibility at the expense of performance.
            The default value `auto` will attempt to use the numba JIT pipeline
            where possible and will fall back to the iterative algorithm if
            necessary.
        include_groups : bool, default True
            When True, will attempt to apply ``func`` to the groupings in
            the case that they are columns of the DataFrame. In the future,
            this will default to ``False``.
        kwargs : dict
            Optional keyword arguments to pass to the function.
            Currently not supported

        Examples
        --------
        .. code-block:: python

          from cudf import DataFrame
          df = DataFrame()
          df['key'] = [0, 0, 1, 1, 2, 2, 2]
          df['val'] = [0, 1, 2, 3, 4, 5, 6]
          groups = df.groupby(['key'])

          # Define a function to apply to each row in a group
          def mult(df):
            df['out'] = df['key'] * df['val']
            return df

          result = groups.apply(mult)
          print(result)

        Output:

        .. code-block:: python

             key  val  out
          0    0    0    0
          1    0    1    0
          2    1    2    2
          3    1    3    3
          4    2    4    8
          5    2    5   10
          6    2    6   12

        .. pandas-compat::
            :meth:`pandas.core.groupby.DataFrameGroupBy.apply`,
             :meth:`pandas.core.groupby.SeriesGroupBy.apply`

            cuDF's ``groupby.apply`` is limited compared to pandas.
            In some situations, Pandas returns the grouped keys as part of
            the index while cudf does not due to redundancy. For example:

            .. code-block::

                >>> import pandas as pd
                >>> df = pd.DataFrame({
                ...     'a': [1, 1, 2, 2],
                ...     'b': [1, 2, 1, 2],
                ...     'c': [1, 2, 3, 4],
                ... })
                >>> gdf = cudf.from_pandas(df)
                >>> df.groupby('a')[["b", "c"]].apply(lambda x: x.iloc[[0]])
                     b  c
                a
                1 0  1  1
                2 2  1  3
                >>> gdf.groupby('a')[["b", "c"]].apply(lambda x: x.iloc[[0]])
                   b  c
                0  1  1
                2  1  3

        ``engine='jit'`` may be used to accelerate certain functions,
        initially those that contain reductions and arithmetic operations
        between results of those reductions:

        >>> import cudf
        >>> df = cudf.DataFrame({'a':[1,1,2,2,3,3], 'b':[1,2,3,4,5,6]})
        >>> df.groupby('a').apply(
        ...     lambda group: group['b'].max() - group['b'].min(),
        ...     engine='jit'
        ... )
        a
        1    1
        2    1
        3    1
        dtype: int64
        """
        if kwargs:
            raise NotImplementedError(
                "Passing kwargs to func is currently not supported."
            )
        if self.obj.empty:
            # Fast path for empty input: build an appropriately-typed empty
            # result without invoking the UDF machinery. ``func`` may be a
            # string here (the empty path is shared with named reductions).
            from cudf.core.series import Series

            if func in {"count", "size", "idxmin", "idxmax"}:
                res = Series([], dtype=np.dtype(np.int64))
            else:
                res = self.obj.copy(deep=True)
            res.index = self.grouping.keys
            if func in {"sum", "product"}:
                # For `sum` & `product`, boolean types
                # will need to result in `int64` type.
                for name, col in res._column_labels_and_values:
                    if col.dtype.kind == "b":
                        res._data[name] = col.astype(np.dtype(np.int64))
            return res
        if not callable(func):
            raise TypeError(f"type {type(func)} is not callable")
        group_names, offsets, group_keys, grouped_values = self._grouped(
            include_groups=include_groups
        )
        if engine == "auto":
            # Prefer the JIT pipeline when the UDF is JIT-compatible.
            if _can_be_jitted(grouped_values, func, args):
                engine = "jit"
            else:
                engine = "cudf"
        if engine == "jit":
            result = self._jit_groupby_apply(
                func,
                group_names,
                offsets,
                group_keys,
                grouped_values,
                *args,
            )
        elif engine == "cudf":
            result = self._iterative_groupby_apply(
                func,
                group_names,
                offsets,
                group_keys,
                grouped_values,
                *args,
            )
        else:
            raise ValueError(f"Unsupported engine '{engine}'")
        if self._sort:
            result = result.sort_index()
        if self._as_index is False:
            result = result.reset_index()
        return result
    @_performance_tracking
    def _broadcast(self, values: Series) -> Series:
        """
        Broadcast the results of an aggregation to the group

        Parameters
        ----------
        values: Series
            A Series representing the results of an aggregation.  The
            index of the Series must be the (unique) values
            representing the group keys.

        Returns
        -------
        A Series of the same size and with the same index as
        ``self.obj``.
        """
        if not values.index.equals(self.grouping.keys):
            # Repeat each per-group value out to its group's rows by
            # aligning against the (non-unique) grouping keys.
            values = values._align_to_index(
                self.grouping.keys, how="right", allow_non_unique=True
            )
        # Restore the caller's original row index.
        values.index = self.obj.index
        return values
    @_performance_tracking
    def transform(
        self, func, *args, engine=None, engine_kwargs=None, **kwargs
    ):
        """Apply an aggregation, then broadcast the result to the group size.

        Parameters
        ----------
        func: str or callable
            Aggregation to apply to each group. Note that the set of
            operations currently supported by `transform` is identical
            to that supported by the `agg` method.

        Returns
        -------
        A Series or DataFrame of the same size as the input, with the
        result of the aggregation per group broadcasted to the group
        size.

        Examples
        --------
        .. code-block:: python

          import cudf
          df = DataFrame({'a': [2, 1, 1, 2, 2], 'b': [1, 2, 3, 4, 5]})
          df.groupby('a').transform('max')
             b
          0  5
          1  3
          2  3
          3  5
          4  5

        See Also
        --------
        agg
        """
        # ``engine``/``engine_kwargs`` exist only for pandas signature
        # compatibility; ``args``/``kwargs`` forwarding is unimplemented.
        if engine is not None:
            raise NotImplementedError(
                "engine is non-functional and added for compatibility with pandas"
            )
        if engine_kwargs is not None:
            raise NotImplementedError(
                "engine_kwargs is non-functional added for compatibility with pandas"
            )
        if args:
            raise NotImplementedError(
                "Passing args to func is currently not supported."
            )
        if kwargs:
            raise NotImplementedError(
                "Passing kwargs to func is currently not supported."
            )
        if not (isinstance(func, str) or callable(func)):
            raise TypeError(
                "Aggregation must be a named aggregation or a callable"
            )
        try:
            result = self.agg(func)
        except TypeError as e:
            raise NotImplementedError(
                "Currently, `transform()` supports only aggregations."
            ) from e
        # If the aggregation is a scan, don't broadcast
        if _is_all_scan_aggregate([[func]]):
            if len(result) != len(self.obj):
                raise AssertionError(
                    "Unexpected result length for scan transform"
                )
            return result
        return self._broadcast(result)
def rolling(self, *args, **kwargs):
"""
Returns a `RollingGroupby` object that enables rolling window
calculations on the groups.
See Also
--------
cudf.core.window.rolling.RollingGroupby
"""
from cudf.core.window.rolling import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@_performance_tracking
def count(self, dropna=True):
"""Compute the number of values in each column.
Parameters
----------
dropna : bool
If ``True``, don't include null values in the count.
"""
def func(x):
return getattr(x, "count")(dropna=dropna)
return self.agg(func)
    @_performance_tracking
    def describe(self, percentiles=None, include=None, exclude=None):
        """
        Generate descriptive statistics that summarizes the central tendency,
        dispersion and shape of a dataset's distribution, excluding NaN values.

        Analyzes numeric DataFrames only

        Parameters
        ----------
        percentiles : list-like of numbers, optional
            The percentiles to include in the output.
            Currently not supported.

        include: 'all', list-like of dtypes or None (default), optional
            list of data types to include in the result.
            Ignored for Series.

        exclude: list-like of dtypes or None (default), optional,
            list of data types to omit from the result.
            Ignored for Series.

        Returns
        -------
        Series or DataFrame
            Summary statistics of the Dataframe provided.

        Examples
        --------
        >>> import cudf
        >>> gdf = cudf.DataFrame({
        ...     "Speed": [380.0, 370.0, 24.0, 26.0],
        ...      "Score": [50, 30, 90, 80],
        ... })
        >>> gdf
           Speed  Score
        0  380.0     50
        1  370.0     30
        2   24.0     90
        3   26.0     80
        >>> gdf.groupby('Score').describe()
             Speed
             count   mean   std    min    25%    50%    75%     max
        Score
        30       1  370.0  <NA>  370.0  370.0  370.0  370.0  370.0
        50       1  380.0  <NA>  380.0  380.0  380.0  380.0  380.0
        80       1   26.0  <NA>   26.0   26.0   26.0   26.0   26.0
        90       1   24.0  <NA>   24.0   24.0   24.0   24.0   24.0
        """
        if percentiles is not None:
            raise NotImplementedError("percentiles is currently not supported")
        if exclude is not None:
            raise NotImplementedError("exclude is currently not supported")
        if include is not None:
            raise NotImplementedError("include is currently not supported")
        # Compute all summary statistics in a single multi-aggregation
        # pass. The quantile helpers produce columns named after the
        # functions themselves, renamed to pandas-style labels below.
        res = self.agg(
            [
                "count",
                "mean",
                "std",
                "min",
                _quantile_25,
                _quantile_50,
                _quantile_75,
                "max",
            ]
        )
        res.rename(
            columns={
                "_quantile_25": "25%",
                "_quantile_50": "50%",
                "_quantile_75": "75%",
            },
            level=1,
            inplace=True,
        )
        return res
    @_performance_tracking
    def cov(self, min_periods=0, ddof=1, numeric_only: bool = False):
        """
        Compute the pairwise covariance among the columns of a DataFrame,
        excluding NA/null values.

        The returned DataFrame is the covariance matrix of the columns of
        the DataFrame.

        Both NA and null values are automatically excluded from the
        calculation. See the note below about bias from missing values.

        A threshold can be set for the minimum number of observations
        for each value created. Comparisons with observations below this
        threshold will be returned as `NA`.

        This method is generally used for the analysis of time series data to
        understand the relationship between different measures across time.

        Parameters
        ----------
        min_periods: int, optional
            Minimum number of observations required per pair of columns
            to have a valid result.

        ddof: int, optional
            Delta degrees of freedom, default is 1.

        Returns
        -------
        DataFrame
            Covariance matrix.

        Notes
        -----
        Returns the covariance matrix of the DataFrame's time series.
        The covariance is normalized by N-ddof.

        For DataFrames that have Series that are missing data
        (assuming that data is missing at random) the returned covariance
        matrix will be an unbiased estimate of the variance and covariance
        between the member Series.

        However, for many applications this estimate may not be acceptable
        because the estimate covariance matrix is not guaranteed to be
        positive semi-definite. This could lead to estimate correlations
        having absolute values which are greater than one, and/or a
        non-invertible covariance matrix. See
        `Estimation of covariance matrices
        <https://en.wikipedia.org/wiki/Estimation_of_covariance_matrices>`
        for more details.

        Examples
        --------
        >>> import cudf
        >>> gdf = cudf.DataFrame({
        ...     "id": ["a", "a", "a", "b", "b", "b", "c", "c", "c"],
        ...     "val1": [5, 4, 6, 4, 8, 7, 4, 5, 2],
        ...     "val2": [4, 5, 6, 1, 2, 9, 8, 5, 1],
        ...     "val3": [4, 5, 6, 1, 2, 9, 8, 5, 1],
        ... })
        >>> gdf
          id  val1  val2  val3
        0  a     5     4     4
        1  a     4     5     5
        2  a     6     6     6
        3  b     4     1     1
        4  b     8     2     2
        5  b     7     9     9
        6  c     4     8     8
        7  c     5     5     5
        8  c     2     1     1
        >>> gdf.groupby("id").cov()
                    val1       val2       val3
        id
        a  val1  1.000000   0.500000   0.500000
           val2  0.500000   1.000000   1.000000
           val3  0.500000   1.000000   1.000000
        b  val1  4.333333   3.500000   3.500000
           val2  3.500000  19.000000  19.000000
           val3  3.500000  19.000000  19.000000
        c  val1  2.333333   3.833333   3.833333
           val2  3.833333  12.333333  12.333333
           val3  3.833333  12.333333  12.333333
        """
        if numeric_only is not False:
            raise NotImplementedError(
                "numeric_only is currently not supported."
            )
        # The pairwise machinery is shared with corr(); only the per-pair
        # reduction differs.
        return self._cov_or_corr(
            lambda x: x.cov(min_periods, ddof), "Covariance"
        )
    def _cov_or_corr(self, func, method_name):
        """
        Internal function that is called by either corr() or cov()
        for sort groupby correlation and covariance computations,
        respectively.

        Parameters
        ----------
        func : callable
            Per-pair reduction applied through ``agg`` (e.g. a lambda
            calling ``.cov(...)`` or ``.corr(...)``).
        method_name : str
            Human-readable name ("Covariance"/"Correlation") used in
            error messages.
        """
        # create expanded dataframe consisting all combinations of the
        # struct columns-pairs to be used in the correlation or covariance
        # i.e. (('col1', 'col1'), ('col1', 'col2'), ('col2', 'col2'))
        column_names = self.grouping._values_column_names
        num_cols = len(column_names)
        column_pair_structs = {}
        for x, y in itertools.combinations_with_replacement(column_names, 2):
            # The number of output columns is the number of input columns
            # squared. We directly call the struct column factory here to
            # reduce overhead and avoid copying data. Since libcudf groupby
            # maintains a cache of aggregation requests, reusing the same
            # column also makes use of previously cached column means and
            # reduces kernel costs.

            # checks if input column names are string, raise a warning if
            # not so and cast them to strings
            if not (isinstance(x, str) and isinstance(y, str)):
                warnings.warn(
                    "DataFrame contains non-string column name(s). "
                    "Struct columns require field names to be strings. "
                    "Non-string column names will be cast to strings "
                    "in the result's field names."
                )
                x, y = str(x), str(y)
            struct_column = ColumnBase.from_pylibcudf(
                plc.Column.struct_from_children(
                    [
                        self.obj._data[x].to_pylibcudf(mode="read"),
                        self.obj._data[y].to_pylibcudf(mode="read"),
                    ]
                )
            ).set_mask(None)
            column_pair_structs[(x, y)] = struct_column
        from cudf.core.dataframe import DataFrame

        # Aggregate each (x, y) struct pair per group with the supplied
        # cov/corr reduction.
        column_pair_groupby = DataFrame._from_data(
            column_pair_structs
        ).groupby(by=self.grouping)
        try:
            gb_cov_corr = column_pair_groupby.agg(func)
        except RuntimeError as e:
            if "Unsupported groupby reduction type-agg combination" in str(e):
                raise TypeError(
                    f"{method_name} accepts only numerical column-pairs"
                )
            raise
        # ensure that column-pair labels are arranged in ascending order
        cols_list = [
            (y, x) if i > j else (x, y)
            for j, y in enumerate(column_names)
            for i, x in enumerate(column_names)
        ]
        cols_split = [
            cols_list[i : i + num_cols]
            for i in range(0, len(cols_list), num_cols)
        ]

        # interleave: combines the correlation or covariance results for each
        # column-pair into a single column
        @acquire_spill_lock()
        def interleave_columns(source_columns):
            return ColumnBase.from_pylibcudf(
                plc.reshape.interleave_columns(
                    plc.Table(
                        [c.to_pylibcudf(mode="read") for c in source_columns]
                    )
                )
            )

        res = DataFrame._from_data(
            {
                x: interleave_columns([gb_cov_corr._data[y] for y in ys])
                for ys, x in zip(cols_split, column_names, strict=True)
            }
        )
        # create a multiindex for the groupby covariance or correlation
        # dataframe, to match pandas behavior
        unsorted_idx = gb_cov_corr.index.repeat(num_cols)
        idx_sort_order = unsorted_idx._get_sorted_inds()
        sorted_idx = unsorted_idx._gather(idx_sort_order)
        if len(gb_cov_corr):
            # TO-DO: Should the operation below be done on the CPU instead?
            sorted_idx._data[None] = as_column(
                np.tile(column_names, len(gb_cov_corr.index))
            )
        res.index = MultiIndex._from_data(sorted_idx._data)
        return res
@_performance_tracking
def var(
self,
ddof=1,
engine=None,
engine_kwargs=None,
numeric_only: bool = False,
):
"""Compute the column-wise variance of the values in each group.
Parameters
----------
ddof : int
The delta degrees of freedom. N - ddof is the divisor used to
normalize the variance.
"""
if engine is not None:
raise NotImplementedError(
"engine is non-functional and added for compatibility with pandas"
)
if engine_kwargs is not None:
raise NotImplementedError(
"engine_kwargs is non-functional added for compatibility with pandas"
)
if numeric_only is not False:
raise NotImplementedError(
"numeric_only is currently not supported."
)
def func(x):
return getattr(x, "var")(ddof=ddof)
return self.agg(func)
@_performance_tracking
def nunique(self, dropna: bool = True):
"""
Return number of unique elements in the group.
Parameters
----------
dropna : bool, default True
Don't include NaN in the counts.
"""
def func(x):
return getattr(x, "nunique")(dropna=dropna)
return self.agg(func)
@_performance_tracking
def std(
self,
ddof=1,
engine=None,
engine_kwargs=None,
numeric_only: bool = False,
):
"""Compute the column-wise std of the values in each group.
Parameters
----------
ddof : int
The delta degrees of freedom. N - ddof is the divisor used to
normalize the standard deviation.
"""
if engine is not None:
raise NotImplementedError(
"engine is non-functional and added for compatibility with pandas"
)
if engine_kwargs is not None:
raise NotImplementedError(
"engine_kwargs is non-functional added for compatibility with pandas"
)
if numeric_only is not False:
raise NotImplementedError(
"numeric_only is currently not supported."
)
def func(x):
return getattr(x, "std")(ddof=ddof)
return self.agg(func)
@_performance_tracking
def quantile(
self, q=0.5, interpolation="linear", numeric_only: bool = False
):
"""Compute the column-wise quantiles of the values in each group.
Parameters
----------
q : float or array-like
The quantiles to compute.
interpolation : {"linear", "lower", "higher", "midpoint", "nearest"}
The interpolation method to use when the desired quantile lies
between two data points. Defaults to "linear".
numeric_only : bool, default False
Include only `float`, `int` or `boolean` data.
Currently not supported
"""
if numeric_only is not False:
raise NotImplementedError(
"numeric_only is not currently supported."
)
def func(x):
return getattr(x, "quantile")(q=q, interpolation=interpolation)
return self.agg(func)
@_performance_tracking
def unique(self):
"""Get a list of the unique values for each column in each group."""
return self.agg("unique")
@_performance_tracking
def diff(self, periods=1, axis=0):
"""Get the difference between the values in each group.
Parameters
----------
periods : int, default 1
Periods to shift for calculating difference,
accepts negative values.
axis : {0 or 'index', 1 or 'columns'}, default 0
Take difference over rows (0) or columns (1).
Only row-wise (0) shift is supported.
Returns
-------
Series or DataFrame
First differences of the Series or DataFrame.
"""
if not axis == 0:
raise NotImplementedError("Only axis=0 is supported.")
values = self.grouping.values
values.index = self.obj.index
return values - self.shift(periods=periods)
    def _scan_fill(self, method: str, limit: int) -> DataFrameOrSeries:
        """Internal implementation for `ffill` and `bfill`

        Runs the null-replacement scan group-wise, then restores the
        caller's original row order and type metadata.
        """
        values = self.grouping.values
        result = self.obj._from_data(
            dict(
                zip(
                    values._column_names,
                    self._replace_nulls(values._columns, method),
                    strict=True,
                )
            )
        )
        # Grouped scans come back in grouped order; put rows back in the
        # original (pandas-matching) order.
        result = self._mimic_pandas_order(result)
        return result._copy_type_metadata(values)
    def ffill(self, limit=None):
        """Forward fill NA values.

        Parameters
        ----------
        limit : int, default None
            Unsupported
        """
        if limit is not None:
            raise NotImplementedError("Does not support limit param yet.")

        return self._scan_fill("ffill", limit)
    def bfill(self, limit=None):
        """Backward fill NA values.

        Parameters
        ----------
        limit : int, default None
            Unsupported
        """
        if limit is not None:
            raise NotImplementedError("Does not support limit param yet.")

        return self._scan_fill("bfill", limit)
@_performance_tracking
def fillna(
self,
value=None,
method=None,
axis=0,
inplace=False,
limit=None,
downcast=None,
):
"""Fill NA values using the specified method.
Parameters
----------
value : scalar, dict
Value to use to fill the holes. Cannot be specified with method.
method : { 'bfill', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
- ffill: propagate last valid observation forward to next valid
- bfill: use next valid observation to fill gap
axis : {0 or 'index', 1 or 'columns'}
Unsupported
inplace : bool, default False
If `True`, fill inplace. Note: this will modify other views on this
object.
limit : int, default None
Unsupported
downcast : dict, default None
Unsupported
Returns
-------
DataFrame or Series
"""
warnings.warn(
"groupby fillna is deprecated and "
"will be removed in a future version. Use groupby ffill "
"or groupby bfill for forward or backward filling instead.",
FutureWarning,
)
if inplace:
raise NotImplementedError("Does not support inplace yet.")
if limit is not None:
raise NotImplementedError("Does not support limit param yet.")
if downcast is not None:
raise NotImplementedError("Does not support downcast yet.")
if not axis == 0:
raise NotImplementedError("Only support axis == 0.")
if value is None and method is None:
raise ValueError("Must specify a fill 'value' or 'method'.")
if value is not None and method is not None:
raise ValueError("Cannot specify both 'value' and 'method'.")
if method is not None:
if method not in {"ffill", "bfill"}:
raise ValueError("Method can only be of 'ffill', 'bfill'.")
return getattr(self, method, limit)()
values = self.grouping.values
values.index = self.obj.index
return values.fillna(
value=value, inplace=inplace, axis=axis, limit=limit
)
    @_performance_tracking
    def shift(
        self,
        periods: int = 1,
        freq=None,
        axis=0,
        fill_value=None,
        suffix: str | None = None,
    ):
        """
        Shift each group by ``periods`` positions.

        Parameters
        ----------
        periods : int, default 1
            Number of periods to shift.
        freq : str, unsupported
        axis : 0, axis to shift
            Shift direction. Only row-wise shift is supported
        fill_value : scalar or list of scalars, optional
            The scalar value to use for newly introduced missing values. Can be
            specified with `None`, a single value or multiple values:

            - `None` (default): sets all indeterminable values to null.
            - Single value: fill all shifted columns with this value. Should
              match the data type of all columns.
            - List of values: fill shifted columns with corresponding value in
              the list. The length of the list should match the number of
              columns shifted. Each value should match the data type of the
              column to fill.
        suffix : str, optional
            A string to add to each shifted column if there are multiple periods.
            Ignored otherwise.
            Currently not supported.

        Returns
        -------
        Series or DataFrame
            Object shifted within each group.

        .. pandas-compat::
            :meth:`pandas.core.groupby.DataFrameGroupBy.shift`,
             :meth:`pandas.core.groupby.SeriesGroupBy.shift`

            Parameter ``freq`` is unsupported.
        """
        if freq is not None:
            raise NotImplementedError("Parameter freq is unsupported.")
        if axis != 0:
            raise NotImplementedError("Only axis=0 is supported.")
        if suffix is not None:
            raise NotImplementedError("shift is not currently supported.")
        values = self.grouping.values
        if is_list_like(fill_value):
            # One fill value per shifted column.
            if len(fill_value) != values._num_columns:
                raise ValueError(
                    "Mismatched number of columns and values to fill."
                )
        else:
            # Broadcast the single fill value to every column.
            fill_value = [fill_value] * values._num_columns
        result = self.obj.__class__._from_data(
            dict(
                zip(
                    values._column_names,
                    self._shift(values._columns, periods, fill_value),
                    strict=True,
                )
            )
        )
        # Grouped shifts come back in grouped order; restore the original
        # (pandas-matching) row order and copy back type metadata.
        result = self._mimic_pandas_order(result)
        return result._copy_type_metadata(values)
@_performance_tracking
def pct_change(
self,
periods=1,
fill_method=no_default,
axis=0,
limit=no_default,
freq=None,
):
"""
Calculates the percent change between sequential elements
in the group.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'ffill'
How to handle NAs before computing percent changes.
.. deprecated:: 24.04
All options of `fill_method` are deprecated
except `fill_method=None`.
limit : int, optional
The number of consecutive NAs to fill before stopping.
Not yet implemented.
.. deprecated:: 24.04
`limit` is deprecated.
freq : str, optional
Increment to use from time series API.
Not yet implemented.
Returns
-------
Series or DataFrame
Percentage changes within each group
"""
if not axis == 0:
raise NotImplementedError("Only axis=0 is supported.")
if limit is not no_default:
raise NotImplementedError("limit parameter not supported yet.")
if freq is not None:
raise NotImplementedError("freq parameter not supported yet.")
elif fill_method not in {no_default, None, "ffill", "bfill"}:
raise ValueError("fill_method must be one of 'ffill', or'bfill'.")
if fill_method not in (no_default, None) or limit is not no_default:
# Do not remove until pandas 3.0 support is added.
assert PANDAS_LT_300, (
"Need to drop after pandas-3.0 support is added."
)
warnings.warn(
"The 'fill_method' keyword being not None and the 'limit' "
f"keywords in {type(self).__name__}.pct_change are "
"deprecated and will be removed in a future version. "
"Either fill in any non-leading NA values prior "
"to calling pct_change or specify 'fill_method=None' "
"to not fill NA values.",
FutureWarning,
)
if fill_method in (no_default, None):
fill_method = "ffill"
if limit is no_default:
limit = None
with warnings.catch_warnings():
warnings.simplefilter("ignore")
filled = self.fillna(method=fill_method, limit=limit)
fill_grp = filled.groupby(self.grouping)
shifted = fill_grp.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
def _mimic_pandas_order(
self, result: DataFrameOrSeries
) -> DataFrameOrSeries:
"""Given a groupby result from libcudf, reconstruct the row orders
matching that of pandas. This also adds appropriate indices.
"""
# TODO: copy metadata after this method is a common pattern, should
# merge in this method.
# This function is used to reorder the results of scan-based
# groupbys which have the same output size as input size.
# However, if the grouping key has NAs and dropna=True, the
# result coming back from libcudf has null_count few rows than
# the input, so we must produce an ordering from the full
# input range.
_, _, (ordering,) = self._groups([as_column(range(0, len(self.obj)))])
if self._dropna and any(
c.has_nulls(include_nan=True) > 0
for c in self.grouping._key_columns
):
# Scan aggregations with null/nan keys put nulls in the
# corresponding output rows in pandas, to do that here
# expand the result by reindexing.
ri = RangeIndex(0, len(self.obj))
result.index = Index._from_column(ordering)
# This reorders and expands
result = result.reindex(ri)
else:
# Just reorder according to the groupings
result = result.take(ordering.argsort())
# Now produce the actual index we first thought of
result.index = self.obj.index
return result
def ohlc(self):
"""
Compute open, high, low and close values of a group, excluding missing values.
Currently not implemented.
"""
raise NotImplementedError("ohlc is currently not implemented")
@property
def plot(self):
"""
Make plots of a grouped Series or DataFrame.
Currently not implemented.
"""
raise NotImplementedError("plot is currently not implemented")
def resample(self, rule, *args, include_groups: bool = True, **kwargs):
"""
Provide resampling when using a TimeGrouper.
Currently not implemented.
"""
raise NotImplementedError("resample is currently not implemented")
def take(self, indices):
"""
Return the elements in the given *positional* indices in each group.
Currently not implemented.
"""
raise NotImplementedError("take is currently not implemented")
def filter(self, func, dropna: bool = True, *args, **kwargs):
"""
Filter elements from groups that don't satisfy a criterion.
Currently not implemented.
"""
raise NotImplementedError("filter is currently not implemented")
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group.
Currently not implemented.
"""
raise NotImplementedError("expanding is currently not implemented")
def ewm(self, *args, **kwargs):
"""
Return an ewm grouper, providing ewm functionality per group.
Currently not implemented.
"""
raise NotImplementedError("expanding is currently not implemented")
def any(self, skipna: bool = True):
"""
Return True if any value in the group is truthful, else False.
Currently not implemented.
"""
raise NotImplementedError("any is currently not implemented")
def all(self, skipna: bool = True):
"""
Return True if all values in the group are truthful, else False.
Currently not implemented.
"""
raise NotImplementedError("all is currently not implemented")
| GroupBy |
python | matplotlib__matplotlib | lib/matplotlib/category.py | {
"start": 5128,
"end": 7377
} | class ____:
def __init__(self, data=None):
"""
Create mapping between unique categorical values and integer ids.
Parameters
----------
data : iterable
sequence of string values
"""
self._mapping = OrderedDict()
self._counter = itertools.count()
if data is not None:
self.update(data)
@staticmethod
def _str_is_convertible(val):
"""
Helper method to check whether a string can be parsed as float or date.
"""
try:
float(val)
except ValueError:
try:
dateutil.parser.parse(val)
except (ValueError, TypeError):
# TypeError if dateutil >= 2.8.1 else ValueError
return False
return True
def update(self, data):
"""
Map new values to integer identifiers.
Parameters
----------
data : iterable of str or bytes
Raises
------
TypeError
If elements in *data* are neither str nor bytes.
"""
data = np.atleast_1d(np.array(data, dtype=object))
# check if convertible to number:
convertible = True
for val in OrderedDict.fromkeys(data):
# OrderedDict just iterates over unique values in data.
_api.check_isinstance((str, bytes), value=val)
if convertible:
# this will only be called so long as convertible is True.
convertible = self._str_is_convertible(val)
if val not in self._mapping:
self._mapping[val] = next(self._counter)
if data.size and convertible:
_log.info('Using categorical units to plot a list of strings '
'that are all parsable as floats or dates. If these '
'strings should be plotted as numbers, cast to the '
'appropriate data type before plotting.')
# Register the converter with Matplotlib's unit framework
# Intentionally set to a single instance
units.registry[str] = \
units.registry[np.str_] = \
units.registry[bytes] = \
units.registry[np.bytes_] = StrCategoryConverter()
| UnitData |
python | tensorflow__tensorflow | tensorflow/python/framework/extension_type.py | {
"start": 23520,
"end": 29674
} | class ____(type_spec.TypeSpecBatchEncoder):
"""Class used to encode and decode extension type values for batching.
In order to be batched and unbatched by APIs such as `tf.data.Dataset`,
`tf.keras`, and `tf.map_fn`, extension type values must be encoded as a list
of `tf.Tensor`s, where stacking, unstacking, or concatenating these encoded
tensors and then decoding the result must be equivalent to stacking,
unstacking, or concatenating the original values. `ExtensionTypeBatchEncoder`s
are responsible for implementing this encoding.
The default `ExtensionTypeBatchEncoder` that is used by
`BatchableExtensionType` assumes that extension type values can be stacked,
unstacked, or concatenated by simply stacking, unstacking, or concatenating
every nested `Tensor`, `ExtensionType`, `CompositeTensor`, and `TensorShape`
field.
Extension types where this is not the case will need to override
`__batch_encoder__` with a custom encoder that overrides the `batch`,
`unbatch`, `encode`, and `decode` methods. E.g.:
>>> class CustomBatchEncoder(ExtensionTypeBatchEncoder):
... pass # Override batch(), unbatch(), encode(), and decode().
>>> class CustomType(BatchableExtensionType):
... x: tf.Tensor
... y: tf.Tensor
... shape: tf.TensorShape
... __batch_encoder__ = CustomBatchEncoder()
For example, `tf.RaggedTensor` and `tf.SparseTensor` both use custom batch
encodings which define ops to "box" and "unbox" individual values into
`tf.variant` tensors.
"""
def batch(self, spec, batch_size):
"""Returns the TypeSpec representing a batch of values described by `spec`.
The default definition returns a `TypeSpec` that is equal to `spec`, except
that an outer axis with size `batch_size` is added to every nested
`TypeSpec` and `TensorShape` field. Subclasses may override this default
definition, when necessary.
Args:
spec: The `TypeSpec` for an individual value.
batch_size: An `int` indicating the number of values that are batched
together, or `None` if the batch size is not known.
Returns:
A `TypeSpec` for a batch of values.
"""
def batch_field(f):
if isinstance(f, type_spec.BatchableTypeSpec):
return f.__batch_encoder__.batch(f, batch_size)
elif isinstance(f, tensor_shape.TensorShape):
return [batch_size] + f
else:
return f
fields = tuple(spec.__dict__.items())
batched_fields = nest.map_structure(batch_field, fields)
return _create_object_from_type_and_dict(type(spec), batched_fields)
def unbatch(self, spec):
"""Returns the TypeSpec for a single unbatched element in `spec`.
The default definition returns a `TypeSpec` that is equal to `spec`, except
that the outermost axis is removed from every nested `TypeSpec`, and
`TensorShape` field. Subclasses may override this default definition, when
necessary.
Args:
spec: The `TypeSpec` for a batch of values.
Returns:
A `TypeSpec` for an individual value.
"""
def unbatch_field(f):
if isinstance(f, type_spec.BatchableTypeSpec):
return f.__batch_encoder__.unbatch(f)
elif isinstance(f, tensor_shape.TensorShape):
return f[1:]
else:
return f
fields = tuple(spec.__dict__.items())
unbatched_fields = nest.map_structure(unbatch_field, fields)
return _create_object_from_type_and_dict(type(spec), unbatched_fields)
def encode(self, spec, value, minimum_rank=0):
"""Encodes `value` as a nest of batchable Tensors or CompositeTensors.
The default definition returns a flat tuple of all the `Tensor`s,
`CompositeTensor`s, and `ExtensionType`s from a depth-first traversal of
`value`'s fields. Subclasses may override this default definition, when
necessary.
Args:
spec: The TypeSpec of the value to encode.
value: A value compatible with `spec`.
minimum_rank: The minimum rank for the returned Tensors, CompositeTensors,
and ExtensionType values. This can be used to ensure that the encoded
values can be unbatched this number of times. If `minimum_rank>0`,
then `t.shape[:minimum_rank]` must be compatible for all values `t`
returned by `encode`.
Returns:
A nest (as defined by `tf.nest`) of `tf.Tensor`s, batchable
`tf.CompositeTensor`s, or `tf.ExtensionType`s. Stacking, unstacking, or
concatenating these encoded values and then decoding the result must be
equivalent to stacking, unstacking, or concatenating the original values.
"""
return spec._to_components(value) # pylint: disable=protected-access
def decode(self, spec, encoded_value):
"""Decodes `value` from a batchable tensor encoding.
See `encode` for a description of the default encoding. Subclasses may
override this default definition, when necessary.
Args:
spec: The TypeSpec for the result value. If encoded values with spec `s`
were batched, then `spec` should be `s.batch(batch_size)`; or if encoded
values with spec `s` were unbatched, then `spec` should be
`s.unbatch()`.
encoded_value: A nest of values returned by `encode`; or a nest of values
that was formed by stacking, unstacking, or concatenating the
corresponding elements of values returned by `encode`.
Returns:
A value compatible with `type_spec`.
"""
return spec._from_components(encoded_value) # pylint: disable=protected-access
def encoding_specs(self, spec):
"""Returns a list of `TensorSpec`(s) describing the encoding for `spec`.
See `encode` for a description of the default encoding. Subclasses may
override this default definition, when necessary.
Args:
spec: The TypeSpec whose encoding should be described.
Returns:
A nest (as defined by `tf.nest) of `tf.TypeSpec`, describing the values
that are returned by `self.encode(spec, ...)`. All TypeSpecs in this
nest must be batchable.
"""
return spec._component_specs # pylint: disable=protected-access
| ExtensionTypeBatchEncoder |
python | getsentry__sentry | src/sentry/api/serializers/models/artifactbundle.py | {
"start": 2084,
"end": 3800
} | class ____(Serializer):
def __init__(self, archive, *args, **kwargs):
Serializer.__init__(self, *args, **kwargs)
self.archive = archive
def get_attrs(self, item_list, user, **kwargs):
return {item: self._compute_attrs(item) for item in item_list}
def _compute_attrs(self, item):
file_path = item.file_path
info = item.info
headers = self.archive.normalize_headers(info.get("headers", {}))
debug_id = self.archive.normalize_debug_id(headers.get("debug-id"))
sourcemap = headers.get("sourcemap")
return {
"file_type": SourceFileType.from_lowercase_key(info.get("type")),
"file_path": file_path,
"file_url": self.archive.get_file_url_by_file_path(file_path),
"file_info": self.archive.get_file_info(file_path),
"debug_id": debug_id,
"sourcemap": sourcemap,
}
def serialize(self, obj, attrs, user, **kwargs):
return {
"id": base64.urlsafe_b64encode(attrs["file_path"].encode()).decode(),
# In case the file type string was invalid, we return the sentinel value INVALID_SOURCE_FILE_TYPE.
"fileType": (
attrs["file_type"].value
if attrs["file_type"] is not None
else INVALID_SOURCE_FILE_TYPE
),
# We decided to return the file url as file path for better searchability.
"filePath": attrs["file_url"],
"fileSize": attrs["file_info"].file_size if attrs["file_info"] is not None else None,
"debugId": attrs["debug_id"],
"sourcemap": attrs["sourcemap"],
}
| ArtifactBundleFilesSerializer |
python | keras-team__keras | keras/src/trainers/trainer_test.py | {
"start": 9478,
"end": 10094
} | class ____(Callback):
def __init__(self):
super().__init__()
self.begin_count = 0
self.end_count = 0
self.epoch_begin_count = 0
self.epoch_end_count = 0
self.batch_loss_history = []
def on_epoch_begin(self, epoch, logs=None):
self.epoch_begin_count += 1
def on_epoch_end(self, epoch, logs=None):
self.epoch_end_count += 1
def on_batch_begin(self, batch, logs=None):
self.begin_count += 1
def on_batch_end(self, batch, logs=None):
self.end_count += 1
self.batch_loss_history.append(logs["mse"])
| StepObserver |
python | getsentry__sentry | tests/sentry/api/serializers/test_project.py | {
"start": 37509,
"end": 39319
} | class ____(TestCase):
@cached_property
def project(self):
return self.create_project(teams=[self.team], organization=self.organization)
@cached_property
def other_project(self):
return self.create_project(teams=[self.team], organization=self.organization)
def test_single_no_release(self) -> None:
assert bulk_fetch_project_latest_releases([self.project]) == []
def test_single_release(self) -> None:
release = self.create_release(
self.project, date_added=timezone.now() - timedelta(minutes=5)
)
assert bulk_fetch_project_latest_releases([self.project]) == [release]
newer_release = self.create_release(self.project)
assert bulk_fetch_project_latest_releases([self.project]) == [newer_release]
def test_multi_no_release(self) -> None:
assert bulk_fetch_project_latest_releases([self.project, self.other_project]) == []
def test_multi_mixed_releases(self) -> None:
release = self.create_release(self.project)
assert set(bulk_fetch_project_latest_releases([self.project, self.other_project])) == {
release
}
def test_multi_releases(self) -> None:
release = self.create_release(
self.project, date_added=timezone.now() - timedelta(minutes=5)
)
other_project_release = self.create_release(self.other_project)
assert set(bulk_fetch_project_latest_releases([self.project, self.other_project])) == {
release,
other_project_release,
}
release_2 = self.create_release(self.project)
assert set(bulk_fetch_project_latest_releases([self.project, self.other_project])) == {
release_2,
other_project_release,
}
| BulkFetchProjectLatestReleases |
python | great-expectations__great_expectations | great_expectations/data_context/data_context/serializable_data_context.py | {
"start": 1068,
"end": 18183
} | class ____(AbstractDataContext):
UNCOMMITTED_DIRECTORIES: ClassVar[list[str]] = ["data_docs", "validations"]
GX_UNCOMMITTED_DIR: ClassVar[str] = "uncommitted"
GITIGNORE: ClassVar[str] = ".gitignore"
GX_CONFIG_VARIABLES: ClassVar[str] = "config_variables.yml"
BASE_DIRECTORIES: ClassVar[list[str]] = [
DataContextConfigDefaults.CHECKPOINTS_BASE_DIRECTORY.value,
DataContextConfigDefaults.EXPECTATIONS_BASE_DIRECTORY.value,
DataContextConfigDefaults.PLUGINS_BASE_DIRECTORY.value,
DataContextConfigDefaults.VALIDATION_DEFINITIONS_BASE_DIRECTORY.value,
GX_UNCOMMITTED_DIR,
]
GX_DIR: ClassVar[str] = "gx"
_LEGACY_GX_DIR: ClassVar[str] = "great_expectations"
GX_YML: ClassVar[str] = "great_expectations.yml"
GX_EDIT_NOTEBOOK_DIR: ClassVar[str] = GX_UNCOMMITTED_DIR
def __init__(
self,
context_root_dir: PathStr,
runtime_environment: Optional[dict] = None,
user_agent_str: Optional[str] = None,
) -> None:
if isinstance(context_root_dir, pathlib.Path):
# TODO: (kilo59) 122022 should be saving and passing around `pathlib.Path` not str
context_root_dir = str(context_root_dir)
self._context_root_directory = context_root_dir
super().__init__(
runtime_environment=runtime_environment,
user_agent_str=user_agent_str,
)
def _init_datasource_store(self): # type: ignore[explicit-override] # FIXME
raise NotImplementedError # Required by parent ABC but this class is never instantiated
def _init_variables(self): # type: ignore[explicit-override] # FIXME
raise NotImplementedError # Required by parent ABC but this class is never instantiated
@property
@override
def root_directory(self) -> str:
"""The root directory for configuration objects in the data context; the location in which
``great_expectations.yml`` is located.
"""
return self._context_root_directory
@abc.abstractmethod
@override
def _save_project_config(self) -> None:
"""
See parent 'AbstractDataContext._save_project_config()` for more information.
Explicitly override base class implementation to retain legacy behavior.
"""
raise NotImplementedError
@classmethod
def _resolve_context_root_dir_and_project_root_dir(
cls, context_root_dir: PathStr | None, project_root_dir: PathStr | None
) -> PathStr | None:
if project_root_dir and context_root_dir:
raise TypeError( # noqa: TRY003 # FIXME CoP
"'project_root_dir' and 'context_root_dir' are conflicting args; please only provide one" # noqa: E501 # FIXME CoP
)
if project_root_dir:
project_root_dir = pathlib.Path(project_root_dir).absolute()
context_root_dir = pathlib.Path(project_root_dir) / cls.GX_DIR
elif context_root_dir:
context_root_dir = pathlib.Path(context_root_dir).absolute()
return context_root_dir
@classmethod
def _create(
cls,
project_root_dir: Optional[PathStr] = None,
runtime_environment: Optional[dict] = None,
) -> SerializableDataContext:
"""
Build a new gx directory and DataContext object in the provided project_root_dir.
`create` will create a new "gx" directory in the provided folder, provided one does not
already exist. Then, it will initialize a new DataContext in that folder and write the resulting config.
Args:
project_root_dir: path to the root directory in which to create a new gx directory
usage_statistics_enabled: boolean directive specifying whether or not to gather usage statistics
runtime_environment: a dictionary of config variables that override both those set in
config_variables.yml and the environment
Returns:
DataContext
""" # noqa: E501 # FIXME CoP
gx_dir = cls._scaffold(
project_root_dir=project_root_dir,
)
return cls(context_root_dir=gx_dir, runtime_environment=runtime_environment)
@classmethod
def _scaffold(
cls,
project_root_dir: Optional[PathStr] = None,
context_root_dir_name: Optional[str] = None,
) -> pathlib.Path:
if not project_root_dir:
project_root_dir = pathlib.Path.cwd()
else:
project_root_dir = pathlib.Path(project_root_dir)
if context_root_dir_name is None:
context_root_dir_name = cls.GX_DIR
gx_dir = project_root_dir / context_root_dir_name
gx_dir.mkdir(parents=True, exist_ok=True)
cls._scaffold_directories(gx_dir)
if pathlib.Path.is_file(gx_dir.joinpath(cls.GX_YML)):
message = f"""Warning. An existing `{cls.GX_YML}` was found here: {gx_dir}.
- No action was taken."""
warnings.warn(message)
else:
cls._write_project_template_to_disk(gx_dir)
uncommitted_dir = gx_dir / cls.GX_UNCOMMITTED_DIR
if pathlib.Path.is_file(uncommitted_dir.joinpath(cls.GX_CONFIG_VARIABLES)):
message = f"""Warning. An existing `config_variables.yml` was found here:
{uncommitted_dir}. - No action was taken."""
warnings.warn(message)
else:
cls._write_config_variables_template_to_disk(uncommitted_dir)
return gx_dir
@classmethod
def all_uncommitted_directories_exist(cls, gx_dir: PathStr) -> bool:
"""Check if all uncommitted directories exist."""
gx_dir = pathlib.Path(gx_dir)
uncommitted_dir = gx_dir / cls.GX_UNCOMMITTED_DIR
for directory in cls.UNCOMMITTED_DIRECTORIES:
if not pathlib.Path.is_dir(uncommitted_dir.joinpath(directory)):
return False
return True
@classmethod
def config_variables_yml_exist(cls, gx_dir: PathStr) -> bool:
"""Check if all config_variables.yml exists."""
gx_dir = pathlib.Path(gx_dir)
path_to_yml = gx_dir / cls.GX_YML
# TODO this is so brittle and gross
with path_to_yml.open() as f:
config = yaml.load(f)
config_var_path = config.get("config_variables_file_path")
if not config_var_path:
return False
config_var_path = pathlib.Path(config_var_path)
config_var_path = gx_dir / config_var_path
return config_var_path.is_file()
@classmethod
def _write_config_variables_template_to_disk(cls, uncommitted_dir: PathStr) -> None:
uncommitted_dir = pathlib.Path(uncommitted_dir)
uncommitted_dir.mkdir(exist_ok=True)
config_var_file = uncommitted_dir / cls.GX_CONFIG_VARIABLES
with config_var_file.open("w") as template:
template.write(CONFIG_VARIABLES_TEMPLATE)
@classmethod
def _write_project_template_to_disk(cls, gx_dir: PathStr) -> None:
gx_dir = pathlib.Path(gx_dir)
file_path = gx_dir / cls.GX_YML
with file_path.open("w") as template:
template.write(PROJECT_TEMPLATE_USAGE_STATISTICS_ENABLED)
@classmethod
def _scaffold_directories(cls, base_dir: pathlib.Path) -> None:
"""Safely create GE directories for a new project."""
base_dir.mkdir(exist_ok=True)
try:
cls._scaffold_gitignore(base_dir)
except Exception as e:
raise gx_exceptions.GitIgnoreScaffoldingError( # noqa: TRY003 # FIXME CoP
f"Could not create .gitignore in {base_dir} because of an error: {e}"
)
for directory in cls.BASE_DIRECTORIES:
if directory == "plugins":
plugins_dir = base_dir / directory
plugins_dir.mkdir(exist_ok=True)
custom_data_docs = plugins_dir / "custom_data_docs"
custom_data_docs.mkdir(exist_ok=True)
views = custom_data_docs / "views"
views.mkdir(exist_ok=True)
renderers = custom_data_docs / "renderers"
renderers.mkdir(exist_ok=True)
styles = custom_data_docs / "styles"
styles.mkdir(exist_ok=True)
cls._scaffold_custom_data_docs(plugins_dir)
else:
non_plugin_dir = base_dir / directory
non_plugin_dir.mkdir(exist_ok=True)
uncommitted_dir = base_dir / cls.GX_UNCOMMITTED_DIR
for new_directory in cls.UNCOMMITTED_DIRECTORIES:
new_directory_path = uncommitted_dir / new_directory
new_directory_path.mkdir(exist_ok=True)
@classmethod
def _scaffold_gitignore(cls, base_dir: PathStr) -> None:
"""Make sure .gitignore exists and contains uncommitted/"""
gitignore = pathlib.Path(base_dir) / cls.GITIGNORE
uncommitted_dir = f"{cls.GX_UNCOMMITTED_DIR}/"
if gitignore.is_file():
contents = gitignore.read_text()
if uncommitted_dir in contents:
return
with gitignore.open("a") as f:
f.write(f"\n{uncommitted_dir}")
@classmethod
def _scaffold_custom_data_docs(cls, plugins_dir: pathlib.Path) -> None:
"""Copy custom data docs templates"""
styles_template = file_relative_path(
__file__,
"../../render/view/static/styles/data_docs_custom_styles_template.css",
)
styles_destination_path = (
plugins_dir / "custom_data_docs" / "styles" / "data_docs_custom_styles.css"
)
shutil.copyfile(styles_template, styles_destination_path)
@classmethod
def find_context_root_dir(cls) -> str:
result = None
yml_path = None
gx_home_environment = os.getenv("GX_HOME")
if gx_home_environment:
gx_home_environment = os.path.expanduser( # noqa: PTH111 # FIXME CoP
gx_home_environment
)
if os.path.isdir( # noqa: PTH112 # FIXME CoP
gx_home_environment
) and os.path.isfile( # noqa: PTH113 # FIXME CoP
os.path.join(gx_home_environment, cls.GX_YML) # noqa: PTH118 # FIXME CoP
):
result = gx_home_environment
else:
yml_path = cls._find_context_yml_file()
if yml_path:
result = os.path.dirname(yml_path) # noqa: PTH120 # FIXME CoP
if result is None:
raise gx_exceptions.ConfigNotFoundError()
logger.debug(f"Using project config: {yml_path}")
return result
@classmethod
def get_ge_config_version(cls, context_root_dir: Optional[PathStr] = None) -> Optional[float]:
yml_path = cls._find_context_yml_file(search_start_dir=context_root_dir)
if yml_path is None:
return None
with open(yml_path) as f:
config_commented_map_from_yaml = yaml.load(f)
config_version = config_commented_map_from_yaml.get("config_version")
return float(config_version) if config_version else None
@classmethod
def set_ge_config_version(
cls,
config_version: Union[int, float], # noqa: PYI041 # FIXME CoP
context_root_dir: Optional[str] = None,
validate_config_version: bool = True,
) -> bool:
if not isinstance(config_version, (int, float)):
raise gx_exceptions.UnsupportedConfigVersionError( # noqa: TRY003 # FIXME CoP
"The argument `config_version` must be a number.",
)
if validate_config_version:
if config_version < MINIMUM_SUPPORTED_CONFIG_VERSION:
raise gx_exceptions.UnsupportedConfigVersionError( # noqa: TRY003 # FIXME CoP
f"""Invalid config version ({config_version})\n
The version number must be at least {MINIMUM_SUPPORTED_CONFIG_VERSION}""" # noqa: E501 # FIXME CoP
)
elif config_version > CURRENT_GX_CONFIG_VERSION:
raise gx_exceptions.UnsupportedConfigVersionError( # noqa: TRY003 # FIXME CoP
f"""Invalid config version ({config_version}).\n
The maximum valid version is {CURRENT_GX_CONFIG_VERSION}.""" # noqa: E501 # FIXME CoP
)
yml_path = cls._find_context_yml_file(search_start_dir=context_root_dir)
if yml_path is None:
return False
with open(yml_path) as f:
config_commented_map_from_yaml = yaml.load(f)
config_commented_map_from_yaml["config_version"] = float(config_version)
with open(yml_path, "w") as f:
yaml.dump(config_commented_map_from_yaml, f)
return True
@classmethod
def _find_context_yml_file(cls, search_start_dir: Optional[PathStr] = None) -> str | None:
"""Search for the yml file starting here and moving upward."""
if search_start_dir is None:
search_start_dir = pathlib.Path.cwd()
else:
search_start_dir = pathlib.Path(search_start_dir)
# Ensure backwards compatibility if user is using "great_expectations/" over "gx/"
# Starting v0.17.13, "gx/" will be the default
return cls._search_gx_dir_for_context_yml(
search_start_dir=search_start_dir, gx_dir=cls.GX_DIR
) or cls._search_gx_dir_for_context_yml(
search_start_dir=search_start_dir, gx_dir=cls._LEGACY_GX_DIR
)
@classmethod
def _search_gx_dir_for_context_yml(
cls, search_start_dir: pathlib.Path, gx_dir: str
) -> Optional[str]:
yml_path: str | None = None
for i in range(4):
logger.debug(f"Searching for config file {search_start_dir} ({i} layer deep)")
potential_ge_dir = search_start_dir / gx_dir
if potential_ge_dir.is_dir():
potential_yml = potential_ge_dir / cls.GX_YML
if potential_yml.is_file():
yml_path = str(potential_yml)
logger.debug(f"Found config file at {yml_path}")
break
# move up one directory
search_start_dir = search_start_dir.parent
return yml_path
@classmethod
def does_config_exist_on_disk(cls, context_root_dir: PathStr) -> bool:
"""Return True if the great_expectations.yml exists on disk."""
context_root_dir = pathlib.Path(context_root_dir)
config = context_root_dir / cls.GX_YML
return config.is_file()
@classmethod
def is_project_initialized(cls, ge_dir: PathStr) -> bool:
"""
Return True if the project is initialized.
To be considered initialized, all of the following must be true:
- the project must be scaffolded (see cls.is_project_scaffolded)
- the project has at least one datasource
- the project has at least one suite
"""
return (
cls.is_project_scaffolded(ge_dir)
and cls._does_context_have_at_least_one_datasource(ge_dir)
and cls._does_context_have_at_least_one_suite(ge_dir)
)
@classmethod
def is_project_scaffolded(cls, ge_dir: PathStr) -> bool:
"""
Return True if the project is scaffolded (required filesystem changes have occurred).
To be considered scaffolded, all of the following must be true:
- all project directories exist (including uncommitted directories)
- a valid great_expectations.yml is on disk
- a config_variables.yml is on disk
"""
return (
cls.does_config_exist_on_disk(ge_dir)
and cls.all_uncommitted_directories_exist(ge_dir)
and cls.config_variables_yml_exist(ge_dir)
)
@classmethod
def _does_project_have_a_datasource_in_config_file(cls, ge_dir: PathStr) -> bool:
if not cls.does_config_exist_on_disk(ge_dir):
return False
return cls._does_context_have_at_least_one_datasource(ge_dir)
@classmethod
def _does_context_have_at_least_one_datasource(cls, ge_dir: PathStr) -> bool:
context = cls._attempt_context_instantiation(ge_dir)
if not context:
return False
return len(context.list_datasources()) >= 1
@classmethod
def _does_context_have_at_least_one_suite(cls, ge_dir: PathStr) -> bool:
context = cls._attempt_context_instantiation(ge_dir)
if not context:
return False
return bool(context.suites.all())
@classmethod
def _attempt_context_instantiation(cls, ge_dir: PathStr) -> Optional[SerializableDataContext]:
try:
context = cls(context_root_dir=ge_dir)
return context
except (
gx_exceptions.DataContextError,
gx_exceptions.InvalidDataContextConfigError,
) as e:
logger.debug(e)
return None
| SerializableDataContext |
python | ray-project__ray | python/ray/llm/_internal/serve/engines/vllm/kv_transfer/lmcache.py | {
"start": 425,
"end": 2020
} | class ____(BaseConnectorBackend):
KV_CONNECTOR_EXTRA_CONFIG_FIELD_NAME = "kv_connector_extra_config"
LMCACHE_RPC_PORT_FIELD_NAME = "lmcache_rpc_port"
DEFAULT_LMCACHE_RPC_PORT_NAME = "lmcache_rpc_port"
def setup(self) -> None:
"""Initialize the LMCache connector backend.
Creates a unique LMCache RPC port name across replicas by appending
a random suffix to the base port name.
Raises:
ImportError: If LMCache is not installed.
"""
_check_lmcache_installed()
if (
LMCacheConnectorV1Backend.KV_CONNECTOR_EXTRA_CONFIG_FIELD_NAME
not in self.kv_transfer_config
):
return
kv_connector_extra_config = self.kv_transfer_config[
LMCacheConnectorV1Backend.KV_CONNECTOR_EXTRA_CONFIG_FIELD_NAME
]
base_value = kv_connector_extra_config.get(
LMCacheConnectorV1Backend.LMCACHE_RPC_PORT_FIELD_NAME,
LMCacheConnectorV1Backend.DEFAULT_LMCACHE_RPC_PORT_NAME,
)
# Append random suffix for uniqueness
lmcache_rpc_port_value = str(base_value) + self._get_unique_suffix()
if (
LMCacheConnectorV1Backend.LMCACHE_RPC_PORT_FIELD_NAME
in kv_connector_extra_config
):
logger.info(
f"Setting unique lmcache_rpc_port={lmcache_rpc_port_value} for current replica."
)
kv_connector_extra_config[
LMCacheConnectorV1Backend.LMCACHE_RPC_PORT_FIELD_NAME
] = lmcache_rpc_port_value
| LMCacheConnectorV1Backend |
python | ansible__ansible | lib/ansible/galaxy/dependency_resolution/resolvers.py | {
"start": 364,
"end": 623
} | class ____(Resolver):
"""A dependency resolver for Ansible Collections.
This is a proxy class allowing us to abstract away importing resolvelib
outside of the `ansible.galaxy.dependency_resolution` Python package.
"""
| CollectionDependencyResolver |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/nn_ops/rnn_cell_test.py | {
"start": 82987,
"end": 95458
} | class ____(test.TestCase):
def setUp(self):
self._seed = 23489
np.random.seed(self._seed)
@test_util.run_v1_only("b/124229375")
def _testRawRNN(self, max_time):
with self.session(graph=ops.Graph()) as sess:
batch_size = 16
input_depth = 4
num_units = 3
inputs = array_ops.placeholder(
shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
sequence_length = array_ops.placeholder(
shape=(batch_size,), dtype=dtypes.int32)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
next_state = cell_state # copy state through
elements_finished = (time_ >= sequence_length)
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = cond.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
reuse_scope = variable_scope.get_variable_scope()
outputs_ta, final_state, _ = rnn.raw_rnn(cell, loop_fn, scope=reuse_scope)
outputs = outputs_ta.stack()
reuse_scope.reuse_variables()
outputs_dynamic_rnn, final_state_dynamic_rnn = rnn.dynamic_rnn(
cell,
inputs,
time_major=True,
dtype=dtypes.float32,
sequence_length=sequence_length,
scope=reuse_scope)
variables = variables_lib.trainable_variables()
gradients = gradients_impl.gradients([outputs, final_state],
[inputs] + variables)
gradients_dynamic_rnn = gradients_impl.gradients(
[outputs_dynamic_rnn, final_state_dynamic_rnn], [inputs] + variables)
variables_lib.global_variables_initializer().run()
rand_input = np.random.randn(max_time, batch_size, input_depth)
if max_time == 0:
rand_seq_len = np.zeros(batch_size)
else:
rand_seq_len = np.random.randint(max_time, size=batch_size)
# To ensure same output lengths for dynamic_rnn and raw_rnn
rand_seq_len[0] = max_time
(outputs_val, outputs_dynamic_rnn_val, final_state_val,
final_state_dynamic_rnn_val) = sess.run(
[outputs, outputs_dynamic_rnn, final_state, final_state_dynamic_rnn],
feed_dict={
inputs: rand_input,
sequence_length: rand_seq_len
})
self.assertAllClose(outputs_dynamic_rnn_val, outputs_val)
self.assertAllClose(final_state_dynamic_rnn_val, final_state_val)
# NOTE: Because with 0 time steps, raw_rnn does not have shape
# information about the input, it is impossible to perform
# gradients comparisons as the gradients eval will fail. So
# this case skips the gradients test.
if max_time > 0:
self.assertEqual(len(gradients), len(gradients_dynamic_rnn))
gradients_val = sess.run(
gradients,
feed_dict={
inputs: rand_input,
sequence_length: rand_seq_len
})
gradients_dynamic_rnn_val = sess.run(
gradients_dynamic_rnn,
feed_dict={
inputs: rand_input,
sequence_length: rand_seq_len
})
self.assertEqual(len(gradients_val), len(gradients_dynamic_rnn_val))
input_gradients_val = gradients_val[0]
input_gradients_dynamic_rnn_val = gradients_dynamic_rnn_val[0]
self.assertAllClose(input_gradients_val,
input_gradients_dynamic_rnn_val)
for i in range(1, len(gradients_val)):
self.assertAllClose(gradients_dynamic_rnn_val[i], gradients_val[i])
@test_util.run_v1_only("b/124229375")
def testRawRNNZeroLength(self):
# NOTE: Because with 0 time steps, raw_rnn does not have shape
# information about the input, it is impossible to perform
# gradients comparisons as the gradients eval will fail. So this
# case skips the gradients test.
self._testRawRNN(max_time=0)
def testRawRNN(self):
self._testRawRNN(max_time=10)
@test_util.run_v1_only("b/124229375")
def testLoopState(self):
with self.session(graph=ops.Graph()):
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
loop_state = constant_op.constant([0])
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
loop_state = array_ops_stack.stack(
[array_ops.squeeze(loop_state) + 1])
next_state = cell_state
emit_output = cell_output # == None for time == 0
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = cond.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output,
loop_state)
r = rnn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
self.assertEqual([10], self.evaluate(loop_state))
@test_util.run_v1_only("b/124229375")
def testLoopStateWithTensorArray(self):
with self.session(graph=ops.Graph()):
max_time = 4
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, loop_state):
if cell_output is None:
loop_state = tensor_array_ops.TensorArray(
dynamic_size=True,
size=0,
dtype=dtypes.int32,
clear_after_read=False)
loop_state = loop_state.write(0, 1)
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
loop_state = loop_state.write(time_,
loop_state.read(time_ - 1) + time_)
next_state = cell_state
emit_output = cell_output # == None for time == 0
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = cond.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output,
loop_state)
r = rnn.raw_rnn(cell, loop_fn)
loop_state = r[-1]
loop_state = loop_state.stack()
self.assertAllEqual([1, 2, 2 + 2, 4 + 3, 7 + 4], loop_state)
@test_util.run_v1_only("b/124229375")
def testEmitDifferentStructureThanCellOutput(self):
with self.session(graph=ops.Graph()) as sess:
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
inputs = np.random.randn(max_time, batch_size, input_depth)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
# Verify emit shapes may be unknown by feeding a placeholder that
# determines an emit shape.
unknown_dim = array_ops.placeholder(dtype=dtypes.int32)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, _):
if cell_output is None:
emit_output = (array_ops.zeros([2, 3], dtype=dtypes.int32),
array_ops.zeros([unknown_dim], dtype=dtypes.int64))
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
emit_output = (array_ops.ones([batch_size, 2, 3], dtype=dtypes.int32),
array_ops.ones(
[batch_size, unknown_dim], dtype=dtypes.int64))
next_state = cell_state
elements_finished = array_ops.tile([time_ >= max_time], [batch_size])
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = cond.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
r = rnn.raw_rnn(cell, loop_fn)
output_ta = r[0]
self.assertEqual(2, len(output_ta))
self.assertEqual([dtypes.int32, dtypes.int64],
[ta.dtype for ta in output_ta])
output = [ta.stack() for ta in output_ta]
output_vals = sess.run(output, feed_dict={unknown_dim: 1})
self.assertAllEqual(
np.ones((max_time, batch_size, 2, 3), np.int32), output_vals[0])
self.assertAllEqual(
np.ones((max_time, batch_size, 1), np.int64), output_vals[1])
def _testScope(self, factory, prefix="prefix", use_outer_scope=True):
with self.session(graph=ops.Graph()):
if use_outer_scope:
with variable_scope.variable_scope(prefix) as scope:
factory(scope)
else:
factory(prefix)
variables_lib.global_variables_initializer()
# check that all the variables names starts
# with the proper scope.
all_vars = variables_lib.global_variables()
prefix = prefix or "rnn"
scope_vars = [v for v in all_vars if v.name.startswith(prefix + "/")]
tf_logging.info("RNN with scope: %s (%s)" %
(prefix, "scope" if use_outer_scope else "str"))
for v in scope_vars:
tf_logging.info(v.name)
self.assertEqual(len(scope_vars), len(all_vars))
@test_util.run_v1_only("b/124229375")
def testRawRNNScope(self):
max_time = 10
batch_size = 16
input_depth = 4
num_units = 3
def factory(scope):
inputs = array_ops.placeholder(
shape=(max_time, batch_size, input_depth), dtype=dtypes.float32)
sequence_length = array_ops.placeholder(
shape=(batch_size,), dtype=dtypes.int32)
inputs_ta = tensor_array_ops.TensorArray(
dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
inputs_ta = inputs_ta.unstack(inputs)
cell = rnn_cell.LSTMCell(num_units, state_is_tuple=True)
def loop_fn(time_, cell_output, cell_state, unused_loop_state):
emit_output = cell_output # == None for time == 0
if cell_output is None: # time == 0
next_state = cell.zero_state(batch_size, dtypes.float32)
else:
next_state = cell_state
elements_finished = (time_ >= sequence_length)
finished = math_ops.reduce_all(elements_finished)
# For the very final iteration, we must emit a dummy input
next_input = cond.cond(
finished,
lambda: array_ops.zeros([batch_size, input_depth], dtype=dtypes.float32),
lambda: inputs_ta.read(time_))
return (elements_finished, next_input, next_state, emit_output, None)
return rnn.raw_rnn(cell, loop_fn, scope=scope)
self._testScope(factory, use_outer_scope=True)
self._testScope(factory, use_outer_scope=False)
self._testScope(factory, prefix=None, use_outer_scope=False)
| RawRNNTest |
python | walkccc__LeetCode | solutions/1617. Count Subtrees With Max Distance Between Cities/1617.py | {
"start": 0,
"end": 1340
} | class ____:
def countSubgraphsForEachDiameter(
self,
n: int,
edges: list[list[int]],
) -> list[int]:
maxMask = 1 << n
dist = self._floydWarshall(n, edges)
ans = [0] * (n - 1)
# mask := the subset of the cities
for mask in range(maxMask):
maxDist = self._getMaxDist(mask, dist, n)
if maxDist > 0:
ans[maxDist - 1] += 1
return ans
def _floydWarshall(self, n: int, edges: list[list[int]]) -> list[list[int]]:
dist = [[n] * n for _ in range(n)]
for i in range(n):
dist[i][i] = 0
for u, v in edges:
dist[u - 1][v - 1] = 1
dist[v - 1][u - 1] = 1
for k in range(n):
for i in range(n):
for j in range(n):
dist[i][j] = min(dist[i][j], dist[i][k] + dist[k][j])
return dist
def _getMaxDist(self, mask: int, dist: list[list[int]], n: int) -> int:
maxDist = 0
edgeCount = 0
cityCount = 0
for u in range(n):
if (mask >> u) & 1 == 0: # u is not in the subset.
continue
cityCount += 1
for v in range(u + 1, n):
if (mask >> v) & 1 == 0: # v is not in the subset.
continue
if dist[u][v] == 1: # u and v are connected.
edgeCount += 1
maxDist = max(maxDist, dist[u][v])
return maxDist if edgeCount == cityCount - 1 else 0
| Solution |
python | huggingface__transformers | src/transformers/models/sam2/modular_sam2.py | {
"start": 39868,
"end": 48949
} | class ____(SamMaskDecoder):
def __init__(self, config: Sam2MaskDecoderConfig):
super().__init__(config)
del self.iou_prediction_head
self.iou_prediction_head = Sam2FeedForward(
self.hidden_size,
config.iou_head_hidden_dim,
self.num_mask_tokens,
config.iou_head_depth,
sigmoid_output=True,
)
self.conv_s0 = nn.Conv2d(config.hidden_size, config.hidden_size // 8, kernel_size=1, stride=1)
self.conv_s1 = nn.Conv2d(config.hidden_size, config.hidden_size // 4, kernel_size=1, stride=1)
self.obj_score_token = nn.Embedding(1, self.hidden_size)
self.pred_obj_score_head = Sam2FeedForward(self.hidden_size, self.hidden_size, 1, 3)
self.dynamic_multimask_via_stability = config.dynamic_multimask_via_stability
self.dynamic_multimask_stability_delta = config.dynamic_multimask_stability_delta
self.dynamic_multimask_stability_thresh = config.dynamic_multimask_stability_thresh
def _get_stability_scores(self, mask_logits):
"""
Compute stability scores of the mask logits based on the IoU between upper and
lower thresholds.
"""
mask_logits = mask_logits.flatten(-2)
stability_delta = self.dynamic_multimask_stability_delta
area_i = torch.sum(mask_logits > stability_delta, dim=-1).float()
area_u = torch.sum(mask_logits > -stability_delta, dim=-1).float()
stability_scores = torch.where(area_u > 0, area_i / area_u, 1.0)
return stability_scores
def _dynamic_multimask_via_stability(self, all_mask_logits, all_iou_scores):
"""
When outputting a single mask, if the stability score from the current single-mask
output (based on output token 0) falls below a threshold, we instead select from
multi-mask outputs (based on output token 1~3) the mask with the highest predicted
IoU score. This is intended to ensure a valid mask for both clicking and tracking.
"""
# The best mask from multimask output tokens (1~3)
multimask_logits = all_mask_logits[:, :, 1:, :, :]
multimask_iou_scores = all_iou_scores[:, :, 1:]
best_scores_inds = torch.argmax(multimask_iou_scores, dim=-1) # [B, P]
best_scores_inds_expanded = best_scores_inds.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1)
best_scores_inds_expanded = best_scores_inds_expanded.expand(
-1, -1, 1, multimask_logits.size(-2), multimask_logits.size(-1)
)
best_multimask_logits = torch.gather(multimask_logits, 2, best_scores_inds_expanded) # [B, P, 1, H, W]
best_multimask_iou_scores = torch.gather(multimask_iou_scores, 2, best_scores_inds.unsqueeze(-1)) # [B, P, 1]
# The mask from singlemask output token 0 and its stability score
singlemask_logits = all_mask_logits[:, :, 0:1, :, :]
singlemask_iou_scores = all_iou_scores[:, :, 0:1]
stability_scores = self._get_stability_scores(singlemask_logits)
is_stable = stability_scores >= self.dynamic_multimask_stability_thresh
# Dynamically fall back to best multimask output upon low stability scores.
mask_logits_out = torch.where(
is_stable[..., None, None].expand_as(singlemask_logits),
singlemask_logits,
best_multimask_logits,
)
iou_scores_out = torch.where(
is_stable.expand_as(singlemask_iou_scores),
singlemask_iou_scores,
best_multimask_iou_scores,
)
return mask_logits_out, iou_scores_out
def forward(
self,
image_embeddings: torch.Tensor,
image_positional_embeddings: torch.Tensor,
sparse_prompt_embeddings: torch.Tensor,
dense_prompt_embeddings: torch.Tensor,
multimask_output: bool,
high_resolution_features: list[torch.Tensor],
attention_similarity: Optional[torch.Tensor] = None,
target_embedding: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Predict masks given image and prompt embeddings.
Args:
image_embeddings (`torch.Tensor`):
The embeddings from the image encoder.
image_positional_embeddings (`torch.Tensor`):
Positional encoding with the shape of image_embeddings.
sparse_prompt_embeddings (`torch.Tensor`):
The embeddings of the points and boxes.
dense_prompt_embeddings (`torch.Tensor`):
The embeddings of the mask inputs.
multimask_output (`bool`):
Whether to return multiple masks or a single mask.
high_resolution_features (`list[torch.Tensor]`, *optional*):
The high-resolution features from the vision encoder.
attention_similarity (`torch.Tensor`, *optional*):
The attention similarity tensor.
target_embedding (`torch.Tensor`, *optional*):
The target embedding.
"""
batch_size, num_channels, height, width = image_embeddings.shape
point_batch_size = sparse_prompt_embeddings.shape[1]
# Concatenate output tokens
output_tokens = torch.cat(
[
self.obj_score_token.weight,
self.iou_token.weight,
self.mask_tokens.weight,
],
dim=0,
)
output_tokens = output_tokens.repeat(batch_size, point_batch_size, 1, 1)
if sparse_prompt_embeddings.shape[0] != 0:
tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=2)
else:
tokens = output_tokens
point_embeddings = tokens.to(self.iou_token.weight.dtype)
# Expand per-image data in batch direction to be per-mask
image_embeddings = image_embeddings + dense_prompt_embeddings
image_embeddings = image_embeddings.repeat_interleave(point_batch_size, dim=0)
image_positional_embeddings = image_positional_embeddings.repeat_interleave(point_batch_size, 0)
# Run the transformer
point_embeddings, image_embeddings = self.transformer(
point_embeddings=point_embeddings,
image_embeddings=image_embeddings,
image_positional_embeddings=image_positional_embeddings,
attention_similarity=attention_similarity,
target_embedding=target_embedding,
**kwargs,
)
iou_token_out = point_embeddings[:, :, 1, :]
mask_tokens_out = point_embeddings[:, :, 2 : (2 + self.num_mask_tokens), :]
# Upscale mask embeddings and predict masks using the mask tokens
image_embeddings = image_embeddings.transpose(2, 3).view(
batch_size * point_batch_size, num_channels, height, width
)
feat_s0, feat_s1 = high_resolution_features
feat_s0 = feat_s0.repeat_interleave(point_batch_size, dim=0)
feat_s1 = feat_s1.repeat_interleave(point_batch_size, dim=0)
upscaled_embedding = self.upscale_conv1(image_embeddings) + feat_s1
upscaled_embedding = self.activation(self.upscale_layer_norm(upscaled_embedding))
upscaled_embedding = self.activation(self.upscale_conv2(upscaled_embedding) + feat_s0)
hyper_in_list: list[torch.Tensor] = []
for i in range(self.num_mask_tokens):
current_mlp = self.output_hypernetworks_mlps[i]
hyper_in_list += [current_mlp(mask_tokens_out[:, :, i, :])]
hyper_in = torch.stack(hyper_in_list, dim=2)
_, num_channels, height, width = upscaled_embedding.shape
upscaled_embedding = upscaled_embedding.view(batch_size, point_batch_size, num_channels, height * width)
masks = (hyper_in @ upscaled_embedding).view(batch_size, point_batch_size, -1, height, width)
# Generate mask quality predictions
iou_pred = self.iou_prediction_head(iou_token_out)
object_score_logits = self.pred_obj_score_head(point_embeddings[:, :, 0, :])
# Select the correct mask or masks for output
if multimask_output:
mask_slice = slice(1, None)
masks = masks[:, :, mask_slice, :, :]
iou_pred = iou_pred[:, :, mask_slice]
elif self.dynamic_multimask_via_stability and not self.training:
mask_slice = slice(0, 1)
masks, iou_pred = self._dynamic_multimask_via_stability(masks, iou_pred)
else:
mask_slice = slice(0, 1)
masks = masks[:, :, mask_slice, :, :]
iou_pred = iou_pred[:, :, mask_slice]
sam_tokens_out = mask_tokens_out[:, :, mask_slice] # [b, 3, c] shape
return masks, iou_pred, sam_tokens_out, object_score_logits
@auto_docstring(
custom_intro="""
Segment Anything Model 2 (SAM 2) for generating segmentation masks, given an input image and
input points and labels, boxes, or masks.
"""
)
| Sam2MaskDecoder |
python | python-poetry__poetry | src/poetry/plugins/plugin.py | {
"start": 241,
"end": 452
} | class ____(BasePlugin):
"""
Generic plugin not related to the console application.
"""
group = "poetry.plugin"
@abstractmethod
def activate(self, poetry: Poetry, io: IO) -> None: ...
| Plugin |
python | ray-project__ray | python/ray/llm/tests/common/cloud/test_utils.py | {
"start": 5059,
"end": 9477
} | class ____:
"""Tests for the remote_object_cache decorator."""
@pytest.mark.asyncio
async def test_basic_functionality(self):
"""Test basic remote_object_cache decorator functionality."""
call_count = 0
MISSING = object()
@remote_object_cache(
max_size=2,
missing_expire_seconds=1,
exists_expire_seconds=3,
missing_object_value=MISSING,
)
async def fetch(key: str):
nonlocal call_count
call_count += 1
if key == "missing":
return MISSING
return f"value-{key}"
# Test cache hit
assert await fetch("key1") == "value-key1"
assert call_count == 1
assert await fetch("key1") == "value-key1" # Should hit cache
assert call_count == 1 # Count should not increase
# Test cache size limit
assert await fetch("key2") == "value-key2"
assert call_count == 2
assert await fetch("key3") == "value-key3" # Should evict key1
assert call_count == 3
# Verify key1 was evicted
assert await fetch("key1") == "value-key1"
assert call_count == 4
@pytest.mark.asyncio
async def test_expiration(self):
"""Test cache expiration for both missing and existing objects."""
call_count = 0
MISSING = object()
@remote_object_cache(
max_size=2,
missing_expire_seconds=1, # 1 second to expire missing object
exists_expire_seconds=3, # 3 seconds to expire existing object
missing_object_value=MISSING,
)
async def fetch(key: str):
nonlocal call_count
call_count += 1
if key == "missing":
return MISSING
return f"value-{key}"
# Test missing object expiration
assert await fetch("missing") is MISSING
assert call_count == 1
assert await fetch("missing") is MISSING # Should hit cache
assert call_count == 1
await asyncio.sleep(1.5) # Wait for missing object to expire
assert await fetch("missing") is MISSING # Should fetch again
assert call_count == 2
# Test existing object expiration
assert await fetch("key1") == "value-key1"
assert call_count == 3
assert await fetch("key1") == "value-key1" # Should hit cache
assert call_count == 3
await asyncio.sleep(1.5) # Not expired yet
assert await fetch("key1") == "value-key1" # Should still hit cache
assert call_count == 3
await asyncio.sleep(2) # Now expired (total > 3 seconds)
assert await fetch("key1") == "value-key1" # Should fetch again
assert call_count == 4
@pytest.mark.asyncio
async def test_error_handling(self):
"""Test error handling in remote_object_cache decorator."""
call_count = 0
@remote_object_cache(max_size=2)
async def fetch(key: str):
nonlocal call_count
call_count += 1
if key == "error":
raise ValueError("Test error")
return f"value-{key}"
# Test successful case
assert await fetch("key1") == "value-key1"
assert call_count == 1
# Test error case
with pytest.raises(ValueError, match="Test error"):
await fetch("error")
assert call_count == 2
# Verify error wasn't cached
with pytest.raises(ValueError, match="Test error"):
await fetch("error")
assert call_count == 3
@pytest.mark.asyncio
async def test_concurrent_access(self):
"""Test concurrent access to cached function."""
call_count = 0
DELAY = 0.1
@remote_object_cache(max_size=2)
async def slow_fetch(key: str):
nonlocal call_count
call_count += 1
await asyncio.sleep(DELAY) # Simulate slow operation
return f"value-{key}"
# Launch multiple concurrent calls
tasks = [slow_fetch("key1") for _ in range(5)]
results = await asyncio.gather(*tasks)
# All results should be the same
assert all(r == "value-key1" for r in results)
# Should only call once despite multiple concurrent requests
assert call_count == 1
| TestRemoteObjectCacheDecorator |
python | django__django | django/core/validators.py | {
"start": 13959,
"end": 14174
} | class ____(BaseValidator):
message = _("Ensure this value is greater than or equal to %(limit_value)s.")
code = "min_value"
def compare(self, a, b):
return a < b
@deconstructible
| MinValueValidator |
python | pytorch__pytorch | torch/_inductor/codegen/cuda/cutlass_utils.py | {
"start": 7214,
"end": 15415
} | class ____:
"""
CUTLASS args used to initialize a CUTLASS Manifest.
"""
architectures: Optional[str] = None
cuda_version: Optional[str] = None
instantiation_level: Optional[str] = None
operations: Optional[str] = None
build_dir = ""
curr_build_dir = ""
generator_target = ""
kernels = "all"
ignore_kernels = ""
exclude_kernels = ""
# TODO: these three look dead?
kernel_filter_file: None = None
selected_kernel_list: None = None
interface_dir: None = None
filter_by_cc = True
disable_full_archs_compilation = False
def __post_init__(self):
if self.architectures is None or self.cuda_version is None:
raise RuntimeError(
f"{self.architectures=} or {self.cuda_version=} is None!"
)
self.architectures = _normalize_cuda_arch(self.architectures)
@clear_on_fresh_cache
@functools.cache
def _gen_ops_cached(arch, version) -> dict[Any, Any]:
# Note: Cache needs to be specific for cuda architecture and version
# Import cutlass python scripts.
assert try_import_cutlass()
import cutlass_library.generator as cutlass_generator
import cutlass_library.manifest as cutlass_manifest
if arch is None or version is None:
log.error(
"Cannot detect cuda arch %s or cuda version %s. "
"Will discard all cutlass ops. "
"Please consider setting _inductor.cuda.arch and _inductor.cuda.version configs.",
arch,
version,
)
return {}
arch = _normalize_cuda_arch(arch)
instantiation_level: str = config.cuda.cutlass_instantiation_level
args = CUTLASSArgs(
architectures=arch,
cuda_version=version,
instantiation_level=instantiation_level,
operations=CUTLASS_OPERATION_KIND,
)
manifest = cutlass_manifest.Manifest(args)
start_time = time.time()
if arch == "100":
if hasattr(cutlass_generator, "GenerateSM100"):
cutlass_generator.GenerateSM100(manifest, args.cuda_version)
cutlass_generator.GenerateSM90(manifest, args.cuda_version)
else:
try:
func = getattr(cutlass_generator, "GenerateSM" + arch)
func(manifest, args.cuda_version)
except AttributeError as e:
raise NotImplementedError(
"Arch " + arch + " is not supported by current cutlass lib."
) from e
log.info(
"CUTLASS library generated a dict of %d operation kinds in %.2f seconds",
len(manifest.operations),
time.time() - start_time,
)
return manifest.operations
def gen_ops() -> dict[Any, Any]:
"""
Generates all supported CUTLASS operations.
"""
with dynamo_timed("cutlass_utils.gen_ops"):
arch = get_cuda_arch()
version = get_cuda_version()
return _gen_ops_cached(arch, version)
DTYPE_TO_CUTLASS_TYPE = {
**DTYPE_TO_CPP,
torch.float16: "__half",
torch.bfloat16: "__nv_bfloat16",
torch.float8_e4m3fn: "__nv_fp8_e4m3",
}
@functools.lru_cache(32)
def torch_dtype_to_cutlass_type(
torch_dtype: torch.dtype,
) -> "cutlass_library.library.DataType": # type: ignore[name-defined] # noqa: F821
# Import cutlass python scripts.
assert try_import_cutlass()
import cutlass_library # type: ignore[import]
if torch_dtype == torch.float:
return cutlass_library.library.DataType.f32
elif torch_dtype == torch.half:
return cutlass_library.library.DataType.f16
elif torch_dtype == torch.bfloat16:
return cutlass_library.library.DataType.bf16
else:
raise NotImplementedError(f"Unsupported data type: {torch_dtype=}")
@functools.lru_cache(32)
def dtype_match(
torch_dtype: Optional[torch.dtype],
cutlass_dtype: "cutlass_library.library.DataType", # type: ignore[name-defined] # noqa: F821
) -> bool:
# Import cutlass python scripts.
assert try_import_cutlass()
import cutlass_library
if torch_dtype == torch.float:
return (
cutlass_dtype == cutlass_library.library.DataType.f32
or cutlass_dtype == cutlass_library.library.DataType.tf32
)
elif torch_dtype == torch.half:
return cutlass_dtype == cutlass_library.library.DataType.f16
elif torch_dtype == torch.bfloat16:
return cutlass_dtype == cutlass_library.library.DataType.bf16
elif torch_dtype == torch.int8:
return cutlass_dtype == cutlass_library.library.DataType.s8
elif torch_dtype == torch.uint8:
return cutlass_dtype == cutlass_library.library.DataType.u8
elif torch_dtype == torch.int32:
return cutlass_dtype == cutlass_library.library.DataType.s32
elif torch_dtype == torch.float8_e4m3fn:
return cutlass_dtype == cutlass_library.library.DataType.e4m3
else:
return False
def get_accumulator_dtype(
input_torch_dtypes: list[torch.dtype],
) -> Optional[torch.dtype]:
"""
Given a pair of input torch dtypes, returns the inferred accumulator torch dtype.
"""
assert OrderedSet(input_torch_dtypes) <= XW_DTYPES, (
f"{input_torch_dtypes=} is not supported"
)
if len(input_torch_dtypes) != 2:
return None
torch_dtype = None
if input_torch_dtypes[0] == input_torch_dtypes[1]:
torch_dtype = input_torch_dtypes[0]
else:
size0 = torch.tensor([], dtype=input_torch_dtypes[0]).element_size()
size1 = torch.tensor([], dtype=input_torch_dtypes[1]).element_size()
if size0 > size1:
dtype0, dtype1 = input_torch_dtypes
else:
dtype1, dtype0 = input_torch_dtypes
if dtype0 in [torch.half, torch.bfloat16] and dtype1 in [
torch.int8,
torch.uint8,
]:
torch_dtype = dtype0
if torch_dtype in (torch.float16, torch.bfloat16, torch.float, torch.float8_e4m3fn):
accumulator_dtype = torch.float
elif torch_dtype == torch.int8:
accumulator_dtype = torch.int32
else:
raise NotImplementedError(f"Unsupported data types: {input_torch_dtypes=}")
assert accumulator_dtype in ACCUMULATOR_DTYPES, (
f"{accumulator_dtype=} is not supported"
)
return accumulator_dtype
@functools.lru_cache(32)
def get_alignments(torch_dtype: torch.dtype) -> list[int]:
"""
Returns all possible valid CUTLASS alignments in terms of the number of elements for a given dtype.
CUTLASS gemm / conv SM80 APIs support 16 bytes max alignment, and 2 bytes min alignment.
"""
if torch_dtype in (torch.half, torch.bfloat16):
return [8, 4, 2, 1]
elif torch_dtype == torch.float:
return [4, 2, 1]
elif torch_dtype in (torch.uint8, torch.int8, torch.float8_e4m3fn):
return [16, 8, 4, 2]
elif torch_dtype == torch.int32:
return [4, 2, 1]
else:
raise NotImplementedError(f"unsupported {torch_dtype=} for alignments")
def get_max_alignment(inductor_layout: Layout) -> int:
"""
Returns the max alignment (in terms of number of elements) for a given Inductor Layout.
"""
dtype = inductor_layout.dtype
size = inductor_layout.size
offset = inductor_layout.offset
def is_static_int(number: object) -> TypeIs[int | sympy.Integer]:
return isinstance(number, (int | sympy.Integer))
def a_factor_of(x, alignment):
if is_static_int(x) and is_static_int(alignment):
return x % alignment == 0
rem = sympy.Mod(x, alignment)
return V.graph.sizevars.evaluate_expr(sympy.Eq(rem, 0))
try:
contiguous_dim = inductor_layout.stride.index(1)
except ValueError:
# No dim with stride 1 found, return 1
return 1
alignments = get_alignments(dtype)
for alignment in alignments:
if not a_factor_of(size[contiguous_dim], alignment) or not a_factor_of(
offset, alignment
):
continue
if all(
(dim == contiguous_dim)
or a_factor_of(inductor_layout.stride[dim], alignment)
for dim in range(len(size))
):
return alignment
return 1
| CUTLASSArgs |
python | huggingface__transformers | src/transformers/models/git/modeling_git.py | {
"start": 25291,
"end": 27386
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: GitVisionConfig):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = GitVisionAttention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = GitVisionMLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
causal_attention_mask: torch.Tensor,
output_attentions: Optional[bool] = False,
) -> tuple[torch.FloatTensor]:
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
`(config.encoder_attention_heads,)`.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
causal_attention_mask=causal_attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
# Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoder with AltCLIP->GitVision, CLIPConfig
| GitVisionEncoderLayer |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI052.py | {
"start": 3791,
"end": 3814
} | class ____:
WIZ = 4
| Bop |
python | google__pytype | pytype/constant_folding.py | {
"start": 3837,
"end": 4495
} | class ____:
"""Build up a map of constants."""
def __init__(self):
self.key_types = set()
self.value_types = set()
self.keys = []
self.values = []
self.elements = {}
def add(self, key, value):
self.key_types.add(key.typ)
self.value_types.add(value.typ)
self.keys.append(key.value)
self.values.append(value.value)
self.elements[key.value] = value
def build(self):
return _Map(
key_types=frozenset(self.key_types),
keys=tuple(reversed(self.keys)),
value_types=frozenset(self.value_types),
values=tuple(reversed(self.values)),
elements=self.elements,
)
| _MapBuilder |
python | pytorch__pytorch | tools/gen_vulkan_spv.py | {
"start": 7244,
"end": 15794
} | class ____:
def __init__(
self,
src_dir_paths: str | list[str],
env: dict[Any, Any],
glslc_path: str | None,
) -> None:
if isinstance(src_dir_paths, str):
self.src_dir_paths = [src_dir_paths]
else:
self.src_dir_paths = src_dir_paths
self.env = env
self.glslc_path = glslc_path
self.glsl_src_files: dict[str, str] = {}
self.template_yaml_files: list[str] = []
self.addSrcAndYamlFiles(self.src_dir_paths)
self.shader_template_params: dict[Any, Any] = {}
for yaml_file in self.template_yaml_files:
self.parseTemplateYaml(yaml_file)
self.output_shader_map: dict[str, tuple[str, dict[str, str]]] = {}
self.constructOutputMap()
def addSrcAndYamlFiles(self, src_dir_paths: list[str]) -> None:
for src_path in src_dir_paths:
# Collect glsl source files
glsl_files = glob.glob(
os.path.join(src_path, "**", "*.glsl*"), recursive=True
)
for file in glsl_files:
if len(file) > 1:
self.glsl_src_files[extract_filename(file, keep_ext=False)] = file
# Collect template yaml files
yaml_files = glob.glob(
os.path.join(src_path, "**", "*.yaml"), recursive=True
)
for file in yaml_files:
if len(file) > 1:
self.template_yaml_files.append(file)
def generateVariantCombinations(
self,
iterated_params: dict[str, Any],
exclude_params: set[str] | None = None,
) -> list[Any]:
if exclude_params is None:
exclude_params = set()
all_iterated_params = []
for param_name, value_list in iterated_params.items():
if param_name not in exclude_params:
param_values = []
for value in value_list:
suffix = value.get("SUFFIX", value["VALUE"])
param_values.append((param_name, suffix, value["VALUE"]))
all_iterated_params.append(param_values)
return list(product(*all_iterated_params))
def parseTemplateYaml(self, yaml_file: str) -> None:
with open(yaml_file) as f:
contents = yaml.load(f, Loader=UniqueKeyLoader)
for template_name, params_dict in contents.items():
if template_name in self.shader_template_params:
raise KeyError(f"{template_name} params file is defined twice")
default_params = params_dict["parameter_names_with_default_values"]
params_names = set(default_params.keys()).union({"NAME"})
self.shader_template_params[template_name] = []
default_iterated_params = params_dict.get(
"generate_variant_forall", None
)
for variant in params_dict["shader_variants"]:
variant_params_names = set(variant.keys())
invalid_keys = (
variant_params_names
- params_names
- {"generate_variant_forall"}
)
assert len(invalid_keys) == 0
iterated_params = variant.get(
"generate_variant_forall", default_iterated_params
)
if iterated_params is not None:
variant_combinations = self.generateVariantCombinations(
iterated_params, variant_params_names
)
for combination in variant_combinations:
default_params_copy = copy.deepcopy(default_params)
for key in variant:
if key != "generate_variant_forall":
default_params_copy[key] = variant[key]
variant_name = variant["NAME"]
for param_value in combination:
default_params_copy[param_value[0]] = param_value[2]
if len(param_value[1]) > 0:
variant_name = f"{variant_name}_{param_value[1]}"
default_params_copy["NAME"] = variant_name
self.shader_template_params[template_name].append(
default_params_copy
)
else:
default_params_copy = copy.deepcopy(default_params)
for key in variant:
default_params_copy[key] = variant[key]
self.shader_template_params[template_name].append(
default_params_copy
)
def create_shader_params(
self, variant_params: dict[str, Any] | None = None
) -> dict[str, str]:
if variant_params is None:
variant_params = {}
shader_params = copy.deepcopy(self.env)
for key, value in variant_params.items():
shader_params[key] = value
shader_dtype = shader_params.get("DTYPE", "float")
if shader_dtype == "int":
shader_params["FORMAT"] = self.env["INT_IMAGE_FORMAT"]
elif shader_dtype == "uint":
shader_params["FORMAT"] = self.env["UINT_IMAGE_FORMAT"]
elif shader_dtype == "int32":
shader_params["FORMAT"] = "rgba32i"
elif shader_dtype == "uint32":
shader_params["FORMAT"] = "rgba32ui"
elif shader_dtype == "int8":
shader_params["FORMAT"] = "rgba8i"
elif shader_dtype == "uint8":
shader_params["FORMAT"] = "rgba8ui"
elif shader_dtype == "float32":
shader_params["FORMAT"] = "rgba32f"
# Assume float by default
else:
shader_params["FORMAT"] = self.env["FLOAT_IMAGE_FORMAT"]
return shader_params
def constructOutputMap(self) -> None:
for shader_name, params in self.shader_template_params.items():
for variant in params:
source_glsl = self.glsl_src_files[shader_name]
self.output_shader_map[variant["NAME"]] = (
source_glsl,
self.create_shader_params(variant),
)
for shader_name, source_glsl in self.glsl_src_files.items():
if shader_name not in self.shader_template_params:
self.output_shader_map[shader_name] = (
source_glsl,
self.create_shader_params(),
)
def generateSPV(self, output_dir: str) -> dict[str, str]:
output_file_map = {}
for shader_name in self.output_shader_map:
source_glsl = self.output_shader_map[shader_name][0]
shader_params = self.output_shader_map[shader_name][1]
with codecs.open(source_glsl, "r", encoding="utf-8") as input_file:
input_text = input_file.read()
output_text = preprocess(input_text, shader_params)
glsl_out_path = os.path.join(output_dir, f"{shader_name}.glsl")
with codecs.open(glsl_out_path, "w", encoding="utf-8") as output_file:
output_file.write(output_text)
# If no GLSL compiler is specified, then only write out the generated GLSL shaders.
# This is mainly for testing purposes.
if self.glslc_path is not None:
spv_out_path = os.path.join(output_dir, f"{shader_name}.spv")
cmd = [
self.glslc_path,
"-fshader-stage=compute",
glsl_out_path,
"-o",
spv_out_path,
"--target-env=vulkan1.0",
"-Werror",
] + [
arg
for src_dir_path in self.src_dir_paths
for arg in ["-I", src_dir_path]
]
print("glslc cmd:", cmd)
subprocess.check_call(cmd)
output_file_map[spv_out_path] = glsl_out_path
return output_file_map
##############################################
# Shader Info and Shader Registry Handling #
##############################################
@dataclass
| SPVGenerator |
python | tensorflow__tensorflow | tensorflow/python/profiler/pprof_profiler.py | {
"start": 3117,
"end": 4676
} | class ____(object):
"""Keeps track of `Function` protos for pprof profile."""
def __init__(self, string_table):
"""Constructor.
Args:
string_table: A `StringTable` object.
"""
self._string_table = string_table
# Maps tuples in the form (file_path, function_name, start_line_number)
# to `Function` protos.
self._function_key_to_function = {}
def index_of(self, file_path, function_name, function_start_line):
"""Returns index of the function, adding the function if needed.
Args:
file_path: (string) Path to file where the function is defined.
function_name: (string) Function name.
function_start_line: (integer) Start line number of function definition.
Returns:
Function index.
"""
function_key = (file_path, function_name, function_start_line)
if function_key in self._function_key_to_function:
return self._function_key_to_function[function_key].id
else:
# Function indexes should start from 1
function_index = len(self._function_key_to_function) + 1
function = profile_pb2.Function()
function.id = function_index
function.name = self._string_table.index_of(function_name)
function.filename = self._string_table.index_of(file_path)
function.start_line = function_start_line
self._function_key_to_function[function_key] = function
return function_index
def function_protos(self):
"""Returns list of `profile_pb2.Function` protos."""
return self._function_key_to_function.values()
| Functions |
python | davidhalter__jedi | jedi/inference/signature.py | {
"start": 1947,
"end": 3960
} | class ____(AbstractSignature):
def __init__(self, value, function_value=None, is_bound=False):
super().__init__(value, is_bound)
self._function_value = function_value or value
def bind(self, value):
return TreeSignature(value, self._function_value, is_bound=True)
@property
def _annotation(self):
# Classes don't need annotations, even if __init__ has one. They always
# return themselves.
if self.value.is_class():
return None
return self._function_value.tree_node.annotation
@property
def annotation_string(self):
a = self._annotation
if a is None:
return ''
return a.get_code(include_prefix=False)
@memoize_method
def get_param_names(self, resolve_stars=False):
params = self._function_value.get_param_names()
if resolve_stars:
from jedi.inference.star_args import process_params
params = process_params(params)
if self.is_bound:
return params[1:]
return params
def matches_signature(self, arguments):
from jedi.inference.param import get_executed_param_names_and_issues
executed_param_names, issues = \
get_executed_param_names_and_issues(self._function_value, arguments)
if issues:
return False
matches = all(executed_param_name.matches_signature()
for executed_param_name in executed_param_names)
if debug.enable_notice:
tree_node = self._function_value.tree_node
signature = parser_utils.get_signature(tree_node)
if matches:
debug.dbg("Overloading match: %s@%s (%s)",
signature, tree_node.start_pos[0], arguments, color='BLUE')
else:
debug.dbg("Overloading no match: %s@%s (%s)",
signature, tree_node.start_pos[0], arguments, color='BLUE')
return matches
| TreeSignature |
python | allegroai__clearml | clearml/backend_interface/metrics/events.py | {
"start": 587,
"end": 5236
} | class ____(object):
"""
Adapter providing all the base attributes required by a metrics event and defining an interface used by the
metrics manager when batching and writing events.
"""
default_nan_value = 0.0
default_inf_value = 0.0
""" Default value used when a np.nan or np.inf value is encountered """
report_nan_warning_period = 1000
report_inf_warning_period = 1000
_report_nan_warning_iteration = float("inf")
_report_inf_warning_iteration = float("inf")
@attrs(cmp=False, slots=True)
class FileEntry(object):
"""File entry used to report on file data that needs to be uploaded prior to sending the event"""
event = attr.attrib()
name = attr.attrib()
""" File name """
stream = attr.attrib()
""" File-like object containing the file's data """
url_prop = attr.attrib()
""" Property name that should be updated with the uploaded url """
key_prop = attr.attrib()
upload_uri = attr.attrib()
url = attr.attrib(default=None)
exception = attr.attrib(default=None)
retries = attr.attrib(default=None)
delete_local_file = attr.attrib(default=True)
""" Local file path, if exists, delete the file after upload completed """
def set_exception(self, exp: Exception) -> None:
self.exception = exp
self.event.upload_exception = exp
@property
def metric(self) -> Any:
return self._metric
@metric.setter
def metric(self, value: Any) -> None:
self._metric = value
@property
def variant(self) -> Any:
return self._variant
def __init__(
self,
metric: str,
variant: str,
iter: Optional[int] = None,
timestamp: Optional[int] = None,
task: Optional[str] = None,
gen_timestamp_if_none: bool = True,
model_event: Optional[dict] = None,
) -> None:
if not timestamp and gen_timestamp_if_none:
timestamp = int(time.time() * 1000)
self._metric = metric
self._variant = variant
self._iter = iter
self._timestamp = timestamp
self._task = task
self._model_event = model_event
# Try creating an event just to trigger validation
_ = self.get_api_event()
self.upload_exception = None
@abc.abstractmethod
def get_api_event(self) -> None:
"""Get an API event instance"""
pass
def get_file_entry(self) -> None:
"""Get information for a file that should be uploaded before this event is sent"""
pass
def get_iteration(self) -> Optional[int]:
return self._iter
def update(self, task: Optional[str] = None, iter_offset: Optional[int] = None, **kwargs: Any) -> None:
"""Update event properties"""
if task:
self._task = task
if iter_offset is not None and self._iter is not None:
self._iter += iter_offset
def _get_base_dict(self) -> Dict[str, Any]:
"""Get a dict with the base attributes"""
res = dict(
task=self._task,
timestamp=self._timestamp,
metric=self._metric,
variant=self._variant,
)
if self._iter is not None:
res.update(iter=self._iter)
if self._model_event is not None:
res.update(model_event=self._model_event)
return res
@classmethod
def _convert_np_nan_inf(cls, val: float) -> float:
if np.isnan(val):
cls._report_nan_warning_iteration += 1
if cls._report_nan_warning_iteration >= cls.report_nan_warning_period:
LoggerRoot.get_base_logger().info(
"NaN value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_nan_value to assign another value".format(
cls.default_nan_value
)
)
cls._report_nan_warning_iteration = 0
return cls.default_nan_value
if np.isinf(val):
cls._report_inf_warning_iteration += 1
if cls._report_inf_warning_iteration >= cls.report_inf_warning_period:
LoggerRoot.get_base_logger().info(
"inf value encountered. Reporting it as '{}'. Use clearml.Logger.set_reporting_inf_value to assign another value".format(
cls.default_inf_value
)
)
cls._report_inf_warning_iteration = 0
return cls.default_inf_value
return val
| MetricsEventAdapter |
python | getsentry__sentry | src/sentry/api/serializers/models/project.py | {
"start": 22875,
"end": 23534
} | class ____(ProjectSerializer):
def get_attrs(
self, item_list: Sequence[Project], user: User | RpcUser | AnonymousUser, **kwargs: Any
) -> dict[Project, dict[str, Any]]:
attrs = super().get_attrs(item_list, user)
orgs = {d["id"]: d for d in serialize(list({i.organization for i in item_list}), user)}
for item in item_list:
attrs[item]["organization"] = orgs[str(item.organization_id)]
return attrs
def serialize(self, obj, attrs, user, **kwargs):
base = super().serialize(obj, attrs, user)
return {**base, "organization": attrs["organization"]}
| ProjectWithOrganizationSerializer |
python | scipy__scipy | scipy/linalg/tests/test_lapack.py | {
"start": 17813,
"end": 18976
} | class ____:
def test_sing_val_update(self):
sigmas = np.array([4., 3., 2., 0])
m_vec = np.array([3.12, 5.7, -4.8, -2.2])
M = np.hstack((np.vstack((np.diag(sigmas[0:-1]),
np.zeros((1, len(m_vec) - 1)))),
m_vec[:, np.newaxis]))
SM = svd(M, full_matrices=False, compute_uv=False, overwrite_a=False,
check_finite=False)
it_len = len(sigmas)
sgm = np.concatenate((sigmas[::-1], [sigmas[0] + it_len*norm(m_vec)]))
mvc = np.concatenate((m_vec[::-1], (0,)))
lasd4 = get_lapack_funcs('lasd4', (sigmas,))
roots = []
for i in range(0, it_len):
res = lasd4(i, sgm, mvc)
roots.append(res[1])
assert_(
(res[3] <= 0),
f"LAPACK root finding dlasd4 failed to find the singular value {i}"
)
roots = np.array(roots)[::-1]
assert_((not np.any(np.isnan(roots)), "There are NaN roots"))
assert_allclose(SM, roots, atol=100*np.finfo(np.float64).eps,
rtol=100*np.finfo(np.float64).eps)
| TestDlasd4 |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 1407162,
"end": 1407457
} | class ____(
sgqlc.types.Type, Node, AuditEntry, OrganizationAuditEntryData, RepositoryAuditEntryData
):
"""Audit log entry for a repo.config.enable_anonymous_git_access
event.
"""
__schema__ = github_schema
__field_names__ = ()
| RepoConfigEnableAnonymousGitAccessAuditEntry |
python | django__django | tests/admin_scripts/tests.py | {
"start": 93335,
"end": 95909
} | class ____(AdminScriptTestCase):
"""Tests for 2-stage argument parsing scheme.
django-admin command arguments are parsed in 2 parts; the core arguments
(--settings, --traceback and --pythonpath) are parsed using a basic parser,
ignoring any unknown options. Then the full settings are
passed to the command parser, which extracts commands of interest to the
individual command.
"""
def setUp(self):
super().setUp()
self.write_settings(
"settings.py", apps=["django.contrib.auth", "django.contrib.contenttypes"]
)
self.write_settings("alternate_settings.py")
def test_setting_then_option(self):
"""Options passed after settings are correctly handled."""
args = [
"base_command",
"testlabel",
"--settings=alternate_settings",
"--option_a=x",
]
self._test(args)
def test_setting_then_short_option(self):
"""Short options passed after settings are correctly handled."""
args = ["base_command", "testlabel", "--settings=alternate_settings", "-a", "x"]
self._test(args)
def test_option_then_setting(self):
"""Options passed before settings are correctly handled."""
args = [
"base_command",
"testlabel",
"--option_a=x",
"--settings=alternate_settings",
]
self._test(args)
def test_short_option_then_setting(self):
"""Short options passed before settings are correctly handled."""
args = ["base_command", "testlabel", "-a", "x", "--settings=alternate_settings"]
self._test(args)
def test_option_then_setting_then_option(self):
"""Options are correctly handled when they are passed before and after
a setting."""
args = [
"base_command",
"testlabel",
"--option_a=x",
"--settings=alternate_settings",
"--option_b=y",
]
self._test(args, option_b="'y'")
def _test(self, args, option_b="'2'"):
out, err = self.run_manage(args)
self.assertNoOutput(err)
self.assertOutput(
out,
"EXECUTE:BaseCommand labels=('testlabel',), options=["
"('force_color', False), ('no_color', False), ('option_a', 'x'), "
"('option_b', %s), ('option_c', '3'), ('pythonpath', None), "
"('settings', 'alternate_settings'), ('traceback', False), "
"('verbosity', 1)]" % option_b,
)
| ArgumentOrder |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_dlpack.py | {
"start": 733,
"end": 3989
} | class ____(TestCase):
@xpassIfTorchDynamo_np # (reason="pytorch seems to handle refcounts differently")
@skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_dunder_dlpack_refcount(self):
x = np.arange(5)
y = x.__dlpack__()
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
@unittest.expectedFailure
@skipIfTorchDynamo("I can't figure out how to get __dlpack__ into trace_rules.py")
def test_dunder_dlpack_stream(self):
x = np.arange(5)
x.__dlpack__(stream=None)
with pytest.raises(RuntimeError):
x.__dlpack__(stream=1)
@xpassIfTorchDynamo_np # (reason="pytorch seems to handle refcounts differently")
@skipif(IS_PYPY, reason="PyPy can't get refcounts.")
def test_from_dlpack_refcount(self):
x = np.arange(5)
y = np.from_dlpack(x)
assert sys.getrefcount(x) == 3
del y
assert sys.getrefcount(x) == 2
@parametrize(
"dtype",
[
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.float16,
np.float32,
np.float64,
np.complex64,
np.complex128,
],
)
def test_dtype_passthrough(self, dtype):
x = np.arange(5, dtype=dtype)
y = np.from_dlpack(x)
assert y.dtype == x.dtype
assert_array_equal(x, y)
def test_non_contiguous(self):
x = np.arange(25).reshape((5, 5))
y1 = x[0]
assert_array_equal(y1, np.from_dlpack(y1))
y2 = x[:, 0]
assert_array_equal(y2, np.from_dlpack(y2))
y3 = x[1, :]
assert_array_equal(y3, np.from_dlpack(y3))
y4 = x[1]
assert_array_equal(y4, np.from_dlpack(y4))
y5 = np.diagonal(x).copy()
assert_array_equal(y5, np.from_dlpack(y5))
@parametrize("ndim", range(33))
def test_higher_dims(self, ndim):
shape = (1,) * ndim
x = np.zeros(shape, dtype=np.float64)
assert shape == np.from_dlpack(x).shape
def test_dlpack_device(self):
x = np.arange(5)
assert x.__dlpack_device__() == (1, 0)
y = np.from_dlpack(x)
assert y.__dlpack_device__() == (1, 0)
z = y[::2]
assert z.__dlpack_device__() == (1, 0)
def dlpack_deleter_exception(self):
x = np.arange(5)
_ = x.__dlpack__()
raise RuntimeError
def test_dlpack_destructor_exception(self):
with pytest.raises(RuntimeError):
self.dlpack_deleter_exception()
@skip(reason="no readonly arrays in pytorch")
def test_readonly(self):
x = np.arange(5)
x.flags.writeable = False
with pytest.raises(BufferError):
x.__dlpack__()
def test_ndim0(self):
x = np.array(1.0)
y = np.from_dlpack(x)
assert_array_equal(x, y)
def test_from_torch(self):
t = torch.arange(4)
a = np.from_dlpack(t)
assert_array_equal(a, np.asarray(t))
def test_to_torch(self):
a = np.arange(4)
t = torch.from_dlpack(a)
assert_array_equal(np.asarray(t), a)
if __name__ == "__main__":
run_tests()
| TestDLPack |
python | numba__numba | numba/cuda/cudadrv/devicearray.py | {
"start": 24888,
"end": 26415
} | class ____(object):
"""
An IPC array handle that can be serialized and transfer to another process
in the same machine for share a GPU allocation.
On the destination process, use the *.open()* method to creates a new
*DeviceNDArray* object that shares the allocation from the original process.
To release the resources, call the *.close()* method. After that, the
destination can no longer use the shared array object. (Note: the
underlying weakref to the resource is now dead.)
This object implements the context-manager interface that calls the
*.open()* and *.close()* method automatically::
with the_ipc_array_handle as ipc_array:
# use ipc_array here as a normal gpu array object
some_code(ipc_array)
# ipc_array is dead at this point
"""
def __init__(self, ipc_handle, array_desc):
self._array_desc = array_desc
self._ipc_handle = ipc_handle
def open(self):
"""
Returns a new *DeviceNDArray* that shares the allocation from the
original process. Must not be used on the original process.
"""
dptr = self._ipc_handle.open(devices.get_context())
return DeviceNDArray(gpu_data=dptr, **self._array_desc)
def close(self):
"""
Closes the IPC handle to the array.
"""
self._ipc_handle.close()
def __enter__(self):
return self.open()
def __exit__(self, type, value, traceback):
self.close()
| IpcArrayHandle |
python | streamlit__streamlit | lib/streamlit/runtime/caching/storage/cache_storage_protocol.py | {
"start": 4220,
"end": 5695
} | class ____(Protocol):
"""Cache storage protocol, that should be implemented by the concrete cache storages.
Used to store cached values for a single `@st.cache_data` decorated function
serialized as bytes.
CacheStorage instances should be created by `CacheStorageManager.create()` method.
Notes
-----
Threading: The methods of this protocol could be called from multiple threads.
This is a responsibility of the concrete implementation to ensure thread safety
guarantees.
"""
@abstractmethod
def get(self, key: str) -> bytes:
"""Returns the stored value for the key.
Raises
------
CacheStorageKeyNotFoundError
Raised if the key is not in the storage.
"""
raise NotImplementedError
@abstractmethod
def set(self, key: str, value: bytes) -> None:
"""Sets the value for a given key."""
raise NotImplementedError
@abstractmethod
def delete(self, key: str) -> None:
"""Delete a given key."""
raise NotImplementedError
@abstractmethod
def clear(self) -> None:
"""Remove all keys for the storage."""
raise NotImplementedError
def close(self) -> None:
"""Closes the cache storage, it is optional to implement, and should be used
to close open resources, before we delete the storage instance.
e.g. close the database connection etc.
"""
pass
| CacheStorage |
python | google__jax | jax/_src/interpreters/partial_eval.py | {
"start": 86118,
"end": 110455
} | class ____(core.Trace):
__slots__ = ("frame", "tag", "parent_trace")
def __init__(self, debug_info: core.DebugInfo, parent_trace=None, lower=False,
auto_dce=False):
super().__init__()
self.requires_low = lower
self.frame = JaxprStackFrame(debug_info, auto_dce)
self.parent_trace = parent_trace
def invalidate(self):
# TODO(mattjj): exposed existing tracer leaks; fix them and re-enable!
# super().invalidate()
# avoid cyclic refs
self.frame.tracing_eqns = [] # thunk -> eqn -> in_tracers -> trace ->
# -> frame -> tracing_eqns -> thunk
# TODO(dougalm): we might be able to remove these given refcounting dce
self.frame.constid_to_tracer = {}
self.frame.constvar_to_val = {}
def to_jaxpr_tracer(self, x, source_info: SourceInfo):
if isinstance(x, DynamicJaxprTracer) and x._trace is self:
return x
else:
if hasattr(x, "dimension_as_value"): # Used for shape_poly._DimExpr
with core.set_current_trace(self):
x = x.dimension_as_value()
return self.to_jaxpr_tracer(x, source_info)
else:
return self.new_const(x, source_info)
def var_to_tracer(self, var, source_info, parent=None):
aval = var.aval
if aval.has_qdd:
aval = core.AvalQDD(aval, var.initial_qdd)
return DynamicJaxprTracer(self, aval, var, source_info, parent)
def new_arg(self, aval, source_info: SourceInfo):
var = self.frame.newvar(aval)
tracer = DynamicJaxprTracer(self, aval, var, source_info)
self.frame.invars.append(var)
self.frame.mutable_qdds.append((var, tracer.mutable_qdd))
return tracer
def make_eqn(self, in_tracers, out_avals, primitive, params,
effects, source_info=None, ctx = None):
source_info = source_info or source_info_util.new_source_info()
ctx = ctx or JaxprEqnContext(
config.compute_on_context_manager.value,
config.threefry_partitionable.value,
xla_metadata_lib.current_xla_metadata())
outvars = map(self.frame.newvar, out_avals)
if config.enable_checks.value:
assert all(isinstance(x, DynamicJaxprTracer) for x in in_tracers)
assert all(isinstance(v, Var) for v in outvars)
eqn = TracingEqn(in_tracers, outvars, primitive, params, effects, source_info, ctx)
out_tracers = [self.var_to_tracer(v, source_info, eqn) for v in outvars]
return eqn, out_tracers
def emit_eqn(self, in_tracers, out_avals, primitive, params, effects, source_info=None, ctx=None):
eqn, out_tracers = self.make_eqn(in_tracers, out_avals, primitive, params, effects, source_info, ctx)
self.frame.add_eqn(eqn)
return out_tracers
def new_const(self, c, source_info: SourceInfo,
aval: AbstractValue | None = None):
# TODO(mattjj): for ints, or hashable consts, don't rely on id
tracer = self.frame.constid_to_tracer.get(id(c))
if tracer is None:
if aval is None:
aval = get_aval(c)
if aval.has_qdd:
with core.set_current_trace(self.parent_trace or core.eval_trace):
aval = core.AvalQDD(aval, core.cur_qdd(c)) # type: ignore
aval = self._lift_tracers_in_aval(aval, source_info)
tracer = self._new_const(aval, c, source_info)
return tracer
pure = lift = new_const
def _new_const(self, aval, c, source_info: SourceInfo) -> DynamicJaxprTracer:
orig_c = c
id_c = id(c)
if isinstance(c, (int, float, bool, complex, np.generic, np.ndarray)):
c = dtypes.canonicalize_value(c)
if core.is_literalable(c):
val = Literal(c, aval)
return DynamicJaxprTracer(self, aval, val, source_info)
else:
var = self.frame.newvar(aval)
tracer = DynamicJaxprTracer(self, aval, var, source_info)
self.frame.constid_to_tracer[id_c] = tracer
if isinstance(aval, core.AvalQDD):
self.frame.mutable_qdds.append((var, tracer.mutable_qdd))
self.frame.constvar_to_val[var] = Constants(canonical=c, original=orig_c)
finalize(tracer, self.finalize_const, var, id_c)
return tracer
def finalize_const(self, var, constid):
self.frame.constvar_to_val.pop(var, None)
def get_const(self, tracer) -> Any:
atom = tracer.val
if isinstance(atom, Literal):
return atom.val
else:
const = self.frame.constvar_to_val.get(atom)
if const is not None:
const = const.canonical
return const
def _lift_tracers_in_aval(self, aval, source_info: SourceInfo):
if (not isinstance(aval, DShapedArray) or
not any(isinstance(d, Tracer) for d in aval.shape)):
return aval
shape = [self.to_jaxpr_tracer(d, source_info) if isinstance(d, Tracer) else d
for d in aval.shape]
return aval.update(shape=tuple(shape))
def cur_qdd(self, x):
source_info = source_info_util.current()
return self.to_jaxpr_tracer(x, source_info=source_info).mutable_qdd.cur_val
def process_primitive(self, primitive, tracers, params):
self.frame.is_high |= primitive.is_high(*map(typeof, tracers), **params)
if config.eager_constant_folding.value and not any(isinstance(x, Tracer) for x in tracers):
return primitive.bind_with_trace(core.eval_trace, tracers, params)
source_info = source_info_util.current()
to_jaxpr_tracer = partial(self.to_jaxpr_tracer, source_info=source_info)
jaxpr_tracers = map(to_jaxpr_tracer, tracers)
if primitive in custom_staging_rules:
return custom_staging_rules[primitive](self, source_info, *jaxpr_tracers,
**params)
return self.default_process_primitive(
primitive, jaxpr_tracers, params, source_info)
def default_process_primitive(self, primitive, tracers, params,
source_info=None):
from jax._src.hijax import call_hi_primitive_p
aval_qdds = [t.aval_mutable_qdd for t in tracers]
# TODO(mattjj): make custom_lin have hashable params.
# TODO(dougalm): add an attribute to primitives to mark primitives with
# effectful abstract_eval rules.
# TODO(mattjj,dougalm): clean up how we check for new-style hi primitives
if primitive is call_hi_primitive_p:
out_avals, effs = params['prim'].out_avals_flat, set() # TODO effs
elif (primitive.name == "custom_lin" or config.dynamic_shapes.value or
primitive.is_effectful and primitive.is_effectful(params)):
out_avals, effs = primitive.abstract_eval(*aval_qdds, **params)
else:
try:
out_avals, effs = _cached_abstract_eval(primitive, *aval_qdds, **params)
except Exception as e:
# TODO(phawkins): remove this 3 months after the release of JAX v0.7.
_verify_params_are_hashable(primitive, params)
raise
if isinstance(out_avals, (tuple, list)) != primitive.multiple_results:
raise ValueError(f"{primitive}.abstract_eval() method should return "
f"a tuple or a list iff {primitive}.multiple_results.")
out_avals = [out_avals] if not primitive.multiple_results else out_avals
source_info = source_info or source_info_util.current()
maybe_consts_out = try_constant_folding(primitive, tracers, params, out_avals)
if maybe_consts_out is not None:
eqn = None
out_tracers = [self.new_const(c, source_info=source_info, aval=aval)
for c, aval in zip(maybe_consts_out, out_avals)]
else:
eqn, out_tracers = self.make_eqn(tracers, out_avals, primitive, params,
effs, source_info=source_info)
# Input-to-output tracer forwarding
no_input_effects = not any(isinstance(e, effects.JaxprInputEffect) for e in effs)
if eqn is not None and no_input_effects and primitive in forwarding_rules:
in_fwd, eqn = forwarding_rules[primitive](eqn)
for out_idx, in_idx in enumerate(in_fwd):
if in_idx is not None:
out_tracers[out_idx] = tracers[in_idx]
if eqn is not None:
self.frame.add_eqn(eqn)
return out_tracers if primitive.multiple_results else out_tracers.pop()
def process_call(self, call_primitive, f: lu.WrappedFun, explicit_tracers,
params):
source_info = source_info_util.current()
to_jaxpr_tracer = partial(self.to_jaxpr_tracer, source_info=source_info)
in_type = (tuple((get_aval(t), True) for t in explicit_tracers)
if f.in_type is None else f.in_type)
f.in_type = None
assert in_type is not None
implicit_tracers = _extract_implicit_args(self, in_type, explicit_tracers,
source_info)
in_tracers = map(to_jaxpr_tracer, [*implicit_tracers, *explicit_tracers])
# TODO(mattjj): check in_tracers are consistent with f.in_type annotation
jaxpr, out_type, consts = _cached_trace_to_jaxpr(f, in_type)
if params.get('inline', False):
return core.eval_jaxpr(jaxpr, consts, *in_tracers,
propagate_source_info=False)
out_avals = [aval for aval, _ in out_type]
new_jaxpr = convert_constvars_jaxpr(jaxpr)
if isinstance(call_primitive, core.ClosedCallPrimitive):
new_jaxpr = close_jaxpr(new_jaxpr) # type: ignore
new_params = dict(params, call_jaxpr=new_jaxpr)
update_params = call_param_updaters.get(call_primitive)
if update_params:
new_params = update_params(new_params, [True] * len(explicit_tracers),
len(consts) + len(implicit_tracers))
const_tracers = map(to_jaxpr_tracer, consts)
out_tracers = self.emit_eqn(
[*const_tracers, *in_tracers], out_avals, call_primitive,
new_params, new_params['call_jaxpr'].effects, source_info=source_info)
return [t for t, (_, keep) in zip(out_tracers, out_type) if keep]
  def process_map(self, map_primitive, f: lu.WrappedFun, tracers, params):
    """Stage a map primitive (e.g. pmap) out as a single equation.

    Traces ``f`` on per-shard avals (mapped axis removed via
    ``core.mapped_aval``) under an extended axis environment, then emits one
    map equation whose outputs carry the unmapped (full-size) avals. Hoisted
    constants become extra unmapped inputs (``in_axes`` entry None).
    """
    source_info = source_info_util.current()
    to_jaxpr_tracer = partial(self.to_jaxpr_tracer, source_info=source_info)
    tracers = map(to_jaxpr_tracer, tracers)
    in_avals = [t.aval for t in tracers]
    axis_name, axis_size = params['axis_name'], params['axis_size']
    # Remove the mapped axis from each input aval before tracing the body.
    reduced_in_avals = [core.mapped_aval(axis_size, in_axis, a)
                        if in_axis is not None else a
                        for a, in_axis in zip(in_avals, params['in_axes'])]
    with core.extend_axis_env_nd([(axis_name, params["global_axis_size"])]):
      jaxpr, reduced_out_avals, consts = trace_to_jaxpr_dynamic(
          f.with_unknown_names(), reduced_in_avals)
      jaxpr, consts = _linearize_of_pmap_hack(f, jaxpr, consts)
      ordered_effects = effects.ordered_effects.filter_in(jaxpr.effects)
      if ordered_effects:
        raise ValueError("Ordered effects not supported for "
                         f"map primitives: {ordered_effects}")
      out_axes = params['out_axes_thunk']()
      # Re-insert the mapped axis into each output aval.
      out_avals = [core.unmapped_aval(axis_size, out_axis, a)
                   if out_axis is not None else a
                   for a, out_axis in zip(reduced_out_avals, out_axes)]
      const_tracers = map(to_jaxpr_tracer, consts)
      # Constants are prepended as unmapped inputs to the body jaxpr.
      new_in_axes = (None,) * len(consts) + params['in_axes']
      new_params = dict(params, in_axes=new_in_axes, out_axes=out_axes,
                        call_jaxpr=convert_constvars_jaxpr(jaxpr))
      del new_params['out_axes_thunk']
      update_params = call_param_updaters.get(map_primitive)
      if update_params:
        new_params = update_params(new_params, [True] * len(tracers), len(consts))
      # Named-axis effects for this map's axis are resolved here, not exposed.
      effs = core.filter_named_axis_effects(jaxpr.effects, {axis_name})
      out_tracers = self.emit_eqn(
          [*const_tracers, *tracers], out_avals, map_primitive, new_params, effs, source_info=source_info)
    return out_tracers
  def process_custom_jvp_call(self, prim, fun: lu.WrappedFun,
                              jvp: lu.WrappedFun, tracers,
                              symbolic_zeros: bool):
    """Stage a custom_jvp call into the jaxpr being built.

    The primal ``fun`` is traced eagerly; the ``jvp`` rule is deferred to a
    memoized thunk keyed on which input tangents are symbolically zero, so it
    is only traced when differentiation actually needs it.
    """
    source_info = source_info_util.current()
    to_jaxpr_tracer = partial(self.to_jaxpr_tracer, source_info=source_info)
    tracers = map(to_jaxpr_tracer, tracers)
    in_avals = [t.aval for t in tracers]
    in_tangent_avals = [t.to_tangent_aval() for t in in_avals]
    fun_jaxpr, out_avals, consts = trace_to_jaxpr_dynamic(fun, in_avals)
    closed_fun_jaxpr = core.ClosedJaxpr(convert_constvars_jaxpr(fun_jaxpr), ())
    @partial(lu.wrap_init, debug_info=jvp.debug_info)
    @_memoize
    def jvp_jaxpr_thunk(*in_zeros):
      # Reset (non-None) stores so jvp can be re-traced for this zeros
      # pattern.
      for store in jvp.stores: store and store.reset()
      # Partition tangent avals into nonzero ones (traced) and zeros.
      nz_tangent_avals, zero_avals = partition_list(in_zeros, in_tangent_avals)
      jvp_, out_zeros = _jvp_jaxpr_zeros(jvp, in_zeros, tuple(zero_avals))
      in_avals_ = (*in_avals, *nz_tangent_avals)
      jaxpr, _, out_consts = trace_to_jaxpr_dynamic(jvp_.with_unknown_names(),
                                                    in_avals_)
      return jaxpr, out_consts, out_zeros()
    const_tracers = map(to_jaxpr_tracer, consts)
    return self.emit_eqn(
        [*const_tracers, *tracers], out_avals, prim,
        dict(call_jaxpr=closed_fun_jaxpr,
             jvp_jaxpr_fun=jvp_jaxpr_thunk,
             num_consts=len(consts),
             symbolic_zeros=symbolic_zeros),
        fun_jaxpr.effects,
        source_info=source_info)
  def process_custom_vjp_call(self, prim: core.Primitive,
                              fun: lu.WrappedFun,
                              fwd: lu.WrappedFun, bwd: lu.WrappedFun, tracers,
                              out_trees: Callable[[], tuple[PyTreeDef, PyTreeDef, list[int | None]]],
                              symbolic_zeros: bool):
    """Stage a custom_vjp call into the jaxpr being built.

    The primal ``fun`` is traced eagerly; the ``fwd`` rule is deferred to a
    memoized thunk keyed on the symbolic-zeros pattern, and ``bwd`` is carried
    in the params as an untraced callable.
    """
    source_info = source_info_util.current()
    to_jaxpr_tracer = partial(self.to_jaxpr_tracer, source_info=source_info)
    tracers = map(to_jaxpr_tracer, tracers)
    # For avals that carry QDD, pair them with their current QDD value.
    in_avals = [core.AvalQDD(t.aval, core.cur_qdd(t)) if t.aval.has_qdd else t.aval for t in tracers]
    fun_jaxpr, out_avals, consts = trace_to_jaxpr_dynamic(fun.with_unknown_names(), in_avals)
    num_consts = len(consts)
    closed_fun_jaxpr = core.ClosedJaxpr(convert_constvars_jaxpr(fun_jaxpr), ())
    @partial(lu.wrap_init, debug_info=fwd.debug_info)
    @_memoize
    def fwd_jaxpr_from_zeros(*zeros):
      # Reset (non-None) stores so fwd can be re-traced for this zeros
      # pattern.
      for store in fwd.stores: store and store.reset()
      fwd_ = _interleave_fun(fwd.with_unknown_names(), zeros)
      jaxpr, _, consts = trace_to_jaxpr_dynamic(fwd_, in_avals)
      return jaxpr, consts
    def out_trees_():
      # Shift non-None input-forwarding indices past the hoisted constants.
      out_tree, res_tree, input_fwds = out_trees()
      input_fwds = [f if f is None else f + num_consts for f in input_fwds]
      return out_tree, res_tree, input_fwds
    const_tracers = map(to_jaxpr_tracer, consts)
    return self.emit_eqn(
        [*const_tracers, *tracers], out_avals, prim,
        dict(call_jaxpr=closed_fun_jaxpr,
             fwd_jaxpr_thunk=fwd_jaxpr_from_zeros,
             num_consts=num_consts,
             bwd=bwd, out_trees=out_trees_,
             symbolic_zeros=symbolic_zeros),
        fun_jaxpr.effects,
        source_info=source_info)
def process_custom_transpose(self, prim: core.Primitive, # type: ignore[override]
call: lu.WrappedFun, tracers, *,
transpose: lu.WrappedFun,
out_types,
lin_tree: PyTreeDef,
res_tree: PyTreeDef, out_tree: PyTreeDef):
source_info = source_info_util.current()
to_jaxpr_tracer = partial(self.to_jaxpr_tracer, source_info=source_info)
tracers = map(to_jaxpr_tracer, tracers)
tracers_res, tracers_lin = split_list(tracers, [res_tree.num_leaves])
in_avals_p = [t.aval for t in tracers]
in_avals_t = [*[t.aval for t in tracers_res], *out_types]
call_jaxpr, out_avals, call_consts = trace_to_jaxpr_dynamic(call, in_avals_p)
closed_call_jaxpr = core.ClosedJaxpr(
convert_constvars_jaxpr(call_jaxpr), ())
transpose_flat, in_tree2 = api_util.flatten_fun_nokwargs(
transpose, treedef_tuple((res_tree, out_tree)))
# the following thunk evaluates to a pair: transpose_jaxpr, transpose_consts
@_memoize
def transpose_jaxpr_thunk():
for store in transpose_flat.stores: store.reset()
jaxpr, _, consts = trace_to_jaxpr_dynamic(transpose_flat, in_avals_t)
return jaxpr, consts
const_tracers = map(to_jaxpr_tracer, call_consts)
return self.emit_eqn(
[*const_tracers, *tracers], out_avals, prim,
dict(call_jaxpr=closed_call_jaxpr,
transpose_jaxpr_thunk=transpose_jaxpr_thunk,
out_types=out_types, res_tree=res_tree,
lin_tree=lin_tree, out_tree=out_tree),
closed_call_jaxpr.effects,
source_info=source_info)
def to_jaxpr(self, out_tracers: Sequence[Tracer],
debug_info: core.DebugInfo, source_info: SourceInfo):
return self.frame.to_jaxpr(self, out_tracers, debug_info, source_info)
@lu.cache
def _cached_trace_to_jaxpr(f, in_type):
  """Trace ``f`` annotated with ``in_type`` to a jaxpr, cached on (f, in_type)."""
  annotated = lu.annotate(f, in_type)
  # trace_to_jaxpr_dynamic2 already returns the (jaxpr, out_type, consts)
  # triple, so it can be returned directly.
  return trace_to_jaxpr_dynamic2(annotated)
# Per-primitive overrides for how a primitive is staged out during tracing;
# empty here and registered externally by other modules.
custom_staging_rules: dict[Primitive, Callable] = {}
@lu.transformation2
def _interleave_fun(f, every_others, *args, **kwargs):
  """Call ``f`` with ``args`` and ``every_others`` interleaved pairwise:
  (args[0], every_others[0], args[1], every_others[1], ...)."""
  interleaved = []
  for a, b in zip(args, every_others):
    interleaved.append(a)
    interleaved.append(b)
  return f(*interleaved, **kwargs)
# TODO: consider renaming to "lazy_thunk"
def _memoize(fn):
  """Memoize ``fn`` by its positional args, calling it with the current
  trace cleared (``core.set_current_trace(None)``)."""
  cache = {}
  _missing = object()
  def wrapper(*args):
    result = cache.get(args, _missing)
    if result is _missing:
      # Run fn with no ambient trace in effect.
      with core.set_current_trace(None):
        result = fn(*args)
      cache[args] = result
    return result
  return wrapper
@lu.transformation_with_aux2
def _jvp_jaxpr_zeros(f, store, in_zeros, zero_avals, *primal_tangent_avals):
  """Wrap a jvp function so zero tangents are passed as SymbolicZero inputs
  and zero output tangents are stripped; the output-zeros pattern is stored
  as the aux value."""
  # Leading values are the primals; the remainder are the nonzero tangents.
  primals, nonzero_tangents = split_list(primal_tangent_avals, [len(in_zeros)])
  zeros = map(ad_util.SymbolicZero, zero_avals)
  all_tangents = merge_lists(in_zeros, nonzero_tangents, zeros)
  results = f(*primals, *all_tangents)
  half, rem = divmod(len(results), 2)
  assert not rem
  primals_out = results[:half]
  tangents_out = results[half:]
  zeros_out = [type(t) is ad_util.SymbolicZero for t in tangents_out]
  nonzero_tangents_out, _ = partition_list(zeros_out, tangents_out)
  store.store(zeros_out)
  return [*primals_out, *nonzero_tangents_out]
def trace_to_jaxpr(
    fun: Callable,
    in_tree: PyTreeDef,
    in_avals_flat: Sequence[AbstractValue | core.AvalQDD],
    debug_info: core.DebugInfo
) -> tuple[Jaxpr, PyTreeDef, list[Any]]:
  """Trace a pytree-accepting ``fun`` on abstract values to a jaxpr.

  Args:
    fun: Python callable; called on ``in_avals_flat`` turned into tracers and
      unflattened according to ``in_tree``.
    in_tree: pytree structure of ``fun``'s arguments.
    in_avals_flat: flat abstract values (optionally with QDD) for the inputs.
    debug_info: debugging metadata; result paths are recorded on it here.

  Returns:
    A ``(jaxpr, out_tree, consts)`` triple.
  """
  config.enable_checks.value and debug_info.assert_arg_names(len(in_avals_flat))
  parent_trace = core.trace_ctx.trace
  trace = DynamicJaxprTrace(debug_info, parent_trace=parent_trace)
  # Name stacks are reset because the name stacks on jaxpr equations should be
  # rooted at the enclosing jaxpr.
  with core.ensure_no_leaks(trace), source_info_util.reset_name_stack():
    source_info = source_info_util.current()
    in_tracers_flat = map(partial(trace.new_arg, source_info=source_info),
                          in_avals_flat)
    with core.set_current_trace(trace):
      in_tracers = tree_unflatten(in_tree, in_tracers_flat)
      ans = fun(*in_tracers)
      debug_info = debug_info.set_result_paths(ans)
    ans_flat, out_tree = tree_flatten(ans)
    # Validate outputs before converting them to tracers.
    _check_returned_jaxtypes(debug_info, ans_flat)
    out_tracers = map(partial(trace.to_jaxpr_tracer, source_info=source_info), ans_flat)
    _check_no_returned_refs(debug_info, out_tracers)
    jaxpr, consts = trace.frame.to_jaxpr(trace, out_tracers, debug_info,
                                         source_info)
    # Drop references promptly so ensure_no_leaks doesn't see stale tracers.
    del trace, fun, in_tracers_flat, in_tracers, out_tracers, ans, ans_flat
  config.enable_checks.value and core.check_jaxpr(jaxpr)
  return jaxpr, out_tree, consts
# TODO(dougalm): remove in favor of `trace_to_jaxpr`
@profiler.annotate_function
def trace_to_jaxpr_dynamic(
    fun: lu.WrappedFun,
    in_avals: Sequence[AbstractValue | core.AvalQDD],
    *,
    keep_inputs: list[bool] | None = None,
    lower: bool = False,
    auto_dce: bool = False,
) -> tuple[Jaxpr, list[AbstractValue], list[Any]]:
  """Trace a flat-argument ``fun`` on abstract values to a jaxpr.

  Args:
    fun: wrapped function taking the kept inputs positionally.
    in_avals: abstract values (optionally with QDD) for all inputs.
    keep_inputs: per-input flags; inputs marked False still produce jaxpr
      binders but are not passed to ``fun``. Defaults to keeping all inputs.
    lower: forwarded to DynamicJaxprTrace.
    auto_dce: forwarded to DynamicJaxprTrace.

  Returns:
    ``(jaxpr, out_avals, consts)``.
  """
  config.enable_checks.value and fun.debug_info.assert_arg_names(len(in_avals))
  keep_inputs = [True] * len(in_avals) if keep_inputs is None else keep_inputs
  parent_trace = core.trace_ctx.trace
  trace = DynamicJaxprTrace(fun.debug_info, parent_trace=parent_trace,
                            lower=lower, auto_dce=auto_dce)
  # Name stacks are reset because the name stacks on jaxpr equations should be
  # rooted at the enclosing jaxpr.
  with core.ensure_no_leaks(trace), source_info_util.reset_name_stack():
    source_info = source_info_util.current()
    in_tracers = _input_type_to_tracers(
        partial(trace.new_arg, source_info=source_info), in_avals)
    # Only the kept inputs are passed to fun.
    in_tracers = [t for t, keep in zip(in_tracers, keep_inputs) if keep]
    with core.set_current_trace(trace):
      ans = fun.call_wrapped(*in_tracers)
    _check_returned_jaxtypes(fun.debug_info, ans)
    out_tracers = map(partial(trace.to_jaxpr_tracer, source_info=source_info), ans)
    _check_no_returned_refs(fun.debug_info, out_tracers)
    jaxpr, consts = trace.frame.to_jaxpr(trace, out_tracers, fun.debug_info,
                                         source_info)
    # Drop references promptly so ensure_no_leaks doesn't see stale tracers.
    del trace, fun, in_tracers, out_tracers, ans
  config.enable_checks.value and core.check_jaxpr(jaxpr)
  return jaxpr, [v.aval for v in jaxpr.outvars], consts
def _check_returned_jaxtypes(dbg, out_tracers):
  """Raise a TypeError for any traced output that is not a valid JAX type,
  naming the offending output component when debug info allows it."""
  for i, x in enumerate(out_tracers):
    try:
      typeof(x)
    except TypeError:
      # Try to pinpoint which output component is bad.
      extra = ''
      if dbg:
        paths = dbg.resolve_result_paths()
        if len(paths) > i:
          p = paths[i].removeprefix('result')
          if p:
            extra = f' at output component {p}'
      raise TypeError(
          f"function {dbg.func_src_info} traced for {dbg.traced_for} returned a "
          f"value of type {type(x)}{extra}, which is not a valid JAX type") from None
def _check_no_returned_refs(
    dbg: core.DebugInfo,
    out_tracers: Sequence[DynamicJaxprTracer]
) -> None:
  """Raise ValueError if any traced output is a mutable array reference.

  Only runs when the ``mutable_array_checks`` config is enabled. The error
  message points at where the returned ref originated — a creation equation
  or a function argument — when that can be determined from the frame.
  """
  if not config.mutable_array_checks.value: return
  for i, t in enumerate(out_tracers):
    a = t.aval
    if isinstance(a, AbstractRef):
      result_paths = dbg.resolve_result_paths().safe_result_paths(len(out_tracers))
      # Empty path string yields an empty location suffix.
      loc = result_paths[i] and f' at output tree path {result_paths[i]}'
      frame = t._trace.frame
      v = t.val
      eqns = frame.get_eqns()
      # TODO(dougalm): something more efficient
      # Find the equation (if any) whose outputs include this ref value.
      eqn = next((e for e in eqns if v in e.outvars), None)
      if eqn:
        assert eqn.primitive is core.ref_p
        origin_info = ('\n\nThe returned mutable array was created on line '
                       f'{source_info_util.summarize(eqn.source_info)}.')
      elif v in frame.invars:
        # The ref came in as an argument; name it from the debug info.
        arg_name = dbg.safe_arg_names(len(frame.invars))[frame.invars.index(v)]
        origin_info = ('\n\nThe returned mutable array was passed in as the '
                       f'argument {arg_name}.')
      else:
        origin_info = ''
      raise ValueError(
          f"function {dbg.func_src_info} traced for {dbg.traced_for} returned "
          f"a mutable array reference of type {a.str_short()}{loc}, but "
          f"mutable array references cannot be returned.{origin_info}")
@profiler.annotate_function
def trace_to_jaxpr_dynamic2(
    fun: lu.WrappedFun,
) -> tuple[Jaxpr, OutputType, list[Any]]:
  """Trace a ``lu.annotate``-d ``fun`` to a jaxpr using its input type.

  ``fun.in_type`` supplies both the input avals and per-input keep flags;
  inputs marked not-kept become binders only and are not passed to ``fun``.
  Returns the ``(jaxpr, out_type, consts)`` triple produced by ``to_jaxpr2``.
  """
  assert fun.in_type is not None, "fun must be annotated with lu.annotate()"
  config.enable_checks.value and fun.debug_info.assert_arg_names(len(fun.in_type))
  parent_trace = core.trace_ctx.trace
  trace = DynamicJaxprTrace(fun.debug_info, parent_trace=parent_trace)
  with core.ensure_no_leaks(trace), source_info_util.reset_name_stack():
    source_info = source_info_util.current()
    in_avals, keep_inputs = unzip2(fun.in_type)
    in_tracers = _input_type_to_tracers(
        partial(trace.new_arg, source_info=source_info), in_avals)
    # Only the kept inputs are passed to fun.
    in_tracers = [t for t, keep in zip(in_tracers, keep_inputs) if keep]
    with core.set_current_trace(trace):
      ans = fun.call_wrapped(*in_tracers)
    out_tracers = map(partial(trace.to_jaxpr_tracer, source_info=source_info), ans)
    jaxpr = trace.frame.to_jaxpr2(out_tracers, fun.debug_info)
    # Drop references promptly so ensure_no_leaks doesn't see stale tracers.
    del trace, in_tracers, out_tracers, ans
  return jaxpr
# Any hashable value may serve as the name of an abstracted axis.
AbstractedAxisName = Hashable
# Per-argument abstracted-axes spec: either a mapping from axis position to
# axis name, or a tuple of axis names given positionally.
AbstractedAxesSpec = Union[
    dict[int, AbstractedAxisName],
    tuple[AbstractedAxisName, ...],
]
@register_static
| DynamicJaxprTrace |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/req/req_file.py | {
"start": 2195,
"end": 2688
} | class ____:
def __init__(
self,
requirement: str,
is_editable: bool,
comes_from: str,
constraint: bool,
options: Optional[Dict[str, Any]] = None,
line_source: Optional[str] = None,
) -> None:
self.requirement = requirement
self.is_editable = is_editable
self.comes_from = comes_from
self.options = options
self.constraint = constraint
self.line_source = line_source
| ParsedRequirement |
python | pandas-dev__pandas | asv_bench/benchmarks/series_methods.py | {
"start": 4859,
"end": 5168
} | class ____:
params = [[10**3, 10**4, 10**5], ["int", "uint", "float", "object"]]
param_names = ["N", "dtype"]
def setup(self, N, dtype):
self.s = Series(np.random.randint(0, N, size=10 * N)).astype(dtype)
def time_value_counts(self, N, dtype):
self.s.value_counts()
| ValueCounts |
python | sqlalchemy__sqlalchemy | examples/dogpile_caching/caching_query.py | {
"start": 851,
"end": 3243
} | class ____:
"""An add-on for an ORM :class:`.Session` optionally loads full results
from a dogpile cache region.
"""
def __init__(self, regions):
self.cache_regions = regions
self._statement_cache = {}
def listen_on_session(self, session_factory):
event.listen(session_factory, "do_orm_execute", self._do_orm_execute)
def _do_orm_execute(self, orm_context):
for opt in orm_context.user_defined_options:
if isinstance(opt, RelationshipCache):
opt = opt._process_orm_context(orm_context)
if opt is None:
continue
if isinstance(opt, FromCache):
dogpile_region = self.cache_regions[opt.region]
our_cache_key = opt._generate_cache_key(
orm_context.statement, orm_context.parameters or {}, self
)
if opt.ignore_expiration:
cached_value = dogpile_region.get(
our_cache_key,
expiration_time=opt.expiration_time,
ignore_expiration=opt.ignore_expiration,
)
else:
def createfunc():
return orm_context.invoke_statement().freeze()
cached_value = dogpile_region.get_or_create(
our_cache_key,
createfunc,
expiration_time=opt.expiration_time,
)
if cached_value is NO_VALUE:
# keyerror? this is bigger than a keyerror...
raise KeyError()
orm_result = loading.merge_frozen_result(
orm_context.session,
orm_context.statement,
cached_value,
load=False,
)
return orm_result()
else:
return None
def invalidate(self, statement, parameters, opt):
"""Invalidate the cache value represented by a statement."""
if isinstance(statement, Query):
statement = statement.__clause_element__()
dogpile_region = self.cache_regions[opt.region]
cache_key = opt._generate_cache_key(statement, parameters, self)
dogpile_region.delete(cache_key)
| ORMCache |
python | openai__gym | gym/envs/mujoco/mujoco_env.py | {
"start": 9587,
"end": 14450
} | class ____(BaseMujocoEnv):
"""Superclass for MuJoCo environments."""
def __init__(
self,
model_path,
frame_skip,
observation_space: Space,
render_mode: Optional[str] = None,
width: int = DEFAULT_SIZE,
height: int = DEFAULT_SIZE,
camera_id: Optional[int] = None,
camera_name: Optional[str] = None,
):
if MUJOCO_IMPORT_ERROR is not None:
raise error.DependencyNotInstalled(
f"{MUJOCO_IMPORT_ERROR}. (HINT: you need to install mujoco)"
)
super().__init__(
model_path,
frame_skip,
observation_space,
render_mode,
width,
height,
camera_id,
camera_name,
)
def _initialize_simulation(self):
self.model = mujoco.MjModel.from_xml_path(self.fullpath)
# MjrContext will copy model.vis.global_.off* to con.off*
self.model.vis.global_.offwidth = self.width
self.model.vis.global_.offheight = self.height
self.data = mujoco.MjData(self.model)
def _reset_simulation(self):
mujoco.mj_resetData(self.model, self.data)
def set_state(self, qpos, qvel):
super().set_state(qpos, qvel)
self.data.qpos[:] = np.copy(qpos)
self.data.qvel[:] = np.copy(qvel)
if self.model.na == 0:
self.data.act[:] = None
mujoco.mj_forward(self.model, self.data)
def _step_mujoco_simulation(self, ctrl, n_frames):
self.data.ctrl[:] = ctrl
mujoco.mj_step(self.model, self.data, nstep=self.frame_skip)
# As of MuJoCo 2.0, force-related quantities like cacc are not computed
# unless there's a force sensor in the model.
# See https://github.com/openai/gym/issues/1541
mujoco.mj_rnePostConstraint(self.model, self.data)
def render(self):
if self.render_mode is None:
gym.logger.warn(
"You are calling render method without specifying any render mode. "
"You can specify the render_mode at initialization, "
f'e.g. gym("{self.spec.id}", render_mode="rgb_array")'
)
return
if self.render_mode in {
"rgb_array",
"depth_array",
}:
camera_id = self.camera_id
camera_name = self.camera_name
if camera_id is not None and camera_name is not None:
raise ValueError(
"Both `camera_id` and `camera_name` cannot be"
" specified at the same time."
)
no_camera_specified = camera_name is None and camera_id is None
if no_camera_specified:
camera_name = "track"
if camera_id is None:
camera_id = mujoco.mj_name2id(
self.model,
mujoco.mjtObj.mjOBJ_CAMERA,
camera_name,
)
self._get_viewer(self.render_mode).render(camera_id=camera_id)
if self.render_mode == "rgb_array":
data = self._get_viewer(self.render_mode).read_pixels(depth=False)
# original image is upside-down, so flip it
return data[::-1, :, :]
elif self.render_mode == "depth_array":
self._get_viewer(self.render_mode).render()
# Extract depth part of the read_pixels() tuple
data = self._get_viewer(self.render_mode).read_pixels(depth=True)[1]
# original image is upside-down, so flip it
return data[::-1, :]
elif self.render_mode == "human":
self._get_viewer(self.render_mode).render()
def close(self):
if self.viewer is not None:
self.viewer.close()
super().close()
def _get_viewer(
self, mode
) -> Union[
"gym.envs.mujoco.mujoco_rendering.Viewer",
"gym.envs.mujoco.mujoco_rendering.RenderContextOffscreen",
]:
self.viewer = self._viewers.get(mode)
if self.viewer is None:
if mode == "human":
from gym.envs.mujoco.mujoco_rendering import Viewer
self.viewer = Viewer(self.model, self.data)
elif mode in {"rgb_array", "depth_array"}:
from gym.envs.mujoco.mujoco_rendering import RenderContextOffscreen
self.viewer = RenderContextOffscreen(self.model, self.data)
else:
raise AttributeError(
f"Unexpected mode: {mode}, expected modes: {self.metadata['render_modes']}"
)
self.viewer_setup()
self._viewers[mode] = self.viewer
return self.viewer
def get_body_com(self, body_name):
return self.data.body(body_name).xpos
| MujocoEnv |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_group_tagkey_values.py | {
"start": 502,
"end": 10828
} | class ____(APITestCase, SnubaTestCase, PerformanceIssueTestCase):
@mock.patch("sentry.analytics.record")
def test_simple(self, mock_record: mock.MagicMock) -> None:
key, value = "foo", "bar"
project = self.create_project()
event = self.store_event(
data={"tags": {key: value}, "timestamp": before_now(seconds=1).isoformat()},
project_id=project.id,
)
group = event.group
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/tags/{key}/values/"
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["value"] == "bar"
assert_last_analytics_event(
mock_record,
EventUserEndpointRequest(
project_id=project.id,
endpoint="sentry.api.endpoints.group_tagkey_values.get",
),
)
def test_simple_perf(self) -> None:
key, value = "foo", "bar"
event = self.create_performance_issue(
tags=[[key, value]],
fingerprint="group1",
contexts={"trace": {"trace_id": "b" * 32, "span_id": "c" * 16, "op": ""}},
)
assert event.group is not None
self.login_as(user=self.user)
url = f"/api/0/issues/{event.group.id}/tags/{key}/values/"
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["value"] == value
def test_user_tag(self) -> None:
project = self.create_project()
project.date_added = timezone.now() - timedelta(minutes=10)
project.save()
event = self.store_event(
data={
"user": {
"id": 1,
"email": "foo@example.com",
"username": "foo",
"ip_address": "127.0.0.1",
},
"timestamp": before_now(seconds=10).isoformat(),
},
project_id=project.id,
)
group = event.group
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/tags/user/values/"
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["email"] == "foo@example.com"
assert response.data[0]["value"] == "id:1"
def test_tag_value_with_backslash(self) -> None:
project = self.create_project()
project.date_added = timezone.now() - timedelta(minutes=10)
project.save()
event = self.store_event(
data={
"message": "minidumpC:\\Users\\test",
"user": {
"id": 1,
"email": "foo@example.com",
"username": "foo",
"ip_address": "127.0.0.1",
},
"timestamp": before_now(seconds=5).isoformat(),
"tags": {"message": "minidumpC:\\Users\\test"},
},
project_id=project.id,
)
group = event.group
self.login_as(user=self.user)
url = (
f"/api/0/issues/{group.id}/tags/message/values/?query=minidumpC%3A%5C%5CUsers%5C%5Ctest"
)
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 1
assert response.data[0]["value"] == "minidumpC:\\Users\\test"
def test_includes_empty_values_by_default(self) -> None:
project = self.create_project()
self.store_event(
data={
"tags": {"foo": ""},
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=project.id,
assert_no_errors=False,
)
event = self.store_event(
data={
"tags": {"foo": "bar"},
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=project.id,
)
group = event.group
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/tags/foo/values/"
response = self.client.get(url)
assert response.status_code == 200
values = {item["value"] for item in response.data}
assert values == {"", "bar"}
counts = {item["value"]: item["count"] for item in response.data}
assert counts.get("") == 1
assert counts.get("bar") == 1
def test_includes_empty_values_backend_helpers(self) -> None:
project = self.create_project()
self.store_event(
data={
"tags": {"foo": ""},
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=project.id,
assert_no_errors=False,
)
event = self.store_event(
data={
"tags": {"foo": "bar"},
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=project.id,
)
group = event.group
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/tags/foo/values/"
group_tag_key = tagstore.backend.get_group_tag_key(
group,
None,
"foo",
tenant_ids={"organization_id": group.project.organization_id},
)
top_values = {tv.value for tv in group_tag_key.top_values}
assert top_values == {"", "bar"}
iter_values = tagstore.backend.get_group_tag_value_iter(
group,
[],
"foo",
tenant_ids={"organization_id": group.project.organization_id},
)
assert {tv.value for tv in iter_values} == {"", "bar"}
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 2
values = {item["value"]: item["count"] for item in response.data}
assert values.get("bar") == 1
assert values.get("") == 1
def test_user_tag_with_empty_values(self) -> None:
"""Test that user tags with empty values don't cause AttributeError."""
project = self.create_project()
# Event with user data
self.store_event(
data={
"user": {"id": "user123"},
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=project.id,
)
# Event without user data (will have empty user tag)
event = self.store_event(
data={
"timestamp": before_now(seconds=2).isoformat(),
},
project_id=project.id,
)
group = event.group
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/tags/user/values/"
# This should not crash with AttributeError: 'NoneType' object has no attribute 'split'
response = self.client.get(url)
assert response.status_code == 200
# Should return at least the user with id, empty values may or may not be included
assert len(response.data) >= 1
def test_count_sort(self) -> None:
project = self.create_project()
project.date_added = timezone.now() - timedelta(minutes=10)
project.save()
event = self.store_event(
data={
"message": "message 1",
"platform": "python",
"user": {
"id": 1,
"email": "foo@example.com",
"username": "foo",
"ip_address": "127.0.0.1",
},
"timestamp": before_now(seconds=10).isoformat(),
},
project_id=project.id,
)
self.store_event(
data={
"message": "message 1",
"platform": "python",
"user": {
"id": 1,
"email": "foo@example.com",
"username": "foo",
"ip_address": "127.0.0.1",
},
"timestamp": before_now(seconds=10).isoformat(),
},
project_id=project.id,
)
self.store_event(
data={
"message": "message 1",
"platform": "python",
"user": {
"id": 2,
"email": "bar@example.com",
"username": "bar",
"ip_address": "127.0.0.1",
},
"timestamp": before_now(seconds=10).isoformat(),
},
project_id=project.id,
)
group = event.group
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/tags/user/values/?sort=count"
response = self.client.get(url)
assert response.status_code == 200
assert len(response.data) == 2
assert response.data[0]["email"] == "foo@example.com"
assert response.data[0]["value"] == "id:1"
assert response.data[1]["email"] == "bar@example.com"
assert response.data[1]["value"] == "id:2"
@mock.patch("sentry.analytics.record")
@override_settings(SENTRY_SELF_HOSTED=False)
def test_ratelimit(self, mock_record: mock.MagicMock) -> None:
key, value = "foo", "bar"
project = self.create_project()
event = self.store_event(
data={"tags": {key: value}, "timestamp": before_now(seconds=1).isoformat()},
project_id=project.id,
)
group = event.group
self.login_as(user=self.user)
url = f"/api/0/issues/{group.id}/tags/{key}/values/"
with freeze_time(datetime.datetime.now()):
for i in range(150):
response = self.client.get(url)
assert response.status_code == 200
response = self.client.get(url)
assert response.status_code == 429
assert_last_analytics_event(
mock_record,
EventUserEndpointRequest(
project_id=project.id,
endpoint="sentry.api.endpoints.group_tagkey_values.get",
),
)
| GroupTagKeyValuesTest |
python | anthropics__anthropic-sdk-python | src/anthropic/types/text_block.py | {
"start": 259,
"end": 662
} | class ____(BaseModel):
citations: Optional[List[TextCitation]] = None
"""Citations supporting the text block.
The type of citation returned will depend on the type of document being cited.
Citing a PDF results in `page_location`, plain text results in `char_location`,
and content document results in `content_block_location`.
"""
text: str
type: Literal["text"]
| TextBlock |
python | pypa__pip | src/pip/_internal/cli/spinners.py | {
"start": 2650,
"end": 3487
} | class ____(SpinnerInterface):
def __init__(self, message: str, min_update_interval_seconds: float = 60.0) -> None:
self._message = message
self._finished = False
self._rate_limiter = RateLimiter(min_update_interval_seconds)
self._update("started")
def _update(self, status: str) -> None:
assert not self._finished
self._rate_limiter.reset()
logger.info("%s: %s", self._message, status)
def spin(self) -> None:
if self._finished:
return
if not self._rate_limiter.ready():
return
self._update("still running...")
def finish(self, final_status: str) -> None:
if self._finished:
return
self._update(f"finished with status '{final_status}'")
self._finished = True
| NonInteractiveSpinner |
python | huggingface__transformers | src/transformers/models/m2m_100/modeling_m2m_100.py | {
"start": 8710,
"end": 14415
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
config: Optional[M2M100Config] = None,
layer_idx: Optional[int] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
self.layer_idx = layer_idx
if layer_idx is None and self.is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def forward(
self,
hidden_states: torch.Tensor,
key_value_states: Optional[torch.Tensor] = None,
past_key_values: Optional[Cache] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
cache_position: Optional[torch.Tensor] = None,
# TODO: we need a refactor so that the different attention modules can get their specific kwargs
# ATM, we have mixed things encoder, decoder, and encoder-decoder attn
**kwargs: Unpack[FlashAttentionKwargs],
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
# if key_value_states are provided this layer is used as a cross-attention layer
# for the decoder
is_cross_attention = key_value_states is not None
# determine input shapes
bsz, tgt_len = hidden_states.shape[:-1]
src_len = key_value_states.shape[1] if is_cross_attention else tgt_len
q_input_shape = (bsz, tgt_len, -1, self.head_dim)
kv_input_shape = (bsz, src_len, -1, self.head_dim)
# get query proj
query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)
is_updated = False
if past_key_values is not None:
if isinstance(past_key_values, EncoderDecoderCache):
is_updated = past_key_values.is_updated.get(self.layer_idx)
if is_cross_attention:
# after the first generated id, we can subsequently re-use all key/value_states from cache
curr_past_key_values = past_key_values.cross_attention_cache
else:
curr_past_key_values = past_key_values.self_attention_cache
else:
curr_past_key_values = past_key_values
current_states = key_value_states if is_cross_attention else hidden_states
if is_cross_attention and past_key_values is not None and is_updated:
# reuse k,v, cross_attentions
key_states = curr_past_key_values.layers[self.layer_idx].keys
value_states = curr_past_key_values.layers[self.layer_idx].values
else:
key_states = self.k_proj(current_states)
value_states = self.v_proj(current_states)
key_states = key_states.view(*kv_input_shape).transpose(1, 2)
value_states = value_states.view(*kv_input_shape).transpose(1, 2)
if past_key_values is not None:
# save all key/value_states to cache to be re-used for fast auto-regressive generation
cache_position = cache_position if not is_cross_attention else None
key_states, value_states = curr_past_key_values.update(
key_states, value_states, self.layer_idx, {"cache_position": cache_position}
)
# set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
past_key_values.is_updated[self.layer_idx] = True
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=self.scaling,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.mbart.modeling_mbart.MBartEncoderLayer with MBart->M2M100, MBART->M2M100
| M2M100Attention |
python | run-llama__llama_index | llama-index-core/llama_index/core/callbacks/schema.py | {
"start": 2923,
"end": 3351
} | class ____:
"""Generic class to store event information."""
event_type: CBEventType
payload: Optional[Dict[str, Any]] = None
time: str = ""
id_: str = ""
def __post_init__(self) -> None:
"""Init time and id if needed."""
if not self.time:
self.time = datetime.now().strftime(TIMESTAMP_FORMAT)
if not self.id_:
self.id = str(uuid.uuid4())
@dataclass
| CBEvent |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/_internal/expandinput.py | {
"start": 3877,
"end": 8799
} | class ____(ResolveMixin):
"""
Storage type of a mapped operator's mapped kwargs.
This is created from ``expand(**kwargs)``.
"""
value: dict[str, OperatorExpandArgument]
EXPAND_INPUT_TYPE: ClassVar[str] = "dict-of-lists"
def _iter_parse_time_resolved_kwargs(self) -> Iterable[tuple[str, Sized]]:
"""Generate kwargs with values available on parse-time."""
return ((k, v) for k, v in self.value.items() if _is_parse_time_mappable(v))
def get_parse_time_mapped_ti_count(self) -> int:
if not self.value:
return 0
literal_values = [len(v) for _, v in self._iter_parse_time_resolved_kwargs()]
if len(literal_values) != len(self.value):
literal_keys = (k for k, _ in self._iter_parse_time_resolved_kwargs())
raise NotFullyPopulated(set(self.value).difference(literal_keys))
return functools.reduce(operator.mul, literal_values, 1)
def _get_map_lengths(
self, resolved_vals: dict[str, Sized], upstream_map_indexes: dict[str, int]
) -> dict[str, int]:
"""
Return dict of argument name to map length.
If any arguments are not known right now (upstream task not finished),
they will not be present in the dict.
"""
# TODO: This initiates one API call for each XComArg. Would it be
# more efficient to do one single call and unpack the value here?
def _get_length(k: str, v: OperatorExpandArgument) -> int | None:
from airflow.sdk.definitions.xcom_arg import XComArg, get_task_map_length
if isinstance(v, XComArg):
return get_task_map_length(v, resolved_vals[k], upstream_map_indexes)
# Unfortunately a user-defined TypeGuard cannot apply negative type
# narrowing. https://github.com/python/typing/discussions/1013
if TYPE_CHECKING:
assert isinstance(v, Sized)
return len(v)
map_lengths = {
k: res for k, v in self.value.items() if v is not None if (res := _get_length(k, v)) is not None
}
if len(map_lengths) < len(self.value):
raise NotFullyPopulated(set(self.value).difference(map_lengths))
return map_lengths
def _expand_mapped_field(self, key: str, value: Any, map_index: int, all_lengths: dict[str, int]) -> Any:
def _find_index_for_this_field(index: int) -> int:
# Need to use the original user input to retain argument order.
for mapped_key in reversed(self.value):
mapped_length = all_lengths[mapped_key]
if mapped_length < 1:
raise RuntimeError(f"cannot expand field mapped to length {mapped_length!r}")
if mapped_key == key:
return index % mapped_length
index //= mapped_length
return -1
found_index = _find_index_for_this_field(map_index)
if found_index < 0:
return value
if isinstance(value, Sequence):
return value[found_index]
if not isinstance(value, dict):
raise TypeError(f"can't map over value of type {type(value)}")
for i, (k, v) in enumerate(value.items()):
if i == found_index:
return k, v
raise IndexError(f"index {map_index} is over mapped length")
def iter_references(self) -> Iterable[tuple[Operator, str]]:
from airflow.sdk.definitions.xcom_arg import XComArg
for x in self.value.values():
if isinstance(x, XComArg):
yield from x.iter_references()
def resolve(self, context: Mapping[str, Any]) -> tuple[Mapping[str, Any], set[int]]:
map_index: int | None = context["ti"].map_index
if map_index is None or map_index < 0:
raise RuntimeError("can't resolve task-mapping argument without expanding")
upstream_map_indexes = getattr(context["ti"], "_upstream_map_indexes", {})
# TODO: This initiates one API call for each XComArg. Would it be
# more efficient to do one single call and unpack the value here?
resolved = {
k: v.resolve(context) if _needs_run_time_resolution(v) else v for k, v in self.value.items()
}
sized_resolved = {k: v for k, v in resolved.items() if isinstance(v, Sized)}
all_lengths = self._get_map_lengths(sized_resolved, upstream_map_indexes)
data = {k: self._expand_mapped_field(k, v, map_index, all_lengths) for k, v in resolved.items()}
literal_keys = {k for k, _ in self._iter_parse_time_resolved_kwargs()}
resolved_oids = {id(v) for k, v in data.items() if k not in literal_keys}
return data, resolved_oids
def _describe_type(value: Any) -> str:
if value is None:
return "None"
return type(value).__name__
@attrs.define()
| DictOfListsExpandInput |
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_audio.py | {
"start": 46949,
"end": 47809
} | class ____(nn.Module):
def __init__(self, input_dim, num_labels, scale=30.0, margin=0.4):
super().__init__()
self.scale = scale
self.margin = margin
self.num_labels = num_labels
self.weight = nn.Parameter(torch.randn(input_dim, num_labels), requires_grad=True)
self.loss = nn.CrossEntropyLoss()
def forward(self, hidden_states, labels):
labels = labels.flatten()
weight = nn.functional.normalize(self.weight, dim=0)
hidden_states = nn.functional.normalize(hidden_states, dim=1)
cos_theta = torch.mm(hidden_states, weight)
psi = cos_theta - self.margin
onehot = nn.functional.one_hot(labels, self.num_labels)
logits = self.scale * torch.where(onehot.bool(), psi, cos_theta)
loss = self.loss(logits, labels)
return loss
| AMSoftmaxLoss |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 497530,
"end": 498009
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of ClosePullRequest"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "pull_request")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
"""The pull request that was closed."""
| ClosePullRequestPayload |
python | walkccc__LeetCode | solutions/403. Frog Jump/403.py | {
"start": 0,
"end": 463
} | class ____:
def canCross(self, stones: list[int]) -> bool:
n = len(stones)
# dp[i][j] := True if a frog can make a size j jump to stones[i]
dp = [[False] * (n + 1) for _ in range(n)]
dp[0][0] = True
for i in range(1, n):
for j in range(i):
k = stones[i] - stones[j]
if k > n:
continue
for x in (k - 1, k, k + 1):
if 0 <= x <= n:
dp[i][k] |= dp[j][x]
return any(dp[-1])
| Solution |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 6964,
"end": 6998
} | class ____(BlogBase):
pass
| BlogB |
python | scipy__scipy | scipy/stats/tests/test_qmc.py | {
"start": 33904,
"end": 37802
} | class ____(QMCEngineTests):
qmce = qmc.PoissonDisk
can_scramble = False
def test_bounds(self, *args):
pytest.skip("Too costly in memory.")
def test_fast_forward(self, *args):
pytest.skip("Not applicable: recursive process.")
def test_sample(self, *args):
pytest.skip("Not applicable: the value of reference sample is"
" implementation dependent.")
def test_continuing(self, *args):
# can continue a sampling, but will not preserve the same order
# because candidates are lost, so we will not select the same center
radius = 0.05
ns = 6
engine = self.engine(d=2, radius=radius, scramble=False)
sample_init = engine.random(n=ns)
assert len(sample_init) <= ns
assert l2_norm(sample_init) >= radius
sample_continued = engine.random(n=ns)
assert len(sample_continued) <= ns
assert l2_norm(sample_continued) >= radius
sample = np.concatenate([sample_init, sample_continued], axis=0)
assert len(sample) <= ns * 2
assert l2_norm(sample) >= radius
def test_mindist(self):
rng = np.random.default_rng(132074951149370773672162394161442690287)
ns = 50
low, high = 0.08, 0.2
radii = (high - low) * rng.random(5) + low
dimensions = [1, 3, 4]
hypersphere_methods = ["volume", "surface"]
gen = product(dimensions, radii, hypersphere_methods)
for d, radius, hypersphere in gen:
engine = self.qmce(
d=d, radius=radius, hypersphere=hypersphere, rng=rng
)
sample = engine.random(ns)
assert len(sample) <= ns
assert l2_norm(sample) >= radius
def test_fill_space(self):
radius = 0.2
engine = self.qmce(d=2, radius=radius)
sample = engine.fill_space()
# circle packing problem is np complex
assert l2_norm(sample) >= radius
@pytest.mark.parametrize("l_bounds", [[-1, -2, -1], [1, 2, 1]])
def test_sample_inside_lower_bounds(self, l_bounds):
radius = 0.2
u_bounds=[3, 3, 2]
engine = self.qmce(
d=3, radius=radius, l_bounds=l_bounds, u_bounds=u_bounds
)
sample = engine.random(30)
for point in sample:
assert_array_less(point, u_bounds)
assert_array_less(l_bounds, point)
@pytest.mark.parametrize("u_bounds", [[-1, -2, -1], [1, 2, 1]])
def test_sample_inside_upper_bounds(self, u_bounds):
radius = 0.2
l_bounds=[-3, -3, -2]
engine = self.qmce(
d=3, radius=radius, l_bounds=l_bounds, u_bounds=u_bounds
)
sample = engine.random(30)
for point in sample:
assert_array_less(point, u_bounds)
assert_array_less(l_bounds, point)
def test_inconsistent_bound_value(self):
radius = 0.2
l_bounds=[3, 2, 1]
u_bounds=[-1, -2, -1]
with pytest.raises(
ValueError,
match="Bounds are not consistent 'l_bounds' < 'u_bounds'"):
self.qmce(d=3, radius=radius, l_bounds=l_bounds, u_bounds=u_bounds)
@pytest.mark.parametrize("u_bounds", [[-1, -2, -1], [-1, -2]])
@pytest.mark.parametrize("l_bounds", [[3, 2]])
def test_inconsistent_bounds(self, u_bounds, l_bounds):
radius = 0.2
with pytest.raises(
ValueError,
match="'l_bounds' and 'u_bounds' must be broadcastable and respect"
" the sample dimension"):
self.qmce(
d=3, radius=radius,
l_bounds=l_bounds, u_bounds=u_bounds
)
def test_raises(self):
message = r"'toto' is not a valid hypersphere sampling"
with pytest.raises(ValueError, match=message):
qmc.PoissonDisk(1, hypersphere="toto")
| TestPoisson |
python | pennersr__django-allauth | allauth/headless/app_settings.py | {
"start": 37,
"end": 2110
} | class ____:
def __init__(self, prefix):
self.prefix = prefix
def _setting(self, name, dflt):
from allauth.utils import get_setting
return get_setting(self.prefix + name, dflt)
@property
def ADAPTER(self):
return self._setting(
"ADAPTER", "allauth.headless.adapter.DefaultHeadlessAdapter"
)
@property
def TOKEN_STRATEGY(self):
from allauth.utils import import_attribute
path = self._setting(
"TOKEN_STRATEGY",
"allauth.headless.tokens.strategies.sessions.SessionTokenStrategy",
)
cls = import_attribute(path)
return cls()
@property
def SERVE_SPECIFICATION(self) -> bool:
return self._setting("SERVE_SPECIFICATION", False)
@property
def SPECIFICATION_TEMPLATE_NAME(self) -> Optional[str]:
return self._setting(
"SPECIFICATION_TEMPLATE_NAME", "headless/spec/redoc_cdn.html"
)
@property
def CLIENTS(self) -> Tuple[str]:
return tuple(self._setting("CLIENTS", ("browser", "app")))
@property
def FRONTEND_URLS(self):
return self._setting("FRONTEND_URLS", {})
@property
def JWT_PRIVATE_KEY(self) -> str:
return self._setting("JWT_PRIVATE_KEY", "")
@property
def JWT_ACCESS_TOKEN_EXPIRES_IN(self) -> int:
return self._setting("JWT_ACCESS_TOKEN_EXPIRES_IN", 300)
@property
def JWT_REFRESH_TOKEN_EXPIRES_IN(self) -> int:
return self._setting("JWT_REFRESH_TOKEN_EXPIRES_IN", 86400)
@property
def JWT_AUTHORIZATION_HEADER_SCHEME(self) -> str:
return "Bearer"
@property
def JWT_STATEFUL_VALIDATION_ENABLED(self) -> bool:
return self._setting("JWT_STATEFUL_VALIDATION_ENABLED", False)
@property
def JWT_ROTATE_REFRESH_TOKEN(self) -> bool:
return self._setting("JWT_ROTATE_REFRESH_TOKEN", True)
_app_settings = AppSettings("HEADLESS_")
def __getattr__(name):
# See https://peps.python.org/pep-0562/
return getattr(_app_settings, name)
| AppSettings |
python | python__mypy | mypyc/irbuild/for_helpers.py | {
"start": 40286,
"end": 42567
} | class ____(ForGenerator):
"""Generate optimized IR for a for loop over an integer range."""
def init(self, start_reg: Value, end_reg: Value, step: int) -> None:
builder = self.builder
self.start_reg = start_reg
self.end_reg = end_reg
self.step = step
self.end_target = builder.maybe_spill(end_reg)
if is_short_int_rprimitive(start_reg.type) and is_short_int_rprimitive(end_reg.type):
index_type: RType = short_int_rprimitive
elif is_fixed_width_rtype(end_reg.type):
index_type = end_reg.type
else:
index_type = int_rprimitive
index_reg = Register(index_type)
builder.assign(index_reg, start_reg, -1)
self.index_reg = builder.maybe_spill_assignable(index_reg)
# Initialize loop index to 0. Assert that the index target is assignable.
self.index_target: Register | AssignmentTarget = builder.get_assignment_target(self.index)
builder.assign(self.index_target, builder.read(self.index_reg, self.line), self.line)
def gen_condition(self) -> None:
builder = self.builder
line = self.line
# Add loop condition check.
cmp = "<" if self.step > 0 else ">"
comparison = builder.binary_op(
builder.read(self.index_reg, line), builder.read(self.end_target, line), cmp, line
)
builder.add_bool_branch(comparison, self.body_block, self.loop_exit)
def gen_step(self) -> None:
builder = self.builder
line = self.line
# Increment index register. If the range is known to fit in short ints, use
# short ints.
if is_short_int_rprimitive(self.start_reg.type) and is_short_int_rprimitive(
self.end_reg.type
):
new_val = builder.int_op(
short_int_rprimitive,
builder.read(self.index_reg, line),
Integer(self.step),
IntOp.ADD,
line,
)
else:
new_val = builder.binary_op(
builder.read(self.index_reg, line), Integer(self.step), "+", line
)
builder.assign(self.index_reg, new_val, line)
builder.assign(self.index_target, new_val, line)
| ForRange |
python | kamyu104__LeetCode-Solutions | Python/sum-of-good-subsequences.py | {
"start": 67,
"end": 538
} | class ____(object):
def sumOfGoodSubsequences(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MOD = 10**9+7
dp = collections.defaultdict(int)
cnt = collections.defaultdict(int)
for x in nums:
c = cnt[x-1]+cnt[x+1]+1
cnt[x] = (cnt[x]+c)%MOD
dp[x] = (dp[x]+(dp[x-1]+dp[x+1]+x*c))%MOD
return reduce(lambda accu, x: (accu+x)%MOD, dp.itervalues())
| Solution |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/integrations/dlt/dlt_dagster_translator.py | {
"start": 316,
"end": 1145
} | class ____(DagsterDltTranslator):
def get_asset_spec(self, data: DltResourceTranslatorData) -> AssetSpec:
"""Overrides asset spec to override asset key to be the dlt resource name."""
default_spec = super().get_asset_spec(data)
return default_spec.replace_attributes(
key=AssetKey(f"{data.resource.name}"),
)
@dlt_assets(
name="example_dlt_assets",
dlt_source=example_dlt_source(),
dlt_pipeline=dlt.pipeline(
pipeline_name="example_pipeline_name",
dataset_name="example_dataset_name",
destination="snowflake",
progress="log",
),
dagster_dlt_translator=CustomDagsterDltTranslator(),
)
def dlt_example_assets(context: AssetExecutionContext, dlt: DagsterDltResource):
yield from dlt.run(context=context)
| CustomDagsterDltTranslator |
python | spyder-ide__spyder | spyder/plugins/layout/container.py | {
"start": 1996,
"end": 2089
} | class ____:
PluginsMenu = "plugins_menu"
LayoutsMenu = 'layouts_menu'
| LayoutPluginMenus |
python | pytorch__pytorch | torch/distributed/fsdp/_flat_param.py | {
"start": 6926,
"end": 7312
} | class ____(_ParameterMeta):
# Make `isinstance(t, FlatParameter)` return True for custom tensor
# instances that have the _is_flat_param flag for BC
def __instancecheck__(self, instance):
# NB: do NOT test the super implementation
return isinstance(instance, torch.Tensor) and getattr(
instance, "_is_flat_param", False
)
| _FlatParameterMeta |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 692739,
"end": 693429
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "created_at", "new_base", "old_base", "pull_request")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
new_base = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="newBase")
old_base = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="oldBase")
pull_request = sgqlc.types.Field(
sgqlc.types.non_null("PullRequest"), graphql_name="pullRequest"
)
| AutomaticBaseChangeFailedEvent |
python | spyder-ide__spyder | spyder/plugins/run/plugin.py | {
"start": 1424,
"end": 32683
} | class ____(SpyderPluginV2):
"""
Run Plugin.
"""
NAME = "run"
REQUIRES = [Plugins.Preferences, Plugins.WorkingDirectory]
OPTIONAL = [Plugins.MainMenu, Plugins.Toolbar, Plugins.Shortcuts]
CONTAINER_CLASS = RunContainer
CONF_SECTION = NAME
CONF_WIDGET_CLASS = RunConfigPage
CONF_FILE = False
sig_run_input = Signal(str)
"""
Request to run an input.
Arguments
---------
context: str
Context used to request the run input information from the currently
focused `RunConfigurationProvider`
"""
# ---- SpyderPluginV2 API
# -------------------------------------------------------------------------
@staticmethod
def get_name():
return _("Run")
@staticmethod
def get_description():
return _("Manage run configuration for executing files.")
@classmethod
def get_icon(cls):
return cls.create_icon('run')
def on_initialize(self):
self.pending_toolbar_actions = []
self.pending_menu_actions = []
self.main_menu_ready = False
self.pending_shortcut_actions = []
self.all_run_actions = {}
self.menu_actions = set({})
self.toolbar_actions = set({})
self.shortcut_actions = {}
self.action_lock = Lock()
container = self.get_container()
container.sig_run_action_created.connect(
self._register_action_shortcuts
)
container.sig_open_preferences_requested.connect(
self._open_run_preferences
)
@on_plugin_available(plugin=Plugins.WorkingDirectory)
def on_working_directory_available(self):
working_dir = self.get_plugin(Plugins.WorkingDirectory)
working_dir.sig_current_directory_changed.connect(
self._switch_working_dir
)
self._switch_working_dir(working_dir.get_workdir())
@on_plugin_available(plugin=Plugins.MainMenu)
def on_main_menu_available(self):
main_menu = self.get_plugin(Plugins.MainMenu)
for action in [
RunActions.Run,
RunActions.ReRun,
RunActions.Configure,
RunActions.GlobalConfigurations,
]:
main_menu.add_item_to_application_menu(
self.get_action(action),
ApplicationMenus.Run,
RunMenuSections.Run,
before_section=RunMenuSections.RunExtras
)
self.main_menu_ready = True
while self.pending_menu_actions != []:
action, menu_id, menu_section, before_section = (
self.pending_menu_actions.pop(0))
main_menu.add_item_to_application_menu(
action,
menu_id,
menu_section,
before_section=before_section
)
@on_plugin_available(plugin=Plugins.Preferences)
def on_preferences_available(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.register_plugin_preferences(self)
@on_plugin_available(plugin=Plugins.Toolbar)
def on_toolbar_available(self):
toolbar = self.get_plugin(Plugins.Toolbar)
toolbar.add_item_to_application_toolbar(
self.get_action(RunActions.Run), ApplicationToolbars.Run
)
while self.pending_toolbar_actions != []:
(
action,
toolbar_id,
section,
before,
before_section,
) = self.pending_toolbar_actions.pop(0)
toolbar.add_item_to_application_toolbar(
action,
toolbar_id,
section,
before,
before_section,
)
@on_plugin_available(plugin=Plugins.Shortcuts)
def on_shortcuts_available(self):
shortcuts = self.get_plugin(Plugins.Shortcuts)
while self.pending_shortcut_actions != []:
args = self.pending_shortcut_actions.pop(0)
shortcuts.register_shortcut(*args)
@on_plugin_teardown(plugin=Plugins.WorkingDirectory)
def on_working_directory_teardown(self):
working_dir = self.get_plugin(Plugins.WorkingDirectory)
working_dir.sig_current_directory_changed.disconnect(
self._switch_working_dir
)
self._switch_working_dir(None)
@on_plugin_teardown(plugin=Plugins.MainMenu)
def on_main_menu_teardown(self):
main_menu = self.get_plugin(Plugins.MainMenu)
for action in [
RunActions.Run,
RunActions.ReRun,
RunActions.Configure,
RunActions.GlobalConfigurations,
]:
main_menu.remove_item_from_application_menu(
action,
ApplicationMenus.Run
)
for key in self.menu_actions:
(_, count, action_id) = self.all_run_actions[key]
if count > 0:
main_menu.remove_item_from_application_menu(
action_id, ApplicationMenus.Run
)
@on_plugin_teardown(plugin=Plugins.Preferences)
def on_preferences_teardown(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.deregister_plugin_preferences(self)
@on_plugin_teardown(plugin=Plugins.Toolbar)
def on_toolbar_teardown(self):
toolbar = self.get_plugin(Plugins.Toolbar)
toolbar.remove_item_from_application_toolbar(
RunActions.Run, ApplicationToolbars.Run)
for key in self.toolbar_actions:
(_, count, action_id) = self.all_run_actions[key]
if count > 0:
toolbar.remove_item_from_application_toolbar(
action_id, ApplicationToolbars.Run
)
@on_plugin_teardown(plugin=Plugins.Shortcuts)
def on_shortcuts_teardown(self):
shortcuts = self.get_plugin(Plugins.Shortcuts)
for key in self.shortcut_actions:
(action, count, action_id) = self.all_run_actions[key]
if count > 0:
shortcut_context = self.shortcut_actions[key]
shortcuts.unregister_shortcut(
action, shortcut_context, action_id
)
# ---- Public API
# -------------------------------------------------------------------------
def register_run_configuration_provider(
self,
provider_name: str,
supported_extensions_contexts: List[SupportedExtensionContexts]
):
"""
Register the extensions and contexts that a `RunConfigurationProvider`
supports.
Parameters
----------
provider_name: str
The identifier of the :class:`RunConfigurationProvider` instance
that is registering the set of supported contexts per extension.
supported_extensions_contexts: List[SupportedExtensionContexts]
A list containing the supported contexts per file extension.
"""
self.get_container().register_run_configuration_provider(
provider_name, supported_extensions_contexts)
def deregister_run_configuration_provider(
self,
provider_name: str,
unsupported_extensions_contexts: List[SupportedExtensionContexts]
):
"""
Deregister the extensions and contexts that a
`RunConfigurationProvider` no longer supports.
Parameters
----------
provider_name: str
The identifier of the :class:`RunConfigurationProvider` instance
that is registering the set of formerly supported contexts
per extension.
unsupported_extensions_contexts: List[SupportedExtensionContexts]
A list containing the formerly supported contexts per
file extension.
"""
self.get_container().deregister_run_configuration_provider(
provider_name, unsupported_extensions_contexts)
def register_run_configuration_metadata(
self,
provider: RunConfigurationProvider,
metadata: RunConfigurationMetadata
):
"""
Register the metadata for a run configuration.
Parameters
----------
provider: RunConfigurationProvider
A :class:`SpyderPluginV2` instance that implements the
:class:`RunConfigurationProvider` interface and will register
and own a run configuration.
metadata: RunConfigurationMetadata
The metadata for a run configuration that the provider is able to
produce.
Notes
-----
The unique identifier for the metadata dictionary is produced and
managed by the provider and the Run plugin will only refer to the
run configuration by using such id.
"""
self.get_container().register_run_configuration_metadata(
provider, metadata)
def get_currently_selected_configuration(self):
"""
Get currently selected configuration
"""
return self.get_container().currently_selected_configuration
def deregister_run_configuration_metadata(self, uuid: str):
"""
Deregister a given run configuration by its unique identifier.
Parameters
----------
uuid: str
Unique identifier for a run configuration metadata that will not
longer exist. This id should have been registered using
`register_run_configuration_metadata`.
"""
self.get_container().deregister_run_configuration_metadata(uuid)
def register_executor_configuration(
self,
provider: RunExecutor,
configuration: List[SupportedExecutionRunConfiguration]
):
"""
Register a :class:`RunExecutor` instance to indicate its support
for a given set of run configurations. This method can be called
whenever an executor can extend its support for a given run input
configuration.
Parameters
----------
provider: RunExecutor
A :class:`SpyderPluginV2` instance that implements the
:class:`RunExecutor` interface and will register execution
input type information.
configuration: List[SupportedExecutionRunConfiguration]
A list of input configurations that the provider is able to
process. Each configuration specifies the input extension
identifier, the available execution context and the output formats
for that type.
"""
self.get_container().register_executor_configuration(
provider, configuration)
def deregister_executor_configuration(
self,
provider: RunExecutor,
configuration: List[SupportedExecutionRunConfiguration]
):
"""
Deregister a :class:`RunConfigurationProvider` instance from providing
a set of run configurations that are no longer supported by it.
This method can be called whenever an input provider wants to remove
its support for a given run input configuration.
Parameters
----------
provider: RunConfigurationProvider
A :class:`SpyderPluginV2` instance that implements the
:class:`RunConfigurationProvider` interface and will deregister
execution input type information.
configuration: List[SupportedExecutionRunConfiguration]
A list of input configurations that the provider is able to
process. Each configuration specifies the input extension
identifier, the available execution context and the output formats
for that type.
"""
self.get_container().deregister_executor_configuration(
provider, configuration)
def register_viewer_configuration(
self,
viewer: RunResultViewer,
formats: List[OutputFormat]
):
"""
Register a :class:`RunExecutorProvider` instance to indicate its
support for a given set of output run result formats. This method can
be called whenever a viewer can extend its support for a given output
format.
Parameters
----------
provider: RunResultViewer
A :class:`SpyderPluginV2` instance that implements the
:class:`RunResultViewer` interface and will register
supported output formats.
formats: List[OutputFormat]
A list of output formats that the viewer is able to display.
"""
self.get_container().register_viewer_configuration(viewer, formats)
def deregister_viewer_configuration(
self,
viewer: RunResultViewer,
formats: List[OutputFormat]
):
"""
Deregister a :class:`RunResultViewer` instance from supporting a set of
output formats that are no longer supported by it. This method
can be called whenever a viewer wants to remove its support
for a given output format.
Parameters
----------
provider: RunResultViewer
A :class:`SpyderPluginV2` instance that implements the
:class:`RunResultViewer` interface and will deregister
output format support.
formats: List[OutputFormat]
A list of output formats that the viewer wants to deregister.
"""
self.get_container().deregister_viewer_configuration(viewer, formats)
def create_run_button(
self,
context_name: str,
text: str,
icon: Optional[QIcon] = None,
tip: Optional[str] = None,
shortcut_context: Optional[str] = None,
register_shortcut: bool = False,
extra_action_name: Optional[str] = None,
context_modificator: Optional[str] = None,
add_to_toolbar: bool | str | dict = False,
add_to_menu: bool | dict = False,
re_run: bool = False
) -> QAction:
"""
Create a run or a "run and do something" (optionally re-run) button
for a specific run context.
Parameters
----------
context_name: str
The identifier of the run context.
text: str
Localized text for the action
icon: Optional[QIcon]
Icon for the action when used in menu or toolbar.
tip: Optional[str]
Tooltip to define for action on menu or toolbar.
shortcut_context: Optional[str]
Set the `str` context of the shortcut.
register_shortcut: bool
If True, main window will expose the shortcut in Preferences.
The default value is `False`.
extra_action_name: Optional[str]
The name of the action to execute on the run input provider
after requesting the run input.
context_modificator: Optional[str]
The name of the modification to apply to the action, e.g. run
selection <up to line>.
add_to_toolbar: bool or str
If True, then the action will be added to the Run section of the
main toolbar. If a string, it must be a toolbar_id. If dictionary,
it corresponds to
{'toolbar': ..., 'section': ..., 'before': ...,
'before_section': ...}
add_to_menu: bool or dict
If True, then the action will be added to the Run menu.
If a dictionnary, it corresponds to
{'menu': ..., 'section': ..., 'before_section': ...}
re_run: bool
If True, then the button will act as a re-run button instead of
a run one.
Returns
-------
action: SpyderAction
The corresponding action that was created.
Notes
-----
1. The context passed as a parameter must be a subordinate of the
context of the current focused run configuration that was
registered via `register_run_configuration_metadata`. For instance,
Cell can be used if and only if the file was registered.
2. The button will be registered as `run <context>` or
`run <context> <context_modificator> and <extra_action_name>`
on the action registry.
3. The created button will operate over the last focused run input
provider.
4. If the requested button already exists, this method will not do
anything, which implies that the first registered shortcut will be the
one to be used. For the built-in run contexts (file, cell and
selection), the editor will register their corresponding icons and
shortcuts.
"""
key = (context_name, extra_action_name, context_modificator,
re_run)
action = self.get_container().create_run_button(
context_name,
text,
icon=icon,
tip=tip,
shortcut_context=shortcut_context,
register_shortcut=register_shortcut,
extra_action_name=extra_action_name,
context_modificator=context_modificator,
re_run=re_run
)
if add_to_toolbar:
toolbar_id, section, before, before_section = (
ApplicationToolbars.Run,
None,
None,
None
)
if isinstance(add_to_toolbar, str):
toolbar_id = add_to_toolbar
if isinstance(add_to_toolbar, dict):
toolbar_id = add_to_toolbar['toolbar']
section = add_to_toolbar.get('section')
before = add_to_toolbar.get('before')
before_section = add_to_toolbar.get('before_section')
toolbar = self.get_plugin(Plugins.Toolbar)
if toolbar:
toolbar.add_item_to_application_toolbar(
action,
toolbar_id,
section,
before,
before_section,
)
else:
self.pending_toolbar_actions.append(
(
action,
toolbar_id,
section,
before,
before_section,
)
)
self.toolbar_actions |= {key}
if add_to_menu:
menu_id, menu_section, before_section = (
ApplicationMenus.Run, RunMenuSections.RunExtras,
RunMenuSections.RunInExecutors
)
if isinstance(add_to_menu, dict):
menu_id = add_to_menu['menu']
menu_section = add_to_menu['section']
before_section = add_to_menu.get('before_section', None)
main_menu = self.get_plugin(Plugins.MainMenu)
if self.main_menu_ready and main_menu:
main_menu.add_item_to_application_menu(
action, menu_id, menu_section,
before_section=before_section
)
else:
self.pending_menu_actions.append((
action,
menu_id,
menu_section,
before_section
))
self.menu_actions |= {key}
if register_shortcut:
self.shortcut_actions[key] = shortcut_context
with self.action_lock:
(_, count, _) = self.all_run_actions.get(key, (None, 0, None))
count += 1
self.all_run_actions[key] = (action, count, action.action_id)
return action
def destroy_run_button(
self,
context_name: str,
extra_action_name: Optional[str] = None,
context_modificator: Optional[str] = None,
re_run: bool = False
):
"""
Destroy a run or a "run and do something" (optionally re-run) button
for a specific run context.
Parameters
----------
context_name: str
The identifier of the run context.
extra_action_name: Optional[str]
The name of the action to execute on the run input provider
after requesting the run input.
context_modificator: Optional[str]
The name of the modification to apply to the action, e.g. run
run selection <up to line>.
re_run: bool
If True, then the button was registered as a re-run button
instead of a run one.
Notes
-----
1. The action will be removed from the main menu and toolbar if and
only if there is no longer a RunInputProvider that registered the same
action and has not called this method.
"""
main_menu = self.get_plugin(Plugins.MainMenu)
toolbar = self.get_plugin(Plugins.Toolbar)
shortcuts = self.get_plugin(Plugins.Shortcuts)
key = (context_name, extra_action_name, context_modificator,
re_run)
with self.action_lock:
action, count, action_id = self.all_run_actions[key]
if count == 0:
self.all_run_actions.pop(key)
if key in self.menu_actions:
self.menu_actions.pop(key)
if main_menu:
main_menu.remove_item_from_application_menu(
action_id, menu_id=ApplicationMenus.Run
)
if key in self.toolbar_actions:
self.toolbar_actions.pop(key)
if toolbar:
toolbar.remove_item_from_application_toolbar(
action_id, toolbar_id=ApplicationToolbars.Run
)
if key in self.shortcut_actions:
shortcut_context = self.shortcut_actions.pop(key)
if shortcuts:
shortcuts.unregister_shortcut(
action, shortcut_context, action_id
)
shortcuts.apply_shortcuts()
else:
count -= 1
self.all_run_actions[key] = (action, count, action_id)
def create_run_in_executor_button(
self,
context_name: str,
executor_name: str,
text: str,
icon: Optional[QIcon] = None,
tip: Optional[str] = None,
shortcut_context: Optional[str] = None,
register_shortcut: bool = False,
add_to_toolbar: bool | str | dict = False,
add_to_menu: bool | dict = False,
shortcut_widget_context: Qt.ShortcutContext = Qt.WidgetShortcut,
) -> QAction:
"""
Create a "run <context> in <provider>" button for a given run context
and executor.
Parameters
----------
context_name: str
The identifier of the run context.
executor_name: str
The identifier of the run executor.
text: str
Localized text for the action
icon: Optional[QIcon]
Icon for the action when used in menu or toolbar.
tip: Optional[str]
Tooltip to define for action on menu or toolbar.
shortcut_context: Optional[str]
Set the `str` context of the shortcut.
register_shortcut: bool
If True, main window will expose the shortcut in Preferences.
The default value is `False`.
add_to_toolbar: bool or str or dict
If True, then the action will be added to the Run section of the
main toolbar. If a string, it must be a toolbar_id. If dictionary,
it corresponds to
{'toolbar': ..., 'section': ..., 'before': ...,
'before_section': ...}
add_to_menu: bool or dict
If True, then the action will be added to the Run menu.
If a dictionnary, it corresponds to
{'menu': ..., 'section': ..., 'before_section': ...}
shortcut_widget_context: Qt.ShortcutContext
Qt context for the shorctut set for this button.
Returns
-------
action: SpyderAction
The corresponding action that was created.
Notes
-----
1. The context passed as a parameter must be a subordinate of the
context of the current focused run configuration that was
registered via `register_run_configuration_metadata`. For instance,
Cell can be used if and only if the file was registered.
2. The button will be registered as `run <context> in <provider>` on
the action registry.
3. The created button will operate over the last focused run input
provider.
4. If the requested button already exists, this method will not do
anything, which implies that the first registered shortcut will be the
one to be used.
"""
key = (context_name, executor_name, None, False)
action = self.get_container().create_run_in_executor_button(
context_name,
executor_name,
text,
icon=icon,
tip=tip,
shortcut_context=shortcut_context,
register_shortcut=register_shortcut,
shortcut_widget_context=shortcut_widget_context,
)
if add_to_toolbar:
toolbar_id, section, before, before_section = (
ApplicationToolbars.Run,
None,
None,
None
)
if isinstance(add_to_toolbar, str):
toolbar_id = add_to_toolbar
if isinstance(add_to_toolbar, dict):
toolbar_id = add_to_toolbar['toolbar']
section = add_to_toolbar.get('section')
before = add_to_toolbar.get('before')
before_section = add_to_toolbar.get('before_section')
toolbar = self.get_plugin(Plugins.Toolbar)
if toolbar:
toolbar.add_item_to_application_toolbar(
action,
toolbar_id,
section,
before,
before_section,
)
else:
self.pending_toolbar_actions.append(
(
action,
toolbar_id,
section,
before,
before_section,
)
)
self.toolbar_actions |= {key}
if add_to_menu:
menu_id, menu_section, before_section = (
ApplicationMenus.Run, RunMenuSections.RunExtras,
RunMenuSections.RunInExecutors
)
if isinstance(add_to_menu, dict):
menu_id = add_to_menu['menu']
menu_section = add_to_menu['section']
before_section = add_to_menu.get('before_section', None)
main_menu = self.get_plugin(Plugins.MainMenu)
if self.main_menu_ready and main_menu:
main_menu.add_item_to_application_menu(
action, menu_id, menu_section,
before_section=before_section
)
else:
self.pending_menu_actions.append((
action,
menu_id,
menu_section,
before_section
))
self.menu_actions |= {key}
if register_shortcut:
self.shortcut_actions[key] = shortcut_context
self.all_run_actions[key] = (action, 1, action.action_id)
return action
def destroy_run_in_executor_button(
self,
context_name: str,
executor_name: str
):
"""
Destroy a "run <context> in <provider>" button for a given run context
and executor.
Parameters
----------
context_name: str
The identifier of the run context.
executor_name: str
The identifier of the run executor.
"""
self.destroy_run_button(context_name, executor_name, None)
def get_last_used_executor_parameters(
self,
uuid: str
) -> StoredRunConfigurationExecutor:
"""
Retrieve the last used execution parameters for a given
run configuration.
Parameters
----------
uuid: str
The run configuration identifier.
Returns
-------
last_used_params: StoredRunConfigurationExecutor
A dictionary containing the last used executor and parameters
for the given run configuration.
"""
return self.get_container().get_last_used_executor_parameters(uuid)
def get_executor_configuration_parameters(
self,
executor_name: str,
extension: str, context_id: str
) -> StoredRunExecutorParameters:
"""
Retrieve the stored parameters for a given executor `executor_name`
using context `context_id` with file extension `extension`.
Parameters
----------
executor_name: str
The identifier of the run executor.
extension: str
The file extension to register the configuration parameters for.
context_id: str
The context to register the configuration parameters for.
Returns
-------
config: StoredRunExecutorParameters
A dictionary containing the run executor parameters for the given
run configuration.
"""
return self.get_container().get_executor_configuration_parameters(
executor_name, extension, context_id)
def switch_focused_run_configuration(self, uuid: str):
"""
Switch to the last selected run configuration.
Parameters
----------
uuid: str
The run configuration identifier.
"""
self.get_container().switch_focused_run_configuration(uuid)
# ---- Private API
# -------------------------------------------------------------------------
def _switch_working_dir(self, path: str):
self.get_container().set_current_working_dir(path)
def _register_action_shortcuts(
self,
action_name: str,
register_shortcut: bool,
shortcut_context: str
):
if register_shortcut:
action = self.get_action(action_name)
shortcuts = self.get_plugin(Plugins.Shortcuts)
if shortcuts:
shortcuts.register_shortcut(action, shortcut_context,
action_name)
else:
self.pending_shortcut_actions.append(
(action, shortcut_context, action_name))
def _open_run_preferences(self):
preferences = self.get_plugin(Plugins.Preferences)
preferences.open_dialog()
container = preferences.get_container()
dlg = container.dialog
index = dlg.get_index_by_name("run")
dlg.set_current_index(index)
| Run |
python | django__django | tests/auth_tests/models/with_custom_email_field.py | {
"start": 423,
"end": 772
} | class ____(AbstractBaseUser):
username = models.CharField(max_length=255)
password = models.CharField(max_length=255)
email_address = models.EmailField(null=True)
is_active = models.BooleanField(default=True)
EMAIL_FIELD = "email_address"
USERNAME_FIELD = "username"
objects = CustomEmailFieldUserManager()
| CustomEmailField |
python | numba__numba | numba/core/types/functions.py | {
"start": 20053,
"end": 22054
} | class ____(WeakType, Callable, Dummy):
"""
Type class for @jit-compiled functions.
"""
def __init__(self, dispatcher):
self._store_object(dispatcher)
super(Dispatcher, self).__init__("type(%s)" % dispatcher)
def dump(self, tab=''):
print((f'{tab}DUMP {type(self).__name__}[code={self._code}, '
f'name={self.name}]'))
self.dispatcher.dump(tab=tab + ' ')
print(f'{tab}END DUMP')
def get_call_type(self, context, args, kws):
"""
Resolve a call to this dispatcher using the given argument types.
A signature returned and it is ensured that a compiled specialization
is available for it.
"""
template, pysig, args, kws = \
self.dispatcher.get_call_template(args, kws)
sig = template(context).apply(args, kws)
if sig:
sig = sig.replace(pysig=pysig)
return sig
def get_call_signatures(self):
sigs = self.dispatcher.nopython_signatures
return sigs, True
@property
def dispatcher(self):
"""
A strong reference to the underlying numba.dispatcher.Dispatcher
instance.
"""
return self._get_object()
def get_overload(self, sig):
"""
Get the compiled overload for the given signature.
"""
return self.dispatcher.get_overload(sig.args)
def get_impl_key(self, sig):
"""
Get the implementation key for the given signature.
"""
return self.get_overload(sig)
def unify(self, context, other):
return utils.unified_function_type((self, other), require_precise=False)
def can_convert_to(self, typingctx, other):
if isinstance(other, types.FunctionType):
try:
self.dispatcher.get_compile_result(other.signature)
except errors.NumbaError:
return None
else:
return Conversion.safe
| Dispatcher |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/base.py | {
"start": 6028,
"end": 7015
} | class ____(Enum):
"""enumeration which indicates the 'direction' of a
:class:`_orm.RelationshipProperty`.
:class:`.RelationshipDirection` is accessible from the
:attr:`_orm.Relationship.direction` attribute of
:class:`_orm.RelationshipProperty`.
"""
ONETOMANY = 1
"""Indicates the one-to-many direction for a :func:`_orm.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
"""
MANYTOONE = 2
"""Indicates the many-to-one direction for a :func:`_orm.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
"""
MANYTOMANY = 3
"""Indicates the many-to-many direction for a :func:`_orm.relationship`.
This symbol is typically used by the internals but may be exposed within
certain API features.
"""
ONETOMANY, MANYTOONE, MANYTOMANY = tuple(RelationshipDirection)
| RelationshipDirection |
python | ansible__ansible | lib/ansible/utils/context_objects.py | {
"start": 2794,
"end": 3130
} | class ____(CLIArgs, metaclass=_ABCSingleton):
"""
Globally hold a parsed copy of cli arguments.
Only one of these exist per program as it is for global context
"""
pass
def __getattr__(importable_name):
return _no_six.deprecate(importable_name, __name__, "binary_type", "text_type", "add_metaclass")
| GlobalCLIArgs |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/plan/inputs.py | {
"start": 20708,
"end": 22534
} | class ____(MultiStepInputSource, IHaveNew):
"""This step input fans-in multiple sources in to a single input. The input will receive just
the value from loading source_to_load_from.
"""
sources: Sequence[StepInputSource]
source_to_load_from: StepInputSource
def __new__(cls, sources: Sequence[StepInputSource], source_to_load_from: StepInputSource):
check.sequence_param(sources, "sources", StepInputSource)
for source in sources:
check.invariant(
not isinstance(source, MultiStepInputSource),
"Can not have multiple levels of MultiStepInputSource StepInputSource",
)
return super().__new__(
cls,
sources=sources,
source_to_load_from=source_to_load_from,
)
def load_input_object(
self,
step_context: "StepExecutionContext",
input_def: InputDefinition,
) -> Iterator[object]:
yield from self.source_to_load_from.load_input_object(step_context, input_def)
def _load_input_with_input_manager(
input_manager: "InputManager", context: "InputContext"
) -> Iterator[object]:
step_context = cast("StepExecutionContext", context.step_context)
with op_execution_error_boundary(
DagsterExecutionLoadInputError,
msg_fn=lambda: f'Error occurred while loading input "{context.name}" of step "{step_context.step.key}":',
step_context=step_context,
step_key=step_context.step.key,
input_name=context.name,
):
value = input_manager.load_input(context)
# close user code boundary before returning value
yield from context.consume_events()
yield value
@whitelist_for_serdes(storage_field_names={"node_handle": "solid_handle"})
@record_custom
| FromMultipleSourcesLoadSingleSource |
python | getsentry__sentry | tests/sentry/api/bases/test_team.py | {
"start": 8303,
"end": 15960
} | class ____(TeamPermissionBase):
def setUp(self) -> None:
super().setUp()
self.org = self.create_organization()
self.org.flags.allow_joinleave = False
self.org.save()
self.team = self.create_team(organization=self.org)
self.project = self.create_project(organization=self.org)
def test_get_regular_user(self) -> None:
user = self.create_user()
assert not self.has_object_perm("GET", self.team, user=user)
assert not self.has_object_perm("POST", self.team, user=user)
assert not self.has_object_perm("PUT", self.team, user=user)
assert not self.has_object_perm("DELETE", self.team, user=user)
def test_get_superuser(self) -> None:
user = self.create_user(is_superuser=True)
assert self.has_object_perm("GET", self.team, user=user, is_superuser=True)
assert self.has_object_perm("POST", self.team, user=user, is_superuser=True)
assert self.has_object_perm("PUT", self.team, user=user, is_superuser=True)
assert self.has_object_perm("DELETE", self.team, user=user, is_superuser=True)
def test_member_without_team_membership(self) -> None:
user = self.create_user()
self.create_member(user=user, organization=self.org, role="member", teams=[])
assert not self.has_object_perm("GET", self.team, user=user)
assert not self.has_object_perm("POST", self.team, user=user)
assert not self.has_object_perm("PUT", self.team, user=user)
assert not self.has_object_perm("DELETE", self.team, user=user)
def test_member_with_team_membership(self) -> None:
user = self.create_user()
self.create_member(user=user, organization=self.org, role="member", teams=[self.team])
assert self.has_object_perm("GET", self.team, user=user)
assert not self.has_object_perm("POST", self.team, user=user)
assert not self.has_object_perm("PUT", self.team, user=user)
assert not self.has_object_perm("DELETE", self.team, user=user)
@with_feature("organizations:team-roles")
def test_member_with_team_membership_and_team_role_admin(self) -> None:
user = self.create_user()
member = self.create_member(user=user, organization=self.org, role="member")
self.create_team_membership(self.team, member, role="admin")
assert self.has_object_perm("GET", self.team, user=user)
assert self.has_object_perm("POST", self.team, user=user)
assert self.has_object_perm("PUT", self.team, user=user)
assert self.has_object_perm("DELETE", self.team, user=user)
def test_admin_without_team_membership(self) -> None:
user = self.create_user()
self.create_member(user=user, organization=self.org, role="admin")
# if `allow_joinleave` is False, admins can't act on teams
# they don't have access to
assert not self.has_object_perm("GET", self.team, user=user)
assert not self.has_object_perm("POST", self.team, user=user)
assert not self.has_object_perm("PUT", self.team, user=user)
assert not self.has_object_perm("DELETE", self.team, user=user)
def test_admin_with_team_membership(self) -> None:
user = self.create_user()
self.create_member(user=user, organization=self.org, role="admin", teams=[self.team])
assert self.has_object_perm("GET", self.team, user=user)
assert self.has_object_perm("POST", self.team, user=user)
assert self.has_object_perm("PUT", self.team, user=user)
assert self.has_object_perm("DELETE", self.team, user=user)
def test_manager_without_team_membership(self) -> None:
user = self.create_user()
self.create_member(user=user, organization=self.org, role="manager")
# managers should be able to act on teams/projects they
# don't have access to
assert self.has_object_perm("GET", self.team, user=user)
assert self.has_object_perm("POST", self.team, user=user)
assert self.has_object_perm("PUT", self.team, user=user)
assert self.has_object_perm("DELETE", self.team, user=user)
def test_manager_with_team_membership(self) -> None:
user = self.create_user()
self.create_member(user=user, organization=self.org, role="manager", teams=[self.team])
assert self.has_object_perm("GET", self.team, user=user)
assert self.has_object_perm("POST", self.team, user=user)
assert self.has_object_perm("PUT", self.team, user=user)
assert self.has_object_perm("DELETE", self.team, user=user)
def test_owner_without_team_membership(self) -> None:
user = self.create_user()
self.create_member(user=user, organization=self.org, role="owner")
# owners should be able to act on teams/projects they
# don't have access to
assert self.has_object_perm("GET", self.team, user=user)
assert self.has_object_perm("POST", self.team, user=user)
assert self.has_object_perm("PUT", self.team, user=user)
assert self.has_object_perm("DELETE", self.team, user=user)
def test_owner_with_team_membership(self) -> None:
user = self.create_user()
self.create_member(user=user, organization=self.org, role="owner", teams=[self.team])
assert self.has_object_perm("GET", self.team, user=user)
assert self.has_object_perm("POST", self.team, user=user)
assert self.has_object_perm("PUT", self.team, user=user)
assert self.has_object_perm("DELETE", self.team, user=user)
def test_api_key_with_org_access(self) -> None:
key = self.create_api_key(organization=self.org, scope_list=["team:read"])
assert self.has_object_perm("GET", self.team, auth=key)
assert not self.has_object_perm("POST", self.team, auth=key)
assert not self.has_object_perm("PUT", self.team, auth=key)
assert not self.has_object_perm("DELETE", self.team, auth=key)
def test_api_key_without_org_access(self) -> None:
key = self.create_api_key(organization=self.create_organization(), scope_list=["team:read"])
assert not self.has_object_perm("GET", self.team, auth=key)
assert not self.has_object_perm("POST", self.team, auth=key)
assert not self.has_object_perm("PUT", self.team, auth=key)
assert not self.has_object_perm("DELETE", self.team, auth=key)
def test_api_key_without_access(self) -> None:
key = self.create_api_key(organization=self.org)
assert not self.has_object_perm("GET", self.org, auth=key)
assert not self.has_object_perm("POST", self.team, auth=key)
assert not self.has_object_perm("PUT", self.team, auth=key)
assert not self.has_object_perm("DELETE", self.team, auth=key)
def test_api_key_with_wrong_access(self) -> None:
key = self.create_api_key(organization=self.org, scope_list=["project:read"])
assert not self.has_object_perm("GET", self.org, auth=key)
assert not self.has_object_perm("POST", self.team, auth=key)
assert not self.has_object_perm("PUT", self.team, auth=key)
assert not self.has_object_perm("DELETE", self.team, auth=key)
def test_api_key_with_wrong_access_for_method(self) -> None:
key = self.create_api_key(organization=self.org, scope_list=["team:write"])
assert self.has_object_perm("PUT", self.project, auth=key)
assert self.has_object_perm("POST", self.team, auth=key)
assert self.has_object_perm("PUT", self.team, auth=key)
assert not self.has_object_perm("DELETE", self.team, auth=key)
| TeamPermissionNoJoinLeaveTest |
python | apache__airflow | providers/standard/src/airflow/providers/standard/sensors/external_task.py | {
"start": 28140,
"end": 30837
} | class ____(EmptyOperator):
"""
Use this operator to indicate that a task on a different DAG depends on this task.
When this task is cleared with "Recursive" selected, Airflow will clear the task on
the other DAG and its downstream tasks recursively. Transitive dependencies are followed
until the recursion_depth is reached.
:param external_dag_id: The dag_id that contains the dependent task that needs to be cleared.
:param external_task_id: The task_id of the dependent task that needs to be cleared.
:param logical_date: The logical date of the dependent task execution that needs to be cleared.
:param recursion_depth: The maximum level of transitive dependencies allowed. Default is 10.
This is mostly used for preventing cyclic dependencies. It is fine to increase
this number if necessary. However, too many levels of transitive dependencies will make
it slower to clear tasks in the web UI.
"""
template_fields = ["external_dag_id", "external_task_id", "logical_date"]
if not AIRFLOW_V_3_0_PLUS:
template_fields.append("execution_date")
ui_color = "#4db7db"
operator_extra_links = [ExternalDagLink()]
# The _serialized_fields are lazily loaded when get_serialized_fields() method is called
__serialized_fields: ClassVar[frozenset[str] | None] = None
def __init__(
self,
*,
external_dag_id: str,
external_task_id: str,
logical_date: str | datetime.datetime | None = "{{ logical_date.isoformat() }}",
recursion_depth: int = 10,
**kwargs,
):
super().__init__(**kwargs)
self.external_dag_id = external_dag_id
self.external_task_id = external_task_id
if isinstance(logical_date, datetime.datetime):
self.logical_date = logical_date.isoformat()
elif isinstance(logical_date, str):
self.logical_date = logical_date
else:
raise TypeError(
f"Expected str or datetime.datetime type for logical_date. Got {type(logical_date)}"
)
if not AIRFLOW_V_3_0_PLUS:
self.execution_date = self.logical_date
if recursion_depth <= 0:
raise ValueError("recursion_depth should be a positive integer")
self.recursion_depth = recursion_depth
@classmethod
def get_serialized_fields(cls):
"""Serialize ExternalTaskMarker to contain exactly these fields + templated_fields ."""
if not cls.__serialized_fields:
cls.__serialized_fields = frozenset(super().get_serialized_fields() | {"recursion_depth"})
return cls.__serialized_fields
| ExternalTaskMarker |
python | tensorflow__tensorflow | tensorflow/python/ops/distributions/distribution.py | {
"start": 4194,
"end": 7343
} | class ____(abc.ABCMeta):
def __new__(mcs, classname, baseclasses, attrs):
"""Control the creation of subclasses of the Distribution class.
The main purpose of this method is to properly propagate docstrings
from private Distribution methods, like `_log_prob`, into their
public wrappers as inherited by the Distribution base class
(e.g. `log_prob`).
Args:
classname: The name of the subclass being created.
baseclasses: A tuple of parent classes.
attrs: A dict mapping new attributes to their values.
Returns:
The class object.
Raises:
TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
the new class is derived via multiple inheritance and the first
parent class is not a subclass of `BaseDistribution`.
AttributeError: If `Distribution` does not implement e.g. `log_prob`.
ValueError: If a `Distribution` public method lacks a docstring.
"""
if not baseclasses: # Nothing to be done for Distribution
raise TypeError("Expected non-empty baseclass. Does Distribution "
"not subclass _BaseDistribution?")
which_base = [
base for base in baseclasses
if base == _BaseDistribution or issubclass(base, Distribution)]
base = which_base[0]
if base == _BaseDistribution: # Nothing to be done for Distribution
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
if not issubclass(base, Distribution):
raise TypeError("First parent class declared for %s must be "
"Distribution, but saw '%s'" % (classname, base.__name__))
for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
special_attr = "_%s" % attr
class_attr_value = attrs.get(attr, None)
if attr in attrs:
# The method is being overridden, do not update its docstring
continue
base_attr_value = getattr(base, attr, None)
if not base_attr_value:
raise AttributeError(
"Internal error: expected base class '%s' to implement method '%s'"
% (base.__name__, attr))
class_special_attr_value = attrs.get(special_attr, None)
if class_special_attr_value is None:
# No _special method available, no need to update the docstring.
continue
class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
if not class_special_attr_docstring:
# No docstring to append.
continue
class_attr_value = _copy_fn(base_attr_value)
class_attr_docstring = tf_inspect.getdoc(base_attr_value)
if (
class_attr_docstring is not None
and class_attr_value.__doc__ is not None
):
class_attr_value.__doc__ = _update_docstring(
class_attr_value.__doc__,
(
"Additional documentation from `%s`:\n\n%s"
% (classname, class_special_attr_docstring)
),
)
attrs[attr] = class_attr_value
return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
@tf_export(v1=["distributions.ReparameterizationType"])
| _DistributionMeta |
python | h5py__h5py | h5py/_hl/filters.py | {
"start": 4900,
"end": 14604
} | class ____(FilterRefBase):
filter_id = h5z.FILTER_DEFLATE
def __init__(self, level=DEFAULT_GZIP):
self.filter_options = (level,)
def fill_dcpl(plist, shape, dtype, chunks, compression, compression_opts,
shuffle, fletcher32, maxshape, scaleoffset, external,
allow_unknown_filter=False, *, fill_time=None):
""" Generate a dataset creation property list.
Undocumented and subject to change without warning.
"""
if shape is None or shape == ():
shapetype = 'Empty' if shape is None else 'Scalar'
if any((chunks, compression, compression_opts, shuffle, fletcher32,
scaleoffset is not None)):
raise TypeError(
f"{shapetype} datasets don't support chunk/filter options"
)
if maxshape and maxshape != ():
raise TypeError(f"{shapetype} datasets cannot be extended")
return h5p.create(h5p.DATASET_CREATE)
def rq_tuple(tpl, name):
""" Check if chunks/maxshape match dataset rank """
if tpl in (None, True):
return
try:
tpl = tuple(tpl)
except TypeError as exc:
raise TypeError(f'{name!r} argument must be None or a sequence object') from exc
if len(tpl) != len(shape):
raise ValueError(f'{name!r} must have same rank as dataset shape')
rq_tuple(chunks, 'chunks')
rq_tuple(maxshape, 'maxshape')
if compression is not None:
if isinstance(compression, FilterRefBase):
compression_opts = compression.filter_options
compression = compression.filter_id
if compression not in encode and not isinstance(compression, int):
raise ValueError('Compression filter "%s" is unavailable' % compression)
if compression == 'gzip':
if compression_opts is None:
gzip_level = DEFAULT_GZIP
elif compression_opts in range(10):
gzip_level = compression_opts
else:
raise ValueError("GZIP setting must be an integer from 0-9, not %r" % compression_opts)
elif compression == 'lzf':
if compression_opts is not None:
raise ValueError("LZF compression filter accepts no options")
elif compression == 'szip':
if compression_opts is None:
compression_opts = DEFAULT_SZIP
err = "SZIP options must be a 2-tuple ('ec'|'nn', even integer 0-32)"
try:
szmethod, szpix = compression_opts
except TypeError as exc:
raise TypeError(err) from exc
if szmethod not in ('ec', 'nn'):
raise ValueError(err)
if not (0<szpix<=32 and szpix%2 == 0):
raise ValueError(err)
elif compression_opts is not None:
# Can't specify just compression_opts by itself.
raise TypeError("Compression method must be specified")
if scaleoffset is not None:
# scaleoffset must be an integer when it is not None or False,
# except for integral data, for which scaleoffset == True is
# permissible (will use SO_INT_MINBITS_DEFAULT)
if scaleoffset < 0:
raise ValueError('scale factor must be >= 0')
if dtype.kind == 'f':
if scaleoffset is True:
raise ValueError('integer scaleoffset must be provided for '
'floating point types')
elif dtype.kind in ('u', 'i'):
if scaleoffset is True:
scaleoffset = h5z.SO_INT_MINBITS_DEFAULT
else:
raise TypeError('scale/offset filter only supported for integer '
'and floating-point types')
# Scale/offset following fletcher32 in the filter chain will (almost?)
# always triggers a read error, as most scale/offset settings are
# lossy. Since fletcher32 must come first (see comment below) we
# simply prohibit the combination of fletcher32 and scale/offset.
if fletcher32:
raise ValueError('fletcher32 cannot be used with potentially lossy'
' scale/offset filter')
external = _normalize_external(external)
# End argument validation
if (chunks is True) or (chunks is None and any((
shuffle,
fletcher32,
compression,
(maxshape and not len(external)),
scaleoffset is not None,
))):
chunks = guess_chunk(shape, maxshape, dtype.itemsize)
if maxshape is True:
maxshape = (None,)*len(shape)
if chunks is not None:
plist.set_chunk(chunks)
if fill_time is not None:
if (ft := _FILL_TIME_ENUM.get(fill_time)) is not None:
plist.set_fill_time(ft)
else:
msg = ("fill_time must be one of the following choices: 'alloc', "
f"'never' or 'ifset', but it is {fill_time}.")
raise ValueError(msg)
# scale-offset must come before shuffle and compression
if scaleoffset is not None:
if dtype.kind in ('u', 'i'):
plist.set_scaleoffset(h5z.SO_INT, scaleoffset)
else: # dtype.kind == 'f'
plist.set_scaleoffset(h5z.SO_FLOAT_DSCALE, scaleoffset)
for item in external:
plist.set_external(*item)
if shuffle:
plist.set_shuffle()
if compression == 'gzip':
plist.set_deflate(gzip_level)
elif compression == 'lzf':
plist.set_filter(h5z.FILTER_LZF, h5z.FLAG_OPTIONAL)
elif compression == 'szip':
opts = {'ec': h5z.SZIP_EC_OPTION_MASK, 'nn': h5z.SZIP_NN_OPTION_MASK}
plist.set_szip(opts[szmethod], szpix)
elif isinstance(compression, int):
if not allow_unknown_filter and not h5z.filter_avail(compression):
raise ValueError("Unknown compression filter number: %s" % compression)
plist.set_filter(compression, h5z.FLAG_OPTIONAL, compression_opts)
# `fletcher32` must come after `compression`, otherwise, if `compression`
# is "szip" and the data is 64bit, the fletcher32 checksum will be wrong
# (see GitHub issue #953).
if fletcher32:
plist.set_fletcher32()
return plist
def get_filter_name(code):
"""
Return the name of the compression filter for a given filter identifier.
Undocumented and subject to change without warning.
"""
filters = {h5z.FILTER_DEFLATE: 'gzip', h5z.FILTER_SZIP: 'szip',
h5z.FILTER_SHUFFLE: 'shuffle', h5z.FILTER_FLETCHER32: 'fletcher32',
h5z.FILTER_LZF: 'lzf', h5z.FILTER_SCALEOFFSET: 'scaleoffset'}
return filters.get(code, str(code))
def get_filters(plist):
""" Extract a dictionary of active filters from a DCPL, along with
their settings.
Undocumented and subject to change without warning.
"""
pipeline = {}
nfilters = plist.get_nfilters()
for i in range(nfilters):
code, _, vals, _ = plist.get_filter(i)
if code == h5z.FILTER_DEFLATE:
vals = vals[0] # gzip level
elif code == h5z.FILTER_SZIP:
mask, pixels = vals[0:2]
if mask & h5z.SZIP_EC_OPTION_MASK:
mask = 'ec'
elif mask & h5z.SZIP_NN_OPTION_MASK:
mask = 'nn'
else:
raise TypeError("Unknown SZIP configuration")
vals = (mask, pixels)
elif code == h5z.FILTER_LZF:
vals = None
else:
if len(vals) == 0:
vals = None
pipeline[get_filter_name(code)] = vals
return pipeline
CHUNK_BASE = 16*1024 # Multiplier by which chunks are adjusted
CHUNK_MIN = 8*1024 # Soft lower limit (8k)
CHUNK_MAX = 1024*1024 # Hard upper limit (1M)
def guess_chunk(shape, maxshape, typesize):
""" Guess an appropriate chunk layout for a dataset, given its shape and
the size of each element in bytes. Will allocate chunks only as large
as MAX_SIZE. Chunks are generally close to some power-of-2 fraction of
each axis, slightly favoring bigger values for the last index.
Undocumented and subject to change without warning.
"""
# pylint: disable=unused-argument
# For unlimited dimensions we have to guess 1024
shape = tuple((x if x!=0 else 1024) for i, x in enumerate(shape))
ndims = len(shape)
if ndims == 0:
raise ValueError("Chunks not allowed for scalar datasets.")
chunks = np.array(shape, dtype='=f8')
if not np.all(np.isfinite(chunks)):
raise ValueError("Illegal value in chunk tuple")
# Determine the optimal chunk size in bytes using a PyTables expression.
# This is kept as a float.
dset_size = product(chunks)*typesize
target_size = CHUNK_BASE * (2**np.log10(dset_size/(1024.*1024)))
if target_size > CHUNK_MAX:
target_size = CHUNK_MAX
elif target_size < CHUNK_MIN:
target_size = CHUNK_MIN
idx = 0
while True:
# Repeatedly loop over the axes, dividing them by 2. Stop when:
# 1a. We're smaller than the target chunk size, OR
# 1b. We're within 50% of the target chunk size, AND
# 2. The chunk is smaller than the maximum chunk size
chunk_bytes = product(chunks)*typesize
if (chunk_bytes < target_size or \
abs(chunk_bytes-target_size)/target_size < 0.5) and \
chunk_bytes < CHUNK_MAX:
break
if product(chunks) == 1:
break # Element size larger than CHUNK_MAX
chunks[idx%ndims] = np.ceil(chunks[idx%ndims] / 2.0)
idx += 1
return tuple(int(x) for x in chunks)
| Gzip |
python | OmkarPathak__pygorithm | pygorithm/data_structures/queue.py | {
"start": 1673,
"end": 3160
} | class ____(object):
"""Deque
Deque implementation
"""
def __init__(self, limit=10):
self.queue = []
self.limit = limit
def __str__(self):
return ' '.join([str(i) for i in self.queue])
def is_empty(self):
"""
checks whether the deque is empty
"""
return len(self.queue) <= 0
def is_full(self):
"""
checks whether the deque is full
"""
return len(self.queue) >= self.limit
def insert_rear(self, data):
"""
inserts an element at the rear end of the deque
"""
if self.is_full():
return
else:
self.queue.insert(0, data)
def insert_front(self, data):
"""
inserts an element at the front end of the deque
"""
if self.is_full():
return -1
else:
self.queue.append(data)
def delete_rear(self):
"""
deletes an element from the rear end of the deque
"""
if self.is_empty():
return -1
else:
return self.queue.pop(0)
def delete_front(self):
"""
deletes an element from the front end of the deque
"""
if self.is_full():
return
else:
return self.queue.pop()
@staticmethod
def get_code():
"""
returns the code of the current class
"""
return inspect.getsource(Deque)
| Deque |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 173323,
"end": 175720
} | class ____(OperationBuffer):
"""
Represents a Triton (in the future other type) of template operator
that we can fuse an epilogue onto.
"""
def __init__(
self,
layout: OutputSpec,
inputs: Sequence[IRNode],
make_kernel_render: Optional[Callable[..., Any]],
) -> None:
super().__init__(name=None, layout=layout)
self.inputs = InputsKernel.unwrap_storage(inputs)
self.make_kernel_render = make_kernel_render
self.name = V.graph.register_buffer(self)
V.graph.register_operation(self)
def get_read_writes(self) -> dependencies.ReadWrites:
return self.extract_read_writes(normalize=True)
def extract_read_writes(self, normalize: bool = False) -> dependencies.ReadWrites:
name = self.get_name()
indexer = self.get_layout().make_indexer()
def dummy(index: Sequence[Any], rindex: Sequence[Any]) -> Any:
assert len(rindex) == 0
return ops.store(name, indexer(index), "fake")
deps = dependencies.extract_read_writes(
dummy, self.get_size(), (), normalize=normalize
)
for inp in self.inputs:
assert isinstance(inp, (ReinterpretView, Buffer)), type(inp)
assert isinstance(inp.layout, Layout), type(inp.layout)
indexer = inp.layout.make_indexer()
def dummy(index: Sequence[Any], rindex: Sequence[Any]) -> Any:
assert len(rindex) == 0
# pyrefly: ignore [missing-attribute]
return ops.load(inp.get_name(), indexer(index))
deps.reads |= dependencies.extract_read_writes(
dummy, inp.get_size(), (), normalize=normalize
).reads
return deps
def get_reduction_size(self) -> Sequence[Expr]:
return sympy.S.One
def get_reduction_type(self) -> Optional[str]:
return None
def should_allocate(self) -> bool:
return True
def simplify_and_reorder(
self,
extra_indexing_constraints: Optional[tuple[dict[Any, Any], list[Any]]] = None,
recompute_sizes_body_func: Optional[Callable[..., Any]] = None,
) -> tuple[tuple[Sequence[Expr], list[Expr]], Optional[LoopBody]]:
return (
(
self.get_size(),
[],
),
None,
)
| TemplateBuffer |
python | langchain-ai__langchain | libs/partners/mistralai/langchain_mistralai/embeddings.py | {
"start": 1095,
"end": 10203
} | class ____(BaseModel, Embeddings):
"""MistralAI embedding model integration.
Setup:
Install `langchain_mistralai` and set environment variable
`MISTRAL_API_KEY`.
```bash
pip install -U langchain_mistralai
export MISTRAL_API_KEY="your-api-key"
```
Key init args — completion params:
model:
Name of `MistralAI` model to use.
Key init args — client params:
api_key:
The API key for the MistralAI API. If not provided, it will be read from the
environment variable `MISTRAL_API_KEY`.
max_concurrent_requests: int
max_retries:
The number of times to retry a request if it fails.
timeout:
The number of seconds to wait for a response before timing out.
wait_time:
The number of seconds to wait before retrying a request in case of 429
error.
max_concurrent_requests:
The maximum number of concurrent requests to make to the Mistral API.
See full list of supported init args and their descriptions in the params section.
Instantiate:
```python
from __module_name__ import MistralAIEmbeddings
embed = MistralAIEmbeddings(
model="mistral-embed",
# api_key="...",
# other params...
)
```
Embed single text:
```python
input_text = "The meaning of life is 42"
vector = embed.embed_query(input_text)
print(vector[:3])
```
```python
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
```
Embed multiple text:
```python
input_texts = ["Document 1...", "Document 2..."]
vectors = embed.embed_documents(input_texts)
print(len(vectors))
# The first 3 coordinates for the first vector
print(vectors[0][:3])
```
```python
2
[-0.024603435769677162, -0.007543657906353474, 0.0039630369283258915]
```
Async:
```python
vector = await embed.aembed_query(input_text)
print(vector[:3])
# multiple:
# await embed.aembed_documents(input_texts)
```
```python
[-0.009100092574954033, 0.005071679595857859, -0.0029193938244134188]
```
"""
# The type for client and async_client is ignored because the type is not
# an Optional after the model is initialized and the model_validator
# is run.
client: httpx.Client = Field(default=None) # type: ignore[assignment]
async_client: httpx.AsyncClient = Field( # type: ignore[assignment]
default=None
)
mistral_api_key: SecretStr = Field(
alias="api_key",
default_factory=secret_from_env("MISTRAL_API_KEY", default=""),
)
endpoint: str = "https://api.mistral.ai/v1/"
max_retries: int | None = 5
timeout: int = 120
wait_time: int | None = 30
max_concurrent_requests: int = 64
tokenizer: Tokenizer = Field(default=None)
model: str = "mistral-embed"
model_config = ConfigDict(
extra="forbid",
arbitrary_types_allowed=True,
populate_by_name=True,
)
@model_validator(mode="after")
def validate_environment(self) -> Self:
"""Validate configuration."""
api_key_str = self.mistral_api_key.get_secret_value()
# TODO: handle retries
if not self.client:
self.client = httpx.Client(
base_url=self.endpoint,
headers={
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"Bearer {api_key_str}",
},
timeout=self.timeout,
)
# TODO: handle retries and max_concurrency
if not self.async_client:
self.async_client = httpx.AsyncClient(
base_url=self.endpoint,
headers={
"Content-Type": "application/json",
"Accept": "application/json",
"Authorization": f"Bearer {api_key_str}",
},
timeout=self.timeout,
)
if self.tokenizer is None:
try:
self.tokenizer = Tokenizer.from_pretrained(
"mistralai/Mixtral-8x7B-v0.1"
)
except OSError: # huggingface_hub GatedRepoError
warnings.warn(
"Could not download mistral tokenizer from Huggingface for "
"calculating batch sizes. Set a Huggingface token via the "
"HF_TOKEN environment variable to download the real tokenizer. "
"Falling back to a dummy tokenizer that uses `len()`.",
stacklevel=2,
)
self.tokenizer = DummyTokenizer()
return self
def _get_batches(self, texts: list[str]) -> Iterable[list[str]]:
"""Split list of texts into batches of less than 16k tokens for Mistral API."""
batch: list[str] = []
batch_tokens = 0
text_token_lengths = [
len(encoded) for encoded in self.tokenizer.encode_batch(texts)
]
for text, text_tokens in zip(texts, text_token_lengths, strict=False):
if batch_tokens + text_tokens > MAX_TOKENS:
if len(batch) > 0:
# edge case where first batch exceeds max tokens
# should not yield an empty batch.
yield batch
batch = [text]
batch_tokens = text_tokens
else:
batch.append(text)
batch_tokens += text_tokens
if batch:
yield batch
def _retry(self, func: Callable) -> Callable:
if self.max_retries is None or self.wait_time is None:
return func
return retry(
retry=retry_if_exception_type(
(httpx.TimeoutException, httpx.HTTPStatusError)
),
wait=wait_fixed(self.wait_time),
stop=stop_after_attempt(self.max_retries),
)(func)
def embed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed a list of document texts.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
batch_responses = []
@self._retry
def _embed_batch(batch: list[str]) -> Response:
response = self.client.post(
url="/embeddings",
json={
"model": self.model,
"input": batch,
},
)
response.raise_for_status()
return response
batch_responses = [
_embed_batch(batch) for batch in self._get_batches(texts)
]
return [
list(map(float, embedding_obj["embedding"]))
for response in batch_responses
for embedding_obj in response.json()["data"]
]
except Exception:
logger.exception("An error occurred with MistralAI")
raise
async def aembed_documents(self, texts: list[str]) -> list[list[float]]:
"""Embed a list of document texts.
Args:
texts: The list of texts to embed.
Returns:
List of embeddings, one for each text.
"""
try:
@self._retry
async def _aembed_batch(batch: list[str]) -> Response:
response = await self.async_client.post(
url="/embeddings",
json={
"model": self.model,
"input": batch,
},
)
response.raise_for_status()
return response
batch_responses = await asyncio.gather(
*[_aembed_batch(batch) for batch in self._get_batches(texts)]
)
return [
list(map(float, embedding_obj["embedding"]))
for response in batch_responses
for embedding_obj in response.json()["data"]
]
except Exception:
logger.exception("An error occurred with MistralAI")
raise
def embed_query(self, text: str) -> list[float]:
"""Embed a single query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
return self.embed_documents([text])[0]
async def aembed_query(self, text: str) -> list[float]:
"""Embed a single query text.
Args:
text: The text to embed.
Returns:
Embedding for the text.
"""
return (await self.aembed_documents([text]))[0]
| MistralAIEmbeddings |
python | doocs__leetcode | solution/2000-2099/2024.Maximize the Confusion of an Exam/Solution.py | {
"start": 0,
"end": 376
} | class ____:
def maxConsecutiveAnswers(self, answerKey: str, k: int) -> int:
def f(c: str) -> int:
cnt = l = 0
for ch in answerKey:
cnt += ch == c
if cnt > k:
cnt -= answerKey[l] == c
l += 1
return len(answerKey) - l
return max(f("T"), f("F"))
| Solution |
python | great-expectations__great_expectations | great_expectations/expectations/core/expect_multicolumn_values_to_be_unique.py | {
"start": 1038,
"end": 7687
} | class ____(ColumnMapExpectation):
"""Expect that the columns are unique together (e.g. a multi-column primary key)
Note that all instances of any duplicates are considered failed
ExpectMulticolumnvaluesToBeUnique is a \
Column Map Expectation.
For example:
::
A B C
1 1 2 Fail
1 2 3 Pass
1 1 2 Fail
2 2 2 Pass
3 2 3 Pass
Args:
column_list (tuple or list): The column names to evaluate
Keyword Args:
ignore_row_if (str): "all_values_are_missing", "any_value_is_missing", "never"
Other Parameters:
result_format (str or None): \
Which output mode to use: BOOLEAN_ONLY, BASIC, COMPLETE, or SUMMARY. \
For more detail, see [result_format](https://docs.greatexpectations.io/docs/reference/expectations/result_format).
catch_exceptions (boolean or None): \
If True, then catch exceptions and include them as part of the result object. \
For more detail, see [catch_exceptions](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#catch_exceptions).
meta (dict or None): \
A JSON-serializable dictionary (nesting allowed) that will be included in the output without \
modification. For more detail, see [meta](https://docs.greatexpectations.io/docs/reference/expectations/standard_arguments/#meta).
severity (str or None): \
{FAILURE_SEVERITY_DESCRIPTION} \
For more detail, see [failure severity](https://docs.greatexpectations.io/docs/cloud/expectations/expectations_overview/#failure-severity).
Returns:
An [ExpectationSuiteValidationResult](https://docs.greatexpectations.io/docs/terms/validation_result)
Exact fields vary depending on the values passed to result_format, catch_exceptions, and meta.
""" # noqa: E501 # FIXME CoP
column_list: Union[tuple, list]
ignore_row_if: Union[str, SuiteParameterDict] = "all_values_are_missing"
library_metadata = {
"maturity": "production",
"tags": [
"core expectation",
"multi-column expectation",
"column map expectation",
],
"contributors": [
"@great_expectations",
],
"requirements": [],
"has_full_test_suite": True,
"manually_reviewed_code": True,
}
metric_dependencies = tuple()
success_keys = (
"column_list",
"ignore_row_if",
"mostly",
)
args_keys = ("column_list",)
@classmethod
def _prescriptive_template(
cls,
renderer_configuration: RendererConfiguration,
) -> RendererConfiguration:
add_param_args: AddParamArgs = (
("column_list", RendererValueType.ARRAY),
("mostly", RendererValueType.NUMBER),
("ignore_row_if", RendererValueType.STRING),
)
for name, param_type in add_param_args:
renderer_configuration.add_param(name=name, param_type=param_type)
params = renderer_configuration.params
if params.mostly and params.mostly.value < 1.0:
renderer_configuration = cls._add_mostly_pct_param(
renderer_configuration=renderer_configuration
)
template_str = (
"Values must be unique across columns, at least $mostly_pct % of the time: "
)
else:
template_str = "Values must always be unique across columns: "
if params.column_list:
array_param_name = "column_list"
param_prefix = "column_list_"
renderer_configuration = cls._add_array_params(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
template_str += cls._get_array_string(
array_param_name=array_param_name,
param_prefix=param_prefix,
renderer_configuration=renderer_configuration,
)
renderer_configuration.template_str = template_str
return renderer_configuration
@classmethod
@renderer(renderer_type=LegacyRendererType.PRESCRIPTIVE)
@render_suite_parameter_string
def _prescriptive_renderer(
cls,
configuration: Optional[ExpectationConfiguration] = None,
result: Optional[ExpectationValidationResult] = None,
runtime_configuration: Optional[dict] = None,
**kwargs,
):
runtime_configuration = runtime_configuration or {}
_ = runtime_configuration.get("include_column_name") is not False
styling = runtime_configuration.get("styling")
# NOTE: This expectation is deprecated, please use
# expect_select_column_values_to_be_unique_within_record instead.
params = substitute_none_for_missing(
configuration.kwargs,
[
"column_list",
"ignore_row_if",
"row_condition",
"condition_parser",
"mostly",
],
)
if params["mostly"] is not None:
params["mostly_pct"] = num_to_str(params["mostly"] * 100, no_scientific=True)
mostly_str = "" if params.get("mostly") is None else ", at least $mostly_pct % of the time"
template_str = f"Values must always be unique across columns{mostly_str}: "
for idx in range(len(params["column_list"]) - 1):
template_str += f"$column_list_{idx!s}, "
params[f"column_list_{idx!s}"] = params["column_list"][idx]
last_idx = len(params["column_list"]) - 1
template_str += f"$column_list_{last_idx!s}"
params[f"column_list_{last_idx!s}"] = params["column_list"][last_idx]
if params["row_condition"] is not None:
conditional_template_str = parse_row_condition_string(params["row_condition"])
template_str, styling = _style_row_condition(
conditional_template_str,
template_str[0].lower() + template_str[1:],
params,
styling,
)
return [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": template_str,
"params": params,
"styling": styling,
},
}
)
]
| ExpectMulticolumnValuesToBeUnique |
python | ipython__ipython | IPython/core/prefilter.py | {
"start": 14842,
"end": 15221
} | class ____(PrefilterChecker):
priority = Integer(100).tag(config=True)
enabled = Bool(False).tag(config=True)
def check(self, line_info):
"Emacs ipython-mode tags certain input lines."
if line_info.line.endswith('# PYTHON-MODE'):
return self.prefilter_manager.get_handler_by_name('emacs')
else:
return None
| EmacsChecker |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/sqlite/dml.py | {
"start": 8237,
"end": 9859
} | class ____(OnConflictClause):
__visit_name__ = "on_conflict_do_update"
update_values_to_set: Dict[_DMLColumnElement, ColumnElement[Any]]
update_whereclause: Optional[ColumnElement[Any]]
_traverse_internals = OnConflictClause._traverse_internals + [
("update_values_to_set", InternalTraversal.dp_dml_values),
("update_whereclause", InternalTraversal.dp_clauseelement),
]
def __init__(
self,
index_elements: _OnConflictIndexElementsT = None,
index_where: _OnConflictIndexWhereT = None,
set_: _OnConflictSetT = None,
where: _OnConflictWhereT = None,
):
super().__init__(
index_elements=index_elements,
index_where=index_where,
)
if isinstance(set_, dict):
if not set_:
raise ValueError("set parameter dictionary must not be empty")
elif isinstance(set_, ColumnCollection):
set_ = dict(set_)
else:
raise ValueError(
"set parameter must be a non-empty dictionary "
"or a ColumnCollection such as the `.c.` collection "
"of a Table object"
)
self.update_values_to_set = {
coercions.expect(roles.DMLColumnRole, k): coercions.expect(
roles.ExpressionElementRole, v, type_=NULLTYPE, is_crud=True
)
for k, v in set_.items()
}
self.update_whereclause = (
coercions.expect(roles.WhereHavingRole, where)
if where is not None
else None
)
| OnConflictDoUpdate |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 6211,
"end": 6848
} | class ____(ModelOutput):
r"""
image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
The image embeddings obtained by applying the projection layer to the pooler_output.
"""
image_embeds: Optional[torch.FloatTensor] = None
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
attentions: Optional[tuple[torch.FloatTensor, ...]] = None
# Copied from transformers.models.blip.modeling_blip.BlipVisionEmbeddings with Blip->Blip2
| Blip2VisionModelOutput |
python | walkccc__LeetCode | solutions/458. Poor Pigs/458.py | {
"start": 0,
"end": 234
} | class ____:
def poorPigs(self, buckets: int, minutesToDie: int, minutesToTest: int) -> int:
base = minutesToTest // minutesToDie + 1
ans = 0
x = 1
while x < buckets:
ans += 1
x *= base
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-non-overlapping-palindrome-substrings.py | {
"start": 56,
"end": 588
} | class ____(object):
def maxPalindromes(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
result = prev = 0
for mid in xrange(2*len(s)-1):
left, right = mid//2, mid//2+mid%2
while left >= prev and right < len(s) and s[left] == s[right]:
if right-left+1 >= k:
prev = right+1
result += 1
break
left, right = left-1, right+1
return result
| Solution |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_namers.py | {
"start": 1619,
"end": 2049
} | class ____(TestCase):
def test_basic(self):
filename = namers.alias(
thumbnailer=FakeThumbnailer(),
prepared_options=['100x100', 'q80', 'crop', 'upscale'],
thumbnail_options={'size': (100, 100), 'ALIAS': 'medium_large'},
source_filename='source.jpg',
thumbnail_extension='jpg',
)
self.assertEqual(filename, 'source.jpg.medium_large.jpg')
| Alias |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/genericType28.py | {
"start": 3425,
"end": 3513
} | class ____(Variadic_TA[T_co]): ...
# This should generate an error.
| VariadicChildCo_WithTA |
python | mlflow__mlflow | tests/store/tracking/test_rest_store.py | {
"start": 3131,
"end": 107108
} | class ____(RestStore):
def _call_endpoint(self, api, json_body):
raise MyCoolException("cool")
def mock_http_request():
return mock.patch(
"mlflow.utils.rest_utils.http_request",
return_value=mock.MagicMock(status_code=200, text="{}"),
)
def test_successful_http_request():
def mock_request(*args, **kwargs):
# Filter out None arguments
assert args == ("POST", "https://hello/api/2.0/mlflow/experiments/search")
kwargs = {k: v for k, v in kwargs.items() if v is not None}
assert kwargs == {
"allow_redirects": True,
"json": {"view_type": "ACTIVE_ONLY"},
"headers": DefaultRequestHeaderProvider().request_headers(),
"verify": True,
"timeout": 120,
}
response = mock.MagicMock()
response.status_code = 200
response.text = '{"experiments": [{"name": "Exp!", "lifecycle_stage": "active"}]}'
return response
with mock.patch("requests.Session.request", side_effect=mock_request):
store = RestStore(lambda: MlflowHostCreds("https://hello"))
experiments = store.search_experiments()
assert experiments[0].name == "Exp!"
def test_failed_http_request():
response = mock.MagicMock()
response.status_code = 404
response.text = '{"error_code": "RESOURCE_DOES_NOT_EXIST", "message": "No experiment"}'
with mock.patch("requests.Session.request", return_value=response):
store = RestStore(lambda: MlflowHostCreds("https://hello"))
with pytest.raises(MlflowException, match="RESOURCE_DOES_NOT_EXIST: No experiment"):
store.search_experiments()
def test_failed_http_request_custom_handler():
response = mock.MagicMock()
response.status_code = 404
response.text = '{"error_code": "RESOURCE_DOES_NOT_EXIST", "message": "No experiment"}'
with mock.patch("requests.Session.request", return_value=response):
store = CustomErrorHandlingRestStore(lambda: MlflowHostCreds("https://hello"))
with pytest.raises(MyCoolException, match="cool"):
store.search_experiments()
def test_response_with_unknown_fields():
experiment_json = {
"experiment_id": "1",
"name": "My experiment",
"artifact_location": "foo",
"lifecycle_stage": "deleted",
"OMG_WHAT_IS_THIS_FIELD": "Hooly cow",
}
response = mock.MagicMock()
response.status_code = 200
experiments = {"experiments": [experiment_json]}
response.text = json.dumps(experiments)
with mock.patch("requests.Session.request", return_value=response):
store = RestStore(lambda: MlflowHostCreds("https://hello"))
experiments = store.search_experiments()
assert len(experiments) == 1
assert experiments[0].name == "My experiment"
def _args(host_creds, endpoint, method, json_body, use_v3=False, retry_timeout_seconds=None):
version = "3.0" if use_v3 else "2.0"
res = {
"host_creds": host_creds,
"endpoint": f"/api/{version}/mlflow/{endpoint}",
"method": method,
}
if retry_timeout_seconds is not None:
res["retry_timeout_seconds"] = retry_timeout_seconds
if method == "GET":
res["params"] = json.loads(json_body)
else:
res["json"] = json.loads(json_body)
return res
def _verify_requests(
http_request, host_creds, endpoint, method, json_body, use_v3=False, retry_timeout_seconds=None
):
"""
Verify HTTP requests in tests.
Args:
http_request: The mocked HTTP request object
host_creds: MlflowHostCreds object
endpoint: The endpoint being called (e.g., "traces/123")
method: The HTTP method (e.g., "GET", "POST")
json_body: The request body as a JSON string
use_v3: If True, verify using /api/3.0/mlflow/ prefix instead of /api/2.0/mlflow/
This is used for trace-related endpoints that use the V3 API.
retry_timeout_seconds: The retry timeout seconds to use for the request
"""
http_request.assert_any_call(
**(_args(host_creds, endpoint, method, json_body, use_v3, retry_timeout_seconds))
)
def test_requestor():
creds = MlflowHostCreds("https://hello")
store = RestStore(lambda: creds)
user_name = "mock user"
source_name = "rest test"
run_name = "my name"
source_name_patch = mock.patch(
"mlflow.tracking.context.default_context._get_source_name", return_value=source_name
)
source_type_patch = mock.patch(
"mlflow.tracking.context.default_context._get_source_type",
return_value=SourceType.LOCAL,
)
with (
mock_http_request() as mock_http,
mock.patch("mlflow.tracking._tracking_service.utils._get_store", return_value=store),
mock.patch("mlflow.tracking.context.default_context._get_user", return_value=user_name),
mock.patch("time.time", return_value=13579),
source_name_patch,
source_type_patch,
):
with mlflow.start_run(experiment_id="43", run_name=run_name):
cr_body = message_to_json(
CreateRun(
experiment_id="43",
user_id=user_name,
run_name=run_name,
start_time=13579000,
tags=[
ProtoRunTag(key="mlflow.source.name", value=source_name),
ProtoRunTag(key="mlflow.source.type", value="LOCAL"),
ProtoRunTag(key="mlflow.user", value=user_name),
ProtoRunTag(key="mlflow.runName", value=run_name),
],
)
)
expected_kwargs = _args(creds, "runs/create", "POST", cr_body)
assert mock_http.call_count == 1
actual_kwargs = mock_http.call_args[1]
# Test the passed tag values separately from the rest of the request
# Tag order is inconsistent on Python 2 and 3, but the order does not matter
expected_tags = expected_kwargs["json"].pop("tags")
actual_tags = actual_kwargs["json"].pop("tags")
assert sorted(expected_tags, key=lambda t: t["key"]) == sorted(
actual_tags, key=lambda t: t["key"]
)
assert expected_kwargs == actual_kwargs
with mock_http_request() as mock_http:
store.log_param("some_uuid", Param("k1", "v1"))
body = message_to_json(
LogParam(run_uuid="some_uuid", run_id="some_uuid", key="k1", value="v1")
)
_verify_requests(mock_http, creds, "runs/log-parameter", "POST", body)
with mock_http_request() as mock_http:
store.set_experiment_tag("some_id", ExperimentTag("t1", "abcd" * 1000))
body = message_to_json(
SetExperimentTag(experiment_id="some_id", key="t1", value="abcd" * 1000)
)
_verify_requests(mock_http, creds, "experiments/set-experiment-tag", "POST", body)
with mock_http_request() as mock_http:
store.set_tag("some_uuid", RunTag("t1", "abcd" * 1000))
body = message_to_json(
SetTag(run_uuid="some_uuid", run_id="some_uuid", key="t1", value="abcd" * 1000)
)
_verify_requests(mock_http, creds, "runs/set-tag", "POST", body)
with mock_http_request() as mock_http:
store.delete_tag("some_uuid", "t1")
body = message_to_json(DeleteTag(run_id="some_uuid", key="t1"))
_verify_requests(mock_http, creds, "runs/delete-tag", "POST", body)
with mock_http_request() as mock_http:
store.log_metric("u2", Metric("m1", 0.87, 12345, 3))
body = message_to_json(
LogMetric(run_uuid="u2", run_id="u2", key="m1", value=0.87, timestamp=12345, step=3)
)
_verify_requests(mock_http, creds, "runs/log-metric", "POST", body)
with mock_http_request() as mock_http:
metrics = [
Metric("m1", 0.87, 12345, 0),
Metric("m2", 0.49, 12345, -1),
Metric("m3", 0.58, 12345, 2),
]
params = [Param("p1", "p1val"), Param("p2", "p2val")]
tags = [RunTag("t1", "t1val"), RunTag("t2", "t2val")]
store.log_batch(run_id="u2", metrics=metrics, params=params, tags=tags)
metric_protos = [metric.to_proto() for metric in metrics]
param_protos = [param.to_proto() for param in params]
tag_protos = [tag.to_proto() for tag in tags]
body = message_to_json(
LogBatch(run_id="u2", metrics=metric_protos, params=param_protos, tags=tag_protos)
)
_verify_requests(mock_http, creds, "runs/log-batch", "POST", body)
with mock_http_request() as mock_http:
dataset = Dataset(name="name", digest="digest", source_type="st", source="source")
tag = InputTag(key="k1", value="v1")
dataset_input = DatasetInput(dataset=dataset, tags=[tag])
store.log_inputs("some_uuid", [dataset_input])
body = message_to_json(LogInputs(run_id="some_uuid", datasets=[dataset_input.to_proto()]))
_verify_requests(mock_http, creds, "runs/log-inputs", "POST", body)
with mock_http_request() as mock_http:
store.delete_run("u25")
_verify_requests(
mock_http, creds, "runs/delete", "POST", message_to_json(DeleteRun(run_id="u25"))
)
with mock_http_request() as mock_http:
store.restore_run("u76")
_verify_requests(
mock_http, creds, "runs/restore", "POST", message_to_json(RestoreRun(run_id="u76"))
)
with mock_http_request() as mock_http:
store.delete_experiment("0")
_verify_requests(
mock_http,
creds,
"experiments/delete",
"POST",
message_to_json(DeleteExperiment(experiment_id="0")),
)
with mock_http_request() as mock_http:
store.restore_experiment("0")
_verify_requests(
mock_http,
creds,
"experiments/restore",
"POST",
message_to_json(RestoreExperiment(experiment_id="0")),
)
with mock.patch("mlflow.utils.rest_utils.http_request") as mock_http:
response = mock.MagicMock()
response.status_code = 200
response.text = '{"runs": ["1a", "2b", "3c"], "next_page_token": "67890fghij"}'
mock_http.return_value = response
result = store.search_runs(
["0", "1"],
"params.p1 = 'a'",
ViewType.ACTIVE_ONLY,
max_results=10,
order_by=["a"],
page_token="12345abcde",
)
expected_message = SearchRuns(
experiment_ids=["0", "1"],
filter="params.p1 = 'a'",
run_view_type=ViewType.to_proto(ViewType.ACTIVE_ONLY),
max_results=10,
order_by=["a"],
page_token="12345abcde",
)
_verify_requests(mock_http, creds, "runs/search", "POST", message_to_json(expected_message))
assert result.token == "67890fghij"
with mock_http_request() as mock_http:
run_id = "run_id"
m = Model(artifact_path="model/path", run_id="run_id", flavors={"tf": "flavor body"})
store.record_logged_model("run_id", m)
expected_message = LogModel(run_id=run_id, model_json=json.dumps(m.get_tags_dict()))
_verify_requests(
mock_http, creds, "runs/log-model", "POST", message_to_json(expected_message)
)
# if model has config, it should be removed from the model_json before sending to the server
with mock_http_request() as mock_http:
run_id = "run_id"
flavors_with_config = {
"tf": "flavor body",
"python_function": {"config": {"a": 1}, "code": "code"},
}
m_with_config = Model(
artifact_path="model/path", run_id="run_id", flavors=flavors_with_config
)
store.record_logged_model("run_id", m_with_config)
flavors = m_with_config.get_tags_dict().get("flavors", {})
assert all("config" not in v for v in flavors.values())
expected_message = LogModel(
run_id=run_id, model_json=json.dumps(m_with_config.get_tags_dict())
)
_verify_requests(
mock_http,
creds,
"runs/log-model",
"POST",
message_to_json(expected_message),
)
with mock_http_request() as mock_http:
trace_id = "tr-123"
store.get_trace_info(trace_id)
# Verify the V3 API was called
v3_expected_message = GetTraceInfoV3(trace_id=trace_id)
_verify_requests(
mock_http,
creds,
"traces/tr-123",
"GET",
message_to_json(v3_expected_message),
use_v3=True,
)
def test_get_experiment_by_name():
    """get_experiment_by_name issues GET experiments/get-by-name and maps the
    payload onto an Experiment; a 404 (RESOURCE_DOES_NOT_EXIST) yields None."""
    host_creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: host_creds)
    with mock.patch("mlflow.utils.rest_utils.http_request") as mock_http:
        exp = Experiment(
            experiment_id="123",
            name="abc",
            artifact_location="/abc",
            lifecycle_stage=LifecycleStage.ACTIVE,
        )
        ok_response = mock.MagicMock()
        ok_response.status_code = 200
        ok_response.text = json.dumps(
            {"experiment": json.loads(message_to_json(exp.to_proto()))}
        )
        mock_http.return_value = ok_response
        fetched = store.get_experiment_by_name("abc")
        _verify_requests(
            mock_http,
            host_creds,
            "experiments/get-by-name",
            "GET",
            message_to_json(GetExperimentByName(experiment_name="abc")),
        )
        assert fetched.experiment_id == exp.experiment_id
        assert fetched.name == exp.name
        assert fetched.artifact_location == exp.artifact_location
        assert fetched.lifecycle_stage == exp.lifecycle_stage

        # A nonexistent experiment surfaces as None rather than an exception.
        mock_http.reset_mock()
        missing_response = mock.MagicMock()
        missing_response.status_code = 404
        missing_response.text = MlflowException(
            "Exp doesn't exist!", RESOURCE_DOES_NOT_EXIST
        ).serialize_as_json()
        mock_http.return_value = missing_response
        assert store.get_experiment_by_name("nonexistent-experiment") is None
        _verify_requests(
            mock_http,
            host_creds,
            "experiments/get-by-name",
            "GET",
            message_to_json(GetExperimentByName(experiment_name="nonexistent-experiment")),
        )
        assert mock_http.call_count == 1
def test_search_experiments():
    """search_experiments serializes every argument into a SearchExperiments
    proto and POSTs it to experiments/search."""
    host_creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: host_creds)
    with mock_http_request() as mock_http:
        store.search_experiments(
            view_type=ViewType.DELETED_ONLY,
            max_results=5,
            filter_string="name",
            order_by=["name"],
            page_token="abc",
        )
        # Note: the store's `filter_string` kwarg maps to the proto `filter` field.
        expected = SearchExperiments(
            view_type=ViewType.DELETED_ONLY,
            max_results=5,
            filter="name",
            order_by=["name"],
            page_token="abc",
        )
        _verify_requests(
            mock_http, host_creds, "experiments/search", "POST", message_to_json(expected)
        )
def _mock_response_with_200_status_code():
mock_response = mock.MagicMock()
mock_response.status_code = 200
return mock_response
def test_get_metric_history_paginated():
    """get_metric_history fetches one page per call: the first call sends no
    page_token, the caller threads the returned token into the second call,
    and an empty next_page_token in the payload maps to a None token."""
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    response_1 = _mock_response_with_200_status_code()
    response_2 = _mock_response_with_200_status_code()
    # Page 1: two metrics plus a cursor pointing at the rest of the data.
    response_payload_1 = {
        "metrics": [
            {"key": "a_metric", "value": 42, "timestamp": 123456777, "step": 0},
            {"key": "a_metric", "value": 46, "timestamp": 123456797, "step": 1},
        ],
        "next_page_token": "AcursorForTheRestofTheData",
    }
    response_1.text = json.dumps(response_payload_1)
    # Page 2: final two metrics; empty token signals the end of pagination.
    response_payload_2 = {
        "metrics": [
            {"key": "a_metric", "value": 40, "timestamp": 123456877, "step": 2},
            {"key": "a_metric", "value": 56, "timestamp": 123456897, "step": 3},
        ],
        "next_page_token": "",
    }
    response_2.text = json.dumps(response_payload_2)
    # side_effect makes requests.Session.request return page 1, then page 2.
    with mock.patch(
        "requests.Session.request", side_effect=[response_1, response_2]
    ) as mock_request:
        # Fetch the first page
        metrics = store.get_metric_history(
            run_id="2", metric_key="a_metric", max_results=2, page_token=None
        )
        mock_request.assert_called_once()
        # page_token must be absent from the params of the first request.
        assert mock_request.call_args.kwargs["params"] == {
            "max_results": 2,
            "metric_key": "a_metric",
            "run_id": "2",
            "run_uuid": "2",
        }
        assert len(metrics) == 2
        assert metrics[0] == Metric(key="a_metric", value=42, timestamp=123456777, step=0)
        assert metrics[1] == Metric(key="a_metric", value=46, timestamp=123456797, step=1)
        assert metrics.token == "AcursorForTheRestofTheData"
        # Fetch the second page
        mock_request.reset_mock()
        metrics = store.get_metric_history(
            run_id="2", metric_key="a_metric", max_results=2, page_token=metrics.token
        )
        mock_request.assert_called_once()
        assert mock_request.call_args.kwargs["params"] == {
            "max_results": 2,
            "page_token": "AcursorForTheRestofTheData",
            "metric_key": "a_metric",
            "run_id": "2",
            "run_uuid": "2",
        }
        assert len(metrics) == 2
        assert metrics[0] == Metric(key="a_metric", value=40, timestamp=123456877, step=2)
        assert metrics[1] == Metric(key="a_metric", value=56, timestamp=123456897, step=3)
        assert metrics.token is None
def test_get_metric_history_on_non_existent_metric_key():
    """A metric key with no recorded history deserializes to an empty list."""
    host_creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: host_creds)
    empty_response = _mock_response_with_200_status_code()
    empty_response.text = json.dumps({})
    with mock.patch(
        "requests.Session.request", side_effect=[empty_response]
    ) as mock_request:
        history = store.get_metric_history(run_id="1", metric_key="test_metric")
        mock_request.assert_called_once()
        assert history == []
def test_deprecated_start_trace_v2():
    """deprecated_start_trace_v2 POSTs a StartTrace message to the v2 `traces`
    endpoint and parses the returned trace_info into a TraceInfoV2, with all
    metadata/tag values coerced to strings on the wire."""
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    request_id = "tr-123"
    experiment_id = "447585625682310"
    timestamp_ms = 123
    # Metadata/tags values should be string, but should not break for other types too
    metadata = {"key1": "val1", "key2": "val2", "key3": 123, TRACE_SCHEMA_VERSION_KEY: "2"}
    tags = {"tag1": "tv1", "tag2": "tv2", "tag3": None}
    # Non-string values (123, None) are stringified before being packed into
    # the repeated proto fields.
    expected_request = StartTrace(
        experiment_id=experiment_id,
        timestamp_ms=123,
        request_metadata=[
            ProtoTraceRequestMetadata(key=k, value=str(v)) for k, v in metadata.items()
        ],
        tags=[ProtoTraceTag(key=k, value=str(v)) for k, v in tags.items()],
    )
    response = mock.MagicMock()
    response.status_code = 200
    response.text = json.dumps(
        {
            "trace_info": {
                "request_id": request_id,
                "experiment_id": experiment_id,
                "timestamp_ms": timestamp_ms,
                "execution_time_ms": None,
                "status": 0,  # Running
                "request_metadata": [{"key": k, "value": str(v)} for k, v in metadata.items()],
                "tags": [{"key": k, "value": str(v)} for k, v in tags.items()],
            }
        }
    )
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        res = store.deprecated_start_trace_v2(
            experiment_id=experiment_id,
            timestamp_ms=timestamp_ms,
            request_metadata=metadata,
            tags=tags,
        )
        _verify_requests(mock_http, creds, "traces", "POST", message_to_json(expected_request))
        assert isinstance(res, TraceInfoV2)
        assert res.request_id == request_id
        assert res.experiment_id == experiment_id
        assert res.timestamp_ms == timestamp_ms
        # A null execution_time_ms in the payload deserializes to 0.
        assert res.execution_time_ms == 0
        assert res.status == TraceStatus.UNSPECIFIED
        assert res.request_metadata == {k: str(v) for k, v in metadata.items()}
        assert res.tags == {k: str(v) for k, v in tags.items()}
def test_start_trace(monkeypatch):
    """start_trace wraps the trace in a StartTraceV3 proto and POSTs it to the
    v3 `traces` endpoint, honoring the async-logging retry timeout env var
    (pinned to 1 second here so _verify_requests can check it)."""
    monkeypatch.setenv(MLFLOW_ASYNC_TRACE_LOGGING_RETRY_TIMEOUT.name, "1")
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    trace = Trace(
        info=TraceInfo(
            trace_id="tr-123",
            trace_location=TraceLocation.from_experiment_id("123"),
            request_time=123,
            execution_duration=10,
            state=TraceState.OK,
            request_preview="",
            response_preview="",
            trace_metadata={},
        ),
        data=TraceData(),
    )
    response = mock.MagicMock()
    response.status_code = 200
    response.text = json.dumps({})
    expected_request = StartTraceV3(trace=trace.to_proto())
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        store.start_trace(trace.info)
        _verify_requests(
            mock_http,
            creds,
            "traces",
            "POST",
            message_to_json(expected_request),
            use_v3=True,
            retry_timeout_seconds=1,
        )
def test_deprecated_end_trace_v2():
    """deprecated_end_trace_v2 PATCHes traces/{request_id} (v2, non-v3 path)
    with an EndTrace message and parses the returned trace_info into a
    TraceInfoV2."""
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    experiment_id = "447585625682310"
    request_id = "tr-123"
    timestamp_ms = 123
    status = TraceStatus.OK
    metadata = {"key1": "val1", "key2": "val2", TRACE_SCHEMA_VERSION_KEY: "2"}
    tags = {"tag1": "tv1", "tag2": "tv2"}
    expected_request = EndTrace(
        request_id=request_id,
        timestamp_ms=123,
        status=status,
        request_metadata=[ProtoTraceRequestMetadata(key=k, value=v) for k, v in metadata.items()],
        tags=[ProtoTraceTag(key=k, value=v) for k, v in tags.items()],
    )
    response = mock.MagicMock()
    response.status_code = 200
    response.text = json.dumps(
        {
            "trace_info": {
                "request_id": request_id,
                "experiment_id": experiment_id,
                "timestamp_ms": timestamp_ms,
                "execution_time_ms": 12345,
                "status": 1,  # OK
                "request_metadata": [{"key": k, "value": v} for k, v in metadata.items()],
                "tags": [{"key": k, "value": v} for k, v in tags.items()],
            }
        }
    )
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        res = store.deprecated_end_trace_v2(
            request_id=request_id,
            timestamp_ms=timestamp_ms,
            status=status,
            request_metadata=metadata,
            tags=tags,
        )
        # The end-trace call is keyed by request_id in the URL path.
        _verify_requests(
            mock_http,
            creds,
            f"traces/{request_id}",
            "PATCH",
            message_to_json(expected_request),
            use_v3=False,
        )
        assert isinstance(res, TraceInfoV2)
        assert res.request_id == request_id
        assert res.experiment_id == experiment_id
        assert res.timestamp_ms == timestamp_ms
        assert res.execution_time_ms == 12345
        assert res.status == TraceStatus.OK
        assert res.request_metadata == metadata
        assert res.tags == tags
def test_search_traces():
    """Test the search_traces method with default behavior using SearchTracesV3Request."""
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    # Format the response
    response.text = json.dumps(
        {
            "traces": [
                {
                    "trace_id": "tr-1234",
                    "trace_location": {
                        "type": "MLFLOW_EXPERIMENT",
                        "mlflow_experiment": {"experiment_id": "1234"},
                    },
                    "request_time": "1970-01-01T00:00:00.123Z",
                    "execution_duration_ms": 456,
                    "state": "OK",
                    "trace_metadata": {"key": "value"},
                    "tags": {"k": "v"},
                }
            ],
            "next_page_token": "token",
        }
    )
    # Parameters for search_traces
    experiment_ids = ["1234"]
    filter_string = "state = 'OK'"
    max_results = 10
    order_by = ["request_time DESC"]
    page_token = "12345abcde"
    # Test with databricks tracking URI (using v3 endpoint)
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        trace_infos, token = store.search_traces(
            locations=experiment_ids,
            filter_string=filter_string,
            max_results=max_results,
            order_by=order_by,
            page_token=page_token,
        )
        # Verify the correct endpoint was called
        call_args = mock_http.call_args[1]
        assert call_args["endpoint"] == f"{_V3_TRACE_REST_API_PATH_PREFIX}/search"
        # Verify the correct parameters were passed
        json_body = call_args["json"]
        # The field name should now be 'locations' instead of 'trace_locations'
        assert "locations" in json_body
        # The experiment_ids are converted to trace_locations
        assert len(json_body["locations"]) == 1
        assert json_body["locations"][0]["mlflow_experiment"]["experiment_id"] == experiment_ids[0]
        assert json_body["filter"] == filter_string
        assert json_body["max_results"] == max_results
        assert json_body["order_by"] == order_by
        assert json_body["page_token"] == page_token
    # Verify the correct parameters were passed and the correct trace info objects were returned
    # for either endpoint
    assert len(trace_infos) == 1
    assert isinstance(trace_infos[0], TraceInfo)
    assert trace_infos[0].trace_id == "tr-1234"
    assert trace_infos[0].experiment_id == "1234"
    # ISO-8601 request_time "1970-01-01T00:00:00.123Z" maps to epoch millis 123.
    assert trace_infos[0].request_time == 123
    # V3's state maps to V2's status
    assert trace_infos[0].state == TraceStatus.OK.to_state()
    # This is correct because TraceInfoV3.from_proto converts the repeated field tags to a dict
    assert trace_infos[0].tags == {"k": "v"}
    assert trace_infos[0].trace_metadata == {"key": "value"}
    assert token == "token"
def test_search_traces_errors():
    """Client-side validation errors raised by search_traces on the OSS store."""
    host_creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: host_creds)
    # A dotted "catalog.schema" location is not a valid experiment ID here.
    with pytest.raises(
        MlflowException,
        match="Locations must be a list of experiment IDs",
    ):
        store.search_traces(locations=["catalog.schema"])
    # Filtering by model_id is not supported by this tracking server.
    with pytest.raises(
        MlflowException,
        match="Searching traces by model_id is not supported on the current tracking server.",
    ):
        store.search_traces(model_id="model_id")
def test_get_artifact_uri_for_trace_compatibility():
    """Test that get_artifact_uri_for_trace works with both TraceInfo and TraceInfoV3 objects."""
    from mlflow.tracing.utils.artifact_utils import get_artifact_uri_for_trace

    # Create a TraceInfo (v2) object
    trace_info_v2 = TraceInfoV2(
        request_id="tr-1234",
        experiment_id="1234",
        timestamp_ms=123,
        execution_time_ms=456,
        status=TraceStatus.OK,
        request_metadata={"key": "value"},
        # The helper reads the artifact URI from this reserved tag.
        tags={MLFLOW_ARTIFACT_LOCATION: "s3://bucket/trace-v2-path"},
    )
    # Create a TraceInfoV3 object
    trace_location = TraceLocation.from_experiment_id("5678")
    trace_info_v3 = TraceInfo(
        trace_id="tr-5678",
        trace_location=trace_location,
        request_time=789,
        state=TraceState.OK,
        trace_metadata={"key3": "value3"},
        tags={MLFLOW_ARTIFACT_LOCATION: "s3://bucket/trace-v3-path"},
    )
    # Test that get_artifact_uri_for_trace works with TraceInfo (v2)
    v2_uri = get_artifact_uri_for_trace(trace_info_v2)
    assert v2_uri == "s3://bucket/trace-v2-path"
    # Test that get_artifact_uri_for_trace works with TraceInfoV3
    v3_uri = get_artifact_uri_for_trace(trace_info_v3)
    assert v3_uri == "s3://bucket/trace-v3-path"
    # Test that get_artifact_uri_for_trace raises the expected exception when tag is missing
    trace_info_no_tag = TraceInfoV2(
        request_id="tr-1234",
        experiment_id="1234",
        timestamp_ms=123,
        execution_time_ms=456,
        status=TraceStatus.OK,
        tags={},
    )
    with pytest.raises(MlflowException, match="Unable to determine trace artifact location"):
        get_artifact_uri_for_trace(trace_info_no_tag)
@pytest.mark.parametrize(
    "delete_traces_kwargs",
    [
        {"experiment_id": "0", "request_ids": ["tr-1234"]},
        {"experiment_id": "0", "max_timestamp_millis": 1, "max_traces": 2},
    ],
)
def test_delete_traces(delete_traces_kwargs):
    """delete_traces POSTs a DeleteTraces message to traces/delete-traces and
    returns the server-reported number of deleted traces."""
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    # Build the expected wire message BEFORE renaming: the proto field is
    # still the legacy `request_ids`.
    request = DeleteTraces(**delete_traces_kwargs)
    response.text = json.dumps({"traces_deleted": 1})
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        # The store-level API takes `trace_ids`, so rename the kwarg before
        # calling it; the wire payload verified below keeps `request_ids`.
        if "request_ids" in delete_traces_kwargs:
            delete_traces_kwargs["trace_ids"] = delete_traces_kwargs.pop("request_ids")
        res = store.delete_traces(**delete_traces_kwargs)
        _verify_requests(mock_http, creds, "traces/delete-traces", "POST", message_to_json(request))
        assert res == 1
def test_delete_traces_with_batching():
    """Test that delete_traces batches requests when trace_ids exceed the batch size limit."""
    from mlflow.environment_variables import _MLFLOW_DELETE_TRACES_MAX_BATCH_SIZE

    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    # Create 250 trace IDs to test batching (should create 3 batches: 100, 100, 50)
    num_traces = 250
    trace_ids = [f"tr-{i}" for i in range(num_traces)]
    # Each batch returns some number of deleted traces
    response.text = json.dumps({"traces_deleted": 100})
    batch_size = _MLFLOW_DELETE_TRACES_MAX_BATCH_SIZE.get()
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        store.delete_traces(experiment_id="0", trace_ids=trace_ids)
        # Verify that we made 3 API calls (250 / 100 = 3 batches)
        expected_num_calls = math.ceil(num_traces / batch_size)
        assert mock_http.call_count == expected_num_calls
        # Verify that batch sizes are [100, 100, 50]
        # NOTE(review): this literal assumes the env var's default batch size
        # is 100. The call-count assertion above adapts to `batch_size`, but
        # this one does not — revisit if the default ever changes.
        batch_sizes = [len(call[1]["json"]["request_ids"]) for call in mock_http.call_args_list]
        assert batch_sizes == [100, 100, 50]
def test_set_trace_tag():
    """set_trace_tag PATCHes traces/{trace_id}/tags on the non-v3 API path
    and returns None."""
    host_creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: host_creds)
    trace_id = "tr-1234"
    expected_request = SetTraceTag(
        key="k",
        value="v",
    )
    tag_response = mock.MagicMock()
    tag_response.status_code = 200
    tag_response.text = "{}"
    with mock.patch(
        "mlflow.utils.rest_utils.http_request", return_value=tag_response
    ) as mock_http:
        result = store.set_trace_tag(
            trace_id=trace_id,
            key=expected_request.key,
            value=expected_request.value,
        )
        _verify_requests(
            mock_http,
            host_creds,
            f"traces/{trace_id}/tags",
            "PATCH",
            message_to_json(expected_request),
            use_v3=False,
        )
        assert result is None
@pytest.mark.parametrize("is_databricks", [True, False])
def test_log_assessment_feedback(is_databricks):
    """create_assessment POSTs a CreateAssessment proto for a Feedback to the
    v3 traces/{trace_id}/assessments endpoint and returns the parsed Feedback.

    NOTE(review): `is_databricks` is parametrized but never referenced in the
    body — confirm whether a host-type branch was intended here.
    """
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    response.text = json.dumps(
        {
            "assessment": {
                "assessment_id": "1234",
                "assessment_name": "assessment_name",
                "trace_id": "tr-1234",
                "source": {
                    "source_type": "LLM_JUDGE",
                    "source_id": "gpt-4o-mini",
                },
                "create_time": "2025-02-20T05:47:23Z",
                "last_update_time": "2025-02-20T05:47:23Z",
                "feedback": {"value": True},
                "rationale": "rationale",
                "metadata": {"model": "gpt-4o-mini"},
                "error": None,
                "span_id": None,
            }
        }
    )
    feedback = Feedback(
        trace_id="tr-1234",
        name="assessment_name",
        value=True,
        source=AssessmentSource(
            source_type=AssessmentSourceType.LLM_JUDGE, source_id="gpt-4o-mini"
        ),
        create_time_ms=int(time.time() * 1000),
        last_update_time_ms=int(time.time() * 1000),
        rationale="rationale",
        metadata={"model": "gpt-4o-mini"},
        span_id=None,
    )
    request = CreateAssessment(assessment=feedback.to_proto())
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        res = store.create_assessment(feedback)
        _verify_requests(
            mock_http,
            creds,
            "traces/tr-1234/assessments",
            "POST",
            message_to_json(request),
            use_v3=True,
        )
        assert isinstance(res, Feedback)
        assert res.assessment_id is not None
        assert res.value == feedback.value
@pytest.mark.parametrize("is_databricks", [True, False])
def test_log_assessment_expectation(is_databricks):
    """create_assessment POSTs a CreateAssessment proto for an Expectation and
    deserializes the JSON-serialized expectation value from the response.

    NOTE(review): `is_databricks` is parametrized but never referenced in the
    body — confirm whether a host-type branch was intended here.
    """
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    response.text = json.dumps(
        {
            "assessment": {
                "assessment_id": "1234",
                "assessment_name": "assessment_name",
                "trace_id": "tr-1234",
                "source": {
                    "source_type": "HUMAN",
                    "source_id": "me",
                },
                "create_time": "2025-02-20T05:47:23Z",
                "last_update_time": "2025-02-20T05:47:23Z",
                # Structured expectation values travel as a JSON string plus a
                # serialization_format marker.
                "expectation": {
                    "serialized_value": {
                        "value": '{"key1": "value1", "key2": "value2"}',
                        "serialization_format": "JSON_FORMAT",
                    }
                },
                "error": None,
                "span_id": None,
            }
        }
    )
    expectation = Expectation(
        trace_id="tr-1234",
        name="assessment_name",
        value={"key1": "value1", "key2": "value2"},
        source=AssessmentSource(source_type=AssessmentSourceType.HUMAN, source_id="me"),
        create_time_ms=int(time.time() * 1000),
        last_update_time_ms=int(time.time() * 1000),
        span_id=None,
    )
    request = CreateAssessment(assessment=expectation.to_proto())
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        res = store.create_assessment(expectation)
        _verify_requests(
            mock_http,
            creds,
            "traces/tr-1234/assessments",
            "POST",
            message_to_json(request),
            use_v3=True,
        )
        assert isinstance(res, Expectation)
        assert res.assessment_id is not None
        assert res.value == expectation.value
@pytest.mark.parametrize("is_databricks", [True, False])
@pytest.mark.parametrize(
    ("updates", "expected_request_json"),
    [
        (
            {"name": "updated_name"},
            {
                "assessment": {
                    "assessment_id": "1234",
                    "trace_id": "tr-1234",
                    "assessment_name": "updated_name",
                },
                # update_mask uses camelCase proto JSON field names.
                "update_mask": "assessmentName",
            },
        ),
        (
            {"expectation": ExpectationValue(value="updated_value")},
            {
                "assessment": {
                    "assessment_id": "1234",
                    "trace_id": "tr-1234",
                    "expectation": {"value": "updated_value"},
                },
                "update_mask": "expectation",
            },
        ),
        (
            {
                "feedback": FeedbackValue(value=0.5),
                "rationale": "update",
                "metadata": {"model": "gpt-4o-mini"},
            },
            {
                "assessment": {
                    "assessment_id": "1234",
                    "trace_id": "tr-1234",
                    "feedback": {"value": 0.5},
                    "rationale": "update",
                    "metadata": {"model": "gpt-4o-mini"},
                },
                "update_mask": "feedback,rationale,metadata",
            },
        ),
    ],
)
def test_update_assessment(updates, expected_request_json, is_databricks):
    """update_assessment PATCHes traces/{tid}/assessments/{aid} (v3) with only
    the supplied fields, and an update_mask listing each changed field.

    NOTE(review): `is_databricks` is parametrized but unused in the body.
    """
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    response.text = json.dumps(
        {
            "assessment": {
                "assessment_id": "1234",
                "assessment_name": "assessment_name",
                "trace_id": "tr-1234",
                "source": {
                    "source_type": "LLM_JUDGE",
                    "source_id": "gpt-4o-mini",
                },
                "create_time": "2025-02-20T05:47:23Z",
                "last_update_time": "2025-02-25T01:23:45Z",
                "feedback": {"value": True},
                "rationale": "rationale",
                "metadata": {"model": "gpt-4o-mini"},
                "error": None,
                "span_id": None,
            }
        }
    )
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        res = store.update_assessment(
            trace_id="tr-1234",
            assessment_id="1234",
            **updates,
        )
        _verify_requests(
            mock_http,
            creds,
            "traces/tr-1234/assessments/1234",
            "PATCH",
            json.dumps(expected_request_json),
            use_v3=True,
        )
        assert isinstance(res, Assessment)
@pytest.mark.parametrize("is_databricks", [True, False])
def test_get_assessment(is_databricks):
    """get_assessment GETs traces/{tid}/assessments/{aid} (v3) and returns the
    parsed Feedback with source and value fields populated.

    NOTE(review): `is_databricks` is parametrized but unused in the body.
    """
    creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: creds)
    response = mock.MagicMock()
    response.status_code = 200
    response.text = json.dumps(
        {
            "assessment": {
                "assessment_id": "1234",
                "assessment_name": "assessment_name",
                "trace_id": "tr-1234",
                "source": {
                    "source_type": "LLM_JUDGE",
                    "source_id": "gpt-4o-mini",
                },
                "create_time": "2025-02-20T05:47:23Z",
                "last_update_time": "2025-02-25T01:23:45Z",
                "feedback": {"value": "test value"},
                "rationale": "rationale",
                "metadata": {"model": "gpt-4o-mini"},
                "error": None,
                "span_id": None,
            }
        }
    )
    with mock.patch("mlflow.utils.rest_utils.http_request", return_value=response) as mock_http:
        res = store.get_assessment(trace_id="tr-1234", assessment_id="1234")
        expected_request_json = {"assessment_id": "1234", "trace_id": "tr-1234"}
        _verify_requests(
            mock_http,
            creds,
            "traces/tr-1234/assessments/1234",
            "GET",
            json.dumps(expected_request_json),
            use_v3=True,
        )
        assert isinstance(res, Feedback)
        assert res.assessment_id == "1234"
        assert res.name == "assessment_name"
        assert res.trace_id == "tr-1234"
        assert res.source.source_type == AssessmentSourceType.LLM_JUDGE
        assert res.source.source_id == "gpt-4o-mini"
        assert res.value == "test value"
@pytest.mark.parametrize("is_databricks", [True, False])
def test_delete_assessment(is_databricks):
    """delete_assessment issues DELETE traces/{tid}/assessments/{aid} on the
    v3 API with the IDs echoed in the request body."""
    host_creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: host_creds)
    empty_response = mock.MagicMock()
    empty_response.status_code = 200
    empty_response.text = "{}"
    with mock.patch(
        "mlflow.utils.rest_utils.http_request", return_value=empty_response
    ) as mock_http:
        store.delete_assessment(trace_id="tr-1234", assessment_id="1234")
        expected_body = json.dumps({"assessment_id": "1234", "trace_id": "tr-1234"})
        _verify_requests(
            mock_http,
            host_creds,
            "traces/tr-1234/assessments/1234",
            "DELETE",
            expected_body,
            use_v3=True,
        )
def test_update_assessment_invalid_update():
    """Supplying both an expectation and a feedback update is rejected
    client-side before any HTTP request is made."""
    host_creds = MlflowHostCreds("https://hello")
    store = RestStore(lambda: host_creds)
    with pytest.raises(MlflowException, match="Exactly one of `expectation` or `feedback`"):
        store.update_assessment(
            trace_id="tr-1234",
            assessment_id="1234",
            feedback=FeedbackValue(value=0.5),
            expectation=ExpectationValue(value="updated_value"),
        )
def test_get_trace_info():
    """get_trace_info returns a TraceInfo carrying metadata, tags, state, and
    every attached assessment from the v3 GetTraceInfo response."""
    # Generate a sample trace in v3 format
    with mlflow.start_span(name="test_span") as span:
        span.set_inputs({"input": "value"})
        span.set_outputs({"output": "value"})
    trace = mlflow.get_trace(span.trace_id)
    trace.info.trace_metadata = {"key1": "value1"}
    trace.info.tags = {"tag1": "value1"}
    # Cover the assessment variants: plain feedback, feedback with an error,
    # a span-scoped expectation, and a structured LLM-judge expectation.
    trace.info.assessments = [
        Feedback(name="feedback", value=0.9, trace_id=span.trace_id),
        Feedback(
            name="feedback_error",
            value=None,
            error=AssessmentError(error_code="500", error_message="error message"),
            trace_id=span.trace_id,
        ),
        Expectation(name="expectation", value=True, trace_id=span.trace_id, span_id=span.span_id),
        Expectation(
            name="complex_expectation",
            value={"complex": [{"key": "value"}]},
            source=AssessmentSource(
                source_type=AssessmentSourceType.LLM_JUDGE, source_id="gpt-4o-mini"
            ),
            trace_id=span.trace_id,
        ),
    ]
    trace_proto = trace.to_proto()
    mock_response = GetTraceInfoV3.Response(trace=trace_proto)
    store = RestStore(lambda: MlflowHostCreds("https://hello"))
    with mock.patch.object(store, "_call_endpoint", return_value=mock_response):
        result = store.get_trace_info(span.trace_id)
    # Verify we get the expected object back
    assert isinstance(result, TraceInfo)
    assert result.trace_id == span.trace_id
    assert result.experiment_id == "0"
    assert result.trace_metadata == {"key1": "value1"}
    assert result.tags == {"tag1": "value1"}
    assert result.state == TraceState.OK
    assert len(result.assessments) == 4
    assert result.assessments[0].name == "feedback"
    assert result.assessments[1].name == "feedback_error"
    assert result.assessments[2].name == "expectation"
    assert result.assessments[3].name == "complex_expectation"
def test_get_trace():
    """get_trace fetches a full Trace (info + spans) via the GetTrace
    endpoint, forwarding trace_id and allow_partial in the JSON body."""
    # Generate a sample trace with spans
    with mlflow.start_span(name="root_span") as span:
        span.set_inputs({"input": "value"})
        span.set_outputs({"output": "value"})
        with mlflow.start_span(name="child_span") as child:
            child.set_inputs({"child_input": "child_value"})
    trace = mlflow.get_trace(span.trace_id)
    trace_proto = trace.to_proto()
    mock_response = GetTrace.Response(trace=trace_proto)
    store = RestStore(lambda: MlflowHostCreds("https://hello"))
    with mock.patch.object(store, "_call_endpoint", return_value=mock_response) as mock_call:
        result = store.get_trace(span.trace_id, allow_partial=True)
        # Verify we get the expected object back
        assert isinstance(result, Trace)
        assert result.info.trace_id == span.trace_id
        # Two spans: the root span and its child.
        assert len(result.data.spans) == 2
        # Verify the endpoint was called with correct parameters
        mock_call.assert_called_once()
        call_args = mock_call.call_args
        assert call_args[0][0] == GetTrace
        # Check the request body contains the trace_id and allow_partial
        request_body_json = json.loads(call_args[0][1])
        assert request_body_json["trace_id"] == span.trace_id
        assert request_body_json["allow_partial"] is True
def test_get_trace_with_allow_partial_false():
    """get_trace serializes allow_partial=False into the GetTrace body."""
    with mlflow.start_span(name="test_span") as span:
        span.set_inputs({"input": "value"})
    sample_trace = mlflow.get_trace(span.trace_id)
    stub_response = GetTrace.Response(trace=sample_trace.to_proto())
    store = RestStore(lambda: MlflowHostCreds("https://hello"))
    with mock.patch.object(store, "_call_endpoint", return_value=stub_response) as mock_call:
        fetched = store.get_trace(span.trace_id, allow_partial=False)
    assert isinstance(fetched, Trace)
    assert fetched.info.trace_id == span.trace_id
    # The second positional argument to _call_endpoint is the JSON body.
    request_body = json.loads(mock_call.call_args[0][1])
    assert request_body["allow_partial"] is False
def test_log_logged_model_params():
    """log_logged_model_params splits params into batches of 100 and issues
    one LogLoggedModelParamsRequest per batch via call_endpoint."""
    with mock.patch("mlflow.store.tracking.rest_store.call_endpoint") as mock_call_endpoint:
        # Create test data: enough params to require three batches (100/100/50).
        model_id = "model_123"
        params = [
            LoggedModelParameter(key=f"param_{i}", value=f"value_{i}") for i in range(250)
        ]
        batches = [
            message_to_json(
                LogLoggedModelParamsRequest(
                    model_id=model_id,
                    params=[p.to_proto() for p in params[start : start + 100]],
                )
            )
            for start in (0, 100, 200)
        ]

        store = RestStore(lambda: None)
        store.log_logged_model_params(model_id=model_id, params=params)

        # Verify call_endpoint was called once per batch with the right payload.
        assert mock_call_endpoint.call_count == 3
        expected_endpoint = f"{get_logged_model_endpoint(model_id)}/params"
        for i, call in enumerate(mock_call_endpoint.call_args_list):
            _, endpoint, method, json_body, response_proto = call.args
            # BUG FIX: the previous `assert endpoint, method == ...` parsed as
            # `assert endpoint` with the comparison as an (unevaluated)
            # assertion message, so neither endpoint nor method was checked —
            # and `RestStore._METHOD_TO_INFO` would have raised AttributeError
            # had it ever been evaluated.
            assert endpoint == expected_endpoint
            # NOTE(review): the HTTP verb is not pinned here; confirm the
            # expected method for the logged-model params endpoint before
            # asserting on `method`.
            assert json_body == batches[i]
@pytest.mark.parametrize(
    ("params_count", "expected_call_count", "create_batch_size", "log_batch_size"),
    [
        (None, 1, 100, 100),  # None params - only CreateLoggedModel
        (0, 1, 100, 100),  # No params - only CreateLoggedModel
        (5, 1, 100, 100),  # Few params - only CreateLoggedModel
        (100, 1, 100, 100),  # Exactly 100 params - only CreateLoggedModel
        (
            150,
            3,
            100,
            100,
        ),  # 150 params - CreateLoggedModel + LogLoggedModelParamsRequest + GetLoggedModel
        (
            250,
            4,
            100,
            100,
        ),  # 250 params - CreateLoggedModel + 2 LogLoggedModelParamsRequest calls + GetLoggedModel
        (
            250,
            3,
            200,
            100,
        ),  # 250 params with larger create batch - CreateLoggedModel
        # + 1 LogLoggedModelParamsRequest + GetLoggedModel
        (
            250,
            5,
            100,
            50,
        ),  # 250 params with smaller log batch - CreateLoggedModel
        # + 4 LogLoggedModelParamsRequest calls + GetLoggedModel
    ],
)
def test_create_logged_models_with_params(
    monkeypatch, params_count, expected_call_count, create_batch_size, log_batch_size
):
    """create_logged_model embeds up to `create_batch_size` params in the
    CreateLoggedModel call; any remainder goes out in LogLoggedModelParamsRequest
    batches of `log_batch_size`, followed by a GetLoggedModel refresh."""
    # Set environment variables using monkeypatch
    monkeypatch.setenv(_MLFLOW_CREATE_LOGGED_MODEL_PARAMS_BATCH_SIZE.name, str(create_batch_size))
    monkeypatch.setenv(_MLFLOW_LOG_LOGGED_MODEL_PARAMS_BATCH_SIZE.name, str(log_batch_size))
    store = RestStore(lambda: None)
    with (
        mock.patch("mlflow.entities.logged_model.LoggedModel.from_proto") as mock_from_proto,
        mock.patch.object(store, "_call_endpoint") as mock_call_endpoint,
    ):
        # Setup mocks
        mock_model = mock.MagicMock()
        model_id = "model_123"
        mock_model.model_id = model_id
        mock_from_proto.return_value = mock_model
        mock_response = mock.MagicMock()
        mock_response.model = mock.MagicMock()
        mock_call_endpoint.return_value = mock_response
        # Create params
        params = (
            [LoggedModelParameter(key=f"key_{i}", value=f"value_{i}") for i in range(params_count)]
            if params_count
            else None
        )
        # Call the method
        store.create_logged_model("experiment_id", params=params)
        # Verify calls
        endpoint = get_logged_model_endpoint(model_id)
        # CreateLoggedModel should always be called
        initial_params = [p.to_proto() for p in params[:create_batch_size]] if params else None
        mock_call_endpoint.assert_any_call(
            CreateLoggedModel,
            message_to_json(
                CreateLoggedModel(
                    experiment_id="experiment_id",
                    params=initial_params,
                )
            ),
        )
        # If params > create_batch_size, additional calls should be made
        if params_count and params_count > create_batch_size:
            # LogLoggedModelParamsRequest should be called for remaining params
            remaining_params = params[create_batch_size:]
            for i in range(0, len(remaining_params), log_batch_size):
                batch = remaining_params[i : i + log_batch_size]
                mock_call_endpoint.assert_any_call(
                    LogLoggedModelParamsRequest,
                    json_body=message_to_json(
                        LogLoggedModelParamsRequest(
                            model_id=model_id,
                            params=[p.to_proto() for p in batch],
                        )
                    ),
                    endpoint=f"{endpoint}/params",
                )
            # GetLoggedModel should be called to get the updated model
            mock_call_endpoint.assert_any_call(GetLoggedModel, endpoint=endpoint)
        # Verify total number of calls
        assert mock_call_endpoint.call_count == expected_call_count
def test_create_evaluation_dataset():
    """create_dataset serializes name/experiment_ids/tags into a CreateDataset
    request and POSTs it to the 3.0 datasets endpoint."""
    host_creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: host_creds)
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        stub = CreateDataset.Response()
        stub.dataset.dataset_id = "d-1234567890abcdef1234567890abcdef"
        stub.dataset.name = "test_dataset"
        stub.dataset.created_time = 1234567890
        stub.dataset.last_update_time = 1234567890
        stub.dataset.digest = "abc123"
        stub.dataset.tags = json.dumps({"env": "test"})
        mock_call.side_effect = [stub]

        store.create_dataset(
            name="test_dataset",
            tags={"env": "test"},
            experiment_ids=["0", "1"],
        )

        # Tags travel as a JSON-encoded string inside the proto.
        expected = CreateDataset(
            name="test_dataset",
            experiment_ids=["0", "1"],
            tags=json.dumps({"env": "test"}),
        )
        mock_call.assert_called_once_with(
            CreateDataset,
            message_to_json(expected),
            endpoint="/api/3.0/mlflow/datasets/create",
        )
def test_create_dataset_without_experiment_ids():
    """Omitting experiment_ids leaves the field out of the CreateDataset
    request entirely."""
    host_creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: host_creds)
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        stub = CreateDataset.Response()
        stub.dataset.dataset_id = "d-abcdef1234567890abcdef1234567890"
        stub.dataset.name = "test_dataset_no_exp"
        stub.dataset.created_time = 1234567890
        stub.dataset.last_update_time = 1234567890
        stub.dataset.digest = "xyz789"
        mock_call.side_effect = [stub]

        store.create_dataset(
            name="test_dataset_no_exp",
            tags={"env": "prod"},
        )

        expected = CreateDataset(
            name="test_dataset_no_exp",
            tags=json.dumps({"env": "prod"}),
        )
        mock_call.assert_called_once_with(
            CreateDataset,
            message_to_json(expected),
            endpoint="/api/3.0/mlflow/datasets/create",
        )
def test_create_dataset_with_empty_experiment_ids():
    """create_dataset passes an explicit empty experiment_ids list through to the request."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        create_response = CreateDataset.Response()
        create_response.dataset.dataset_id = "d-fedcba0987654321fedcba0987654321"
        create_response.dataset.name = "test_dataset_empty"
        create_response.dataset.created_time = 1234567890
        create_response.dataset.last_update_time = 1234567890
        create_response.dataset.digest = "empty123"
        mock_call.side_effect = [create_response]
        store.create_dataset(
            name="test_dataset_empty",
            experiment_ids=[],
            tags={"env": "staging"},
        )
        assert mock_call.call_count == 1
        create_req = CreateDataset(
            name="test_dataset_empty",
            experiment_ids=[],
            tags=json.dumps({"env": "staging"}),
        )
        mock_call.assert_called_once_with(
            CreateDataset,
            message_to_json(create_req),
            endpoint="/api/3.0/mlflow/datasets/create",
        )
def test_get_evaluation_dataset():
    """get_dataset calls the dataset resource endpoint and returns the parsed entity."""
    host_creds = MlflowHostCreds("https://test-server")
    rest_store = RestStore(lambda: host_creds)
    ds_id = "d-1234567890abcdef1234567890abcdef"
    with mock.patch.object(rest_store, "_call_endpoint") as endpoint_mock:
        proto_response = GetDataset.Response()
        ds = proto_response.dataset
        ds.dataset_id = ds_id
        ds.name = "test_dataset"
        ds.digest = "abc123"
        ds.created_time = 1234567890
        ds.last_update_time = 1234567890
        endpoint_mock.return_value = proto_response
        fetched = rest_store.get_dataset(ds_id)
        assert fetched.dataset_id == ds_id
        assert fetched.name == "test_dataset"
        # GET-style request: no body, dataset id embedded in the path.
        endpoint_mock.assert_called_once_with(
            GetDataset,
            None,
            endpoint=f"/api/3.0/mlflow/datasets/{ds_id}",
        )
def test_delete_evaluation_dataset():
    """delete_dataset issues exactly one body-less call against the dataset endpoint."""
    host_creds = MlflowHostCreds("https://test-server")
    rest_store = RestStore(lambda: host_creds)
    ds_id = "d-1234567890abcdef1234567890abcdef"
    with mock.patch.object(rest_store, "_call_endpoint") as endpoint_mock:
        endpoint_mock.return_value = DeleteDataset.Response()
        rest_store.delete_dataset(ds_id)
        endpoint_mock.assert_called_once_with(
            DeleteDataset,
            None,
            endpoint=f"/api/3.0/mlflow/datasets/{ds_id}",
        )
def test_search_evaluation_datasets():
    """search_datasets POSTs all filter/pagination parameters via SearchEvaluationDatasets."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    with mock_http_request() as mock_http:
        store.search_datasets(
            experiment_ids=["0", "1"],
            filter_string='name = "dataset1"',
            max_results=10,
            order_by=["name DESC"],
            page_token="token123",
        )
        # Verify the outgoing HTTP request body mirrors the given arguments exactly.
        _verify_requests(
            mock_http,
            creds,
            "datasets/search",
            "POST",
            message_to_json(
                SearchEvaluationDatasets(
                    experiment_ids=["0", "1"],
                    filter_string='name = "dataset1"',
                    max_results=10,
                    order_by=["name DESC"],
                    page_token="token123",
                )
            ),
            use_v3=True,
        )
def test_set_evaluation_dataset_tags():
    """set_dataset_tags JSON-encodes the tag dict (including None values) into SetDatasetTags."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    dataset_id = "d-1234567890abcdef1234567890abcdef"
    # A None value is passed through; presumably it signals tag deletion server-side —
    # TODO(review): confirm against the SetDatasetTags API contract.
    tags = {"env": "production", "version": "2.0", "deprecated": None}
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        mock_call.return_value = mock.Mock()
        store.set_dataset_tags(
            dataset_id=dataset_id,
            tags=tags,
        )
        req = SetDatasetTags(
            tags=json.dumps(tags),
        )
        expected_json = message_to_json(req)
        mock_call.assert_called_once_with(
            SetDatasetTags,
            expected_json,
            endpoint=f"/api/3.0/mlflow/datasets/{dataset_id}/tags",
        )
def test_delete_dataset_tag():
    """delete_dataset_tag addresses the tag by key in the URL path with no request body."""
    host_creds = MlflowHostCreds("https://test-server")
    rest_store = RestStore(lambda: host_creds)
    ds_id = "d-1234567890abcdef1234567890abcdef"
    tag_key = "deprecated_tag"
    with mock.patch.object(rest_store, "_call_endpoint") as endpoint_mock:
        endpoint_mock.return_value = mock.Mock()
        rest_store.delete_dataset_tag(
            dataset_id=ds_id,
            key=tag_key,
        )
        endpoint_mock.assert_called_once_with(
            DeleteDatasetTag,
            None,
            endpoint=f"/api/3.0/mlflow/datasets/{ds_id}/tags/{tag_key}",
        )
def test_dataset_apis_blocked_in_databricks():
    """Dataset APIs raise when the tracking URI is Databricks, and succeed otherwise.

    Fix: the blocked call previously passed ``experiment_id=["0"]``, which does not
    match ``create_dataset``'s signature (``experiment_ids`` — see the other tests in
    this module). The typo was masked only because the guard decorator raises before
    the arguments reach the real method.
    """
    # Mock the tracking URI to return a Databricks URI
    with mock.patch("mlflow.tracking.get_tracking_uri", return_value="databricks://profile"):
        creds = MlflowHostCreds("https://workspace.cloud.databricks.com")
        store = RestStore(lambda: creds)
        with pytest.raises(
            MlflowException,
            match="Evaluation dataset APIs is not supported in Databricks environments",
        ):
            store.create_dataset(name="test", experiment_ids=["0"])
    # Test that APIs work when not using Databricks tracking URI
    with mock.patch("mlflow.tracking.get_tracking_uri", return_value="http://localhost:5000"):
        non_databricks_creds = MlflowHostCreds("http://localhost:5000")
        non_databricks_store = RestStore(lambda: non_databricks_creds)
        mock_response = mock.MagicMock()
        # Tags come back as a JSON string; "{}" keeps entity parsing happy.
        mock_response.dataset.tags = "{}"
        non_databricks_store._call_endpoint = mock.MagicMock(return_value=mock_response)
        # This should not raise an error
        result = non_databricks_store.get_dataset("d-123")
        assert result is not None
def test_upsert_evaluation_dataset_records():
    """upsert_dataset_records JSON-encodes the record list and returns insert/update counts."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    dataset_id = "d-1234567890abcdef1234567890abcdef"
    records = [
        {
            "inputs": {"question": "What is MLflow?"},
            "expectations": {"accuracy": 0.95},
            "source": {
                "source_type": "HUMAN",
                "source_data": {"user": "user123"},
            },
        },
        {
            "inputs": {"question": "How to use MLflow?"},
            "expectations": {"accuracy": 0.9},
            "source": {
                "source_type": "TRACE",
                "source_data": {"trace_id": "trace123"},
            },
        },
    ]
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        response = UpsertDatasetRecords.Response()
        response.inserted_count = 2
        response.updated_count = 0
        mock_call.return_value = response
        result = store.upsert_dataset_records(
            dataset_id=dataset_id,
            records=records,
        )
        # Proto counts are surfaced as a plain dict to the caller.
        assert result == {"inserted": 2, "updated": 0}
        req = UpsertDatasetRecords(
            records=json.dumps(records),
        )
        expected_json = message_to_json(req)
        mock_call.assert_called_once_with(
            UpsertDatasetRecords,
            expected_json,
            endpoint=f"/api/3.0/mlflow/datasets/{dataset_id}/records",
        )
def test_get_evaluation_dataset_experiment_ids():
    """get_dataset_experiment_ids returns the repeated proto field as a Python list."""
    host_creds = MlflowHostCreds("https://test-server")
    rest_store = RestStore(lambda: host_creds)
    ds_id = "d-1234567890abcdef1234567890abcdef"
    with mock.patch.object(rest_store, "_call_endpoint") as endpoint_mock:
        proto_response = GetDatasetExperimentIds.Response()
        for exp_id in ("exp1", "exp2", "exp3"):
            proto_response.experiment_ids.append(exp_id)
        endpoint_mock.return_value = proto_response
        returned_ids = rest_store.get_dataset_experiment_ids(ds_id)
        assert returned_ids == ["exp1", "exp2", "exp3"]
        endpoint_mock.assert_called_once_with(
            GetDatasetExperimentIds,
            None,
            endpoint=f"/api/3.0/mlflow/datasets/{ds_id}/experiment-ids",
        )
def test_evaluation_dataset_error_handling():
    """A 404 error payload from the server is surfaced to callers as an MlflowException."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    with mock.patch("mlflow.utils.rest_utils.http_request") as mock_http:
        error_response = {
            "error_code": "RESOURCE_DOES_NOT_EXIST",
            "message": "Evaluation dataset not found",
        }
        response = mock.MagicMock()
        response.status_code = 404
        response.text = json.dumps(error_response)
        mock_http.return_value = response
        # The server-side message must propagate into the raised exception.
        with pytest.raises(MlflowException, match="Evaluation dataset not found"):
            store.get_dataset("d-nonexistent")
def test_evaluation_dataset_comprehensive_workflow():
    """End-to-end dataset workflow: create, retag twice, get, and upsert records twice.

    The seven mocked endpoint responses in ``side_effect`` must stay in the exact
    order of the store calls below — reordering either list breaks the test.
    """
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    dataset_id = "d-1234567890abcdef1234567890abcdef"
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        create_response = CreateDataset.Response()
        create_response.dataset.dataset_id = dataset_id
        create_response.dataset.name = "test_dataset"
        create_response.dataset.created_time = 1234567890
        create_response.dataset.last_update_time = 1234567890
        create_response.dataset.digest = "abc123"
        create_response.dataset.tags = json.dumps({"env": "test", "version": "1.0"})
        get_response1 = GetDataset.Response()
        get_response1.dataset.CopyFrom(create_response.dataset)
        get_response1.dataset.tags = json.dumps({"env": "staging", "version": "1.1", "team": "ml"})
        upsert_response1 = UpsertDatasetRecords.Response()
        upsert_response1.inserted_count = 2
        upsert_response1.updated_count = 0
        get_response2 = GetDataset.Response()
        get_response2.dataset.CopyFrom(get_response1.dataset)
        get_response2.dataset.tags = json.dumps({"env": "production", "version": "2.0"})
        upsert_response2 = UpsertDatasetRecords.Response()
        upsert_response2.inserted_count = 1
        upsert_response2.updated_count = 2
        mock_call.side_effect = [
            create_response,  # Create with tags
            None,  # First tag update
            get_response1,  # Get after first tag update
            upsert_response1,  # First record upsert
            None,  # Second tag update (remove team tag)
            get_response2,  # Get after second tag update
            upsert_response2,  # Second record upsert
        ]
        dataset = store.create_dataset(
            name="test_dataset",
            tags={"env": "test", "version": "1.0"},
            experiment_ids=["exp1"],
        )
        assert dataset.tags == {"env": "test", "version": "1.0"}
        store.set_dataset_tags(
            dataset_id=dataset_id,
            tags={"env": "staging", "version": "1.1", "team": "ml"},
        )
        updated_dataset = store.get_dataset(dataset_id)
        assert updated_dataset.tags == {"env": "staging", "version": "1.1", "team": "ml"}
        records1 = [
            {"inputs": {"q": "What is MLflow?"}, "expectations": {"score": 0.9}},
            {"inputs": {"q": "How to track?"}, "expectations": {"score": 0.8}},
        ]
        result1 = store.upsert_dataset_records(dataset_id, records1)
        assert result1 == {"inserted": 2, "updated": 0}
        # A None tag value requests removal of the "team" tag.
        store.set_dataset_tags(
            dataset_id=dataset_id,
            tags={"env": "production", "version": "2.0", "team": None},
        )
        final_dataset = store.get_dataset(dataset_id)
        assert final_dataset.tags == {"env": "production", "version": "2.0"}
        records2 = [
            {"inputs": {"q": "What is tracking?"}, "expectations": {"score": 0.95}},  # New
            {"inputs": {"q": "What is MLflow?"}, "expectations": {"score": 0.95}},  # Update
            {"inputs": {"q": "How to track?"}, "expectations": {"score": 0.85}},  # Update
        ]
        result2 = store.upsert_dataset_records(dataset_id, records2)
        assert result2 == {"inserted": 1, "updated": 2}
        assert mock_call.call_count == 7
def test_evaluation_dataset_merge_records():
    """EvaluationDataset.merge_records resolves the store and upserts through it fluently."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    dataset_id = "d-1234567890abcdef1234567890abcdef"
    eval_dataset = EvaluationDataset(
        dataset_id=dataset_id,
        name="test_dataset",
        digest="abc123",
        created_time=1234567890,
        last_update_time=1234567890,
    )
    # Patch the store resolver so the entity routes its call through our RestStore.
    with mock.patch("mlflow.tracking._tracking_service.utils._get_store") as mock_get_store:
        mock_get_store.return_value = store
        with mock.patch.object(store, "get_dataset") as mock_get:
            mock_get.return_value = eval_dataset
            with mock.patch.object(store, "_call_endpoint") as mock_call:
                upsert_response = UpsertDatasetRecords.Response()
                upsert_response.inserted_count = 2
                upsert_response.updated_count = 0
                mock_call.return_value = upsert_response
                records = [
                    {
                        "inputs": {"question": "What is MLflow?", "temperature": 0.7},
                        "expectations": {"accuracy": 0.95},
                    },
                    {
                        "inputs": {"question": "How to track?", "model": "gpt-4"},
                        "expectations": {"clarity": 0.85},
                    },
                ]
                result = eval_dataset.merge_records(records)
                # Fluent API: merge_records returns the same dataset instance.
                assert result is eval_dataset
                assert mock_get.call_count == 1
                assert mock_call.call_count == 1
                call_args = mock_call.call_args_list[0]
                assert call_args[0][0] == UpsertDatasetRecords
                # Check the endpoint path contains the dataset_id
                endpoint = call_args[1].get("endpoint")
                assert endpoint == f"/api/3.0/mlflow/datasets/{dataset_id}/records"
                # Check the request body contains the records
                upsert_req_json = call_args[0][1]
                upsert_req_dict = json.loads(upsert_req_json)
                sent_records = json.loads(upsert_req_dict["records"])
                assert len(sent_records) == 2
def test_evaluation_dataset_get_records():
    """_load_dataset_records parses the JSON records payload and maps "" token to None."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    dataset_id = "d-1234567890abcdef1234567890abcdef"
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        response = GetDatasetRecords.Response()
        records = [
            {
                "dataset_id": dataset_id,
                "dataset_record_id": "r-001",
                "inputs": {"question": "What is MLflow?"},
                "expectations": {"accuracy": 0.95},
                "tags": {"source": "test"},
                "source_type": "HUMAN",
                "source_id": "user123",
                "created_time": 1234567890,
                "last_update_time": 1234567890,
            },
            {
                "dataset_id": dataset_id,
                "dataset_record_id": "r-002",
                "inputs": {"question": "How to track?"},
                "expectations": {"clarity": 0.85},
                "tags": {},
                "source_type": "TRACE",
                "source_id": "trace456",
                "created_time": 1234567891,
                "last_update_time": 1234567891,
            },
        ]
        # Records travel over the wire as one JSON string, not repeated proto messages.
        response.records = json.dumps(records)
        response.next_page_token = ""
        mock_call.return_value = response
        records, next_page_token = store._load_dataset_records(dataset_id)
        assert len(records) == 2
        assert records[0].dataset_record_id == "r-001"
        assert records[0].inputs == {"question": "What is MLflow?"}
        assert records[1].dataset_record_id == "r-002"
        # An empty next_page_token from the server is normalized to None.
        assert next_page_token is None
        req = GetDatasetRecords(
            max_results=1000,
        )
        expected_json = message_to_json(req)
        mock_call.assert_called_once_with(
            GetDatasetRecords,
            expected_json,
            endpoint=f"/api/3.0/mlflow/datasets/{dataset_id}/records",
        )
def test_evaluation_dataset_lazy_loading_records():
    """Accessing EvaluationDataset.records lazily loads them via the resolved store."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    dataset_id = "d-1234567890abcdef1234567890abcdef"
    eval_dataset = EvaluationDataset(
        dataset_id=dataset_id,
        name="test_dataset",
        digest="abc123",
        created_time=1234567890,
        last_update_time=1234567890,
    )
    with mock.patch("mlflow.tracking._tracking_service.utils._get_store") as mock_get_store:
        mock_get_store.return_value = store
        with mock.patch.object(store, "_load_dataset_records") as mock_load:
            from mlflow.entities.dataset_record import DatasetRecord

            mock_records = [
                DatasetRecord(
                    dataset_id=dataset_id,
                    dataset_record_id="r-001",
                    inputs={"q": "test"},
                    expectations={"score": 0.9},
                    tags={},
                    created_time=1234567890,
                    last_update_time=1234567890,
                )
            ]
            mock_load.return_value = (mock_records, None)
            # First property access should trigger exactly one unbounded load.
            records = eval_dataset.records
            assert len(records) == 1
            assert records[0].dataset_record_id == "r-001"
            mock_load.assert_called_once_with(dataset_id, max_results=None)
def test_evaluation_dataset_pagination():
    """search_datasets forwards max_results and page_token on successive page requests."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    # First page: no page_token.
    with mock_http_request() as mock_http:
        store.search_datasets(max_results=10)
        _verify_requests(
            mock_http,
            creds,
            "datasets/search",
            "POST",
            message_to_json(
                SearchEvaluationDatasets(
                    experiment_ids=[],
                    filter_string=None,
                    max_results=10,
                    order_by=[],
                    page_token=None,
                )
            ),
            use_v3=True,
        )
    # Subsequent page: the caller-supplied token must appear in the request.
    with mock_http_request() as mock_http:
        store.search_datasets(max_results=10, page_token="page2")
        _verify_requests(
            mock_http,
            creds,
            "datasets/search",
            "POST",
            message_to_json(
                SearchEvaluationDatasets(
                    experiment_ids=[],
                    filter_string=None,
                    max_results=10,
                    order_by=[],
                    page_token="page2",
                )
            ),
            use_v3=True,
        )
def test_load_dataset_records_pagination():
    """_load_dataset_records forwards page tokens and maps an empty token to None.

    Fix: removed two fully-configured ``mock_record1``/``mock_record2`` MagicMocks
    that were never referenced — the records the store parses come exclusively from
    the JSON string assigned to ``mock_response.records`` below.
    """
    store = RestStore(lambda: None)
    dataset_id = "d-1234567890abcdef1234567890abcdef"
    with mock.patch.object(store, "_call_endpoint") as mock_call_endpoint:
        mock_response = mock.MagicMock()
        # Page 1: two records plus a continuation token.
        mock_response.records = json.dumps(
            [
                {
                    "dataset_id": dataset_id,
                    "dataset_record_id": "r-001",
                    "inputs": {"q": "Question 1"},
                    "expectations": {"a": "Answer 1"},
                    "tags": {},
                    "source_type": "TRACE",
                    "source_id": "trace-1",
                    "created_time": 1609459200,
                    "last_update_time": 1609459200,
                },
                {
                    "dataset_id": dataset_id,
                    "dataset_record_id": "r-002",
                    "inputs": {"q": "Question 2"},
                    "expectations": {"a": "Answer 2"},
                    "tags": {},
                    "source_type": "TRACE",
                    "source_id": "trace-2",
                    "created_time": 1609459201,
                    "last_update_time": 1609459201,
                },
            ]
        )
        mock_response.next_page_token = "token_page2"
        mock_call_endpoint.return_value = mock_response
        records, next_token = store._load_dataset_records(
            dataset_id, max_results=2, page_token=None
        )
        assert len(records) == 2
        assert records[0].dataset_record_id == "r-001"
        assert records[1].dataset_record_id == "r-002"
        assert next_token == "token_page2"
        # A None page_token must not be serialized into the request.
        mock_call_endpoint.assert_called_once_with(
            GetDatasetRecords,
            message_to_json(GetDatasetRecords(max_results=2)),
            endpoint=f"/api/3.0/mlflow/datasets/{dataset_id}/records",
        )
        mock_call_endpoint.reset_mock()
        # Page 2: one record, empty continuation token signals the final page.
        mock_response.records = json.dumps(
            [
                {
                    "dataset_id": dataset_id,
                    "dataset_record_id": "r-003",
                    "inputs": {"q": "Question 3"},
                    "expectations": {"a": "Answer 3"},
                    "tags": {},
                    "source_type": "TRACE",
                    "source_id": "trace-3",
                    "created_time": 1609459202,
                    "last_update_time": 1609459202,
                }
            ]
        )
        mock_response.next_page_token = ""
        records, next_token = store._load_dataset_records(
            dataset_id, max_results=2, page_token="token_page2"
        )
        assert len(records) == 1
        assert records[0].dataset_record_id == "r-003"
        assert next_token is None
        req_with_token = GetDatasetRecords(max_results=2)
        req_with_token.page_token = "token_page2"
        mock_call_endpoint.assert_called_once_with(
            GetDatasetRecords,
            message_to_json(req_with_token),
            endpoint=f"/api/3.0/mlflow/datasets/{dataset_id}/records",
        )
def test_evaluation_dataset_created_by_and_updated_by():
    """created_by/last_updated_by flow through create, upsert, get, and tag updates.

    Covers three scenarios: fields populated on create, last_updated_by changing
    after another user's upsert, and both fields absent (None) when the server
    omits them.
    """
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        created_response = CreateDataset.Response()
        created_response.dataset.dataset_id = "d-test123"
        created_response.dataset.name = "test_dataset"
        created_response.dataset.created_time = 1234567890000
        created_response.dataset.last_update_time = 1234567890000
        created_response.dataset.created_by = "user1"
        created_response.dataset.last_updated_by = "user1"
        created_response.dataset.digest = "abc123"
        created_response.dataset.tags = json.dumps({"mlflow.user": "user1", "environment": "test"})
        mock_call.return_value = created_response
        dataset = store.create_dataset(
            name="test_dataset",
            experiment_ids=["exp1"],
            tags={"mlflow.user": "user1", "environment": "test"},
        )
        assert dataset.created_by == "user1"
        assert dataset.last_updated_by == "user1"
        assert dataset.tags["mlflow.user"] == "user1"
        upsert_response = UpsertDatasetRecords.Response()
        upsert_response.inserted_count = 1
        upsert_response.updated_count = 0
        get_response = GetDataset.Response()
        get_response.dataset.dataset_id = "d-test123"
        get_response.dataset.name = "test_dataset"
        get_response.dataset.created_time = 1234567890000
        get_response.dataset.last_update_time = 1234567900000
        get_response.dataset.created_by = "user1"
        get_response.dataset.last_updated_by = "user2"
        get_response.dataset.digest = "def456"
        get_response.dataset.tags = json.dumps({"mlflow.user": "user1", "environment": "test"})
        # side_effect sequences the next two store calls: upsert, then get.
        mock_call.side_effect = [upsert_response, get_response]
        records = [
            {
                "inputs": {"question": "Test?"},
                "expectations": {"score": 0.9},
                "tags": {"mlflow.user": "user2"},
            }
        ]
        result = store.upsert_dataset_records("d-test123", records)
        assert result["inserted"] == 1
        assert result["updated"] == 0
        updated_dataset = store.get_dataset("d-test123")
        # created_by is immutable; last_updated_by reflects the upserting user.
        assert updated_dataset.created_by == "user1"
        assert updated_dataset.last_updated_by == "user2"
        created_response_no_user = CreateDataset.Response()
        created_response_no_user.dataset.dataset_id = "d-test456"
        created_response_no_user.dataset.name = "test_dataset_no_user"
        created_response_no_user.dataset.created_time = 1234567890000
        created_response_no_user.dataset.last_update_time = 1234567890000
        created_response_no_user.dataset.digest = "ghi789"
        created_response_no_user.dataset.tags = json.dumps({"environment": "production"})
        # Clear the exhausted side_effect so return_value takes effect again.
        mock_call.side_effect = None
        mock_call.return_value = created_response_no_user
        dataset_no_user = store.create_dataset(
            name="test_dataset_no_user",
            experiment_ids=["exp2"],
            tags={"environment": "production"},
        )
        assert dataset_no_user.created_by is None
        assert dataset_no_user.last_updated_by is None
        set_tags_response = SetDatasetTags.Response()
        set_tags_response.dataset.dataset_id = "d-test123"
        set_tags_response.dataset.name = "test_dataset"
        set_tags_response.dataset.created_time = 1234567890000
        set_tags_response.dataset.last_update_time = 1234567890000
        set_tags_response.dataset.created_by = "user1"
        set_tags_response.dataset.last_updated_by = "user1"
        set_tags_response.dataset.digest = "abc123"
        set_tags_response.dataset.tags = json.dumps(
            {"mlflow.user": "user3", "environment": "staging", "version": "2.0"}
        )
        mock_call.return_value = set_tags_response
        store.set_dataset_tags(
            "d-test123",
            {"mlflow.user": "user3", "version": "2.0", "environment": "staging"},
        )
        # Inspect the raw request to confirm the mlflow.user tag was serialized.
        call_args = mock_call.call_args_list[-1]
        api, json_body = call_args[0]
        assert api == SetDatasetTags
        request_dict = json.loads(json_body)
        tags_dict = json.loads(request_dict["tags"])
        assert "mlflow.user" in tags_dict
        assert tags_dict["mlflow.user"] == "user3"
def test_evaluation_dataset_user_tracking_search():
    """search_datasets passes created_by / last_updated_by filters and parses user fields."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        search_response = SearchEvaluationDatasets.Response()
        dataset1 = search_response.datasets.add()
        dataset1.dataset_id = "d-dataset1"
        dataset1.name = "dataset1"
        dataset1.created_time = 1234567890000
        dataset1.last_update_time = 1234567900000
        dataset1.created_by = "user1"
        dataset1.last_updated_by = "user2"
        dataset1.digest = "search1"
        dataset2 = search_response.datasets.add()
        dataset2.dataset_id = "d-dataset2"
        dataset2.name = "dataset2"
        dataset2.created_time = 1234567891000
        dataset2.last_update_time = 1234567891000
        dataset2.created_by = "user2"
        dataset2.last_updated_by = "user2"
        dataset2.digest = "search2"
        mock_call.return_value = search_response
        # Filtering happens server-side; the mock returns both datasets regardless.
        results = store.search_datasets(filter_string="created_by = 'user1'")
        assert len(results) == 2
        assert results[0].created_by == "user1"
        assert results[0].last_updated_by == "user2"
        call_args = mock_call.call_args_list[-1]
        api, json_body = call_args[0]
        assert api == SearchEvaluationDatasets
        request_json = json.loads(json_body)
        assert request_json["filter_string"] == "created_by = 'user1'"
        results = store.search_datasets(filter_string="last_updated_by = 'user2'")
        call_args = mock_call.call_args_list[-1]
        api, json_body = call_args[0]
        request_json = json.loads(json_body)
        assert request_json["filter_string"] == "last_updated_by = 'user2'"
def test_add_dataset_to_experiments():
    """add_dataset_to_experiments POSTs the ids and returns the dataset with merged ids."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        response = AddDatasetToExperiments.Response()
        response.dataset.dataset_id = "d-1234567890abcdef1234567890abcdef"
        response.dataset.name = "test_dataset"
        # Server returns the union of existing ("0", "1") and newly-added ids.
        response.dataset.experiment_ids.extend(["0", "1", "3", "4"])
        response.dataset.created_time = 1234567890
        response.dataset.last_update_time = 1234567890
        response.dataset.digest = "abc123"
        mock_call.side_effect = [response]
        result = store.add_dataset_to_experiments(
            dataset_id="d-1234567890abcdef1234567890abcdef",
            experiment_ids=["3", "4"],
        )
        assert mock_call.call_count == 1
        assert result.dataset_id == "d-1234567890abcdef1234567890abcdef"
        assert "3" in result.experiment_ids
        assert "4" in result.experiment_ids
        assert "0" in result.experiment_ids
        assert "1" in result.experiment_ids
        req = AddDatasetToExperiments(
            dataset_id="d-1234567890abcdef1234567890abcdef",
            experiment_ids=["3", "4"],
        )
        mock_call.assert_called_once_with(
            AddDatasetToExperiments,
            message_to_json(req),
            endpoint="/api/3.0/mlflow/datasets/d-1234567890abcdef1234567890abcdef/add-experiments",
        )
def test_remove_dataset_from_experiments():
    """remove_dataset_from_experiments POSTs the ids and returns the pruned dataset."""
    creds = MlflowHostCreds("https://test-server")
    store = RestStore(lambda: creds)
    with mock.patch.object(store, "_call_endpoint") as mock_call:
        response = RemoveDatasetFromExperiments.Response()
        response.dataset.dataset_id = "d-1234567890abcdef1234567890abcdef"
        response.dataset.name = "test_dataset"
        # Server returns the remaining associations after removal of "3".
        response.dataset.experiment_ids.extend(["0", "1"])
        response.dataset.created_time = 1234567890
        response.dataset.last_update_time = 1234567890
        response.dataset.digest = "abc123"
        mock_call.side_effect = [response]
        result = store.remove_dataset_from_experiments(
            dataset_id="d-1234567890abcdef1234567890abcdef",
            experiment_ids=["3"],
        )
        assert mock_call.call_count == 1
        assert result.dataset_id == "d-1234567890abcdef1234567890abcdef"
        assert "3" not in result.experiment_ids
        assert "0" in result.experiment_ids
        assert "1" in result.experiment_ids
        req = RemoveDatasetFromExperiments(
            dataset_id="d-1234567890abcdef1234567890abcdef",
            experiment_ids=["3"],
        )
        mock_call.assert_called_once_with(
            RemoveDatasetFromExperiments,
            message_to_json(req),
            endpoint="/api/3.0/mlflow/datasets/d-1234567890abcdef1234567890abcdef/remove-experiments",
        )
def test_register_scorer():
    """Test register_scorer method: request shape and ScorerVersion field mapping."""
    store = RestStore(lambda: None)
    with mock.patch.object(store, "_call_endpoint") as mock_call_endpoint:
        experiment_id = "123"
        name = "accuracy_scorer"
        serialized_scorer = "serialized_scorer_data"
        mock_response = mock.MagicMock()
        mock_response.version = 1
        mock_response.scorer_id = "test-scorer-id"
        mock_response.experiment_id = experiment_id
        mock_response.name = name
        mock_response.serialized_scorer = serialized_scorer
        mock_response.creation_time = 1234567890
        mock_call_endpoint.return_value = mock_response
        scorer_version = store.register_scorer(experiment_id, name, serialized_scorer)
        # Each response field must land on the corresponding ScorerVersion attribute.
        assert scorer_version.scorer_version == 1
        assert scorer_version.scorer_id == "test-scorer-id"
        assert scorer_version.experiment_id == experiment_id
        assert scorer_version.scorer_name == name
        assert scorer_version._serialized_scorer == serialized_scorer
        assert scorer_version.creation_time == 1234567890
        mock_call_endpoint.assert_called_once_with(
            RegisterScorer,
            message_to_json(
                RegisterScorer(
                    experiment_id=experiment_id, name=name, serialized_scorer=serialized_scorer
                )
            ),
            endpoint="/api/3.0/mlflow/scorers/register",
        )
def test_list_scorers():
    """Test list_scorers method: one ScorerVersion per scorer in the response."""
    store = RestStore(lambda: None)
    with mock.patch.object(store, "_call_endpoint") as mock_call_endpoint:
        experiment_id = "123"
        # Mock response
        mock_scorer1 = mock.MagicMock()
        mock_scorer1.experiment_id = 123
        mock_scorer1.scorer_name = "accuracy_scorer"
        mock_scorer1.scorer_version = 1
        mock_scorer1.serialized_scorer = "serialized_accuracy_scorer"
        mock_scorer2 = mock.MagicMock()
        mock_scorer2.experiment_id = 123
        mock_scorer2.scorer_name = "safety_scorer"
        mock_scorer2.scorer_version = 2
        mock_scorer2.serialized_scorer = "serialized_safety_scorer"
        mock_response = mock.MagicMock()
        mock_response.scorers = [mock_scorer1, mock_scorer2]
        mock_call_endpoint.return_value = mock_response
        # Call the method
        scorers = store.list_scorers(experiment_id)
        # Verify result preserves server ordering and per-scorer fields.
        assert len(scorers) == 2
        assert scorers[0].scorer_name == "accuracy_scorer"
        assert scorers[0].scorer_version == 1
        assert scorers[0]._serialized_scorer == "serialized_accuracy_scorer"
        assert scorers[1].scorer_name == "safety_scorer"
        assert scorers[1].scorer_version == 2
        assert scorers[1]._serialized_scorer == "serialized_safety_scorer"
        # Verify API call
        mock_call_endpoint.assert_called_once_with(
            ListScorers,
            message_to_json(ListScorers(experiment_id=experiment_id)),
            endpoint="/api/3.0/mlflow/scorers/list",
        )
def test_list_scorer_versions():
    """Test list_scorer_versions method: returns every version of the named scorer."""
    store = RestStore(lambda: None)
    with mock.patch.object(store, "_call_endpoint") as mock_call_endpoint:
        experiment_id = "123"
        name = "accuracy_scorer"
        # Mock response: two versions of the same scorer name.
        mock_scorer1 = mock.MagicMock()
        mock_scorer1.experiment_id = 123
        mock_scorer1.scorer_name = "accuracy_scorer"
        mock_scorer1.scorer_version = 1
        mock_scorer1.serialized_scorer = "serialized_accuracy_scorer_v1"
        mock_scorer2 = mock.MagicMock()
        mock_scorer2.experiment_id = 123
        mock_scorer2.scorer_name = "accuracy_scorer"
        mock_scorer2.scorer_version = 2
        mock_scorer2.serialized_scorer = "serialized_accuracy_scorer_v2"
        mock_response = mock.MagicMock()
        mock_response.scorers = [mock_scorer1, mock_scorer2]
        mock_call_endpoint.return_value = mock_response
        # Call the method
        scorers = store.list_scorer_versions(experiment_id, name)
        # Verify result
        assert len(scorers) == 2
        assert scorers[0].scorer_version == 1
        assert scorers[0]._serialized_scorer == "serialized_accuracy_scorer_v1"
        assert scorers[1].scorer_version == 2
        assert scorers[1]._serialized_scorer == "serialized_accuracy_scorer_v2"
        # Verify API call
        mock_call_endpoint.assert_called_once_with(
            ListScorerVersions,
            message_to_json(ListScorerVersions(experiment_id=experiment_id, name=name)),
            endpoint="/api/3.0/mlflow/scorers/versions",
        )
def test_get_scorer_with_version():
    """Test get_scorer method with specific version: version is included in the request."""
    store = RestStore(lambda: None)
    with mock.patch.object(store, "_call_endpoint") as mock_call_endpoint:
        experiment_id = "123"
        name = "accuracy_scorer"
        version = 2
        # Mock response
        mock_response = mock.MagicMock()
        mock_scorer = mock.MagicMock()
        mock_scorer.experiment_id = 123
        mock_scorer.scorer_name = "accuracy_scorer"
        mock_scorer.scorer_version = 2
        mock_scorer.serialized_scorer = "serialized_accuracy_scorer_v2"
        mock_scorer.creation_time = 1640995200000
        mock_response.scorer = mock_scorer
        mock_call_endpoint.return_value = mock_response
        # Call the method
        result = store.get_scorer(experiment_id, name, version=version)
        # Verify result
        assert result._serialized_scorer == "serialized_accuracy_scorer_v2"
        assert result.scorer_version == 2
        assert result.scorer_name == "accuracy_scorer"
        # Verify API call carries the explicit version.
        mock_call_endpoint.assert_called_once_with(
            GetScorer,
            message_to_json(GetScorer(experiment_id=experiment_id, name=name, version=version)),
            endpoint="/api/3.0/mlflow/scorers/get",
        )
def test_get_scorer_without_version():
    """Test get_scorer method without version (should return latest)."""
    store = RestStore(lambda: None)
    with mock.patch.object(store, "_call_endpoint") as mock_call_endpoint:
        experiment_id = "123"
        name = "accuracy_scorer"
        # Mock response: server resolves "latest" to version 3.
        mock_response = mock.MagicMock()
        mock_scorer = mock.MagicMock()
        mock_scorer.experiment_id = 123
        mock_scorer.scorer_name = "accuracy_scorer"
        mock_scorer.scorer_version = 3
        mock_scorer.serialized_scorer = "serialized_accuracy_scorer_latest"
        mock_scorer.creation_time = 1640995200000
        mock_response.scorer = mock_scorer
        mock_call_endpoint.return_value = mock_response
        # Call the method
        result = store.get_scorer(experiment_id, name)
        # Verify result
        assert result._serialized_scorer == "serialized_accuracy_scorer_latest"
        assert result.scorer_version == 3
        assert result.scorer_name == "accuracy_scorer"
        # Verify API call omits the version field entirely.
        mock_call_endpoint.assert_called_once_with(
            GetScorer,
            message_to_json(GetScorer(experiment_id=experiment_id, name=name)),
            endpoint="/api/3.0/mlflow/scorers/get",
        )
def test_delete_scorer_with_version():
    """delete_scorer with an explicit version includes that version in the request."""
    rest_store = RestStore(lambda: None)
    exp_id = "123"
    scorer_name = "accuracy_scorer"
    target_version = 2
    with mock.patch.object(rest_store, "_call_endpoint") as endpoint_mock:
        # Delete endpoints return an empty proto; a MagicMock stands in for it.
        endpoint_mock.return_value = mock.MagicMock()
        rest_store.delete_scorer(exp_id, scorer_name, version=target_version)
        expected_request = DeleteScorer(
            experiment_id=exp_id, name=scorer_name, version=target_version
        )
        endpoint_mock.assert_called_once_with(
            DeleteScorer,
            message_to_json(expected_request),
            endpoint="/api/3.0/mlflow/scorers/delete",
        )
def test_delete_scorer_without_version():
    """delete_scorer without a version omits it from the request (deletes all versions)."""
    rest_store = RestStore(lambda: None)
    exp_id = "123"
    scorer_name = "accuracy_scorer"
    with mock.patch.object(rest_store, "_call_endpoint") as endpoint_mock:
        endpoint_mock.return_value = mock.MagicMock()
        rest_store.delete_scorer(exp_id, scorer_name)
        expected_request = DeleteScorer(experiment_id=exp_id, name=scorer_name)
        endpoint_mock.assert_called_once_with(
            DeleteScorer,
            message_to_json(expected_request),
            endpoint="/api/3.0/mlflow/scorers/delete",
        )
def test_calculate_trace_filter_correlation():
    """Correlation fields are parsed from the response and the RPC payload is well-formed."""
    store = RestStore(lambda: None)
    exp_ids = ["123", "456"]
    f1 = "span.type = 'LLM'"
    f2 = "feedback.quality > 0.8"
    base = "request_time > 1000"
    with mock.patch.object(store, "_call_endpoint") as call_endpoint:
        response = mock.MagicMock()
        response.npmi = 0.456
        response.npmi_smoothed = 0.445
        response.filter1_count = 100
        response.filter2_count = 80
        response.joint_count = 50
        response.total_count = 200
        # Both optional NPMI fields are present in this response.
        response.HasField = lambda field: field in {"npmi", "npmi_smoothed"}
        call_endpoint.return_value = response
        result = store.calculate_trace_filter_correlation(
            experiment_ids=exp_ids,
            filter_string1=f1,
            filter_string2=f2,
            base_filter=base,
        )
        assert isinstance(result, TraceFilterCorrelationResult)
        assert (result.npmi, result.npmi_smoothed) == (0.456, 0.445)
        assert (result.filter1_count, result.filter2_count) == (100, 80)
        assert (result.joint_count, result.total_count) == (50, 200)
        call_endpoint.assert_called_once_with(
            CalculateTraceFilterCorrelation,
            message_to_json(
                CalculateTraceFilterCorrelation(
                    experiment_ids=exp_ids,
                    filter_string1=f1,
                    filter_string2=f2,
                    base_filter=base,
                )
            ),
            "/api/3.0/mlflow/traces/calculate-filter-correlation",
        )
def test_calculate_trace_filter_correlation_without_base_filter():
    """Missing NPMI fields map to NaN / None and base_filter is omitted from the RPC."""
    store = RestStore(lambda: None)
    exp_ids = ["123"]
    f1 = "span.type = 'LLM'"
    f2 = "feedback.quality > 0.8"
    with mock.patch.object(store, "_call_endpoint") as call_endpoint:
        response = mock.MagicMock()
        response.filter1_count = 0
        response.filter2_count = 0
        response.joint_count = 0
        response.total_count = 100
        # Neither optional NPMI field is set on this response.
        response.HasField = lambda field: False
        call_endpoint.return_value = response
        result = store.calculate_trace_filter_correlation(
            experiment_ids=exp_ids,
            filter_string1=f1,
            filter_string2=f2,
        )
        assert isinstance(result, TraceFilterCorrelationResult)
        # Absent npmi becomes NaN; absent npmi_smoothed becomes None.
        assert math.isnan(result.npmi)
        assert result.npmi_smoothed is None
        assert (result.filter1_count, result.filter2_count) == (0, 0)
        assert (result.joint_count, result.total_count) == (0, 100)
        call_endpoint.assert_called_once_with(
            CalculateTraceFilterCorrelation,
            message_to_json(
                CalculateTraceFilterCorrelation(
                    experiment_ids=exp_ids,
                    filter_string1=f1,
                    filter_string2=f2,
                )
            ),
            "/api/3.0/mlflow/traces/calculate-filter-correlation",
        )
def _create_mock_response(status_code: int = 200, text: str = "{}") -> mock.MagicMock:
"""Helper to create a mock HTTP response."""
response = mock.MagicMock()
response.status_code = status_code
response.text = text
return response
def _create_test_spans() -> list[LiveSpan]:
    """Return a single-span list suitable as input for log_spans tests."""
    span = create_mock_otel_span(
        trace_id=123,
        span_id=1,
        name="test_span",
        start_time=1000000,
        end_time=2000000,
    )
    return [LiveSpan(span, trace_id="tr-123")]
def test_log_spans_with_version_check():
    """Test that log_spans raises NotImplementedError for old server versions.

    Exercises four mocked version-check outcomes (unreachable, too old, exactly
    3.4.0, newer) plus one unmocked call to verify the version probe times out
    quickly. Each case uses a distinct host so the per-host version cache does
    not leak state between cases.
    """
    spans = _create_test_spans()
    experiment_id = "exp-123"
    # Test 1: Server version is None (failed to retrieve)
    # Use unique host to avoid cache conflicts
    creds1 = MlflowHostCreds("https://host1")
    store1 = RestStore(lambda: creds1)
    with mock.patch(
        "mlflow.store.tracking.rest_store.http_request", side_effect=Exception("Connection error")
    ):
        with pytest.raises(NotImplementedError, match="could not identify MLflow server version"):
            store1.log_spans(experiment_id, spans)
    # Test 2: Server version is less than 3.4
    creds2 = MlflowHostCreds("https://host2")
    store2 = RestStore(lambda: creds2)
    with mock.patch(
        "mlflow.store.tracking.rest_store.http_request",
        return_value=_create_mock_response(text="3.3.0"),
    ):
        with pytest.raises(
            NotImplementedError, match="MLflow server version 3.3.0 is less than 3.4"
        ):
            store2.log_spans(experiment_id, spans)
    # Test 3: Server version is exactly 3.4.0 - should succeed
    creds3 = MlflowHostCreds("https://host3")
    store3 = RestStore(lambda: creds3)
    with mock.patch(
        "mlflow.store.tracking.rest_store.http_request",
        side_effect=[
            # First call is to /version, second is to OTLP endpoint
            _create_mock_response(text="3.4.0"),  # version response
            _create_mock_response(),  # OTLP response
        ],
    ):
        result = store3.log_spans(experiment_id, spans)
        assert result == spans
    # Test 4: Server version is greater than 3.4 - should succeed
    creds4 = MlflowHostCreds("https://host4")
    store4 = RestStore(lambda: creds4)
    with mock.patch(
        "mlflow.store.tracking.rest_store.http_request",
        side_effect=[
            # First call is to /version, second is to OTLP endpoint
            _create_mock_response(text="3.5.0"),  # version response
            _create_mock_response(),  # OTLP response
        ],
    ):
        result = store4.log_spans(experiment_id, spans)
        assert result == spans
    # Test 5: Real timeout test - verify that timeout works properly without mocking
    # Using a non-existent host that will trigger timeout
    # NOTE(review): this case makes a real (failing) network attempt; presumably
    # "https://host5" never resolves — confirm this holds in CI environments.
    creds5 = MlflowHostCreds("https://host5")
    store5 = RestStore(lambda: creds5)
    start_time = time.time()
    with pytest.raises(NotImplementedError, match="could not identify MLflow server version"):
        store5.log_spans(experiment_id, spans)
    elapsed_time = time.time() - start_time
    # Should timeout within 3 seconds (plus some buffer for processing)
    assert elapsed_time < 5, f"Version check took {elapsed_time}s, should timeout within 3s"
def test_server_version_check_caching():
    """Test that server version is cached and not fetched multiple times.

    Verifies three things: the first log_spans call probes /version before
    posting to the OTLP endpoint; a second call on the same store skips the
    probe; and a different RestStore instance sharing the same host creds also
    reuses the cached version (the cache is keyed by host, not by instance).
    """
    spans = _create_test_spans()
    experiment_id = "exp-123"
    # Use the same host credentials for all stores to test caching
    creds = MlflowHostCreds("https://cached-host")
    store1 = RestStore(lambda: creds)
    store2 = RestStore(lambda: creds)  # Different store instance, same creds
    # First call - should fetch version and then call OTLP
    with mock.patch(
        "mlflow.store.tracking.rest_store.http_request",
        side_effect=[
            _create_mock_response(text="3.5.0"),  # version response
            _create_mock_response(),  # OTLP response
        ],
    ) as mock_http:
        # We call log_spans because it performs a server version check via _get_server_version
        result1 = store1.log_spans(experiment_id, spans)
        assert result1 == spans
        # Should have called /version first, then /v1/traces
        mock_http.assert_any_call(
            host_creds=creds,
            endpoint="/version",
            method="GET",
            timeout=3,
            max_retries=0,
            retry_timeout_seconds=1,
            raise_on_status=True,
        )
        mock_http.assert_any_call(
            host_creds=creds,
            endpoint="/v1/traces",
            method="POST",
            data=mock.ANY,
            extra_headers=mock.ANY,
        )
        assert mock_http.call_count == 2
    # Second call with same store - should use cached version, only call OTLP
    with mock.patch(
        "mlflow.store.tracking.rest_store.http_request", return_value=_create_mock_response()
    ) as mock_http:
        result2 = store1.log_spans(experiment_id, spans)
        assert result2 == spans
        # Should only call OTLP, not version (cached)
        mock_http.assert_called_once_with(
            host_creds=creds,
            endpoint="/v1/traces",
            method="POST",
            data=mock.ANY,
            extra_headers=mock.ANY,
        )
    # Third call with different store but same creds - should still use cached version
    with mock.patch(
        "mlflow.store.tracking.rest_store.http_request", return_value=_create_mock_response()
    ) as mock_http:
        result3 = store2.log_spans(experiment_id, spans)
        assert result3 == spans
        # Should only call OTLP, not version (cached across instances)
        mock_http.assert_called_once_with(
            host_creds=creds,
            endpoint="/v1/traces",
            method="POST",
            data=mock.ANY,
            extra_headers=mock.ANY,
        )
| CustomErrorHandlingRestStore |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_health.py | {
"start": 1067,
"end": 1356
} | class ____(graphene.ObjectType):
numFailedChecks = graphene.NonNull(graphene.Int)
numWarningChecks = graphene.NonNull(graphene.Int)
totalNumChecks = graphene.NonNull(graphene.Int)
class Meta:
name = "AssetHealthCheckDegradedMeta"
| GrapheneAssetHealthCheckDegradedMeta |
python | spyder-ide__spyder | spyder/plugins/remoteclient/api/modules/file_services.py | {
"start": 1611,
"end": 10228
} | class ____(SpyderBaseJupyterAPI, RawIOBase):
"""
API for remote file I/O.
This API is a RawIOBase subclass that allows reading and writing files
on a remote server.
The file is open upon the websocket connection and closed when the
connection is closed.
If lock is True, the file will be locked on the remote server.
And any other attempts to open the file will wait until the lock is
released.
If atomic is True, any operations on the file will be done on a temporary
copy of the file, and then the file will be replaced with the copy upon
closing.
Parameters
----------
file : str
The path to the file to open.
mode : str, optional
The mode to open the file in, by default "r".
atomic : bool, optional
Whether to open the file atomically, by default False.
lock : bool, optional
Whether to lock the file, by default False.
encoding : str, optional
The encoding to use when reading and writing the file, by default
"utf-8".
Raises
------
RemoteFileServicesError
If an error occurs when opening the file.
RemoteOSError
If an OSError occured on the remote server.
"""
base_url = SPYDER_PLUGIN_NAME + "/fs/open"
def __init__(
self,
file,
mode="r",
atomic=False,
lock=False,
encoding="utf-8",
*args,
**kwargs,
):
super().__init__(*args, **kwargs)
self.name = file
self.mode = mode
self.encoding = encoding
self.atomic = atomic
self.lock = lock
self._websocket: aiohttp.ClientWebSocketResponse = None
async def _raise_for_status(self, response):
response.raise_for_status()
async def connect(self):
await super().connect()
if self._websocket is not None and not self._websocket.closed:
return
self._websocket = await self.session.ws_connect(
self.api_url,
params={
"path": f"file://{self.name}",
"mode": self.mode,
"atomic": str(self.atomic).lower(),
"lock": str(self.lock).lower(),
"encoding": self.encoding,
},
)
try:
await self._check_connection()
except Exception as e:
self._websocket = None
raise e
async def _check_connection(self):
status = await self._websocket.receive()
if status.type == aiohttp.WSMsgType.CLOSE:
await self._websocket.close()
if status.data == 1002:
data = json.loads(status.extra)
if data["status"] in (
HTTPStatus.LOCKED,
HTTPStatus.EXPECTATION_FAILED,
):
raise RemoteOSError.from_json(
data, url=self._websocket._response.url
)
raise RemoteFileServicesError(
data.get("type", "UnknownError"),
data.get("message", "Unknown error"),
self._websocket._response.url,
data.get("tracebacks", []),
)
else:
raise RemoteFileServicesError(
"UnknownError",
"Failed to open file",
self._websocket._response.url,
[],
)
async def close(self):
await self._websocket.close()
try:
await self._websocket.receive()
except Exception:
pass
await super().close()
@property
def closed(self):
if self._websocket is None:
return super().closed
return self._websocket.closed and super().closed
def _decode_data(self, data: str | object) -> str | bytes | object:
"""Decode data from a message."""
if not isinstance(data, str):
return data
if "b" in self.mode:
return base64.b64decode(data)
return base64.b64decode(data).decode(self.encoding)
def _encode_data(self, data: bytes | str | object) -> str:
"""Encode data for a message."""
if isinstance(data, bytes):
return base64.b64encode(data).decode("ascii")
if isinstance(data, str):
return base64.b64encode(data.encode(self.encoding)).decode("ascii")
return data
async def _send_request(self, method: str, **args):
await self._websocket.send_json({"method": method, **args})
async def _get_response(self, timeout=None):
message = json.loads(
await self._websocket.receive_bytes(timeout=timeout)
)
if message["status"] > 400:
if message["status"] == HTTPStatus.EXPECTATION_FAILED:
raise RemoteOSError.from_json(
message, url=self._websocket._response.url
)
raise RemoteFileServicesError(
message.get("type", "UnknownError"),
message.get("message", "Unknown error"),
self._websocket._response.url,
message.get("tracebacks", []),
)
data = message.get("data")
if data is None:
return None
if isinstance(data, list):
return [self._decode_data(d) for d in data]
return self._decode_data(data)
@property
def closefd(self):
return True
async def __aiter__(self):
while response := await self.readline():
yield response
async def __anext__(self):
response = await self.readline()
if not response:
raise StopIteration
return response
async def write(self, s: bytes | str) -> int:
"""Write data to the file."""
await self._send_request("write", data=self._encode_data(s))
return await self._get_response()
async def flush(self):
"""Flush the file."""
await self._send_request("flush")
return await self._get_response()
async def read(self, size: int = -1) -> bytes | str:
"""Read data from the file."""
await self._send_request("read", n=size)
return await self._get_response()
async def readall(self):
"""Read all data from the file."""
return await self.read(size=-1)
async def readinto(self, b) -> int:
"""Read data into a buffer."""
raise NotImplementedError(
"readinto() is not supported by the remote file API"
)
async def seek(self, pos: int, whence: int = 0) -> int:
"""Seek to a new position in the file."""
await self._send_request("seek", offset=pos, whence=whence)
return await self._get_response()
async def tell(self) -> int:
"""Get the current file position."""
await self._send_request("tell")
return await self._get_response()
async def truncate(self, size: int | None = None) -> int:
"""Truncate the file to a new size."""
await self._send_request("truncate", size=size)
return await self._get_response()
async def fileno(self):
"""Flush the file to disk."""
await self._send_request("fileno")
return await self._get_response()
async def readline(self, size: int = -1) -> bytes | str:
"""Read a line from the file."""
await self._send_request("readline", size=size)
return await self._get_response()
async def readlines(self, hint: int = -1) -> list[bytes | str]:
"""Read lines from the file."""
await self._send_request("readlines", hint=hint)
return await self._get_response()
async def writelines(self, lines: list[bytes | str]):
"""Write lines to the file."""
await self._send_request(
"writelines", lines=list(map(self._encode_data, lines))
)
return await self._get_response()
async def isatty(self) -> bool:
"""Check if the file is a TTY."""
await self._send_request("isatty")
return await self._get_response()
async def readable(self) -> bool:
"""Check if the file is readable."""
await self._send_request("readable")
return await self._get_response()
async def writable(self) -> bool:
"""Check if the file is writable."""
await self._send_request("writable")
return await self._get_response()
@SpyderRemoteAPIManagerBase.register_api
| SpyderRemoteFileIOAPI |
python | numba__numba | numba/core/types/abstract.py | {
"start": 7873,
"end": 7948
} | class ____(Type):
"""
Base class for hashable types.
"""
| Hashable |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/strava/tests.py | {
"start": 339,
"end": 3551
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = StravaProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""{
"id": 32641234,
"username": null,
"resource_state": 2,
"firstname": "georges",
"lastname": "camembert",
"city": "London",
"state": "England",
"country": "United Kingdom",
"sex": "M",
"premium": false,
"summit": false,
"created_at": "2017-07-12T12:42:52Z",
"updated_at": "2017-10-21T11:01:23Z",
"badge_type_id": 0,
"profile_medium": "avatar/athlete/medium.png",
"profile": "avatar/athlete/large.png",
"friend": null,
"follower": null,
"email": "bill@example.com"
}""",
) # noqa
def get_expected_to_str(self):
return "bill@example.com"
def get_mocked_response_avatar_invalid_id(self):
"""Profile including realistic avatar URL
user ID set to 0 to test edge case where id would be missing"""
return MockedResponse(
HTTPStatus.OK,
"""{
"id": 0,
"username": null,
"resource_state": 2,
"firstname": "georges",
"lastname": "camembert",
"city": "London",
"state": "England",
"country": "United Kingdom",
"sex": "M",
"premium": false,
"summit": false,
"created_at": "2017-07-12T12:42:52Z",
"updated_at": "2017-10-21T11:01:23Z",
"badge_type_id": 0,
"profile_medium": "https://cloudfront.net/1/medium.jpg",
"profile": "https://cloudfront.net/1/large.jpg",
"friend": null,
"follower": null,
"email": "bill@example.com"
}""",
) # noqa
def test_valid_avatar(self):
"""test response with Avatar URL"""
self.login(self.get_mocked_response_avatar_invalid_id())
user = User.objects.get(email="bill@example.com")
soc_acc = SocialAccount.objects.filter(
user=user, provider=self.provider.id
).get()
provider_account = soc_acc.get_provider_account()
self.assertEqual(
provider_account.get_avatar_url(),
"https://cloudfront.net/1/large.jpg",
)
self.assertIsNone(provider_account.get_profile_url())
def get_login_response_json(self, with_refresh_token=True):
rt = ""
if with_refresh_token:
rt = ',"refresh_token": "testrf"'
return (
"""{
"uid":"weibo",
"access_token":"testac",
"livemode": false,
"token_type": "bearer",
"strava_publishable_key": "pk_test_someteskey",
"strava_user_id": "acct_sometestid",
"scope": "read_write"
%s }"""
% rt
)
| StravaTests |
python | kamyu104__LeetCode-Solutions | Python/number-of-matching-subsequences.py | {
"start": 124,
"end": 585
} | class ____(object):
def numMatchingSubseq(self, S, words):
"""
:type S: str
:type words: List[str]
:rtype: int
"""
waiting = collections.defaultdict(list)
for word in words:
it = iter(word)
waiting[next(it, None)].append(it)
for c in S:
for it in waiting.pop(c, ()):
waiting[next(it, None)].append(it)
return len(waiting[None])
| Solution |
python | jina-ai__jina | tests/k8s/test-executor/debug_executor.py | {
"start": 78,
"end": 3142
} | class ____(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
from jina.logging.logger import JinaLogger
self.logger = JinaLogger(self.__class__.__name__)
self._name = self.runtime_args.name
@requests(on='/debug')
def debug(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
key = 'traversed-executors'
for doc in docs:
if key not in doc.tags:
doc.tags[key] = []
traversed = list(doc.tags.get(key))
traversed.append(self._name)
doc.tags[key] = traversed
doc.tags['parallel'] = self.runtime_args.replicas
doc.tags['shards'] = self.runtime_args.shards
doc.tags['shard_id'] = self.runtime_args.shard_id
doc.tags['hostname'] = socket.gethostname()
@requests(on='/env')
def env(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
for doc in docs:
doc.tags['k1'] = os.environ.get('k1')
doc.tags['k2'] = os.environ.get('k2')
doc.tags['JINA_LOG_LEVEL'] = os.environ.get('JINA_LOG_LEVEL')
doc.tags['env'] = {'k1': os.environ.get('k1'), 'k2': os.environ.get('k2')}
doc.tags['SECRET_USERNAME'] = os.environ.get('SECRET_USERNAME')
doc.tags['SECRET_PASSWORD'] = os.environ.get('SECRET_PASSWORD')
@requests(on='/cuda')
def cuda(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
import kubernetes
from kubernetes import client
api_client = client.ApiClient()
core_client = client.CoreV1Api(api_client=api_client)
try:
# try loading kube config from disk first
kubernetes.config.load_kube_config()
except kubernetes.config.config_exception.ConfigException:
# if the config could not be read from disk, try loading in cluster config
# this works if we are running inside k8s
kubernetes.config.load_incluster_config()
pods = core_client.list_namespaced_pod('test-gpu') # List[V1Pod]
pod_spec = pods[0].spec # V1PodSpec
pod_container = pod_spec.containers[0] # V1Container
pod_resources = pod_container.resources # V1ResourceRequirements
for doc in docs:
doc.tags['resources']['limits'] = pod_resources.limits
@requests(on='/workspace')
def foo_workspace(self, docs: DocumentArray, **kwargs):
self.logger.debug(
f'Received doc array in test-executor {self._name} with length {len(docs)}.'
)
self.logger.debug(f'Workspace {self.workspace}.')
for doc in docs:
doc.tags['workspace'] = self.workspace
| TestExecutor |
python | pytorch__pytorch | test/test_cpp_extensions_jit.py | {
"start": 1148,
"end": 50091
} | class ____(common.TestCase):
"""Tests just-in-time cpp extensions.
Don't confuse this with the PyTorch JIT (aka TorchScript).
"""
def setUp(self):
super().setUp()
# cpp extensions use relative paths. Those paths are relative to
# this file, so we'll change the working directory temporarily
self.old_working_dir = os.getcwd()
os.chdir(os.path.dirname(os.path.abspath(__file__)))
def tearDown(self):
super().tearDown()
# return the working directory (see setUp)
os.chdir(self.old_working_dir)
@classmethod
def setUpClass(cls):
torch.testing._internal.common_utils.remove_cpp_extensions_build_root()
@classmethod
def tearDownClass(cls):
torch.testing._internal.common_utils.remove_cpp_extensions_build_root()
def test_jit_compile_extension(self):
module = torch.utils.cpp_extension.load(
name="jit_extension",
sources=[
"cpp_extensions/jit_extension.cpp",
"cpp_extensions/jit_extension2.cpp",
],
extra_include_paths=[
"cpp_extensions",
"path / with spaces in it",
"path with quote'",
],
extra_cflags=["-g"],
verbose=True,
)
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.tanh_add(x, y)
self.assertEqual(z, x.tanh() + y.tanh())
# Checking we can call a method defined not in the main C++ file.
z = module.exp_add(x, y)
self.assertEqual(z, x.exp() + y.exp())
# Checking we can use this JIT-compiled class.
doubler = module.Doubler(2, 2)
self.assertIsNone(doubler.get().grad)
self.assertEqual(doubler.get().sum(), 4)
self.assertEqual(doubler.forward().sum(), 8)
@unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
def test_jit_cuda_extension(self):
# NOTE: The name of the extension must equal the name of the module.
module = torch.utils.cpp_extension.load(
name="torch_test_cuda_extension",
sources=[
"cpp_extensions/cuda_extension.cpp",
"cpp_extensions/cuda_extension.cu",
],
extra_cuda_cflags=["-O2"],
verbose=True,
keep_intermediates=False,
)
x = torch.zeros(100, device="cuda", dtype=torch.float32)
y = torch.zeros(100, device="cuda", dtype=torch.float32)
z = module.sigmoid_add(x, y).cpu()
# 2 * sigmoid(0) = 2 * 0.5 = 1
self.assertEqual(z, torch.ones_like(z))
def _test_jit_xpu_extension(self, extra_sycl_cflags):
# randomizing extension name and names of extension methods
# for the case when we test building few extensions in a row
# using this function
rand = "".join(random.sample(string.ascii_letters, 5))
name = f"torch_test_xpu_extension_{rand}"
temp_dir = tempfile.mkdtemp()
try:
with open("cpp_extensions/xpu_extension.sycl") as f:
text = f.read()
for fn in ["sigmoid_add", "SigmoidAddKernel"]:
text = text.replace(fn, f"{fn}_{rand}")
sycl_file = f"{temp_dir}/xpu_extension.sycl"
with open(sycl_file, "w") as f:
f.write(text)
module = torch.utils.cpp_extension.load(
name=name,
sources=[sycl_file],
extra_sycl_cflags=extra_sycl_cflags,
verbose=True,
keep_intermediates=True,
build_directory=temp_dir,
)
x = torch.zeros(100, device="xpu", dtype=torch.float32)
y = torch.zeros(100, device="xpu", dtype=torch.float32)
method = f"sigmoid_add_{rand}"
self.assertTrue(hasattr(module, method))
z = getattr(module, method)(x, y).cpu()
# 2 * sigmoid(0) = 2 * 0.5 = 1
self.assertEqual(z, torch.ones_like(z))
finally:
shutil.rmtree(temp_dir)
@unittest.skipIf(not (TEST_XPU), "XPU not found")
def test_jit_xpu_extension(self):
# NOTE: this test can be affected by setting TORCH_XPU_ARCH_LIST
self._test_jit_xpu_extension(extra_sycl_cflags=[])
@unittest.skipIf(not (TEST_XPU), "XPU not found")
def test_jit_xpu_archlists(self):
# NOTE: in this test we explicitly test few different options
# for TORCH_XPU_ARCH_LIST. Setting TORCH_XPU_ARCH_LIST in the
# environment before the test won't affect it.
cases = [
{
# Testing JIT compilation
"archlist": "",
"extra_sycl_cflags": [],
},
{
# Testing JIT + AOT (full torch AOT arch list)
# NOTE: default cpp extension AOT arch list might be reduced
# from the full list
"archlist": ",".join(torch.xpu.get_arch_list()),
"extra_sycl_cflags": [],
},
{
# Testing AOT (full torch AOT arch list)
# NOTE: default cpp extension AOT arch list might be reduced
# from the full list
"archlist": ",".join(torch.xpu.get_arch_list()),
# below excludes spir64 target responsible for JIT
"extra_sycl_cflags": ["-fsycl-targets=spir64_gen"],
},
]
old_envvar = os.environ.get("TORCH_XPU_ARCH_LIST", None)
try:
for c in cases:
os.environ["TORCH_XPU_ARCH_LIST"] = c["archlist"]
self._test_jit_xpu_extension(extra_sycl_cflags=c["extra_sycl_cflags"])
finally:
if old_envvar is None:
os.environ.pop("TORCH_XPU_ARCH_LIST")
else:
os.environ["TORCH_XPU_ARCH_LIST"] = old_envvar
@unittest.skipIf(not TEST_MPS, "MPS not found")
def test_mps_extension(self):
module = torch.utils.cpp_extension.load(
name="torch_test_mps_extension",
sources=[
"cpp_extensions/mps_extension.mm",
],
verbose=True,
keep_intermediates=False,
)
tensor_length = 100000
x = torch.randn(tensor_length, device="cpu", dtype=torch.float32)
y = torch.randn(tensor_length, device="cpu", dtype=torch.float32)
cpu_output = module.get_cpu_add_output(x, y)
mps_output = module.get_mps_add_output(x.to("mps"), y.to("mps"))
self.assertEqual(cpu_output, mps_output.to("cpu"))
# Regression test for https://github.com/pytorch/pytorch/issues/163721
lib = torch.mps.compile_shader("void kernel noop(device float *x) {}")
lib.noop(mps_output)
module.mps_add_one_new_context(mps_output)
self.assertEqual(cpu_output + 1.0, mps_output.to("cpu"))
def _run_jit_cuda_archflags(self, flags, expected):
# Compile an extension with given `flags`
def _check_cuobjdump_output(expected_values, is_ptx=False):
elf_or_ptx = "--list-ptx" if is_ptx else "--list-elf"
lib_ext = ".pyd" if IS_WINDOWS else ".so"
# Note, .extension name may include _v1, _v2, so first find exact name
ext_filename = glob.glob(
os.path.join(temp_dir, "cudaext_archflag*" + lib_ext)
)[0]
command = ["cuobjdump", elf_or_ptx, ext_filename]
p = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
output, err = p.communicate()
output = output.decode("ascii")
err = err.decode("ascii")
if not p.returncode == 0 or not err == "":
raise AssertionError(
f"Flags: {flags}\nReturncode: {p.returncode}\nStderr: {err}\n"
f"Output: {output} "
)
actual_arches = sorted(re.findall(r"sm_\d+", output))
expected_arches = sorted(
["sm_" + xx.replace("121", "120") for xx in expected_values]
)
self.assertEqual(
actual_arches,
expected_arches,
msg=f"Flags: {flags}, Actual: {actual_arches}, Expected: {expected_arches}\n"
f"Stderr: {err}\nOutput: {output}",
)
temp_dir = tempfile.mkdtemp()
old_envvar = os.environ.get("TORCH_CUDA_ARCH_LIST", None)
try:
os.environ["TORCH_CUDA_ARCH_LIST"] = flags
params = {
"name": "cudaext_archflags",
"sources": [
"cpp_extensions/cuda_extension.cpp",
"cpp_extensions/cuda_extension.cu",
],
"extra_cuda_cflags": ["-O2"],
"verbose": True,
"build_directory": temp_dir,
}
if IS_WINDOWS:
p = mp.Process(target=torch.utils.cpp_extension.load, kwargs=params)
# Compile and load the test CUDA arch in a different Python process to avoid
# polluting the current one and causes test_jit_cuda_extension to fail on
# Windows. There is no clear way to unload a module after it has been imported
# and torch.utils.cpp_extension.load builds and loads the module in one go.
# See https://github.com/pytorch/pytorch/issues/61655 for more details
p.start()
p.join()
else:
torch.utils.cpp_extension.load(**params)
# Expected output for --list-elf:
# ELF file 1: cudaext_archflags.1.sm_61.cubin
# ELF file 2: cudaext_archflags.2.sm_52.cubin
_check_cuobjdump_output(expected[0])
if expected[1] is not None:
# Expected output for --list-ptx:
# PTX file 1: cudaext_archflags.1.sm_61.ptx
_check_cuobjdump_output(expected[1], is_ptx=True)
finally:
if IS_WINDOWS:
# rmtree returns permission error: [WinError 5] Access is denied
# on Windows, this is a word-around
subprocess.run(["rm", "-rf", temp_dir], stdout=subprocess.PIPE)
else:
shutil.rmtree(temp_dir)
if old_envvar is None:
os.environ.pop("TORCH_CUDA_ARCH_LIST")
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = old_envvar
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
@unittest.skipIf(TEST_ROCM, "disabled on rocm")
def test_jit_cuda_archflags(self):
# Test a number of combinations:
# - the default for the machine we're testing on
# - Separators, can be ';' (most common) or ' '
# - Architecture names
# - With/without '+PTX'
n = torch.cuda.device_count()
capabilities = {torch.cuda.get_device_capability(i) for i in range(n)}
# expected values is length-2 tuple: (list of ELF, list of PTX)
# note: there should not be more than one PTX value
archflags = {
"": (
[f"{capability[0]}{capability[1]}" for capability in capabilities],
None,
),
}
archflags["7.5+PTX"] = (["75"], ["75"])
major, minor = map(int, torch.version.cuda.split(".")[:2])
if major < 12 or (major == 12 and minor <= 9):
# Compute capability <= 7.0 is only supported up to CUDA 12.9
archflags["Maxwell+Tegra;6.1"] = (["53", "61"], None)
archflags["Volta"] = (["70"], ["70"])
archflags["5.0;6.0+PTX;7.0;7.5"] = (["50", "60", "70", "75"], ["60"])
if major < 12:
# CUDA 12 drops compute capability < 5.0
archflags["Pascal 3.5"] = (["35", "60", "61"], None)
for flags, expected in archflags.items():
try:
self._run_jit_cuda_archflags(flags, expected)
except RuntimeError as e:
# Using the device default (empty flags) may fail if the device is newer than the CUDA compiler
# This raises a RuntimeError with a specific message which we explicitly ignore here
if not flags and "Error building" in str(e):
pass
else:
raise
try:
torch.cuda.synchronize()
except RuntimeError:
# Ignore any error, e.g. unsupported PTX code on current device
# to avoid errors from here leaking into other tests
pass
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
def test_cuda_arch_flags_non_default_gencode(self):
user_arch_flags = ["-gencode=arch=compute_86,code=sm_86"]
result = _get_cuda_arch_flags(user_arch_flags)
self.assertEqual(
len(result),
0,
f"User arch flags should prevent default generation. "
f"Expected: [], Got: {result}",
)
@unittest.skipIf(not TEST_CUDA, "CUDA not found")
def test_cuda_arch_flags_default_gencode(self):
default_flags = _get_cuda_arch_flags()
self.assertGreater(
len(default_flags), 0, "No args should generate default flags"
)
non_arch_flags = _get_cuda_arch_flags(["-O2", "--use-fast-math"])
self.assertGreater(
len(non_arch_flags), 0, "Non-arch flags should still generate defaults"
)
empty_flags = _get_cuda_arch_flags([])
self.assertGreater(
len(empty_flags), 0, "Empty list should generate default flags"
)
@unittest.skipIf(not TEST_CUDNN, "CuDNN not found")
@unittest.skipIf(TEST_ROCM, "Not supported on ROCm")
def test_jit_cudnn_extension(self):
# implementation of CuDNN ReLU
if IS_WINDOWS:
extra_ldflags = ["cudnn.lib"]
else:
extra_ldflags = ["-lcudnn"]
module = torch.utils.cpp_extension.load(
name="torch_test_cudnn_extension",
sources=["cpp_extensions/cudnn_extension.cpp"],
extra_ldflags=extra_ldflags,
verbose=True,
with_cuda=True,
)
x = torch.randn(100, device="cuda", dtype=torch.float32)
y = torch.zeros(100, device="cuda", dtype=torch.float32)
module.cudnn_relu(x, y) # y=relu(x)
self.assertEqual(torch.nn.functional.relu(x), y)
with self.assertRaisesRegex(RuntimeError, "same size"):
y_incorrect = torch.zeros(20, device="cuda", dtype=torch.float32)
module.cudnn_relu(x, y_incorrect)
def test_inline_jit_compile_extension_with_functions_as_list(self):
cpp_source = """
torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
return x.tanh() + y.tanh();
}
"""
module = torch.utils.cpp_extension.load_inline(
name="inline_jit_extension_with_functions_list",
cpp_sources=cpp_source,
functions="tanh_add",
verbose=True,
)
self.assertEqual(module.tanh_add.__doc__.split("\n")[2], "tanh_add")
x = torch.randn(4, 4)
y = torch.randn(4, 4)
z = module.tanh_add(x, y)
self.assertEqual(z, x.tanh() + y.tanh())
    def test_inline_jit_compile_extension_with_functions_as_dict(self):
        """load_inline with `functions` given as a dict must use the dict value
        as the bound function's docstring."""
        cpp_source = """
        torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
          return x.tanh() + y.tanh();
        }
        """
        module = torch.utils.cpp_extension.load_inline(
            name="inline_jit_extension_with_functions_dict",
            cpp_sources=cpp_source,
            functions={"tanh_add": "Tanh and then sum :D"},
            verbose=True,
        )
        self.assertEqual(module.tanh_add.__doc__.split("\n")[2], "Tanh and then sum :D")
    def test_inline_jit_compile_extension_multiple_sources_and_no_functions(self):
        """Multiple cpp_sources are concatenated; when no `functions` arg is
        given, the source itself must provide the PYBIND11_MODULE block."""
        cpp_source1 = """
        torch::Tensor sin_add(torch::Tensor x, torch::Tensor y) {
          return x.sin() + y.sin();
        }
        """
        cpp_source2 = """
        #include <torch/extension.h>
        torch::Tensor sin_add(torch::Tensor x, torch::Tensor y);
        PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
          m.def("sin_add", &sin_add, "sin(x) + sin(y)");
        }
        """
        module = torch.utils.cpp_extension.load_inline(
            name="inline_jit_extension",
            cpp_sources=[cpp_source1, cpp_source2],
            verbose=True,
        )
        x = torch.randn(4, 4)
        y = torch.randn(4, 4)
        z = module.sin_add(x, y)
        self.assertEqual(z, x.sin() + y.sin())
    @unittest.skip("Temporarily disabled")
    @unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
    def test_inline_jit_compile_extension_cuda(self):
        """Compile an inline CUDA kernel (cos_add) via load_inline and check
        its result against cos(x) + cos(y) computed in Python."""
        cuda_source = """
        __global__ void cos_add_kernel(
            const float* __restrict__ x,
            const float* __restrict__ y,
            float* __restrict__ output,
            const int size) {
          const auto index = blockIdx.x * blockDim.x + threadIdx.x;
          if (index < size) {
            output[index] = __cosf(x[index]) + __cosf(y[index]);
          }
        }
        torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
          auto output = torch::zeros_like(x);
          const int threads = 1024;
          const int blocks = (output.numel() + threads - 1) / threads;
          cos_add_kernel<<<blocks, threads>>>(x.data<float>(), y.data<float>(), output.data<float>(), output.numel());
          return output;
        }
        """
        # Here, the C++ source need only declare the function signature.
        cpp_source = "torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);"
        module = torch.utils.cpp_extension.load_inline(
            name="inline_jit_extension_cuda",
            cpp_sources=cpp_source,
            cuda_sources=cuda_source,
            functions=["cos_add"],
            verbose=True,
        )
        self.assertEqual(module.cos_add.__doc__.split("\n")[2], "cos_add")
        x = torch.randn(4, 4, device="cuda", dtype=torch.float32)
        y = torch.randn(4, 4, device="cuda", dtype=torch.float32)
        z = module.cos_add(x, y)
        self.assertEqual(z, x.cos() + y.cos())
    @unittest.skip("Temporarily disabled")
    @unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
    def test_inline_jit_compile_custom_op_cuda(self):
        """Register a CUDA custom op through TORCH_LIBRARY in inline sources
        (is_python_module=False) and call it via torch.ops."""
        cuda_source = """
        __global__ void cos_add_kernel(
            const float* __restrict__ x,
            const float* __restrict__ y,
            float* __restrict__ output,
            const int size) {
          const auto index = blockIdx.x * blockDim.x + threadIdx.x;
          if (index < size) {
            output[index] = __cosf(x[index]) + __cosf(y[index]);
          }
        }
        torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
          auto output = torch::zeros_like(x);
          const int threads = 1024;
          const int blocks = (output.numel() + threads - 1) / threads;
          cos_add_kernel<<<blocks, threads>>>(x.data_ptr<float>(), y.data_ptr<float>(), output.data_ptr<float>(), output.numel());
          return output;
        }
        """
        # Here, the C++ source need only declare the function signature.
        cpp_source = """
        #include <torch/library.h>
        torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);
        TORCH_LIBRARY(inline_jit_extension_custom_op_cuda, m) {
          m.def("cos_add", cos_add);
        }
        """
        torch.utils.cpp_extension.load_inline(
            name="inline_jit_extension_custom_op_cuda",
            cpp_sources=cpp_source,
            cuda_sources=cuda_source,
            verbose=True,
            is_python_module=False,
        )
        x = torch.randn(4, 4, device="cuda", dtype=torch.float32)
        y = torch.randn(4, 4, device="cuda", dtype=torch.float32)
        # The op is reachable through the dispatcher, not a Python module.
        z = torch.ops.inline_jit_extension_custom_op_cuda.cos_add(x, y)
        self.assertEqual(z, x.cos() + y.cos())
    @unittest.skipIf(not TEST_XPU, "XPU not found")
    def test_inline_jit_compile_extension_xpu(self):
        """Compile an inline SYCL kernel (cos_add) for XPU via load_inline and
        check its result against cos(x) + cos(y) computed in Python."""
        sycl_source = """
        #include <c10/xpu/XPUStream.h>
        class CosAddKernel {
        public:
          void operator()(const sycl::nd_item<3> &item_ct1) const {
            const int index = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
                item_ct1.get_local_id(2);
            if (index < size) {
              output[index] = cosf(x[index]) + cosf(y[index]);
            }
          }
          CosAddKernel(const float* _x, const float* _y, float* _output, int _size):
            x(_x),
            y(_y),
            output(_output),
            size(_size)
          {}
        private:
          const float* x;
          const float* y;
          float* output;
          int size;
        };
        void cos_add_kernel(
            const float* x,
            const float* y,
            float* output,
            int size) {
          CosAddKernel krn(x, y, output, size);
          const int threads = 1024;
          const int blocks = (size + threads - 1) / threads;
          sycl::queue& queue = c10::xpu::getCurrentXPUStream().queue();
          queue.submit([&](sycl::handler &cgh) {
            cgh.parallel_for<CosAddKernel>(
                sycl::nd_range<3>(
                    sycl::range<3>(1, 1, blocks) * sycl::range<3>(1, 1, threads),
                    sycl::range<3>(1, 1, threads)),
                krn);
          });
        }
        torch::Tensor cos_add(torch::Tensor x, torch::Tensor y) {
          auto output = torch::zeros_like(x);
          const int threads = 1024;
          const int blocks = (output.numel() + threads - 1) / threads;
          cos_add_kernel(x.data_ptr<float>(), y.data_ptr<float>(), output.data_ptr<float>(), output.numel());
          return output;
        }
        """
        # Here, the C++ source need only declare the function signature.
        cpp_source = "torch::Tensor cos_add(torch::Tensor x, torch::Tensor y);"
        module = torch.utils.cpp_extension.load_inline(
            name="inline_jit_extension_xpu",
            cpp_sources=cpp_source,
            sycl_sources=sycl_source,
            functions=["cos_add"],
            verbose=True,
        )
        self.assertEqual(module.cos_add.__doc__.split("\n")[2], "cos_add")
        x = torch.randn(4, 4, device="xpu", dtype=torch.float32)
        y = torch.randn(4, 4, device="xpu", dtype=torch.float32)
        z = module.cos_add(x, y)
        self.assertEqual(z, x.cos() + y.cos())
def test_inline_jit_compile_extension_throws_when_functions_is_bad(self):
with self.assertRaises(ValueError):
torch.utils.cpp_extension.load_inline(
name="invalid_jit_extension", cpp_sources="", functions=5
)
    def test_lenient_flag_handling_in_jit_extensions(self):
        """Compiler flags and include paths containing stray whitespace,
        newlines, or multiple space-separated options must be tolerated by the
        JIT build machinery."""
        cpp_source = """
        torch::Tensor tanh_add(torch::Tensor x, torch::Tensor y) {
          return x.tanh() + y.tanh();
        }
        """
        module = torch.utils.cpp_extension.load_inline(
            name="lenient_flag_handling_extension",
            cpp_sources=cpp_source,
            functions="tanh_add",
            extra_cflags=["-g\n\n", "-O0 -Wall"],
            extra_include_paths=[" cpp_extensions\n"],
            verbose=True,
        )
        x = torch.zeros(100, dtype=torch.float32)
        y = torch.zeros(100, dtype=torch.float32)
        z = module.tanh_add(x, y).cpu()
        self.assertEqual(z, x.tanh() + y.tanh())
    @unittest.skip("Temporarily disabled")
    @unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
    def test_half_support(self):
        """
        Checks for an issue with operator< ambiguity for half when certain
        THC headers are included.

        See https://github.com/pytorch/pytorch/pull/10301#issuecomment-416773333
        for the corresponding issue.
        """
        cuda_source = """
        template<typename T, typename U>
        __global__ void half_test_kernel(const T* input, U* output) {
            if (input[0] < input[1] || input[0] >= input[1]) {
                output[0] = 123;
            }
        }
        torch::Tensor half_test(torch::Tensor input) {
            auto output = torch::empty(1, input.options().dtype(torch::kFloat));
            AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "half_test", [&] {
                half_test_kernel<scalar_t><<<1, 1>>>(
                    input.data<scalar_t>(),
                    output.data<float>());
            });
            return output;
        }
        """
        module = torch.utils.cpp_extension.load_inline(
            name="half_test_extension",
            cpp_sources="torch::Tensor half_test(torch::Tensor input);",
            cuda_sources=cuda_source,
            functions=["half_test"],
            verbose=True,
        )
        x = torch.randn(3, device="cuda", dtype=torch.half)
        result = module.half_test(x)
        # The kernel's tautological comparison always writes 123; the real
        # check is that the Half comparison operators compiled at all.
        self.assertEqual(result[0], 123)
def test_reload_jit_extension(self):
def compile(code):
return torch.utils.cpp_extension.load_inline(
name="reloaded_jit_extension",
cpp_sources=code,
functions="f",
verbose=True,
)
module = compile("int f() { return 123; }")
self.assertEqual(module.f(), 123)
module = compile("int f() { return 456; }")
self.assertEqual(module.f(), 456)
module = compile("int f() { return 456; }")
self.assertEqual(module.f(), 456)
module = compile("int f() { return 789; }")
self.assertEqual(module.f(), 789)
@unittest.skipIf(
"utf" not in locale.getlocale()[1].lower(), "Only test in UTF-8 locale"
)
def test_load_with_non_platform_default_encoding(self):
# Assume the code is saved in UTF-8, but the locale is set to a different encoding.
# You might encounter decoding errors in ExtensionVersioner.
# But this case is quite hard to cover because CI environments may not in non-latin locale.
# So the following code just test source file in gbk and locale in utf-8.
cpp_source = """
#include <torch/extension.h>
// Non-latin1 character test: 字符.
// It will cause utf-8 decoding error.
int f() { return 123; }
PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
m.def("f", &f, "f");
}
"""
build_dir = tempfile.mkdtemp()
src_path = os.path.join(build_dir, "main.cpp")
with open(src_path, encoding="gbk", mode="w") as f:
f.write(cpp_source)
module = torch.utils.cpp_extension.load(
name="non_default_encoding",
sources=src_path,
verbose=True,
)
self.assertEqual(module.f(), 123)
    def test_cpp_frontend_module_has_same_output_as_python(self, dtype=torch.double):
        """Forward and backward of the C++-frontend Net must match a Python
        torch.nn.Linear once their parameters are made identical."""
        extension = torch.utils.cpp_extension.load(
            name="cpp_frontend_extension",
            sources="cpp_extensions/cpp_frontend_extension.cpp",
            verbose=True,
        )
        input = torch.randn(2, 5, dtype=dtype)
        cpp_linear = extension.Net(5, 2)
        cpp_linear.to(dtype)
        python_linear = torch.nn.Linear(5, 2).to(dtype)
        # First make sure they have the same parameters
        cpp_parameters = dict(cpp_linear.named_parameters())
        with torch.no_grad():
            python_linear.weight.copy_(cpp_parameters["fc.weight"])
            python_linear.bias.copy_(cpp_parameters["fc.bias"])
        cpp_output = cpp_linear.forward(input)
        python_output = python_linear(input)
        self.assertEqual(cpp_output, python_output)
        # Gradients must flow through both implementations identically.
        cpp_output.sum().backward()
        python_output.sum().backward()
        for p in cpp_linear.parameters():
            self.assertFalse(p.grad is None)
        self.assertEqual(cpp_parameters["fc.weight"].grad, python_linear.weight.grad)
        self.assertEqual(cpp_parameters["fc.bias"].grad, python_linear.bias.grad)
    def test_cpp_frontend_module_python_inter_op(self):
        """End-to-end interop between a C++-frontend module and torch.nn:
        embedding in Python modules/Sequential, dtype moves, parameter/buffer
        enumeration, clone(), autograd, zero_grad, train/eval, and the extra
        methods the extension registers (get_bias/set_bias/add_new_*)."""
        extension = torch.utils.cpp_extension.load(
            name="cpp_frontend_extension",
            sources="cpp_extensions/cpp_frontend_extension.cpp",
            verbose=True,
        )

        # Create a torch.nn.Module which uses the C++ module as a submodule.
        class M(torch.nn.Module):
            def __init__(self) -> None:
                super().__init__()
                self.x = torch.nn.Parameter(torch.tensor(1.0))
                self.net = extension.Net(3, 5)

            def forward(self, input):
                return self.net.forward(input) + self.x

        net = extension.Net(5, 2)
        net.double()
        net.to(torch.get_default_dtype())
        self.assertEqual(str(net), "Net")
        # Further embed the torch.nn.Module into a Sequential, and also add the
        # C++ module as an element of the Sequential.
        sequential = torch.nn.Sequential(M(), torch.nn.Tanh(), net, torch.nn.Sigmoid())
        input = torch.randn(2, 3)
        # Try calling the module!
        output = sequential.forward(input)
        # The call operator is bound to forward too.
        self.assertEqual(output, sequential(input))
        self.assertEqual(list(output.shape), [2, 2])
        # Do changes on the module hierarchy.
        old_dtype = torch.get_default_dtype()
        sequential.to(torch.float64)
        sequential.to(torch.float32)
        sequential.to(old_dtype)
        self.assertEqual(sequential[2].parameters()[0].dtype, old_dtype)
        # Make sure we can access these methods recursively.
        # `net` appears twice in the hierarchy (inside M and directly), hence
        # the *2; the +1 is M's own `x` parameter.
        self.assertEqual(
            len(list(sequential.parameters())), len(net.parameters()) * 2 + 1
        )
        self.assertEqual(
            len(list(sequential.named_parameters())),
            len(net.named_parameters()) * 2 + 1,
        )
        self.assertEqual(len(list(sequential.buffers())), len(net.buffers()) * 2)
        self.assertEqual(len(list(sequential.modules())), 8)
        # Test clone()
        net2 = net.clone()
        self.assertEqual(len(net.parameters()), len(net2.parameters()))
        self.assertEqual(len(net.buffers()), len(net2.buffers()))
        self.assertEqual(len(net.modules()), len(net2.modules()))
        # Try differentiating through the whole module.
        for parameter in net.parameters():
            self.assertIsNone(parameter.grad)
        output.sum().backward()
        for parameter in net.parameters():
            self.assertFalse(parameter.grad is None)
            self.assertGreater(parameter.grad.sum(), 0)
        # Try calling zero_grad()
        net.zero_grad()
        for p in net.parameters():
            assert p.grad is None, "zero_grad defaults to setting grads to None"
        # Test train(), eval(), training (a property)
        self.assertTrue(net.training)
        net.eval()
        self.assertFalse(net.training)
        net.train()
        self.assertTrue(net.training)
        net.eval()
        # Try calling the additional methods we registered.
        biased_input = torch.randn(4, 5)
        output_before = net.forward(biased_input)
        bias = net.get_bias().clone()
        self.assertEqual(list(bias.shape), [2])
        net.set_bias(bias + 1)
        self.assertEqual(net.get_bias(), bias + 1)
        output_after = net.forward(biased_input)
        self.assertNotEqual(output_before, output_after)
        # Try accessing parameters
        self.assertEqual(len(net.parameters()), 2)
        np = net.named_parameters()
        self.assertEqual(len(np), 2)
        self.assertIn("fc.weight", np)
        self.assertIn("fc.bias", np)
        self.assertEqual(len(net.buffers()), 1)
        nb = net.named_buffers()
        self.assertEqual(len(nb), 1)
        self.assertIn("buf", nb)
        self.assertEqual(nb[0][1], torch.eye(5))
    def test_cpp_frontend_module_has_up_to_date_attributes(self):
        """Parameters, buffers, and submodules added on the C++ side must be
        reflected in the Python-visible _parameters/_buffers/_modules dicts."""
        extension = torch.utils.cpp_extension.load(
            name="cpp_frontend_extension",
            sources="cpp_extensions/cpp_frontend_extension.cpp",
            verbose=True,
        )
        net = extension.Net(5, 2)
        self.assertEqual(len(net._parameters), 0)
        net.add_new_parameter("foo", torch.eye(5))
        self.assertEqual(len(net._parameters), 1)
        # Net starts with one buffer ("buf") and one submodule ("fc").
        self.assertEqual(len(net._buffers), 1)
        net.add_new_buffer("bar", torch.eye(5))
        self.assertEqual(len(net._buffers), 2)
        self.assertEqual(len(net._modules), 1)
        net.add_new_submodule("fc2")
        self.assertEqual(len(net._modules), 2)
    @unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
    def test_cpp_frontend_module_python_inter_op_with_cuda(self):
        """Moving a C++-frontend module between CPU and CUDA must move every
        parameter (including ones added after construction) while preserving
        their values."""
        extension = torch.utils.cpp_extension.load(
            name="cpp_frontend_extension",
            sources="cpp_extensions/cpp_frontend_extension.cpp",
            verbose=True,
        )
        net = extension.Net(5, 2)
        for p in net.parameters():
            self.assertTrue(p.device.type == "cpu")
        cpu_parameters = [p.clone() for p in net.parameters()]
        device = torch.device("cuda", 0)
        net.to(device)
        for i, p in enumerate(net.parameters()):
            self.assertTrue(p.device.type == "cuda")
            self.assertTrue(p.device.index == 0)
            # Values must survive the device transfer.
            self.assertEqual(cpu_parameters[i], p)
        net.cpu()
        net.add_new_parameter("a", torch.eye(5))
        net.add_new_parameter("b", torch.eye(5))
        net.add_new_buffer("c", torch.eye(5))
        net.add_new_buffer("d", torch.eye(5))
        net.add_new_submodule("fc2")
        net.add_new_submodule("fc3")
        for p in net.parameters():
            self.assertTrue(p.device.type == "cpu")
        net.cuda()
        for p in net.parameters():
            self.assertTrue(p.device.type == "cuda")
    def test_returns_shared_library_path_when_is_python_module_is_true(self):
        """Load inline source as a non-Python module (is_python_module=False)
        that registers `test::func` via RegisterOperators, then call it
        through torch.ops.

        NOTE(review): despite the test name, the return value of load_inline
        (the shared-library path) is never actually asserted here — only the
        op registration side effect is checked. Confirm intent upstream.
        """
        source = """
        #include <torch/script.h>
        torch::Tensor func(torch::Tensor x) { return x; }
        static torch::RegisterOperators r("test::func", &func);
        """
        torch.utils.cpp_extension.load_inline(
            name="is_python_module",
            cpp_sources=source,
            functions="func",
            verbose=True,
            is_python_module=False,
        )
        self.assertEqual(torch.ops.test.func(torch.eye(5)), torch.eye(5))
def test_set_default_type_also_changes_aten_default_type(self):
module = torch.utils.cpp_extension.load_inline(
name="test_set_default_type",
cpp_sources="torch::Tensor get() { return torch::empty({}); }",
functions="get",
verbose=True,
)
initial_default = torch.get_default_dtype()
try:
self.assertEqual(module.get().dtype, initial_default)
torch.set_default_dtype(torch.float64)
self.assertEqual(module.get().dtype, torch.float64)
torch.set_default_dtype(torch.float32)
self.assertEqual(module.get().dtype, torch.float32)
torch.set_default_dtype(torch.float16)
self.assertEqual(module.get().dtype, torch.float16)
finally:
torch.set_default_dtype(initial_default)
def test_compilation_error_formatting(self):
# Test that the missing-semicolon error message has linebreaks in it.
# This'll fail if the message has been munged into a single line.
# It's hard to write anything more specific as every compiler has it's own
# error formatting.
with self.assertRaises(RuntimeError) as e:
torch.utils.cpp_extension.load_inline(
name="test_compilation_error_formatting",
cpp_sources="int main() { return 0 }",
)
pattern = r".*(\\n|\\r).*"
self.assertNotRegex(str(e), pattern)
    def test_warning(self):
        """TORCH_WARN emitted from an extension must only be translated into
        Python warnings when with_pytorch_error_handling=True; warnings raised
        via simplefilter("error") must surface unless another error wins."""
        # Note: the module created from this source will include the py::key_error
        # symbol. But because of visibility and the fact that it lives in a
        # different compilation unit than pybind, this trips up ubsan even though
        # it is fine. "ubsan.supp" thus needs to contain "vptr:warn_mod.so".
        source = """
        // error_type:
        // 0: no error
        // 1: torch::TypeError
        // 2: python_error()
        // 3: py::error_already_set
        at::Tensor foo(at::Tensor x, int error_type) {
            std::ostringstream err_stream;
            err_stream << "Error with " << x.type();
            TORCH_WARN(err_stream.str());
            if(error_type == 1) {
                throw torch::TypeError(err_stream.str().c_str());
            }
            if(error_type == 2) {
                PyObject* obj = PyTuple_New(-1);
                TORCH_CHECK(!obj);
                // Pretend it was caught in a different thread and restored here
                auto e = python_error();
                e.persist();
                e.restore();
                throw e;
            }
            if(error_type == 3) {
                throw py::key_error(err_stream.str());
            }
            return x.cos();
        }
        """
        # Ensure double type for hard-coded c name below
        t = torch.rand(2).double()
        cpp_tensor_name = r"CPUDoubleType"
        # Without error handling, the warnings cannot be caught
        warn_mod = torch.utils.cpp_extension.load_inline(
            name="warn_mod",
            cpp_sources=[source],
            functions=["foo"],
            with_pytorch_error_handling=False,
        )
        with warnings.catch_warnings(record=True) as w:
            warn_mod.foo(t, 0)
            self.assertEqual(len(w), 0)
            with self.assertRaisesRegex(TypeError, t.type()):
                warn_mod.foo(t, 1)
            self.assertEqual(len(w), 0)
            with self.assertRaisesRegex(
                SystemError, "bad argument to internal function"
            ):
                warn_mod.foo(t, 2)
            self.assertEqual(len(w), 0)
            with self.assertRaisesRegex(KeyError, cpp_tensor_name):
                warn_mod.foo(t, 3)
            self.assertEqual(len(w), 0)
        warn_mod = torch.utils.cpp_extension.load_inline(
            name="warn_mod",
            cpp_sources=[source],
            functions=["foo"],
            with_pytorch_error_handling=True,
        )
        with warnings.catch_warnings(record=True) as w:
            # Caught with no error should be detected
            warn_mod.foo(t, 0)
            self.assertEqual(len(w), 1)
            # Caught with cpp error should also be detected
            with self.assertRaisesRegex(TypeError, t.type()):
                warn_mod.foo(t, 1)
            self.assertEqual(len(w), 2)
            # Caught with python error should also be detected
            with self.assertRaisesRegex(
                SystemError, "bad argument to internal function"
            ):
                warn_mod.foo(t, 2)
            self.assertEqual(len(w), 3)
            # Caught with pybind error should also be detected
            # Note that there is no type name translation for pybind errors
            with self.assertRaisesRegex(KeyError, cpp_tensor_name):
                warn_mod.foo(t, 3)
            self.assertEqual(len(w), 4)
        # Make sure raising warnings are handled properly
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("error")
            # No error, the warning should raise
            with self.assertRaisesRegex(UserWarning, t.type()):
                warn_mod.foo(t, 0)
            self.assertEqual(len(w), 0)
            # Another error happened, the warning is ignored
            with self.assertRaisesRegex(TypeError, t.type()):
                warn_mod.foo(t, 1)
            self.assertEqual(len(w), 0)
    def test_autograd_from_cpp(self):
        """Calling backward() from C++ while holding the GIL must raise rather
        than deadlock; releasing the GIL first must succeed."""
        source = """
        void run_back(at::Tensor x) {
            x.backward({});
        }
        void run_back_no_gil(at::Tensor x) {
            pybind11::gil_scoped_release no_gil;
            x.backward({});
        }
        """

        class MyFn(torch.autograd.Function):
            @staticmethod
            def forward(ctx, x):
                return x.clone()

            @staticmethod
            def backward(ctx, gx):
                return gx

        test_backward_deadlock = torch.utils.cpp_extension.load_inline(
            name="test_backward_deadlock",
            cpp_sources=[source],
            functions=["run_back", "run_back_no_gil"],
        )
        # This used to deadlock
        inp = torch.rand(20, requires_grad=True)
        loss = MyFn.apply(inp).sum()
        with self.assertRaisesRegex(
            RuntimeError, "The autograd engine was called while holding the GIL."
        ):
            test_backward_deadlock.run_back(loss)
        inp = torch.rand(20, requires_grad=True)
        loss = MyFn.apply(inp).sum()
        test_backward_deadlock.run_back_no_gil(loss)
    def test_custom_compound_op_autograd(self):
        """A custom op composed purely of aten ops (x + y) must be
        differentiable; verified with gradcheck in both fast and slow mode."""
        # Test that a custom compound op (i.e. a custom op that just calls other aten ops)
        # correctly returns gradients of those other ops
        source = """
        #include <torch/library.h>
        torch::Tensor my_add(torch::Tensor x, torch::Tensor y) {
          return x + y;
        }
        TORCH_LIBRARY(my, m) {
            m.def("add", &my_add);
        }
        """
        torch.utils.cpp_extension.load_inline(
            name="is_python_module",
            cpp_sources=source,
            verbose=True,
            is_python_module=False,
        )
        a = torch.randn(5, 5, requires_grad=True)
        b = torch.randn(5, 5, requires_grad=True)
        for fast_mode in (True, False):
            gradcheck(torch.ops.my.add, [a, b], eps=1e-2, fast_mode=fast_mode)
    def test_custom_functorch_error(self):
        """A C++ torch::autograd::Function must raise a clear RuntimeError
        under functorch transforms (vmap, grad) instead of silently
        misbehaving."""
        # Test that a custom C++ Function raises an error under functorch transforms
        identity_m = torch.utils.cpp_extension.load(
            name="identity",
            sources=["cpp_extensions/identity.cpp"],
        )
        t = torch.randn(3, requires_grad=True)
        msg = r"cannot use C\+\+ torch::autograd::Function with functorch"
        with self.assertRaisesRegex(RuntimeError, msg):
            torch.func.vmap(identity_m.identity)(t)
        with self.assertRaisesRegex(RuntimeError, msg):
            torch.func.grad(identity_m.identity)(t)
    def test_gen_extension_h_pch(self):
        """use_pch=True must generate the extension.h precompiled header and
        its signature file (only asserted when the compiler is GCC)."""
        # NOTE(review): this early return makes the test report as *passed*
        # on non-Linux platforms; self.skipTest would be more accurate.
        if not IS_LINUX:
            return
        source = """
        at::Tensor sin_add(at::Tensor x, at::Tensor y) {
          return x.sin() + y.sin();
        }
        """
        head_file_pch = os.path.join(_TORCH_PATH, "include", "torch", "extension.h.gch")
        head_file_signature = os.path.join(
            _TORCH_PATH, "include", "torch", "extension.h.sign"
        )
        # Start from a clean slate so the generation below is observable.
        remove_extension_h_precompiler_headers()
        pch_exist = os.path.exists(head_file_pch)
        signature_exist = os.path.exists(head_file_signature)
        self.assertEqual(pch_exist, False)
        self.assertEqual(signature_exist, False)
        torch.utils.cpp_extension.load_inline(
            name="inline_extension_with_pch",
            cpp_sources=[source],
            functions=["sin_add"],
            verbose=True,
            use_pch=True,
        )
        pch_exist = os.path.exists(head_file_pch)
        signature_exist = os.path.exists(head_file_signature)
        compiler = get_cxx_compiler()
        # PCH generation is only implemented/asserted for GCC.
        if check_compiler_is_gcc(compiler):
            self.assertEqual(pch_exist, True)
            self.assertEqual(signature_exist, True)
    def test_aoti_torch_call_dispatcher(self):
        """Calling aten ops (abs, floor) through the AOTI C shim's
        aoti_torch_call_dispatcher must match the eager torch results."""
        source = """
        #include <torch/csrc/inductor/aoti_runtime/utils.h>
        #include <torch/csrc/inductor/aoti_torch/utils.h>
        #include <torch/csrc/inductor/aoti_torch/c/shim.h>
        #include <torch/csrc/stable/stableivalue_conversions.h>

        using RAIIATH = torch::aot_inductor::RAIIAtenTensorHandle;

        at::Tensor my_abs(at::Tensor x) {
          StableIValue stack[1];
          RAIIATH raii(torch::aot_inductor::new_tensor_handle(std::move(x)));
          stack[0] = torch::stable::detail::from(raii.release());
          aoti_torch_call_dispatcher("aten::abs", "", stack);
          RAIIATH res(torch::stable::detail::to<AtenTensorHandle>(stack[0]));
          return *reinterpret_cast<at::Tensor*>(res.release());
        }

        at::Tensor my_floor(at::Tensor x) {
          StableIValue stack[1];
          RAIIATH raii(torch::aot_inductor::new_tensor_handle(std::move(x)));
          stack[0] = torch::stable::detail::from(raii.release());
          aoti_torch_call_dispatcher("aten::floor", "", stack);
          RAIIATH res(torch::stable::detail::to<AtenTensorHandle>(stack[0]));
          return *reinterpret_cast<at::Tensor*>(res.release());
        }
        """
        module = torch.utils.cpp_extension.load_inline(
            name="inline_extension_using_shim_dispatcher",
            cpp_sources=[source],
            functions=["my_abs", "my_floor"],
        )
        # Values in [-1, 0): abs and floor both produce non-trivial results.
        t = torch.rand(2, 3) - 1.0
        floor_t = module.my_floor(t)
        abs_t = module.my_abs(t)
        self.assertEqual(abs_t, torch.abs(t))
        self.assertEqual(floor_t, torch.floor(t))
    @unittest.skipIf(not (TEST_CUDA or TEST_ROCM), "CUDA not found")
    def test_cuda_pluggable_allocator_include(self):
        """
        This method creates a minimal example to replicate the apex setup.py to build nccl_allocator extension

        The only thing being tested is that a source which includes
        CUDAPluggableAllocator.h compiles at all; the exported function is a
        stub.
        """
        # the cpp source includes CUDAPluggableAllocator and has an empty exported function
        cpp_source = """
        #include <torch/csrc/cuda/CUDAPluggableAllocator.h>
        #include <torch/extension.h>
        int get_nccl_allocator() {
            return 0;
        }
        PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
            m.def("get_nccl_allocator", []() { return get_nccl_allocator(); });
        }
        """
        build_dir = tempfile.mkdtemp()
        src_path = os.path.join(build_dir, "NCCLAllocator.cpp")
        with open(src_path, mode="w") as f:
            f.write(cpp_source)
        # initially success is false
        success = False
        try:
            # try to build the module
            torch.utils.cpp_extension.load(
                name="nccl_allocator",
                sources=src_path,
                verbose=True,
                with_cuda=True,
            )
            # set success as true if built successfully
            success = True
        except Exception as e:
            print(f"Failed to load the module: {e}")
        # test if build was successful
        self.assertEqual(success, True)
    @unittest.skipIf(
        not IS_LINUX or not check_compiler_is_gcc(get_cxx_compiler()),
        "PCH is only available on Linux with GCC",
    )
    def test_pch_command_injection(self):
        """Tests that PCH compilation is not vulnerable to command injection."""
        with tempfile.TemporaryDirectory() as tmpdir:
            exploit_file = os.path.join(tmpdir, "pch_exploit")
            # If executed by shell, this would create exploit_file
            payload = f'; echo vulnerable > "{exploit_file}"'
            cpp_source = "void foo() {}"
            # Try to compile with malicious payload in extra_cflags
            # The compilation may succeed or fail, but the key test is whether
            # the shell command in the payload gets executed
            try:
                torch.utils.cpp_extension.load_inline(
                    name="test_pch_injection",
                    cpp_sources=cpp_source,
                    functions=["foo"],
                    extra_cflags=[payload],
                    use_pch=True,
                    verbose=True,
                )
            except RuntimeError:
                # Compilation failure is expected since payload is not a valid flag
                pass
            # The critical security check: verify the shell command was NOT executed
            self.assertFalse(
                os.path.exists(exploit_file),
                "Command injection vulnerability detected!",
            )
# Standard PyTorch test-file entry point: delegate to the shared test runner.
if __name__ == "__main__":
    common.run_tests()
| TestCppExtensionJIT |
python | huggingface__transformers | tests/sagemaker/scripts/pytorch/run_glue_model_parallelism.py | {
"start": 5202,
"end": 22110
} | class ____:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: str | None = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: str | None = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: str | None = field(
default=None,
metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.parallel_mode.value == 'distributed')}, 16-bits training: {training_args.fp16}"
)
# Set the verbosity to info of the Transformers logger (on main process only):
if training_args.should_log:
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the
# sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named
# label if at least two columns are provided.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.task_name is not None:
# Downloading and loading a dataset from the hub.
datasets = load_dataset("nyu-mll/glue", data_args.task_name)
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
train_extension = data_args.train_file.split(".")[-1]
test_extension = data_args.test_file.split(".")[-1]
assert test_extension == train_extension, (
"`test_file` should have the same extension (csv or json) as `train_file`."
)
data_files["test"] = data_args.test_file
else:
raise ValueError("Need either a GLUE task or a test file for `do_predict`.")
for key in data_files:
logger.info(f"load a local file for {key}: {data_files[key]}")
if data_args.train_file.endswith(".csv"):
# Loading a dataset from local csv files
datasets = load_dataset("csv", data_files=data_files)
else:
# Loading a dataset from local json files
datasets = load_dataset("json", data_files=data_files)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.
# Labels
if data_args.task_name is not None:
is_regression = data_args.task_name == "stsb"
if not is_regression:
label_list = datasets["train"].features["label"].names
num_labels = len(label_list)
else:
num_labels = 1
else:
# Trying to have good defaults here, don't hesitate to tweak to your needs.
is_regression = datasets["train"].features["label"].dtype in ["float32", "float64"]
if is_regression:
num_labels = 1
else:
# A useful fast method:
# https://huggingface.co/docs/datasets/package_reference/main_classes#datasets.Dataset.unique
label_list = datasets["train"].unique("label")
label_list.sort() # Let's sort it for determinism
num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
num_labels=num_labels,
finetuning_task=data_args.task_name,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
)
model = AutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
)
# Preprocessing the datasets
if data_args.task_name is not None:
sentence1_key, sentence2_key = task_to_keys[data_args.task_name]
else:
# Again, we try to have some nice defaults but don't hesitate to tweak to your use case.
non_label_column_names = [name for name in datasets["train"].column_names if name != "label"]
if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names:
sentence1_key, sentence2_key = "sentence1", "sentence2"
else:
if len(non_label_column_names) >= 2:
sentence1_key, sentence2_key = non_label_column_names[:2]
else:
sentence1_key, sentence2_key = non_label_column_names[0], None
# Padding strategy
if data_args.pad_to_max_length:
padding = "max_length"
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
padding = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
label_to_id = None
if (
model.config.label2id != PreTrainedConfig(num_labels=num_labels).label2id
and data_args.task_name is not None
and not is_regression
):
# Some have all caps in their config, some don't.
label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()}
if sorted(label_name_to_id.keys()) == sorted(label_list):
label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)}
else:
logger.warning(
"Your model seems to have been trained with labels, but they don't match the dataset: "
f"model labels: {sorted(label_name_to_id.keys())}, dataset labels: {sorted(label_list)}."
"\nIgnoring the model labels as a result.",
)
elif data_args.task_name is None and not is_regression:
label_to_id = {v: i for i, v in enumerate(label_list)}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_function(examples):
# Tokenize the texts
args = (
(examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key])
)
result = tokenizer(*args, padding=padding, max_length=max_seq_length, truncation=True)
# Map labels to IDs (not necessary for GLUE tasks)
if label_to_id is not None and "label" in examples:
result["label"] = [(label_to_id[l] if l != -1 else -1) for l in examples["label"]]
return result
datasets = datasets.map(preprocess_function, batched=True, load_from_cache_file=not data_args.overwrite_cache)
if training_args.do_train:
if "train" not in datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = datasets["train"]
if data_args.max_train_samples is not None:
train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in datasets and "validation_matched" not in datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = datasets["validation_matched" if data_args.task_name == "mnli" else "validation"]
if data_args.max_val_samples is not None:
eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
if training_args.do_predict or data_args.task_name is not None or data_args.test_file is not None:
if "test" not in datasets and "test_matched" not in datasets:
raise ValueError("--do_predict requires a test dataset")
test_dataset = datasets["test_matched" if data_args.task_name == "mnli" else "test"]
if data_args.max_test_samples is not None:
test_dataset = test_dataset.select(range(data_args.max_test_samples))
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(train_dataset)), 3):
logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")
# Get the metric function
if data_args.task_name is not None:
metric = load_metric("glue", data_args.task_name)
# TODO: When datasets metrics include regular accuracy, make an else here and remove special branch from
# compute_metrics
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(p: EvalPrediction):
preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
preds = np.squeeze(preds) if is_regression else np.argmax(preds, axis=1)
if data_args.task_name is not None:
result = metric.compute(predictions=preds, references=p.label_ids)
if len(result) > 1:
result["combined_score"] = np.mean(list(result.values())).item()
return result
elif is_regression:
return {"mse": ((preds - p.label_ids) ** 2).mean().item()}
else:
return {"accuracy": (preds == p.label_ids).astype(np.float32).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
data_collator = default_data_collator
elif training_args.fp16:
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
else:
data_collator = None
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
compute_metrics=compute_metrics,
processing_class=tokenizer,
data_collator=data_collator,
)
# Training
if training_args.do_train:
checkpoint = None
if os.path.isdir(model_args.model_name_or_path):
# Check the config from that potential checkpoint has the right number of labels before using it as a
# checkpoint.
if AutoConfig.from_pretrained(model_args.model_name_or_path).num_labels == num_labels:
checkpoint = model_args.model_name_or_path
train_result = trainer.train(resume_from_checkpoint=checkpoint)
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
eval_datasets = [eval_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
eval_datasets.append(datasets["validation_mismatched"])
for eval_dataset, task in zip(eval_datasets, tasks):
metrics = trainer.evaluate(eval_dataset=eval_dataset)
max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("*** Test ***")
# Loop to handle MNLI double evaluation (matched, mis-matched)
tasks = [data_args.task_name]
test_datasets = [test_dataset]
if data_args.task_name == "mnli":
tasks.append("mnli-mm")
test_datasets.append(datasets["test_mismatched"])
for test_dataset, task in zip(test_datasets, tasks):
# Removing the `label` columns because it contains -1 and Trainer won't like that.
test_dataset = test_dataset.remove_columns("label")
predictions = trainer.predict(test_dataset=test_dataset).predictions
predictions = np.squeeze(predictions) if is_regression else np.argmax(predictions, axis=1)
output_test_file = os.path.join(training_args.output_dir, f"test_results_{task}.txt")
if trainer.is_world_process_zero():
with open(output_test_file, "w") as writer:
logger.info(f"***** Test results {task} *****")
writer.write("index\tprediction\n")
for index, item in enumerate(predictions):
if is_regression:
writer.write(f"{index}\t{item:3.3f}\n")
else:
item = label_list[item]
writer.write(f"{index}\t{item}\n")
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| ModelArguments |
python | django__django | tests/managers_regress/models.py | {
"start": 2697,
"end": 3127
} | class ____(models.Model):
fk = models.ForeignKey(RelatedModel, models.CASCADE, related_name="test_fk")
m2m = models.ManyToManyField(RelatedModel, related_name="test_m2m")
gfk_ctype = models.ForeignKey(ContentType, models.SET_NULL, null=True)
gfk_id = models.IntegerField(null=True)
gfk = GenericForeignKey(ct_field="gfk_ctype", fk_field="gfk_id")
def __str__(self):
return str(self.pk)
| RelationModel |
python | MongoEngine__mongoengine | mongoengine/fields.py | {
"start": 13289,
"end": 17056
} | class ____(BaseField):
"""Disclaimer: This field is kept for historical reason but since it converts the values to float, it
is not suitable for true decimal storage. Consider using :class:`~mongoengine.fields.Decimal128Field`.
Fixed-point decimal number field. Stores the value as a float by default unless `force_string` is used.
If using floats, beware of Decimal to float conversion (potential precision loss)
"""
def __init__(
self,
min_value=None,
max_value=None,
force_string=False,
precision=2,
rounding=decimal.ROUND_HALF_UP,
**kwargs,
):
"""
:param min_value: (optional) A min value that will be applied during validation
:param max_value: (optional) A max value that will be applied during validation
:param force_string: Store the value as a string (instead of a float).
Be aware that this affects query sorting and operation like lte, gte (as string comparison is applied)
and some query operator won't work (e.g. inc, dec)
:param precision: Number of decimal places to store.
:param rounding: The rounding rule from the python decimal library:
- decimal.ROUND_CEILING (towards Infinity)
- decimal.ROUND_DOWN (towards zero)
- decimal.ROUND_FLOOR (towards -Infinity)
- decimal.ROUND_HALF_DOWN (to nearest with ties going towards zero)
- decimal.ROUND_HALF_EVEN (to nearest with ties going to nearest even integer)
- decimal.ROUND_HALF_UP (to nearest with ties going away from zero)
- decimal.ROUND_UP (away from zero)
- decimal.ROUND_05UP (away from zero if last digit after rounding towards zero would have been 0 or 5; otherwise towards zero)
Defaults to: ``decimal.ROUND_HALF_UP``
:param kwargs: Keyword arguments passed into the parent :class:`~mongoengine.BaseField`
"""
self.min_value = min_value
self.max_value = max_value
self.force_string = force_string
if precision < 0 or not isinstance(precision, int):
self.error("precision must be a positive integer")
self.precision = precision
self.rounding = rounding
super().__init__(**kwargs)
def to_python(self, value):
# Convert to string for python 2.6 before casting to Decimal
try:
value = decimal.Decimal("%s" % value)
except (TypeError, ValueError, decimal.InvalidOperation):
return value
if self.precision > 0:
return value.quantize(
decimal.Decimal(".%s" % ("0" * self.precision)), rounding=self.rounding
)
else:
return value.quantize(decimal.Decimal(), rounding=self.rounding)
def to_mongo(self, value):
if self.force_string:
return str(self.to_python(value))
return float(self.to_python(value))
def validate(self, value):
if not isinstance(value, decimal.Decimal):
if not isinstance(value, str):
value = str(value)
try:
value = decimal.Decimal(value)
except (TypeError, ValueError, decimal.InvalidOperation) as exc:
self.error("Could not convert value to decimal: %s" % exc)
if self.min_value is not None and value < self.min_value:
self.error("Decimal value is too small")
if self.max_value is not None and value > self.max_value:
self.error("Decimal value is too large")
def prepare_query_value(self, op, value):
if value is None:
return value
return super().prepare_query_value(op, self.to_mongo(value))
| DecimalField |
python | django__django | tests/view_tests/tests/test_csrf.py | {
"start": 328,
"end": 5706
} | class ____(SimpleTestCase):
def setUp(self):
super().setUp()
self.client = Client(enforce_csrf_checks=True)
@override_settings(
USE_I18N=True,
MIDDLEWARE=[
"django.middleware.locale.LocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
],
)
def test_translation(self):
"""An invalid request is rejected with a localized error message."""
response = self.client.post("/")
self.assertContains(response, "Forbidden", status_code=403)
self.assertContains(
response, "CSRF verification failed. Request aborted.", status_code=403
)
with self.settings(LANGUAGE_CODE="nl"), override("en-us"):
response = self.client.post("/")
self.assertContains(response, "Verboden", status_code=403)
self.assertContains(
response,
"CSRF-verificatie mislukt. Verzoek afgebroken.",
status_code=403,
)
@override_settings(SECURE_PROXY_SSL_HEADER=("HTTP_X_FORWARDED_PROTO", "https"))
def test_no_referer(self):
"""
Referer header is strictly checked for POST over HTTPS. Trigger the
exception by sending an incorrect referer.
"""
response = self.client.post("/", headers={"x-forwarded-proto": "https"})
self.assertContains(
response,
"You are seeing this message because this HTTPS site requires a "
"“Referer header” to be sent by your web browser, but "
"none was sent.",
status_code=403,
)
self.assertContains(
response,
"If you have configured your browser to disable “Referer” "
"headers, please re-enable them, at least for this site, or for "
"HTTPS connections, or for “same-origin” requests.",
status_code=403,
)
self.assertContains(
response,
"If you are using the <meta name="referrer" "
"content="no-referrer"> tag or including the "
"“Referrer-Policy: no-referrer” header, please remove them.",
status_code=403,
)
def test_no_cookies(self):
"""
The CSRF cookie is checked for POST. Failure to send this cookie should
provide a nice error message.
"""
response = self.client.post("/")
self.assertContains(
response,
"You are seeing this message because this site requires a CSRF "
"cookie when submitting forms. This cookie is required for "
"security reasons, to ensure that your browser is not being "
"hijacked by third parties.",
status_code=403,
)
@override_settings(TEMPLATES=[])
def test_no_django_template_engine(self):
"""
The CSRF view doesn't depend on the TEMPLATES configuration (#24388).
"""
response = self.client.post("/")
self.assertContains(response, "Forbidden", status_code=403)
@override_settings(
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"loaders": [
(
"django.template.loaders.locmem.Loader",
{
CSRF_FAILURE_TEMPLATE_NAME: (
"Test template for CSRF failure"
)
},
),
],
},
}
]
)
def test_custom_template(self):
"""A custom CSRF_FAILURE_TEMPLATE_NAME is used."""
response = self.client.post("/")
self.assertContains(response, "Test template for CSRF failure", status_code=403)
self.assertIs(response.wsgi_request, response.context.request)
def test_custom_template_does_not_exist(self):
"""An exception is raised if a nonexistent template is supplied."""
factory = RequestFactory()
request = factory.post("/")
with self.assertRaises(TemplateDoesNotExist):
csrf_failure(request, template_name="nonexistent.html")
def test_template_encoding(self):
"""
The template is loaded directly, not via a template loader, and should
be opened as utf-8 charset as is the default specified on template
engines.
"""
from django.views.csrf import Path
with mock.patch.object(Path, "open") as m:
csrf_failure(mock.MagicMock(), mock.Mock())
m.assert_called_once_with(encoding="utf-8")
@override_settings(DEBUG=True)
@mock.patch("django.views.csrf.get_docs_version", return_value="4.2")
def test_doc_links(self, mocked_get_complete_version):
response = self.client.post("/")
self.assertContains(response, "Forbidden", status_code=403)
self.assertNotContains(
response, "https://docs.djangoproject.com/en/dev/", status_code=403
)
self.assertContains(
response, "https://docs.djangoproject.com/en/4.2/", status_code=403
)
| CsrfViewTests |
python | jina-ai__jina | tests/unit/serve/runtimes/worker/test_worker_runtime.py | {
"start": 2246,
"end": 2652
} | class ____(Executor):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._count = 0
@requests
async def foo(self, docs, **kwargs):
self._count += 1
current_count = self._count
if current_count % 2 == 0:
await asyncio.sleep(0.1)
return DocumentArray([Document(text=str(current_count))])
| AsyncSlowNewDocsExecutor |
python | tensorflow__tensorflow | tensorflow/tools/compatibility/ast_edits.py | {
"start": 28047,
"end": 28390
} | class ____:
"""This class represents an analysis result and how it should be logged.
This class must provide the following fields:
* `log_level`: The log level to which this detection should be logged
* `log_message`: The message that should be logged for this detection
For an example, see `VersionedTFImport`.
"""
| AnalysisResult |
python | ray-project__ray | python/ray/exceptions.py | {
"start": 30649,
"end": 32655
} | class ____(RayError):
"""Raised when there is an error deserializing a serialized exception.
This occurs when deserializing (unpickling) a previously serialized exception
fails. In this case, we fall back to raising the string representation of
the original exception along with its stack trace that was captured at the
time of serialization.
For more details and how to handle this with custom serializers, :ref:`configuring custom exeception serializers <custom-exception-serializer>`
Args:
original_stack_trace: The string representation and stack trace of the
original exception that was captured during serialization.
"""
def __init__(self, original_stack_trace: str):
self._original_stack_trace = original_stack_trace
def __str__(self):
return (
"Failed to deserialize exception. Refer to https://docs.ray.io/en/latest/ray-core/objects/serialization.html#custom-serializers-for-exceptions for more information.\n"
"Original exception:\n"
f"{self._original_stack_trace}"
)
RAY_EXCEPTION_TYPES = [
PlasmaObjectNotAvailable,
RayError,
RayTaskError,
WorkerCrashedError,
RayActorError,
ObjectStoreFullError,
ObjectLostError,
ObjectFetchTimedOutError,
ReferenceCountingAssertionError,
ObjectReconstructionFailedError,
ObjectReconstructionFailedMaxAttemptsExceededError,
ObjectReconstructionFailedLineageEvictedError,
OwnerDiedError,
GetTimeoutError,
AsyncioActorExit,
RuntimeEnvSetupError,
TaskPlacementGroupRemoved,
ActorPlacementGroupRemoved,
PendingCallsLimitExceeded,
LocalRayletDiedError,
TaskUnschedulableError,
ActorDiedError,
ActorUnschedulableError,
ActorUnavailableError,
RayChannelError,
RayChannelTimeoutError,
OufOfBandObjectRefSerializationException,
RayCgraphCapacityExceeded,
UnserializableException,
AuthenticationError,
]
| UnserializableException |
python | pytorch__pytorch | torch/cuda/__init__.py | {
"start": 54609,
"end": 55162
} | class ____:
is_cuda = True
is_sparse = False
def type(self, *args, **kwargs):
# We could use a Protocol here to tell mypy that self has `get_device` method
# but it is only available in the typing module on Python >= 3.8
# or on typing_extensions module on Python >= 3.6
with device(self.get_device()): # type: ignore[attr-defined]
return super().type(*args, **kwargs) # type: ignore[misc]
__new__ = _lazy_new
from torch.storage import _LegacyStorage, _warn_typed_storage_removal
| _CudaBase |
python | walkccc__LeetCode | solutions/2973. Find Number of Coins to Place in Tree Nodes/2973.py | {
"start": 917,
"end": 1384
} | class ____:
def placedCoins(self, edges: list[list[int]], cost: list[int]) -> list[int]:
n = len(cost)
ans = [0] * n
tree = [[] for _ in range(n)]
for u, v in edges:
tree[u].append(v)
tree[v].append(u)
def dfs(u: int, prev: int) -> None:
res = ChildCost(cost[u])
for v in tree[u]:
if v != prev:
res.update(dfs(v, u))
ans[u] = res.maxProduct()
return res
dfs(0, -1)
return ans
| Solution |
python | kamyu104__LeetCode-Solutions | Python/minimum-total-space-wasted-with-k-resizing-operations.py | {
"start": 39,
"end": 730
} | class ____(object):
def minSpaceWastedKResizing(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
INF = float("inf")
k += 1
dp = [[INF]*(k+1) for _ in xrange(len(nums)+1)]
dp[0][0] = 0
for i in xrange(1, len(nums)+1):
total = max_num = 0
for j in reversed(xrange(1, i+1)):
total += nums[j-1]
max_num = max(max_num, nums[j-1])
for m in xrange(1, k+1):
if dp[j-1][m-1] != INF:
dp[i][m] = min(dp[i][m], dp[j-1][m-1] + (max_num*(i-j+1)-total))
return dp[-1][-1]
| Solution |
python | django-haystack__django-haystack | test_haystack/solr_tests/test_inputs.py | {
"start": 77,
"end": 3738
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.query_obj = connections["solr"].get_query()
def test_raw_init(self):
raw = inputs.Raw("hello OR there, :you")
self.assertEqual(raw.query_string, "hello OR there, :you")
self.assertEqual(raw.kwargs, {})
self.assertEqual(raw.post_process, False)
raw = inputs.Raw("hello OR there, :you", test="really")
self.assertEqual(raw.query_string, "hello OR there, :you")
self.assertEqual(raw.kwargs, {"test": "really"})
self.assertEqual(raw.post_process, False)
def test_raw_prepare(self):
raw = inputs.Raw("hello OR there, :you")
self.assertEqual(raw.prepare(self.query_obj), "hello OR there, :you")
def test_clean_init(self):
clean = inputs.Clean("hello OR there, :you")
self.assertEqual(clean.query_string, "hello OR there, :you")
self.assertEqual(clean.post_process, True)
def test_clean_prepare(self):
clean = inputs.Clean("hello OR there, :you")
self.assertEqual(clean.prepare(self.query_obj), "hello or there, \\:you")
def test_exact_init(self):
exact = inputs.Exact("hello OR there, :you")
self.assertEqual(exact.query_string, "hello OR there, :you")
self.assertEqual(exact.post_process, True)
def test_exact_prepare(self):
exact = inputs.Exact("hello OR there, :you")
self.assertEqual(exact.prepare(self.query_obj), '"hello OR there, :you"')
exact = inputs.Exact("hello OR there, :you", clean=True)
self.assertEqual(exact.prepare(self.query_obj), '"hello or there, \\:you"')
def test_not_init(self):
not_it = inputs.Not("hello OR there, :you")
self.assertEqual(not_it.query_string, "hello OR there, :you")
self.assertEqual(not_it.post_process, True)
def test_not_prepare(self):
not_it = inputs.Not("hello OR there, :you")
self.assertEqual(not_it.prepare(self.query_obj), "NOT (hello or there, \\:you)")
def test_autoquery_init(self):
autoquery = inputs.AutoQuery('panic -don\'t "froody dude"')
self.assertEqual(autoquery.query_string, 'panic -don\'t "froody dude"')
self.assertEqual(autoquery.post_process, False)
def test_autoquery_prepare(self):
autoquery = inputs.AutoQuery('panic -don\'t "froody dude"')
self.assertEqual(
autoquery.prepare(self.query_obj), 'panic NOT don\'t "froody dude"'
)
def test_altparser_init(self):
altparser = inputs.AltParser("dismax")
self.assertEqual(altparser.parser_name, "dismax")
self.assertEqual(altparser.query_string, "")
self.assertEqual(altparser.kwargs, {})
self.assertEqual(altparser.post_process, False)
altparser = inputs.AltParser("dismax", "douglas adams", qf="author", mm=1)
self.assertEqual(altparser.parser_name, "dismax")
self.assertEqual(altparser.query_string, "douglas adams")
self.assertEqual(altparser.kwargs, {"mm": 1, "qf": "author"})
self.assertEqual(altparser.post_process, False)
def test_altparser_prepare(self):
altparser = inputs.AltParser("dismax", "douglas adams", qf="author", mm=1)
self.assertEqual(
altparser.prepare(self.query_obj),
"""_query_:"{!dismax mm=1 qf=author}douglas adams\"""",
)
altparser = inputs.AltParser("dismax", "Don't panic", qf="text author", mm=1)
self.assertEqual(
altparser.prepare(self.query_obj),
"""_query_:"{!dismax mm=1 qf='text author'}Don't panic\"""",
)
| SolrInputTestCase |
python | astropy__astropy | astropy/time/tests/test_methods.py | {
"start": 22035,
"end": 33464
} | class ____:
"""Arithmetic on Time objects, using both doubles."""
kwargs = ({}, {"axis": None}, {"axis": 0}, {"axis": 1}, {"axis": 2})
functions = ("min", "max", "sort")
def setup_class(cls):
mjd = np.arange(50000, 50100, 10).reshape(2, 5, 1)
frac = np.array([0.1, 0.1 + 1.0e-15, 0.1 - 1.0e-15, 0.9 + 2.0e-16, 0.9])
frac_masked = np.ma.array(frac)
frac_masked[1] = np.ma.masked
cls.t0 = {
"not_masked": Time(mjd, frac, format="mjd", scale="utc"),
"masked": Time(mjd, frac_masked, format="mjd", scale="utc"),
}
# Define arrays with same ordinal properties
frac = np.array([1, 2, 0, 4, 3])
frac_masked = np.ma.array(frac)
frac_masked[1] = np.ma.masked
cls.t1 = {
"not_masked": Time(mjd + frac, format="mjd", scale="utc"),
"masked": Time(mjd + frac_masked, format="mjd", scale="utc"),
}
cls.jd = {"not_masked": mjd + frac, "masked": mjd + frac_masked}
cls.t2 = {
"not_masked": Time(
mjd + frac,
format="mjd",
scale="utc",
location=(np.arange(len(frac)), np.arange(len(frac))),
),
"masked": Time(
mjd + frac_masked,
format="mjd",
scale="utc",
location=(np.arange(len(frac_masked)), np.arange(len(frac_masked))),
),
}
def create_data(self, use_mask):
self.t0 = self.__class__.t0[use_mask]
self.t1 = self.__class__.t1[use_mask]
self.t2 = self.__class__.t2[use_mask]
self.jd = self.__class__.jd[use_mask]
@pytest.mark.parametrize("kw, func", list(itertools.product(kwargs, functions)))
def test_argfuncs(self, kw, func, use_mask):
"""
Test that ``np.argfunc(jd, **kw)`` is the same as ``t0.argfunc(**kw)``
where ``jd`` is a similarly shaped array with the same ordinal properties
but all integer values. Also test the same for t1 which has the same
integral values as jd.
"""
self.create_data(use_mask)
t0v = getattr(self.t0, "arg" + func)(**kw)
t1v = getattr(self.t1, "arg" + func)(**kw)
jdv = getattr(np, "arg" + func)(self.jd, **kw)
if self.t0.masked and kw == {"axis": None} and func == "sort":
t0v = np.ma.array(t0v, mask=self.t0.mask.reshape(t0v.shape)[t0v])
t1v = np.ma.array(t1v, mask=self.t1.mask.reshape(t1v.shape)[t1v])
jdv = np.ma.array(jdv, mask=self.jd.mask.reshape(jdv.shape)[jdv])
assert np.all(t0v == jdv)
assert np.all(t1v == jdv)
assert t0v.shape == jdv.shape
assert t1v.shape == jdv.shape
@pytest.mark.parametrize("kw, func", list(itertools.product(kwargs, functions)))
def test_funcs(self, kw, func, use_mask):
"""
Test that ``np.func(jd, **kw)`` is the same as ``t1.func(**kw)`` where
``jd`` is a similarly shaped array and the same integral values.
"""
self.create_data(use_mask)
t1v = getattr(self.t1, func)(**kw)
jdv = getattr(np, func)(self.jd, **kw)
assert np.all(t1v.value == jdv)
assert t1v.shape == jdv.shape
def test_argmin(self, use_mask):
self.create_data(use_mask)
assert self.t0.argmin() == 2
assert np.all(self.t0.argmin(axis=0) == 0)
assert np.all(self.t0.argmin(axis=1) == 0)
assert np.all(self.t0.argmin(axis=2) == 2)
def test_argmax(self, use_mask):
self.create_data(use_mask)
assert self.t0.argmax() == self.t0.size - 2
if use_mask == "masked":
# The 0 is where all entries are masked in that axis
assert np.all(self.t0.argmax(axis=0) == [1, 0, 1, 1, 1])
assert np.all(self.t0.argmax(axis=1) == [4, 0, 4, 4, 4])
else:
assert np.all(self.t0.argmax(axis=0) == 1)
assert np.all(self.t0.argmax(axis=1) == 4)
assert np.all(self.t0.argmax(axis=2) == 3)
def test_argsort(self, use_mask):
self.create_data(use_mask)
order = [2, 0, 4, 3, 1] if use_mask == "masked" else [2, 0, 1, 4, 3]
assert np.all(self.t0.argsort() == np.array(order))
assert np.all(self.t0.argsort(axis=0) == np.arange(2).reshape(2, 1, 1))
assert np.all(self.t0.argsort(axis=1) == np.arange(5).reshape(5, 1))
assert np.all(self.t0.argsort(axis=2) == np.array(order))
ravel = np.arange(50).reshape(-1, 5)[:, order].ravel()
if use_mask == "masked":
t0v = self.t0.argsort(axis=None)
# Manually remove elements in ravel that correspond to masked
# entries in self.t0. This removes the 10 entries that are masked
# which show up at the end of the list.
mask = self.t0.mask.ravel()[ravel]
ravel = ravel[~mask]
assert np.all(t0v[:-10] == ravel)
else:
assert np.all(self.t0.argsort(axis=None) == ravel)
@pytest.mark.parametrize("scale", Time.SCALES)
def test_argsort_warning(self, use_mask, scale):
self.create_data(use_mask)
if scale == "utc":
pytest.xfail()
with warnings.catch_warnings(record=True) as wlist:
Time([1, 2, 3], format="jd", scale=scale).argsort()
assert len(wlist) == 0
def test_min(self, use_mask):
self.create_data(use_mask)
assert self.t0.min() == self.t0[0, 0, 2]
assert np.all(self.t0.min(0) == self.t0[0])
assert np.all(self.t0.min(1) == self.t0[:, 0])
assert np.all(self.t0.min(2) == self.t0[:, :, 2])
assert self.t0.min(0).shape == (5, 5)
assert self.t0.min(0, keepdims=True).shape == (1, 5, 5)
assert self.t0.min(1).shape == (2, 5)
assert self.t0.min(1, keepdims=True).shape == (2, 1, 5)
assert self.t0.min(2).shape == (2, 5)
assert self.t0.min(2, keepdims=True).shape == (2, 5, 1)
def test_max(self, use_mask):
self.create_data(use_mask)
assert self.t0.max() == self.t0[-1, -1, -2]
assert np.all(self.t0.max(0) == self.t0[1])
assert np.all(self.t0.max(1) == self.t0[:, 4])
assert np.all(self.t0.max(2) == self.t0[:, :, 3])
assert self.t0.max(0).shape == (5, 5)
assert self.t0.max(0, keepdims=True).shape == (1, 5, 5)
def test_ptp(self, use_mask):
self.create_data(use_mask)
assert np.ptp(self.t0) == self.t0.max() - self.t0.min()
assert np.all(np.ptp(self.t0, axis=0) == self.t0.max(0) - self.t0.min(0))
assert np.ptp(self.t0, axis=0).shape == (5, 5)
assert np.ptp(self.t0, 0, keepdims=True).shape == (1, 5, 5)
with pytest.warns(AstropyDeprecationWarning):
assert self.t0.ptp() == self.t0.max() - self.t0.min()
def test_sort(self, use_mask):
self.create_data(use_mask)
order = [2, 0, 4, 3, 1] if use_mask == "masked" else [2, 0, 1, 4, 3]
assert np.all(self.t0.sort() == self.t0[:, :, order])
assert np.all(self.t0.sort(0) == self.t0)
assert np.all(self.t0.sort(1) == self.t0)
assert np.all(self.t0.sort(2) == self.t0[:, :, order])
if use_mask == "not_masked":
assert np.all(self.t0.sort(None) == self.t0[:, :, order].ravel())
# Bit superfluous, but good to check.
assert np.all(self.t0.sort(-1)[:, :, 0] == self.t0.min(-1))
assert np.all(self.t0.sort(-1)[:, :, -1] == self.t0.max(-1))
@pytest.mark.parametrize("axis", [None, 0, 1, 2, (0, 1)])
@pytest.mark.parametrize(
"where", [True, np.array([True, False, True, True, False])[..., np.newaxis]]
)
@pytest.mark.parametrize("keepdims", [False, True])
def test_mean(self, use_mask, axis, where, keepdims):
self.create_data(use_mask)
kwargs = dict(axis=axis, where=where, keepdims=keepdims)
def is_consistent(time):
where_expected = where & ~time.mask
where_expected = np.broadcast_to(where_expected, time.shape)
kw = kwargs.copy()
kw["where"] = where_expected
divisor = where_expected.sum(axis=axis, keepdims=keepdims)
if np.any(divisor == 0):
with pytest.raises(ValueError):
time.mean(**kwargs)
else:
time_mean = time.mean(**kwargs)
time_expected = Time(
*day_frac(
val1=np.ma.getdata(time.tai.jd1).sum(**kw),
val2=np.ma.getdata(time.tai.jd2).sum(**kw),
divisor=divisor,
),
format="jd",
scale="tai",
)
time_expected._set_scale(time.scale)
assert np.all(time_mean == time_expected)
is_consistent(self.t0)
is_consistent(self.t1)
axes_location_not_constant = [None, 2]
if axis in axes_location_not_constant:
with pytest.raises(ValueError):
self.t2.mean(**kwargs)
else:
is_consistent(self.t2)
def test_mean_precision(self, use_mask):
scale = "tai"
epsilon = 1 * u.ns
t0 = Time("2021-07-27T00:00:00", scale=scale)
t1 = Time("2022-07-27T00:00:00", scale=scale)
t2 = Time("2023-07-27T00:00:00", scale=scale)
t = Time([t0, t2 + epsilon])
if use_mask == "masked":
t[0] = np.ma.masked
assert t.mean() == (t2 + epsilon)
else:
assert t.mean() == (t1 + epsilon / 2)
def test_mean_dtype(self, use_mask):
self.create_data(use_mask)
with pytest.raises(ValueError):
self.t0.mean(dtype=int)
def test_mean_out(self, use_mask):
self.create_data(use_mask)
with pytest.raises(ValueError):
self.t0.mean(out=Time(np.zeros_like(self.t0.jd1), format="jd"))
def test_mean_leap_second(self, use_mask):
# Check that leap second is dealt with correctly: for UTC, across a leap
# second boundary, one cannot just average jd, but has to go through TAI.
if use_mask == "not_masked":
t = Time(["2012-06-30 23:59:60.000", "2012-07-01 00:00:01.000"])
mean_expected = t[0] + (t[1] - t[0]) / 2
mean_expected_explicit = Time("2012-07-01 00:00:00")
mean_test = t.mean()
assert mean_expected == mean_expected_explicit
assert mean_expected == mean_test
assert mean_test != Time(
*day_frac(t.jd1.sum(), t.jd2.sum(), divisor=2), format="jd"
)
def test_regression():
# For #5225, where a time with a single-element delta_ut1_utc could not
# be copied, flattened, or ravelled. (For copy, it is in test_basic.)
with iers.conf.set_temp("auto_download", False):
t = Time(49580.0, scale="tai", format="mjd")
t_ut1 = t.ut1
t_ut1_copy = copy.deepcopy(t_ut1)
assert type(t_ut1_copy.delta_ut1_utc) is np.ndarray
t_ut1_flatten = t_ut1.flatten()
assert type(t_ut1_flatten.delta_ut1_utc) is np.ndarray
t_ut1_ravel = t_ut1.ravel()
assert type(t_ut1_ravel.delta_ut1_utc) is np.ndarray
assert t_ut1_copy.delta_ut1_utc == t_ut1.delta_ut1_utc
| TestArithmetic |
python | scipy__scipy | scipy/stats/tests/test_mstats_basic.py | {
"start": 52491,
"end": 55310
} | class ____:
# data from gh-1428
x = np.array([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 2., 1., 1., 2.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 3., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1.])
y = np.array([1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1., 1., 1., 1.,
2., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2., 1., 1., 3.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 2., 1., 2., 1.,
1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1., 2.,
2., 1., 1., 2., 1., 1., 2., 1., 2., 1., 1., 1., 1., 2.,
2., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
1., 2., 1., 1., 1., 1., 1., 2., 2., 2., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
2., 1., 1., 2., 1., 1., 1., 1., 2., 1., 1., 1., 1., 1.,
1., 1., 1., 1., 1., 1., 1., 2., 1., 1., 1., 2., 1., 1.,
1., 1., 1., 1.])
def test_result_attributes(self):
res = mstats.mannwhitneyu(self.x, self.y)
attributes = ('statistic', 'pvalue')
check_named_results(res, attributes, ma=True)
def test_against_stats(self):
# gh-4641 reported that stats.mannwhitneyu returned half the p-value
# of mstats.mannwhitneyu. Default alternative of stats.mannwhitneyu
# is now two-sided, so they match.
res1 = mstats.mannwhitneyu(self.x, self.y)
res2 = stats.mannwhitneyu(self.x, self.y)
assert res1.statistic == res2.statistic
assert_allclose(res1.pvalue, res2.pvalue)
| TestMannwhitneyu |
python | python-attrs__attrs | typing-examples/mypy.py | {
"start": 5637,
"end": 5916
} | class ____:
num: int = attr.field(validator=attr.validators.ge(0))
with attr.validators.disabled():
Validated2(num=-1)
try:
attr.validators.set_disabled(True)
Validated2(num=-1)
finally:
attr.validators.set_disabled(False)
# Custom repr()
@attr.s
| Validated2 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.