language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 13710,
"end": 13862
} | class ____(GeoFunc):
output_field = FloatField()
arity = 2
function = ""
arg_joiner = " <-> "
geom_param_pos = (0, 1)
| GeometryDistance |
python | facebook__pyre-check | client/coverage_data.py | {
"start": 3866,
"end": 4797
} | class ____(json_mixins.SnakeCaseAndExcludeJsonMixin):
identifier: FunctionIdentifier
location: Location
annotation_status: FunctionAnnotationStatus
returns: ReturnAnnotationInfo
parameters: Sequence[ParameterAnnotationInfo]
is_method_or_classmethod: bool
def non_self_cls_parameters(self) -> Iterable[ParameterAnnotationInfo]:
if self.is_method_or_classmethod:
yield from self.parameters[1:]
else:
yield from self.parameters
@property
def is_annotated(self) -> bool:
return self.annotation_status != FunctionAnnotationStatus.NOT_ANNOTATED
@property
def is_partially_annotated(self) -> bool:
return self.annotation_status == FunctionAnnotationStatus.PARTIALLY_ANNOTATED
@property
def is_fully_annotated(self) -> bool:
return self.annotation_status == FunctionAnnotationStatus.FULLY_ANNOTATED
| FunctionAnnotationInfo |
python | walkccc__LeetCode | solutions/1394. Find Lucky Integer in an Array/1394.py | {
"start": 0,
"end": 254
} | class ____:
def findLucky(self, arr: list[int]) -> int:
count = [0] * (len(arr) + 1)
for a in arr:
if a <= len(arr):
count[a] += 1
for i in range(len(arr), 0, -1):
if count[i] == i:
return i
return -1
| Solution |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_print_options03.py | {
"start": 315,
"end": 1192
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("print_options03.xlsx")
self.ignore_files = [
"xl/printerSettings/printerSettings1.bin",
"xl/worksheets/_rels/sheet1.xml.rels",
]
self.ignore_elements = {
"[Content_Types].xml": ['<Default Extension="bin"'],
"xl/worksheets/sheet1.xml": ["<pageMargins", "<pageSetup"],
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with print options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.center_vertically()
worksheet.write("A1", "Foo")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | keras-team__keras | keras/src/callbacks/backup_and_restore_test.py | {
"start": 902,
"end": 1190
} | class ____(layers.Layer):
def __init__(self):
super().__init__()
self.counter = self.add_weight(
shape=(), initializer="zeros", dtype="float32", trainable=False
)
def call(self, x):
self.counter.assign_add(1)
return x
| CanaryLayer |
python | urllib3__urllib3 | src/urllib3/exceptions.py | {
"start": 1829,
"end": 1931
} | class ____(HTTPError):
"""Raised when automatic decoding based on Content-Type fails."""
| DecodeError |
python | realpython__materials | build-a-blog-from-scratch-django/django-blog/blog/models.py | {
"start": 555,
"end": 852
} | class ____(models.Model):
author = models.CharField(max_length=60)
body = models.TextField()
created_on = models.DateTimeField(auto_now_add=True)
post = models.ForeignKey("Post", on_delete=models.CASCADE)
def __str__(self):
return f"{self.author} on '{self.post}'"
| Comment |
python | pandas-dev__pandas | pandas/io/formats/format.py | {
"start": 42000,
"end": 49890
} | class ____(_GenericArrayFormatter):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# float_format is expected to be a string
# formatter should be used to pass a function
if self.float_format is not None and self.formatter is None:
# GH21625, GH22270
self.fixed_width = False
if callable(self.float_format):
self.formatter = self.float_format
self.float_format = None
def _value_formatter(
self,
float_format: FloatFormatType | None = None,
threshold: float | None = None,
) -> Callable:
"""Returns a function to be applied on each value to format it"""
# the float_format parameter supersedes self.float_format
if float_format is None:
float_format = self.float_format
# we are going to compose different functions, to first convert to
# a string, then replace the decimal symbol, and finally chop according
# to the threshold
# when there is no float_format, we use str instead of '%g'
# because str(0.0) = '0.0' while '%g' % 0.0 = '0'
if float_format:
def base_formatter(v):
assert float_format is not None # for mypy
# error: "str" not callable
# error: Unexpected keyword argument "value" for "__call__" of
# "EngFormatter"
return (
float_format(value=v) # type: ignore[operator,call-arg]
if notna(v)
else self.na_rep
)
else:
def base_formatter(v):
return str(v) if notna(v) else self.na_rep
if self.decimal != ".":
def decimal_formatter(v):
return base_formatter(v).replace(".", self.decimal, 1)
else:
decimal_formatter = base_formatter
if threshold is None:
return decimal_formatter
def formatter(value):
if notna(value):
if abs(value) > threshold:
return decimal_formatter(value)
else:
return decimal_formatter(0.0)
else:
return self.na_rep
return formatter
def get_result_as_array(self) -> np.ndarray:
"""
Returns the float values converted into strings using
the parameters given at initialisation, as a numpy array
"""
def format_with_na_rep(
values: ArrayLike, formatter: Callable, na_rep: str
) -> np.ndarray:
mask = isna(values)
formatted = np.array(
[
formatter(val) if not m else na_rep
for val, m in zip(values.ravel(), mask.ravel(), strict=True)
]
).reshape(values.shape)
return formatted
def format_complex_with_na_rep(
values: ArrayLike, formatter: Callable, na_rep: str
) -> np.ndarray:
real_values = np.real(values).ravel() # type: ignore[arg-type]
imag_values = np.imag(values).ravel() # type: ignore[arg-type]
real_mask, imag_mask = isna(real_values), isna(imag_values)
formatted_lst = []
for val, real_val, imag_val, re_isna, im_isna in zip(
values.ravel(),
real_values,
imag_values,
real_mask,
imag_mask,
strict=True,
):
if not re_isna and not im_isna:
formatted_lst.append(formatter(val))
elif not re_isna: # xxx+nanj
formatted_lst.append(f"{formatter(real_val)}+{na_rep}j")
elif not im_isna: # nan[+/-]xxxj
# The imaginary part may either start with a "-" or a space
imag_formatted = formatter(imag_val).strip()
if imag_formatted.startswith("-"):
formatted_lst.append(f"{na_rep}{imag_formatted}j")
else:
formatted_lst.append(f"{na_rep}+{imag_formatted}j")
else: # nan+nanj
formatted_lst.append(f"{na_rep}+{na_rep}j")
return np.array(formatted_lst).reshape(values.shape)
if self.formatter is not None:
return format_with_na_rep(self.values, self.formatter, self.na_rep)
if self.fixed_width:
threshold = get_option("display.chop_threshold")
else:
threshold = None
# if we have a fixed_width, we'll need to try different float_format
def format_values_with(float_format):
formatter = self._value_formatter(float_format, threshold)
# default formatter leaves a space to the left when formatting
# floats, must be consistent for left-justifying NaNs (GH #25061)
na_rep = " " + self.na_rep if self.justify == "left" else self.na_rep
# different formatting strategies for complex and non-complex data
# need to distinguish complex and float NaNs (GH #53762)
values = self.values
is_complex = is_complex_dtype(values)
# separate the wheat from the chaff
if is_complex:
values = format_complex_with_na_rep(values, formatter, na_rep)
else:
values = format_with_na_rep(values, formatter, na_rep)
if self.fixed_width:
if is_complex:
result = _trim_zeros_complex(values, self.decimal)
else:
result = _trim_zeros_float(values, self.decimal)
return np.asarray(result, dtype="object")
return values
# There is a special default string when we are fixed-width
# The default is otherwise to use str instead of a formatting string
float_format: FloatFormatType | None
if self.float_format is None:
if self.fixed_width:
if self.leading_space is True:
fmt_str = "{value: .{digits:d}f}"
else:
fmt_str = "{value:.{digits:d}f}"
float_format = partial(fmt_str.format, digits=self.digits)
else:
float_format = self.float_format
else:
float_format = lambda value: self.float_format % value
formatted_values = format_values_with(float_format)
if not self.fixed_width:
return formatted_values
# we need do convert to engineering format if some values are too small
# and would appear as 0, or if some values are too big and take too
# much space
if len(formatted_values) > 0:
maxlen = max(len(x) for x in formatted_values)
too_long = maxlen > self.digits + 6
else:
too_long = False
abs_vals = np.abs(self.values)
# this is pretty arbitrary for now
# large values: more that 8 characters including decimal symbol
# and first digit, hence > 1e6
has_large_values = (abs_vals > 1e6).any()
has_small_values = ((abs_vals < 10 ** (-self.digits)) & (abs_vals > 0)).any()
if has_small_values or (too_long and has_large_values):
if self.leading_space is True:
fmt_str = "{value: .{digits:d}e}"
else:
fmt_str = "{value:.{digits:d}e}"
float_format = partial(fmt_str.format, digits=self.digits)
formatted_values = format_values_with(float_format)
return formatted_values
def _format_strings(self) -> list[str]:
return list(self.get_result_as_array())
| FloatArrayFormatter |
python | ApeWorX__ape | src/ape/plugins/_utils.py | {
"start": 18149,
"end": 20724
} | class ____:
def __init__(self, plugin: PluginMetadata):
self._plugin = plugin
def handle_install_result(self, result: int) -> bool:
if not self._plugin.check_installed(use_cache=False):
self._log_modify_failed("install")
return False
elif result != 0:
self._log_errors_occurred("installing")
return False
else:
plugin_id = self._plugin.name
version = self._plugin.version
if version:
# Sometimes, like in editable mode, the version is missing here.
plugin_id = f"{plugin_id}=={version}"
logger.success(f"Plugin '{plugin_id}' has been installed.")
return True
def handle_upgrade_result(self, result: int, version_before: str) -> bool:
if result != 0:
self._log_errors_occurred("upgrading")
return False
version_now = self._plugin.version
if version_now is not None and version_before == version_now:
logger.info(f"'{self._plugin.name}' already has version '{version_now}'.")
return True
elif self._plugin.version:
logger.success(
f"Plugin '{self._plugin.name}' has been upgraded to version {self._plugin.version}."
)
return True
else:
# The process was successful but there is still no pip freeze version.
# This may happen when installing things from GitHub.
return True
def handle_uninstall_result(self, result) -> bool:
if self._plugin.check_installed(use_cache=False):
self._log_modify_failed("uninstall")
return False
elif result != 0:
self._log_errors_occurred("uninstalling")
return False
else:
logger.success(f"Plugin '{self._plugin.name}' has been uninstalled.")
return True
def _log_errors_occurred(self, verb: str):
logger.error(f"Errors occurred when {verb} '{self._plugin}'.")
def _log_modify_failed(self, verb: str):
logger.error(f"Failed to {verb} plugin '{self._plugin}.")
def _split_name_and_version(value: str) -> tuple[str, Optional[str]]:
if "@" in value:
parts = [x for x in value.split("@") if x]
return parts[0], "@".join(parts[1:])
if not (chars := [c for c in ("=", "<", ">") if c in value]):
return value, None
index = min(value.index(c) for c in chars)
return value[:index], value[index:]
| ModifyPluginResultHandler |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-facebook-marketing/source_facebook_marketing/streams/streams.py | {
"start": 12064,
"end": 12126
} | class ____(AdsInsights):
breakdowns = ["dma"]
| AdsInsightsDma |
python | huggingface__transformers | src/transformers/models/llama4/modeling_llama4.py | {
"start": 41863,
"end": 42610
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
kernel_size = config.patch_size
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
self.unfold = torch.nn.Unfold(kernel_size=kernel_size, stride=config.patch_size)
self.linear = nn.Linear(
config.num_channels * kernel_size[0] * kernel_size[1],
config.hidden_size,
bias=False,
)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.unfold(hidden_states)
hidden_states = hidden_states.permute(0, 2, 1)
hidden_states = self.linear(hidden_states)
return hidden_states
| Llama4UnfoldConvolution |
python | django__django | django/core/serializers/xml_serializer.py | {
"start": 17245,
"end": 17645
} | class ____(DefusedXmlException):
"""Document type definition is forbidden."""
def __init__(self, name, sysid, pubid):
super().__init__()
self.name = name
self.sysid = sysid
self.pubid = pubid
def __str__(self):
tpl = "DTDForbidden(name='{}', system_id={!r}, public_id={!r})"
return tpl.format(self.name, self.sysid, self.pubid)
| DTDForbidden |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 272620,
"end": 272689
} | class ____:
name: str
node: export_schema.Node
| ExternKernelNode |
python | google__jax | jax/_src/export/shape_poly.py | {
"start": 13828,
"end": 36911
} | class ____:
"""Symbolic expressions using dimension variables.
A dimension expression is an addition of terms (_DimTerm), which themselves
are products of factors (_DimFactor).
The representation of a _DimExpr is as sequence of pairs `(term, coeff)`,
representing the linear combination of terms with the given coefficients.
The sequence is sorted by lexicographic (syntactic) ordering of `_DimTerm`,
with the largest terms first. The special term `_DimTerm_one` is mapped
to the free integer coefficient of the expression.
We overload integer operations, but we do that soundly, raising
:class:`InconclusiveDimensionOperation` when the result is not
representable as a _DimExpr.
"""
__array_priority__ = 1000 # Same as tracer, for __radd__ and others on ndarray
__slots__ = ("_sorted_terms", "_scope", "_hash", "_size")
def __init__(self, sorted_terms: SortedTerms,
scope: SymbolicScope):
# Do not construct _DimExpr directly, unless you are sure that `terms` is
# normalized; Use _DimExpr._normalize_sorted_terms.
self._sorted_terms = tuple(sorted_terms) or ((_DimTerm_one, 0),)
self._scope = scope
self._hash = None
# _size speeds up _syntactic_cmp, which is used a lot for hashing.
self._size = sum((1 + abs(m_count) * m._size)
for m, m_count in self._sorted_terms)
@property
def scope(self):
# We make the expression scope visible, but read-only.
return self._scope
@staticmethod
def _coeff_to_sorted_terms(coeffs: dict[_DimTerm, int]) -> SortedTerms:
return sorted((p for p in coeffs.items() if p[1] != 0), reverse=True)
@staticmethod
def _from_term(t: _DimTerm, t_k: int, scope: SymbolicScope) -> DimSize:
return _DimExpr._normalize_sorted_terms(((t, t_k),), scope)
@staticmethod
def _from_var(v: str, scope: SymbolicScope) -> DimSize:
return _DimExpr._normalize_sorted_terms(((_DimTerm.from_var(v), 1),), scope)
@staticmethod
def _from_operation(operation: str, *operands: DimSize,
scope: SymbolicScope) -> DimSize:
return _DimExpr._from_term(
_DimTerm.from_operation(operation, *operands, scope=scope), 1,
scope=scope)
@property
def _leading_term(self) -> tuple[_DimTerm, int]:
"""Returns the highest degree term that comes last lexicographically."""
return self._sorted_terms[0]
def _to_single_term(self) -> tuple[int, int, _DimTerm] | None:
"""Extracts the single term: k + c * term.
Returns None if the expression is not a single term, or (k, c, term)
"""
n1 = 0
n2 = 0
term = None
for t, t_k in self._sorted_terms:
if t.is_constant:
n1 = t_k
continue
if term is None:
term = t
n2 = t_k
continue
return None
assert term is not None
return (n1, n2, term)
@staticmethod
def _add_coeff(coeffs: dict[_DimTerm, int], t: _DimTerm, coeff: int):
"""coeffs[t] += coeff, with squashing 0 coefficients."""
if coeff == 0: return
coeffs[t] = coeffs.get(t, 0) + coeff
@staticmethod
def _normalize_term(t: _DimTerm, t_k: int,
scope: SymbolicScope) -> Sequence[tuple[_DimTerm, int]]:
# If (t, t_k) is among the scope normalization rules, then return
# a list of `term * coefficient` to add to the expression containing (t, t_k).
# Returns the empty sequence if no normalizations are necessary.
if not scope._normalization_rules: return []
updates = []
after, t_k_after = scope._normalization_rules.get(t, (None, 0))
if after is not None and t_k % t_k_after == 0:
# We have t*t_k_after -> after.
# We subtract `t*t_k` and add `after * (t_k // t_k_after)`.
updates.append((t, - t_k))
updates.extend((t2, tc2 * (t_k // t_k_after))
for t2, tc2 in after._sorted_terms)
return updates
if len(t._factors) <= 1:
return updates
# A product of factors; look up individually
for f, fexp in t._factors:
f_after, f_k_after = scope._normalization_rules.get(_DimTerm(((f, fexp),)), (None, 0))
if f_after is not None and t_k % f_k_after == 0:
# We subtract `t*t_k`.
updates.append((t, - t_k))
# And add `(t // f**fexp) * f_after * (t_k // f_k_after)`
t_without_f = t.divide(_DimTerm(((f, fexp),)))
updates.extend((t2.mul(t_without_f), tc2 * (t_k // f_k_after))
for t2, tc2 in f_after._sorted_terms)
return updates
return updates
@staticmethod
def _normalize_sorted_terms(terms: SortedTerms,
scope: SymbolicScope) -> DimSize:
"""Constructs a _DimExpr in normal form from sorted terms.
Ensures that the symbolic dimension is normalized, e.g., does not
have terms with coefficient 0, it reflects all the scope
normalization_rules, and it is represented as a Python integer if it is
known to be a constant.
Does not attempt to normalize the keys (terms) inside `terms`.
"""
for t, t_k in terms:
assert t_k != 0
if updates := _DimExpr._normalize_term(t, t_k, scope):
coeffs = dict(terms)
for t1, t1_k in updates:
_DimExpr._add_coeff(coeffs, t1, t1_k)
terms = _DimExpr._coeff_to_sorted_terms(coeffs)
# TODO: check the case when we need to apply multiple normalizations
break
if not terms: return 0
if terms[0][0].is_constant: return terms[0][1]
return _DimExpr(terms, scope)
def _to_term(self) -> _DimTerm | None:
"""Extract the single term from a symbolic expression.
Returns None if the expression is not a single term."""
if len(self._sorted_terms) > 1: return None
(t, t_k), = self._sorted_terms
return t if t_k == 1 else None
def _to_factor(self) -> _DimFactor | None:
"""Extract the factor from a symbolic expression.
Returns None if the expression is not a single factor."""
t = self._to_term()
return t.to_factor() if t is not None else None
def _to_var(self) -> str | None:
"""Extract the variable name from a symbolic expression.
Returns None if the expression is not a single variable."""
mon = self._to_factor()
return mon.to_var() if mon is not None else None
@staticmethod
def _to_constant(e: DimSize) -> int | None:
"""Extract the constant from a symbolic expression.
Returns None if the expression is not a single constant."""
if not isinstance(e, _DimExpr):
return int(e)
m, m_c = e._leading_term
return m_c if m.is_constant else None
@property
def _is_constant(self):
return _DimExpr._to_constant(self) is not None
def _get_vars(self) -> set[str]:
"""The variables that appear in a symbolic dimension."""
acc = set()
for mon, _ in self._sorted_terms:
acc.update(mon.get_vars())
return acc
# There are some uses already of `get_vars`, we keep it a while longer
# for backwards compatibility.
get_vars = _get_vars
@overload
@staticmethod
def _linear_combination_sorted_pairs(
e1: SortedTerms, i1: int, f1: int,
e2: SortedTerms, i2: int, f2: int) -> SortedTerms: ... # type: ignore[bad-return-type,unused-ignore]
@overload
@staticmethod
def _linear_combination_sorted_pairs(
e1: SortedFactors, i1: int, f1: int,
e2: SortedFactors, i2: int, f2: int) -> SortedFactors: ... # type: ignore[bad-return-type,unused-ignore]
@staticmethod
def _linear_combination_sorted_pairs(
pairs1, i1, f1,
pairs2, i2, f2):
"""Computes e1[i1:] * f1 + e2[i2:] * f2.
e1, e2, and the result are sorted with largest term first.
This is an optimization for a common operation. The unoptimized code would
compute each subexpression in turn. This works for both SortedTerms and SortedFactors.
"""
len1 = len(pairs1)
len2 = len(pairs2)
acc = []
while i1 < len1 and i2 < len2:
m1, m1_c = pairs1[i1]
m2, m2_c = pairs2[i2]
cmp = m1._syntactic_cmp(m2) # Pick the largest term
if cmp < 0:
acc.append((m2, m2_c * f2))
i2 += 1
elif cmp > 0:
acc.append((m1, m1_c * f1))
i1 += 1
else: # They are equal, combine them
i1 += 1
i2 += 1
m1_c = m1_c * f1 + m2_c * f2
if m1_c == 0: continue
acc.append((m1, m1_c))
if i1 < len1:
acc.extend((m1, m1_c * f1) for m1, m1_c in itertools.islice(pairs1, i1, len1) if m1_c != 0)
if i2 < len2:
acc.extend((m2, m2_c * f2) for m2, m2_c in itertools.islice(pairs2, i2, len2) if m2_c != 0)
return acc
def _syntactic_cmp(self, other: _DimExpr) -> int:
"""Returns -1 if self < other, 0 if self == other, 1 if self > other.
The comparison is done lexicographically (syntactic), to be used for sorting.
The result is not related to the semantic value.
"""
s_terms = self._sorted_terms
o_terms = other._sorted_terms
if c := cmp_comparable(self._size, other._size): return c
def cmp_factor(s_f: tuple[_DimTerm, int], o_f: tuple[_DimTerm, int]) -> int:
if c := s_f[0]._syntactic_cmp(o_f[0]): return c
return cmp_comparable(s_f[1], o_f[1])
return cmp_sequence(s_terms, o_terms, cmp_factor)
def _eq(self, other: _DimExpr) -> bool:
# Equality is used very frequently because expressions are cached. We could
# implement a more precise version based on `(self - other).bounds() = (0, 0)`
# but that would be too expensive. It would also have the unfortunate drawback
# that we cannot then cache `e.bounds()` because hashing invokes equality
# which would lead to infinite recursion.
diff = self - other
# We look for `self - other == k`, and we rely on the fact that when we
# normalize _DimExpr that represent integers as ints.
if is_symbolic_dim(diff):
# Here we really ought to raise InconclusiveDimensionOperation, but __eq__
# cannot raise exceptions, because it is used indirectly when hashing.
# So, we say that the expressions are disequal, which is really unsound.
# See https://docs.jax.dev/en/latest/export/shape_poly.html#comparison-of-symbolic-dimensions-is-partially-supported
return False
return diff == 0
def __hash__(self):
if self._hash is None:
self._hash = hash((self._sorted_terms, self.scope))
return self._hash
def __str__(self):
def _one_term(t, t_k):
abs_t_k = abs(t_k)
sgn_t_k = "+" if t_k > 0 else "-"
if t.is_constant:
return f"{sgn_t_k} {abs_t_k}" if abs_t_k != 0 else "0"
if abs_t_k == 1:
return f"{sgn_t_k} {t}"
return f"{sgn_t_k} {abs_t_k}*{t}"
# We print first the "larger" terms, so that the constant is last.
res = " ".join(_one_term(t, t_k)
for t, t_k in self._sorted_terms)
if res.startswith("+ "):
res = res[2:]
return res
def __repr__(self):
return str(self)
# A special case for linear combinations because they are common
@staticmethod
def _linear_combination(e1: DimSize, k1: int,
e2: DimSize, k2: int,
scope: SymbolicScope) -> DimSize:
"""Computes and normalizes `e1 * k1 + e2 * k2`"""
if isinstance(e1, _DimExpr):
e1_terms = e1._sorted_terms
if isinstance(e2, _DimExpr):
e1.scope._check_same_scope(e2, when="for linear combination")
else:
if not isinstance(e2, _DimExpr):
return e1 * k1 + e2 * k2 # Constants
e1_terms = ((_DimTerm_one, op.index(e1)),)
if isinstance(e2, _DimExpr):
e2_terms = e2._sorted_terms
elif e2 == 0:
e2_terms = ()
else:
e2_terms = ((_DimTerm_one, op.index(e2)),)
new_terms = _DimExpr._linear_combination_sorted_pairs(e1_terms, 0, k1,
e2_terms, 0, k2)
return _DimExpr._normalize_sorted_terms(new_terms, scope)
# We overload +, -, *, because they are fully defined for _DimExpr.
def __add__(self, other):
if isinstance(other, core.Tracer) or not _convertible_to_poly(other):
return self.__jax_array__().__add__(other)
if isinstance(other, int) and other == 0: return self
return _DimExpr._linear_combination(self, 1, other, 1, self.scope)
def __radd__(self, other):
if isinstance(other, core.Tracer) or not _convertible_to_poly(other):
return self.__jax_array__().__radd__(other)
if isinstance(other, int) and other == 0: return self
return _DimExpr._linear_combination(self, 1, other, 1, self.scope)
def __sub__(self, other):
if isinstance(other, core.Tracer) or not _convertible_to_poly(other):
return self.__jax_array__().__sub__(other)
if isinstance(other, int) and other == 0: return self
return _DimExpr._linear_combination(self, 1, other, -1, self.scope)
def __rsub__(self, other):
if isinstance(other, core.Tracer) or not _convertible_to_poly(other):
return self.__jax_array__().__rsub__(other)
return _DimExpr._linear_combination(self, -1, other, 1, self.scope)
def __neg__(self) -> DimSize:
return _DimExpr._linear_combination(self, -1, 0, 0, self.scope)
def __mul__(self, other):
if isinstance(other, core.Tracer) or not _convertible_to_poly(other):
return self.__jax_array__().__mul__(other)
if isinstance(other, int):
if other == 1: return self
if other == 0: return 0
return _DimExpr._linear_combination(self, other, 0, 0, self.scope)
other = _ensure_poly(other, "mul", self.scope)
coeffs: dict[_DimTerm, int] = {}
for mon1, coeff1 in self._sorted_terms:
for mon2, coeff2 in other._sorted_terms:
mon = mon1.mul(mon2)
_DimExpr._add_coeff(coeffs, mon, coeff1 * coeff2)
return _DimExpr._normalize_sorted_terms(_DimExpr._coeff_to_sorted_terms(coeffs),
self.scope)
def __rmul__(self, other):
if isinstance(other, core.Tracer) or not _convertible_to_poly(other):
return self.__jax_array__().__rmul__(other)
if isinstance(other, int):
if other == 1: return self
if other == 0: return 0
return _DimExpr._linear_combination(self, other, 0, 0, self.scope)
return _ensure_poly(other, "mul", self.scope).__mul__(self)
def __pow__(self, power: core.DimSize, modulo=None):
if modulo is not None:
raise NotImplementedError("__pow__ modulo not implemented")
if is_symbolic_dim(power):
return power.__rpow__(self) # type: ignore
if power != int(power):
raise ValueError(f"Symbolic dimension cannot be raised to non-integer powers: '{self}' ** '{power}'")
if power >= 0:
return functools.reduce(op.mul, [self] * power, 1)
# We don't support negative powers, because JAX does not allow negative
# powers for integers
raise ValueError(f"Symbolic dimension cannot be raised to negative powers: '{self}' ** '{power}'")
def __rpow__(self, other, modulo=None):
if modulo is not None:
raise NotImplementedError("__rpow__ modulo not implemented")
return self.__jax_array__().__rpow__(other)
def __floordiv__(self, divisor):
if isinstance(divisor, core.Tracer) or not _convertible_to_poly(divisor):
return self.__jax_array__().__floordiv__(divisor)
return self._divmod(divisor)[0]
def __rfloordiv__(self, other):
if isinstance(other, core.Tracer) or not _convertible_to_poly(other):
return self.__jax_array__().__rfloordiv__(other)
return _ensure_poly(other, "floordiv", self.scope).__floordiv__(self)
def __truediv__(self, divisor):
# Used for "/", which always returns a float
return self.__jax_array__().__truediv__(divisor)
def __rtruediv__(self, dividend):
# Used for "/", when dividend is not a _DimExpr
return self.__jax_array__().__rtruediv__(dividend)
def __mod__(self, divisor):
if isinstance(divisor, core.Tracer) or not _convertible_to_poly(divisor):
return self.__jax_array__().__mod__(divisor)
return self._divmod(divisor)[1]
def __rmod__(self, dividend):
if isinstance(dividend, core.Tracer) or not _convertible_to_poly(dividend):
return self.__jax_array__().__rmod__(dividend)
return _ensure_poly(dividend, "mod", self.scope).__mod__(self)
def __divmod__(self, divisor):
if isinstance(divisor, core.Tracer) or not _convertible_to_poly(divisor):
return self.__jax_array__().__divmod__(divisor)
return self._divmod(divisor)
def __rdivmod__(self, dividend):
if isinstance(dividend, core.Tracer) or not _convertible_to_poly(dividend):
return self.__jax_array__().__rdivmod__(dividend)
return _ensure_poly(dividend, "divmod", self.scope).__divmod__(self)
def __int__(self):
if (c := _DimExpr._to_constant(self)) is not None:
return c
raise InconclusiveDimensionOperation(f"Symbolic dimension '{self}' used in a context that requires a constant")
# We must overload __eq__ and __ne__, or else we get unsound defaults.
def __eq__(self, other: Any) -> bool:
if isinstance(other, _DimExpr):
if self.scope is not other.scope:
return False
elif not core.is_constant_dim(other):
return False
# Equality is used very frequently because expressions are cached. We could
# implement a more precise version based on `(self - other).bounds() = (0, 0)`
# but that would be too expensive. It would also have the unfortunate drawback
# that we cannot then cache `e.bounds()` because hashing invokes equality
# which would lead to infinite recursion.
diff = self - other
# We look for `self - other == k`, and we rely on the fact that when we
# normalize _DimExpr that represent integers as ints.
if is_symbolic_dim(diff):
# Here we really ought to raise InconclusiveDimensionOperation, but __eq__
# cannot raise exceptions, because it is used indirectly when hashing.
# So, we say that the expressions are disequal, which is really unsound.
# See https://docs.jax.dev/en/latest/export/shape_poly.html#comparison-of-symbolic-dimensions-is-partially-supported
return False
return diff == 0
def __ne__(self, other: Any) -> bool:
return not self.__eq__(other)
def __ge__(self, other: DimSize) -> bool:
return _geq_decision(self, other, lambda: f"'{self}' >= '{other}'")
def __le__(self, other: DimSize):
return _geq_decision(other, self, lambda: f"'{self}' <= '{other}'")
def __gt__(self, other: DimSize):
return not _geq_decision(other, self, lambda: f"'{self}' > '{other}'")
def __lt__(self, other: DimSize):
return not _geq_decision(self, other, lambda: f"'{self}' < '{other}'")
def _divmod(self, divisor: DimSize) -> tuple[DimSize, int]:
"""
Floor division with remainder (divmod) generalized to expressions.
If the `divisor` is not a constant, the remainder must be 0.
If the `divisor` is a constant, the remainder may be non 0, for consistency
with integer divmod.
:return: Quotient resulting from polynomial division and integer remainder.
"""
try:
dividend, quotient = self, 0
# invariant: self = dividend + divisor * quotient
# quotient and dividend are changed in the loop; the leading term of
# dividend decreases at each iteration.
while is_symbolic_dim(dividend) and not dividend._is_constant: # type: ignore[attribute-error,unused-ignore]
mon, count = dividend._leading_term
if isinstance(divisor, _DimExpr):
dterm, dcount = divisor._leading_term
qterm = mon.divide(dterm)
else:
qterm, dcount = mon, int(divisor)
qcount, rcount = divmod(count, dcount)
if rcount != 0:
raise InconclusiveDimensionOperation("")
q = _DimExpr._from_term(qterm, qcount, self.scope)
quotient += q
dividend -= q * divisor
dividend = int(dividend) # type: ignore[assignment]
if isinstance(divisor, _DimExpr):
if dividend != 0:
raise InconclusiveDimensionOperation("")
remainder = 0
else:
q, r = divmod(dividend, int(divisor))
quotient += q
remainder = r
if config.enable_checks.value:
v1 = divisor * quotient
v2 = v1 + remainder
assert self == _ensure_poly(v2, "check", self.scope), (
self, v2, type(self), type(v2))
assert self == _ensure_poly(divisor * quotient + remainder, "test", self.scope), (
self, divisor, quotient, remainder)
return quotient, remainder
except InconclusiveDimensionOperation:
return (_DimExpr._from_operation(_DimFactor.FLOORDIV, self, divisor,
scope=self.scope), # type: ignore
_DimExpr._from_operation(_DimFactor.MOD, self, divisor,
scope=self.scope))
def _evaluate(self, env: DimVarEnv):
# Evaluates as a value of dtype=core.dim_value_dtype()
terms = [_evaluate_multiply(t.evaluate(env, self.scope), core.dim_constant(t_k))
for t, t_k in self._sorted_terms]
return functools.reduce(_evaluate_add, terms) if len(terms) > 1 else terms[0]
def max(self, other: DimSize) -> DimSize:
lb, ub = _bounds_decision(self - other, BoundsPrecision.FOR_GEQ0_OR_LEQ0)
if 0 <= lb: return self
if ub <= 0: return other
return _DimExpr._from_operation(_DimFactor.MAX, self, other, scope=self.scope)
def rmax(self, other: DimSize) -> DimSize:
lb, ub = _bounds_decision(self - other, BoundsPrecision.FOR_GEQ0_OR_LEQ0)
if 0 <= lb: return self
if ub <= 0: return other
return _DimExpr._from_operation(_DimFactor.MAX, other, self, scope=self.scope)
def min(self, other: DimSize) -> DimSize:
lb, ub = _bounds_decision(self - other, BoundsPrecision.FOR_GEQ0_OR_LEQ0)
if 0 <= lb: return other
if ub <= 0: return self
return _DimExpr._from_operation(_DimFactor.MIN, self, other, scope=self.scope)
def rmin(self, other: DimSize) -> DimSize:
lb, ub = _bounds_decision(self - other, BoundsPrecision.FOR_GEQ0_OR_LEQ0)
if 0 <= lb: return other
if ub <= 0: return self
return _DimExpr._from_operation(_DimFactor.MIN, other, self, scope=self.scope)
@staticmethod
def _get_aval(dim: _DimExpr):
return core.dim_value_aval()
def dimension_as_value(self):
"""Turns a dimension size into a Jax value that we can compute with."""
return _dim_as_value(self)
def __jax_array__(self):
# Used for implicit coercions of polynomials as JAX arrays
return _dim_as_value(self)
def __deepcopy__(self, memo):
return _DimExpr(
copy.deepcopy(self._sorted_terms, memo),
copy.deepcopy(self._scope, memo))
def cmp_comparable(i1, i2) -> int:
  """Three-way comparison: -1 if i1 < i2, 1 if i1 > i2, 0 otherwise.

  Comparison order (`<` first, then `>`) is preserved so partially
  ordered values behave exactly as before.
  """
  if i1 < i2:
    return -1
  return 1 if i1 > i2 else 0
def cmp_sequence(s1, s2, elem_cmp) -> int:
  """Lexicographically compares two sequences, using `elem_cmp` per element.

  Returns the first nonzero element comparison; otherwise the shorter
  sequence compares as smaller.
  """
  len2 = len(s2)
  for idx, elem in enumerate(s1):
    if idx >= len2:
      # s2 is a proper prefix of s1, so s1 is larger.
      return 1
    result = elem_cmp(elem, s2[idx])
    if result:
      return result
  # All compared elements were equal; decide by length.
  return -1 if len(s1) < len2 else 0
| _DimExpr |
python | kamyu104__LeetCode-Solutions | Python/determine-if-two-events-have-conflict.py | {
"start": 37,
"end": 284
} | class ____(object):
def haveConflict(self, event1, event2):
"""
:type event1: List[str]
:type event2: List[str]
:rtype: bool
"""
return max(event1[0], event2[0]) <= min(event1[1], event2[1])
| Solution |
python | sphinx-doc__sphinx | sphinx/domains/python/__init__.py | {
"start": 17995,
"end": 19081
} | class ____(XRefRole):
def process_link(
self,
env: BuildEnvironment,
refnode: Element,
has_explicit_title: bool,
title: str,
target: str,
) -> tuple[str, str]:
refnode['py:module'] = env.ref_context.get('py:module')
refnode['py:class'] = env.ref_context.get('py:class')
if not has_explicit_title:
title = title.lstrip('.') # only has a meaning for the target
target = target.lstrip('~') # only has a meaning for the title
# if the first character is a tilde, don't display the module/class
# parts of the contents
if title[0:1] == '~':
title = title[1:]
dot = title.rfind('.')
if dot != -1:
title = title[dot + 1 :]
# if the first character is a dot, search more specific namespaces first
# else search builtins first
if target[0:1] == '.':
target = target[1:]
refnode['refspecific'] = True
return title, target
| PyXRefRole |
python | wandb__wandb | tests/unit_tests/test_launch/test_runner/test_kubernetes.py | {
"start": 5907,
"end": 6289
} | class ____:
"""Mocks a kubernetes event stream that can be populated from tests."""
def __init__(self):
self.queue = []
async def __aiter__(self):
while True:
while not self.queue:
await asyncio.sleep(0)
yield self.queue.pop(0)
async def add(self, event: Any):
self.queue.append(event)
| MockEventStream |
python | ray-project__ray | python/ray/autoscaler/v2/scheduler.py | {
"start": 2735,
"end": 3395
} | class ____:
# Instances to launch.
to_launch: List[LaunchRequest] = field(default_factory=list)
# To terminate.
to_terminate: List[TerminationRequest] = field(default_factory=list)
# The infeasible resource bundles.
infeasible_resource_requests: List[ResourceRequest] = field(default_factory=list)
# The infeasible gang resource bundles.
infeasible_gang_resource_requests: List[GangResourceRequest] = field(
default_factory=list
)
# The infeasible cluster resource constraints.
infeasible_cluster_resource_constraints: List[ClusterResourceConstraint] = field(
default_factory=list
)
| SchedulingReply |
python | mlflow__mlflow | mlflow/tracing/processor/base_mlflow.py | {
"start": 1213,
"end": 8445
} | class ____(OtelMetricsMixin, SimpleSpanProcessor):
"""
Defines custom hooks to be executed when a span is started or ended (before exporting).
"""
def __init__(
self,
span_exporter: SpanExporter,
export_metrics: bool,
):
super().__init__(span_exporter)
self.span_exporter = span_exporter
self._export_metrics = export_metrics
self._env_metadata = resolve_env_metadata()
# Lock to prevent race conditions during concurrent span name deduplication
# This ensures that when multiple spans end simultaneously, their names are
# deduplicated atomically without interference
self._deduplication_lock = threading.RLock()
def on_start(self, span: OTelSpan, parent_context: Context | None = None):
"""
Handle the start of a span. This method is called when an OpenTelemetry span is started.
Args:
span: An OpenTelemetry Span object that is started.
parent_context: The context of the span. Note that this is only passed when the context
object is explicitly specified to OpenTelemetry start_span call. If the parent span
is obtained from the global context, it won't be passed here so we should not rely
on it.
"""
trace_id = self._trace_manager.get_mlflow_trace_id_from_otel_id(span.context.trace_id)
if not trace_id and span.parent is not None:
_logger.debug(
"Received a non-root span but the trace ID is not found."
"The trace has likely been halted due to a timeout expiration."
)
return
if span.parent is None:
trace_info = self._start_trace(span)
if trace_info is None:
return
trace_id = trace_info.trace_id
InMemoryTraceManager.get_instance().register_span(create_mlflow_span(span, trace_id))
def _start_trace(self, root_span: OTelSpan) -> TraceInfo:
raise NotImplementedError("Subclasses must implement this method.")
def on_end(self, span: OTelReadableSpan) -> None:
"""
Handle the end of a span. This method is called when an OpenTelemetry span is ended.
Args:
span: An OpenTelemetry ReadableSpan object that is ended.
"""
if self._export_metrics:
self.record_metrics_for_span(span)
trace_id = get_otel_attribute(span, SpanAttributeKey.REQUEST_ID)
# Acquire lock before accessing and modifying trace data to prevent race conditions
# during concurrent span endings. This ensures span name deduplication happens
# atomically without interference from other threads
with self._deduplication_lock:
with self._trace_manager.get_trace(trace_id) as trace:
if trace is not None:
if span._parent is None:
self._update_trace_info(trace, span)
else:
_logger.debug(f"Trace data with request ID {trace_id} not found.")
super().on_end(span)
def _get_basic_trace_metadata(self) -> dict[str, Any]:
metadata = self._env_metadata.copy()
metadata[TRACE_SCHEMA_VERSION_KEY] = str(TRACE_SCHEMA_VERSION)
# If the span is started within an active MLflow run, we should record it as a trace tag
# Note `mlflow.active_run()` can only get thread-local active run,
# but tracing routine might be applied to model inference worker threads
# in the following cases:
# - langchain model `chain.batch` which uses thread pool to spawn workers.
# - MLflow langchain pyfunc model `predict` which calls `api_request_parallel_processor`.
# Therefore, we use `_get_global_active_run()` instead to get the active run from
# all threads and set it as the tracing source run.
if run := _get_latest_active_run():
metadata[TraceMetadataKey.SOURCE_RUN] = run.info.run_id
# The order is:
# 1. model_id of the current active model set by `set_active_model`
# 2. model_id from the current prediction context
# (set by mlflow pyfunc predict, or explicitly using set_prediction_context)
if active_model_id := _get_active_model_id_global():
metadata[TraceMetadataKey.MODEL_ID] = active_model_id
elif model_id := maybe_get_logged_model_id():
metadata[TraceMetadataKey.MODEL_ID] = model_id
return metadata
def _get_basic_trace_tags(self, span: OTelReadableSpan) -> dict[str, Any]:
# If the trace is created in the context of MLflow model evaluation, we extract the request
# ID from the prediction context. Otherwise, we create a new trace info by calling the
# backend API.
tags = {}
if request_id := maybe_get_request_id(is_evaluate=True):
tags.update({TraceTagKey.EVAL_REQUEST_ID: request_id})
if dependencies_schema := maybe_get_dependencies_schemas():
tags.update(dependencies_schema)
tags.update({TraceTagKey.TRACE_NAME: span.name})
return tags
def _update_trace_info(self, trace: _Trace, root_span: OTelReadableSpan):
"""Update the trace info with the final values from the root span."""
# The trace/span start time needs adjustment to exclude the latency of
# the backend API call. We already adjusted the span start time in the
# on_start method, so we reflect the same to the trace start time here.
trace.info.request_time = root_span.start_time // 1_000_000 # nanosecond to millisecond
trace.info.execution_duration = (root_span.end_time - root_span.start_time) // 1_000_000
# Update trace state from span status, but only if the user hasn't explicitly set
# a different trace status
update_trace_state_from_span_conditionally(trace, root_span)
# TODO: Remove this once the new trace table UI is available that is based on V3 trace.
# Until then, these two are still used to render the "request" and "response" columns.
trace.info.trace_metadata.update(
{
TraceMetadataKey.INPUTS: self._truncate_metadata(
root_span.attributes.get(SpanAttributeKey.INPUTS)
),
TraceMetadataKey.OUTPUTS: self._truncate_metadata(
root_span.attributes.get(SpanAttributeKey.OUTPUTS)
),
}
)
# Aggregate token usage information from all spans
if usage := aggregate_usage_from_spans(trace.span_dict.values()):
trace.info.request_metadata[TraceMetadataKey.TOKEN_USAGE] = json.dumps(usage)
def _truncate_metadata(self, value: str | None) -> str:
"""Get truncated value of the attribute if it exceeds the maximum length."""
if not value:
return ""
if len(value) > MAX_CHARS_IN_TRACE_INFO_METADATA:
trunc_length = MAX_CHARS_IN_TRACE_INFO_METADATA - len(TRUNCATION_SUFFIX)
value = value[:trunc_length] + TRUNCATION_SUFFIX
return value
| BaseMlflowSpanProcessor |
python | pypa__warehouse | tests/unit/manage/test_forms.py | {
"start": 35774,
"end": 36691
} | class ____:
def test_validate(self, pyramid_request):
organization = OrganizationFactory()
pyramid_request.POST = MultiDict({"organization": organization.id})
form = forms.TransferOrganizationProjectForm(
pyramid_request.POST, organization_choices=[organization]
)
assert form.validate()
def test_rejects_inactive_company(self, pyramid_request):
organization = OrganizationFactory(orgtype="Company")
pyramid_request.POST = MultiDict({"organization": organization.id})
form = forms.TransferOrganizationProjectForm(
pyramid_request.POST, organization_choices=[organization]
)
assert not form.validate()
assert form.errors == {
"organization": [
"Cannot transfer to Company Organization with inactive billing"
]
}
| TestTransferOrganizationProjectForm |
python | huggingface__transformers | src/transformers/models/fsmt/configuration_fsmt.py | {
"start": 1225,
"end": 10363
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`FSMTModel`]. It is used to instantiate a FSMT
model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
defaults will yield a similar configuration to that of the FSMT
[facebook/wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
langs (`list[str]`):
A list with source language and target_language (e.g., ['en', 'ru']).
src_vocab_size (`int`):
Vocabulary size of the encoder. Defines the number of different tokens that can be represented by the
`inputs_ids` passed to the forward method in the encoder.
tgt_vocab_size (`int`):
Vocabulary size of the decoder. Defines the number of different tokens that can be represented by the
`inputs_ids` passed to the forward method in the decoder.
d_model (`int`, *optional*, defaults to 1024):
Dimensionality of the layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of encoder layers.
decoder_layers (`int`, *optional*, defaults to 12):
Number of decoder layers.
encoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
decoder_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
encoder_ffn_dim (`int`, *optional*, defaults to 4096):
Dimensionality of the "intermediate" (often named feed-forward) layer in decoder.
activation_function (`str` or `Callable`, *optional*, defaults to `"relu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"silu"` and `"gelu_new"` are supported.
dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for activations inside the fully connected layer.
max_position_embeddings (`int`, *optional*, defaults to 1024):
The maximum sequence length that this model might ever be used with. Typically set this to something large
just in case (e.g., 512 or 1024 or 2048).
init_std (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
scale_embedding (`bool`, *optional*, defaults to `True`):
Scale embeddings by diving by sqrt(d_model).
bos_token_id (`int`, *optional*, defaults to 0)
Beginning of stream token id.
pad_token_id (`int`, *optional*, defaults to 1)
Padding token id.
eos_token_id (`int`, *optional*, defaults to 2)
End of stream token id.
decoder_start_token_id (`int`, *optional*):
This model starts decoding with `eos_token_id`
encoder_layerdrop (`float`, *optional*, defaults to 0.0):
Google "layerdrop arxiv", as its not explainable in one line.
decoder_layerdrop (`float`, *optional*, defaults to 0.0):
Google "layerdrop arxiv", as its not explainable in one line.
is_encoder_decoder (`bool`, *optional*, defaults to `True`):
Whether this is an encoder/decoder model.
tie_word_embeddings (`bool`, *optional*, defaults to `False`):
Whether to tie input and output embeddings.
num_beams (`int`, *optional*, defaults to 5)
Number of beams for beam search that will be used by default in the `generate` method of the model. 1 means
no beam search.
length_penalty (`float`, *optional*, defaults to 1)
Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to
the sequence length, which in turn is used to divide the score of the sequence. Since the score is the log
likelihood of the sequence (i.e. negative), `length_penalty` > 0.0 promotes longer sequences, while
`length_penalty` < 0.0 encourages shorter sequences.
early_stopping (`bool`, *optional*, defaults to `False`)
Flag that will be used by default in the `generate` method of the model. Whether to stop the beam search
when at least `num_beams` sentences are finished per batch or not.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
forced_eos_token_id (`int`, *optional*, defaults to 2):
The id of the token to force as the last generated token when `max_length` is reached. Usually set to
`eos_token_id`.
Examples:
```python
>>> from transformers import FSMTConfig, FSMTModel
>>> # Initializing a FSMT facebook/wmt19-en-ru style configuration
>>> config = FSMTConfig()
>>> # Initializing a model (with random weights) from the configuration
>>> model = FSMTModel(config)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "fsmt"
attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
sub_configs = {"decoder": DecoderConfig}
# update the defaults from config file
def __init__(
self,
langs=["en", "de"],
src_vocab_size=42024,
tgt_vocab_size=42024,
activation_function="relu",
d_model=1024,
max_length=200,
max_position_embeddings=1024,
encoder_ffn_dim=4096,
encoder_layers=12,
encoder_attention_heads=16,
encoder_layerdrop=0.0,
decoder_ffn_dim=4096,
decoder_layers=12,
decoder_attention_heads=16,
decoder_layerdrop=0.0,
attention_dropout=0.0,
dropout=0.1,
activation_dropout=0.0,
init_std=0.02,
decoder_start_token_id=2,
is_encoder_decoder=True,
scale_embedding=True,
tie_word_embeddings=False,
num_beams=5,
length_penalty=1.0,
early_stopping=False,
use_cache=True,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
forced_eos_token_id=2,
**common_kwargs,
):
self.langs = langs
self.src_vocab_size = src_vocab_size
self.tgt_vocab_size = tgt_vocab_size
self.d_model = d_model # encoder_embed_dim and decoder_embed_dim
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_layers = self.num_hidden_layers = encoder_layers
self.encoder_attention_heads = encoder_attention_heads
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layerdrop = decoder_layerdrop
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_layers = decoder_layers
self.decoder_attention_heads = decoder_attention_heads
self.max_position_embeddings = max_position_embeddings
self.init_std = init_std # Normal(0, this parameter)
self.activation_function = activation_function
self.decoder = DecoderConfig(
vocab_size=tgt_vocab_size,
bos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
num_hidden_layers=encoder_layers,
)
if "decoder" in common_kwargs:
del common_kwargs["decoder"]
self.scale_embedding = scale_embedding # scale factor will be sqrt(d_model) if True
# 3 Types of Dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.dropout = dropout
self.use_cache = use_cache
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
decoder_start_token_id=decoder_start_token_id,
is_encoder_decoder=is_encoder_decoder,
tie_word_embeddings=tie_word_embeddings,
forced_eos_token_id=forced_eos_token_id,
max_length=max_length,
num_beams=num_beams,
length_penalty=length_penalty,
early_stopping=early_stopping,
**common_kwargs,
)
__all__ = ["FSMTConfig"]
| FSMTConfig |
python | walkccc__LeetCode | solutions/5. Longest Palindromic Substring/5-2.py | {
"start": 0,
"end": 1125
} | class ____:
def longestPalindrome(self, s: str) -> str:
t = '#'.join('@' + s + '$')
p = self._manacher(t)
maxPalindromeLength, bestCenter = max((extend, i)
for i, extend in enumerate(p))
l = (bestCenter - maxPalindromeLength) // 2
r = (bestCenter + maxPalindromeLength) // 2
return s[l:r]
def _manacher(self, t: str) -> list[int]:
"""
Returns an array `p` s.t. `p[i]` is the length of the longest palindrome
centered at `t[i]`, where `t` is a string with delimiters and sentinels.
"""
p = [0] * len(t)
center = 0
for i in range(1, len(t) - 1):
rightBoundary = center + p[center]
mirrorIndex = center - (i - center)
if rightBoundary > i:
p[i] = min(rightBoundary - i, p[mirrorIndex])
# Try to expand the palindrome centered at i.
while t[i + 1 + p[i]] == t[i - 1 - p[i]]:
p[i] += 1
# If a palindrome centered at i expands past `rightBoundary`, adjust
# the center based on the expanded palindrome.
if i + p[i] > rightBoundary:
center = i
return p
| Solution |
python | tiangolo__fastapi | fastapi/openapi/models.py | {
"start": 12920,
"end": 13030
} | class ____(HTTPBase):
scheme: Literal["bearer"] = "bearer"
bearerFormat: Optional[str] = None
| HTTPBearer |
python | rapidsai__cudf | python/cudf/cudf/core/udf/strings_typing.py | {
"start": 6823,
"end": 8222
} | class ____(AttributeTemplate):
key = string_view
def resolve_count(self, mod):
return types.BoundFunction(StringViewCount, string_view)
def resolve_replace(self, mod):
return types.BoundFunction(StringViewReplace, string_view)
bool_binary_funcs = ["startswith", "endswith"]
int_binary_funcs = ["find", "rfind"]
id_unary_funcs = [
"isalpha",
"isalnum",
"isdecimal",
"isdigit",
"isupper",
"islower",
"isspace",
"isnumeric",
"istitle",
]
string_unary_funcs = ["upper", "lower"]
string_return_attrs = ["strip", "lstrip", "rstrip"]
for func in bool_binary_funcs:
setattr(
StringViewAttrs,
f"resolve_{func}",
create_binary_attr(func, types.boolean),
)
for func in string_return_attrs:
setattr(
StringViewAttrs,
f"resolve_{func}",
create_binary_attr(func, managed_udf_string),
)
for func in int_binary_funcs:
setattr(
StringViewAttrs, f"resolve_{func}", create_binary_attr(func, size_type)
)
for func in id_unary_funcs:
setattr(
StringViewAttrs,
f"resolve_{func}",
create_identifier_attr(func, types.boolean),
)
for func in string_unary_funcs:
setattr(
StringViewAttrs,
f"resolve_{func}",
create_identifier_attr(func, managed_udf_string),
)
@cuda_decl_registry.register_attr
| StringViewAttrs |
python | huggingface__transformers | tests/models/xlm/test_modeling_xlm.py | {
"start": 17875,
"end": 19024
} | class ____(unittest.TestCase):
@slow
def test_lm_generate_xlm_mlm_en_2048(self):
model = XLMWithLMHeadModel.from_pretrained("FacebookAI/xlm-mlm-en-2048")
model.to(torch_device)
input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device) # the president
expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
output_ids = model.generate(input_ids, do_sample=False)
self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
| XLMModelLanguageGenerationTest |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 119193,
"end": 130245
} | class ____(_PrintableStructure):
_fields_ = [
('version', c_uint),
('str', c_char * NVML_PERF_MODES_BUFFER_SIZE),
]
nvmlDeviceCurrentClockFreqs_v1 = 0x1000804
@convertStrBytes
def nvmlDeviceGetCurrentClockFreqs(handle):
currentClockFreqs = c_nvmlDeviceCurrentClockFreqs_v1_t()
currentClockFreqs.version = nvmlDeviceCurrentClockFreqs_v1
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCurrentClockFreqs")
ret = fn(handle, byref(currentClockFreqs))
_nvmlCheckReturn(ret)
return currentClockFreqs.str
def nvmlDeviceGetBoardId(handle):
c_id = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardId")
ret = fn(handle, byref(c_id))
_nvmlCheckReturn(ret)
return c_id.value
def nvmlDeviceGetMultiGpuBoard(handle):
c_multiGpu = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMultiGpuBoard")
ret = fn(handle, byref(c_multiGpu))
_nvmlCheckReturn(ret)
return c_multiGpu.value
def nvmlDeviceGetBrand(handle):
c_type = _nvmlBrandType_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBrand")
ret = fn(handle, byref(c_type))
_nvmlCheckReturn(ret)
return c_type.value
def nvmlDeviceGetC2cModeInfoV1(handle):
c_info = c_nvmlC2cModeInfo_v1_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetC2cModeInfoV")
ret = fn(handle, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlDeviceGetC2cModeInfoV(handle):
return nvmlDeviceGetC2cModeInfoV1(handle)
@convertStrBytes
def nvmlDeviceGetBoardPartNumber(handle):
c_part_number = create_string_buffer(NVML_DEVICE_PART_NUMBER_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetBoardPartNumber")
ret = fn(handle, c_part_number, c_uint(NVML_DEVICE_PART_NUMBER_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_part_number.value
@convertStrBytes
def nvmlDeviceGetSerial(handle):
c_serial = create_string_buffer(NVML_DEVICE_SERIAL_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSerial")
ret = fn(handle, c_serial, c_uint(NVML_DEVICE_SERIAL_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_serial.value
def nvmlDeviceGetModuleId(handle, moduleId=c_uint()):
isReference = type(moduleId) is not c_uint
moduleIdRef = moduleId if isReference else byref(moduleId)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetModuleId")
ret = fn(handle, moduleIdRef)
if isReference:
return ret
else:
_nvmlCheckReturn(ret)
return moduleId.value
def nvmlDeviceGetMemoryAffinity(handle, nodeSetSize, scope):
affinity_array = c_ulonglong * nodeSetSize
c_affinity = affinity_array()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMemoryAffinity")
ret = fn(handle, nodeSetSize, byref(c_affinity), _nvmlAffinityScope_t(scope))
_nvmlCheckReturn(ret)
return c_affinity
def nvmlDeviceGetCpuAffinityWithinScope(handle, cpuSetSize, scope):
affinity_array = c_ulonglong * cpuSetSize
c_affinity = affinity_array()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCpuAffinityWithinScope")
ret = fn(handle, cpuSetSize, byref(c_affinity), _nvmlAffinityScope_t(scope))
_nvmlCheckReturn(ret)
return c_affinity
def nvmlDeviceGetCpuAffinity(handle, cpuSetSize):
affinity_array = c_ulonglong * cpuSetSize
c_affinity = affinity_array()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetCpuAffinity")
ret = fn(handle, cpuSetSize, byref(c_affinity))
_nvmlCheckReturn(ret)
return c_affinity
def nvmlDeviceSetCpuAffinity(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceSetCpuAffinity")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceClearCpuAffinity(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceClearCpuAffinity")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceGetNumaNodeId(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetNumaNodeId")
node = c_int()
ret = fn(handle, byref(node))
_nvmlCheckReturn(ret)
return node.value
def nvmlDeviceGetAddressingMode(device):
c_mode = c_nvmlDeviceAddressingMode_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetAddressingMode")
ret = fn(device, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceGetMinorNumber(handle):
c_minor_number = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMinorNumber")
ret = fn(handle, byref(c_minor_number))
_nvmlCheckReturn(ret)
return c_minor_number.value
@convertStrBytes
def nvmlDeviceGetUUID(handle):
c_uuid = create_string_buffer(NVML_DEVICE_UUID_V2_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetUUID")
ret = fn(handle, c_uuid, c_uint(NVML_DEVICE_UUID_V2_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_uuid.value
@convertStrBytes
def nvmlDeviceGetInforomVersion(handle, infoRomObject):
c_version = create_string_buffer(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomVersion")
ret = fn(handle, _nvmlInforomObject_t(infoRomObject),
c_version, c_uint(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 4.304
@convertStrBytes
def nvmlDeviceGetInforomImageVersion(handle):
c_version = create_string_buffer(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomImageVersion")
ret = fn(handle, c_version, c_uint(NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE))
_nvmlCheckReturn(ret)
return c_version.value
# Added in 4.304
def nvmlDeviceGetInforomConfigurationChecksum(handle):
c_checksum = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetInforomConfigurationChecksum")
ret = fn(handle, byref(c_checksum))
_nvmlCheckReturn(ret)
return c_checksum.value
# Added in 4.304
def nvmlDeviceValidateInforom(handle):
fn = _nvmlGetFunctionPointer("nvmlDeviceValidateInforom")
ret = fn(handle)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceGetLastBBXFlushTime(handle):
c_timestamp = c_ulonglong()
c_durationUs = c_ulong()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetLastBBXFlushTime")
ret = fn(handle, byref(c_timestamp), byref(c_durationUs))
_nvmlCheckReturn(ret)
return [c_timestamp.value, c_durationUs.value]
def nvmlDeviceGetDisplayMode(handle):
c_mode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDisplayMode")
ret = fn(handle, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceGetDisplayActive(handle):
c_mode = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDisplayActive")
ret = fn(handle, byref(c_mode))
_nvmlCheckReturn(ret)
return c_mode.value
def nvmlDeviceGetPersistenceMode(handle):
c_state = _nvmlEnableState_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPersistenceMode")
ret = fn(handle, byref(c_state))
_nvmlCheckReturn(ret)
return c_state.value
def nvmlDeviceGetPciInfoExt(handle, c_info):
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPciInfoExt")
ret = fn(handle, c_info)
_nvmlCheckReturn(ret)
return None
def nvmlDeviceGetPciInfo_v3(handle):
c_info = nvmlPciInfo_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetPciInfo_v3")
ret = fn(handle, byref(c_info))
_nvmlCheckReturn(ret)
return c_info
def nvmlDeviceGetPciInfo(handle):
return nvmlDeviceGetPciInfo_v3(handle)
def nvmlDeviceGetClockInfo(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetClockInfo")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 2.285
def nvmlDeviceGetMaxClockInfo(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxClockInfo")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 4.304
# Deprecated
def nvmlDeviceGetApplicationsClock(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetApplicationsClock")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
def nvmlDeviceGetMaxCustomerBoostClock(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetMaxCustomerBoostClock")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
def nvmlDeviceGetClock(handle, type, id):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetClock")
ret = fn(handle, _nvmlClockType_t(type), _nvmlClockId_t(id), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 5.319
# Deprecated
def nvmlDeviceGetDefaultApplicationsClock(handle, type):
c_clock = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetDefaultApplicationsClock")
ret = fn(handle, _nvmlClockType_t(type), byref(c_clock))
_nvmlCheckReturn(ret)
return c_clock.value
# Added in 4.304
def nvmlDeviceGetSupportedMemoryClocks(handle):
# first call to get the size
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedMemoryClocks")
ret = fn(handle, byref(c_count), None)
if (ret == NVML_SUCCESS):
# special case, no clocks
return []
elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
# typical case
clocks_array = c_uint * c_count.value
c_clocks = clocks_array()
# make the call again
ret = fn(handle, byref(c_count), c_clocks)
_nvmlCheckReturn(ret)
procs = []
for i in range(c_count.value):
procs.append(c_clocks[i])
return procs
else:
# error case
raise NVMLError(ret)
# Added in 4.304
def nvmlDeviceGetSupportedGraphicsClocks(handle, memoryClockMHz):
# first call to get the size
c_count = c_uint(0)
fn = _nvmlGetFunctionPointer("nvmlDeviceGetSupportedGraphicsClocks")
ret = fn(handle, c_uint(memoryClockMHz), byref(c_count), None)
if (ret == NVML_SUCCESS):
# special case, no clocks
return []
elif (ret == NVML_ERROR_INSUFFICIENT_SIZE):
# typical case
clocks_array = c_uint * c_count.value
c_clocks = clocks_array()
# make the call again
ret = fn(handle, c_uint(memoryClockMHz), byref(c_count), c_clocks)
_nvmlCheckReturn(ret)
procs = []
for i in range(c_count.value):
procs.append(c_clocks[i])
return procs
else:
# error case
raise NVMLError(ret)
def nvmlDeviceGetFanSpeed(handle):
c_speed = c_uint()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetFanSpeed")
ret = fn(handle, byref(c_speed))
_nvmlCheckReturn(ret)
return c_speed.value
def nvmlDeviceGetFanSpeed_v2(handle, fan):
    """Return the speed NVML reports for fan index ``fan`` on ``handle``."""
    speed = c_uint()
    func = _nvmlGetFunctionPointer("nvmlDeviceGetFanSpeed_v2")
    _nvmlCheckReturn(func(handle, fan, byref(speed)))
    return speed.value
| c_nvmlDeviceCurrentClockFreqs_v1_t |
python | huggingface__transformers | src/transformers/models/wav2vec2_bert/modeling_wav2vec2_bert.py | {
"start": 6087,
"end": 7141
} | class ____(nn.Module):
def __init__(self, config, act_fn=None, hidden_size=None):
super().__init__()
act_fn = act_fn if act_fn is not None else config.hidden_act
hidden_size = hidden_size if hidden_size is not None else config.hidden_size
self.intermediate_dropout = nn.Dropout(config.activation_dropout)
self.intermediate_dense = nn.Linear(hidden_size, config.intermediate_size)
self.intermediate_act_fn = ACT2FN[act_fn] if isinstance(act_fn, str) else act_fn
self.output_dense = nn.Linear(config.intermediate_size, hidden_size)
self.output_dropout = nn.Dropout(config.hidden_dropout)
def forward(self, hidden_states):
hidden_states = self.intermediate_dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
hidden_states = self.intermediate_dropout(hidden_states)
hidden_states = self.output_dense(hidden_states)
hidden_states = self.output_dropout(hidden_states)
return hidden_states
| Wav2Vec2BertFeedForward |
python | sqlalchemy__sqlalchemy | test/dialect/mysql/test_reflection.py | {
"start": 1387,
"end": 8798
} | class ____(fixtures.TestBase):
__only_on__ = "mysql", "mariadb"
__backend__ = True
def _run_test(self, metadata, connection, specs, attributes):
columns = [Column("c%i" % (i + 1), t[0]) for i, t in enumerate(specs)]
# Early 5.0 releases seem to report more "general" for columns
# in a view, e.g. char -> varchar, tinyblob -> mediumblob
use_views = testing.db.dialect.server_version_info > (5, 0, 10)
m = metadata
Table("mysql_types", m, *columns)
if use_views:
event.listen(
m,
"after_create",
DDL(
"CREATE OR REPLACE VIEW mysql_types_v "
"AS SELECT * from mysql_types"
),
)
event.listen(
m, "before_drop", DDL("DROP VIEW IF EXISTS mysql_types_v")
)
m.create_all(connection)
m2 = MetaData()
tables = [Table("mysql_types", m2, autoload_with=connection)]
if use_views:
tables.append(Table("mysql_types_v", m2, autoload_with=connection))
for table in tables:
for i, (reflected_col, spec) in enumerate(zip(table.c, specs)):
expected_spec = spec[1]
reflected_type = reflected_col.type
is_(type(reflected_type), type(expected_spec))
for attr in attributes:
eq_(
getattr(reflected_type, attr),
getattr(expected_spec, attr),
"Column %s: Attribute %s value of %s does not "
"match %s for type %s"
% (
"c%i" % (i + 1),
attr,
getattr(reflected_type, attr),
getattr(expected_spec, attr),
spec[0],
),
)
def test_time_types(self, metadata, connection):
specs = []
if testing.requires.mysql_fsp.enabled:
fsps = [None, 0, 5]
else:
fsps = [None]
for type_ in (mysql.TIMESTAMP, mysql.DATETIME, mysql.TIME):
# MySQL defaults fsp to 0, and if 0 does not report it.
# we don't actually render 0 right now in DDL but even if we do,
# it comes back blank
for fsp in fsps:
if fsp:
specs.append((type_(fsp=fsp), type_(fsp=fsp)))
else:
specs.append((type_(), type_()))
specs.extend(
[(TIMESTAMP(), mysql.TIMESTAMP()), (DateTime(), mysql.DATETIME())]
)
# note 'timezone' should always be None on both
self._run_test(metadata, connection, specs, ["fsp", "timezone"])
def test_year_types(self, metadata, connection):
specs = [
(mysql.YEAR(), mysql.YEAR(display_width=4)),
(mysql.YEAR(display_width=4), mysql.YEAR(display_width=4)),
]
if testing.against("mysql>=8.0.19"):
self._run_test(metadata, connection, specs, [])
else:
self._run_test(metadata, connection, specs, ["display_width"])
def test_string_types(
self,
metadata,
connection,
):
specs = [
(String(1), mysql.MSString(1)),
(String(3), mysql.MSString(3)),
(Text(), mysql.MSText()),
(Unicode(1), mysql.MSString(1)),
(Unicode(3), mysql.MSString(3)),
(UnicodeText(), mysql.MSText()),
(mysql.MSChar(1), mysql.MSChar(1)),
(mysql.MSChar(3), mysql.MSChar(3)),
(NCHAR(2), mysql.MSChar(2)),
(mysql.MSNChar(2), mysql.MSChar(2)),
(mysql.MSNVarChar(22), mysql.MSString(22)),
]
self._run_test(metadata, connection, specs, ["length"])
def test_integer_types(self, metadata, connection):
specs = []
for type_ in [
mysql.TINYINT,
mysql.SMALLINT,
mysql.MEDIUMINT,
mysql.INTEGER,
mysql.BIGINT,
]:
for display_width in [None, 4, 7]:
for unsigned in [False, True]:
for zerofill in [None, True]:
kw = {}
if display_width:
kw["display_width"] = display_width
if unsigned is not None:
kw["unsigned"] = unsigned
if zerofill is not None:
kw["zerofill"] = zerofill
zerofill = bool(zerofill)
source_type = type_(**kw)
if display_width is None:
display_width = {
mysql.MEDIUMINT: 9,
mysql.SMALLINT: 6,
mysql.TINYINT: 4,
mysql.INTEGER: 11,
mysql.BIGINT: 20,
}[type_]
if zerofill:
unsigned = True
expected_type = type_(
display_width=display_width,
unsigned=unsigned,
zerofill=zerofill,
)
specs.append((source_type, expected_type))
specs.extend(
[
(SmallInteger(), mysql.SMALLINT(display_width=6)),
(Integer(), mysql.INTEGER(display_width=11)),
(BigInteger, mysql.BIGINT(display_width=20)),
]
)
# TODO: mysql 8.0.19-ish doesn't consistently report
# on display_width. need to test this more accurately though
# for the cases where it does
if testing.against("mysql >= 8.0.19"):
self._run_test(
metadata, connection, specs, ["unsigned", "zerofill"]
)
else:
self._run_test(
metadata,
connection,
specs,
["display_width", "unsigned", "zerofill"],
)
def test_binary_types(
self,
metadata,
connection,
):
specs = [
(LargeBinary(3), mysql.TINYBLOB()),
(LargeBinary(), mysql.BLOB()),
(mysql.MSBinary(3), mysql.MSBinary(3)),
(mysql.MSVarBinary(3), mysql.MSVarBinary(3)),
(mysql.MSTinyBlob(), mysql.MSTinyBlob()),
(mysql.MSBlob(), mysql.MSBlob()),
(mysql.MSBlob(1234), mysql.MSBlob()),
(mysql.MSMediumBlob(), mysql.MSMediumBlob()),
(mysql.MSLongBlob(), mysql.MSLongBlob()),
]
self._run_test(metadata, connection, specs, [])
def test_legacy_enum_types(
self,
metadata,
connection,
):
specs = [(mysql.ENUM("", "fleem"), mysql.ENUM("", "fleem"))]
self._run_test(metadata, connection, specs, ["enums"])
@testing.only_on("mariadb>=10.7")
def test_uuid(self, metadata, connection):
specs = [
(mysql.UUID(), mysql.UUID()),
]
self._run_test(metadata, connection, specs, [])
| TypeReflectionTest |
python | joke2k__faker | tests/providers/test_address.py | {
"start": 93806,
"end": 95678
} | class ____:
"""Test id_ID address provider methods"""
def test_street(self, faker, num_samples):
for _ in range(num_samples):
street = faker.street()
assert isinstance(street, str)
assert street in IdIdAddressProvider.streets
def test_street_prefix_short(self, faker, num_samples):
for _ in range(num_samples):
street_prefix_short = faker.street_prefix_short()
assert isinstance(street_prefix_short, str)
assert street_prefix_short in IdIdAddressProvider.street_prefixes_short
def test_street_prefix_long(self, faker, num_samples):
for _ in range(num_samples):
street_prefix_long = faker.street_prefix_long()
assert isinstance(street_prefix_long, str)
assert street_prefix_long in IdIdAddressProvider.street_prefixes_long
def test_city_name(self, faker, num_samples):
for _ in range(num_samples):
city_name = faker.city_name()
assert isinstance(city_name, str)
assert city_name in IdIdAddressProvider.cities
def test_administrative_unit(self, faker, num_samples):
for _ in range(num_samples):
administrative_unit = faker.administrative_unit()
assert isinstance(administrative_unit, str)
assert administrative_unit in IdIdAddressProvider.states
def test_state_abbr(self, faker, num_samples):
for _ in range(num_samples):
state_abbr = faker.state_abbr()
assert isinstance(state_abbr, str)
assert state_abbr in IdIdAddressProvider.states_abbr
def test_country(self, faker, num_samples):
for _ in range(num_samples):
country = faker.country()
assert isinstance(country, str)
assert country in IdIdAddressProvider.countries
| TestIdId |
python | huggingface__transformers | tests/models/mamba2/test_modeling_mamba2.py | {
"start": 2480,
"end": 8645
} | class ____:
def __init__(
self,
parent,
batch_size=14,
num_heads=8,
n_groups=8,
state_size=2,
head_dim=8,
conv_kernel=4,
chunk_size=8,
seq_length=7,
is_training=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
num_hidden_layers=2,
hidden_act="silu",
hidden_dropout_prob=0.1,
max_position_embeddings=512,
type_vocab_size=16,
type_sequence_label_size=2,
num_labels=3,
num_choices=4,
scope=None,
tie_word_embeddings=False,
):
self.parent = parent
self.num_heads = num_heads
self.n_groups = n_groups
self.head_dim = head_dim
self.state_size = state_size
self.conv_kernel = conv_kernel
self.chunk_size = chunk_size
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.type_vocab_size = type_vocab_size
self.type_sequence_label_size = type_sequence_label_size
self.num_labels = num_labels
self.num_choices = num_choices
self.scope = scope
self.bos_token_id = vocab_size - 1
self.eos_token_id = vocab_size - 1
self.pad_token_id = vocab_size - 1
self.tie_word_embeddings = tie_word_embeddings
def prepare_config_and_inputs(
self, gradient_checkpointing=False, scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False
):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
# Only left padding is valid
attention_mask = torch.ones(size=(self.batch_size, self.seq_length), device=input_ids.device, dtype=torch.long)
attention_mask[0, :1] = 0
sequence_labels = None
token_labels = None
choice_labels = None
if self.use_labels:
sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
choice_labels = ids_tensor([self.batch_size], self.num_choices)
config = self.get_config(
gradient_checkpointing=gradient_checkpointing,
)
return (
config,
input_ids,
attention_mask,
sequence_labels,
token_labels,
choice_labels,
)
def get_config(self, gradient_checkpointing=False):
return Mamba2Config(
head_dim=self.head_dim,
num_heads=self.num_heads,
n_groups=self.n_groups,
state_size=self.state_size,
conv_kernel=self.conv_kernel,
chunk_size=self.chunk_size,
vocab_size=self.vocab_size,
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
activation_function=self.hidden_act,
n_positions=self.max_position_embeddings,
type_vocab_size=self.type_vocab_size,
use_cache=True,
bos_token_id=self.bos_token_id,
eos_token_id=self.eos_token_id,
pad_token_id=self.pad_token_id,
gradient_checkpointing=gradient_checkpointing,
tie_word_embeddings=self.tie_word_embeddings,
)
def prepare_config_and_inputs_for_common(self):
(
config,
input_ids,
_,
sequence_labels,
token_labels,
choice_labels,
) = self.prepare_config_and_inputs()
inputs_dict = {"input_ids": input_ids}
return config, inputs_dict
def create_and_check_mamba2_caching(self, config, input_ids, attention_mask, *args):
model = Mamba2Model(config=config)
model.to(torch_device)
model.eval()
output_whole = model(input_ids, attention_mask=attention_mask).last_hidden_state
outputs = model(
input_ids[:, :-1],
attention_mask=attention_mask[:, :-1],
use_cache=True,
cache_position=torch.arange(0, config.conv_kernel, device=input_ids.device),
)
output_one = outputs.last_hidden_state
# Using the state computed on the first inputs, we will get the same output
outputs = model(
input_ids[:, -1:],
attention_mask=attention_mask[:, -1:],
use_cache=True,
cache_params=outputs.cache_params,
cache_position=torch.arange(config.conv_kernel, config.conv_kernel + 1, device=input_ids.device),
)
output_two = outputs.last_hidden_state
self.parent.assertTrue(
torch.allclose(torch.cat([output_one, output_two], dim=1), output_whole, atol=1e-3, rtol=1e-3)
)
def create_and_check_mamba2_slow_vs_fast_forward(self, config, input_ids, *args, gradient_checkpointing=False):
model = Mamba2Model(config)
model.eval()
if not (is_mamba_2_ssm_available() and is_causal_conv1d_available()):
self.parent.skipTest(
"This test needs the Mamba2 fast path. Skipping as the necessary packages have not been found."
)
if torch_device != "cuda":
self.parent.skipTest("This test needs the Mamba2 fast path. Skipping as we need a cuda capable device.")
model.to(torch_device)
if gradient_checkpointing:
model.gradient_checkpointing_enable()
token_emb = model.embeddings(input_ids)
outputs_fast = model.layers[0].mixer.cuda_kernels_forward(token_emb)
outputs_slow = model.layers[0].mixer.torch_forward(token_emb)
self.parent.assertTrue(torch.allclose(outputs_fast, outputs_slow, atol=1e-3, rtol=1e-3))
@require_torch
| Mamba2ModelTester |
python | doocs__leetcode | lcci/17.04.Missing Number/Solution.py | {
"start": 0,
"end": 199
} | class ____:
def missingNumber(self, nums: List[int]) -> int:
nums.sort()
for i, x in enumerate(nums):
if i != x:
return i
return len(nums)
| Solution |
python | django-mptt__django-mptt | tests/myapp/tests.py | {
"start": 102696,
"end": 103019
} | class ____(TestCase):
def test_get_user_field_names_with_not_concrete_fields(self):
"""Make sure _get_user_field_names only returns concrete fields"""
instance = NotConcreteFieldModel()
field_names = instance._get_user_field_names()
self.assertEqual(field_names, ["parent"])
| ModelMetaTests |
python | google__jax | jax/experimental/source_mapper/common.py | {
"start": 1126,
"end": 1275
} | class ____(Protocol):
def __call__(self, compile_result: Any, **kwargs) -> SourceMapDump:
...
@dataclasses.dataclass(frozen=True)
| GenerateDumpFn |
python | tensorflow__tensorflow | tensorflow/python/tpu/device_assignment.py | {
"start": 2560,
"end": 21704
} | class ____(object):
"""Mapping from logical cores in a computation to the physical TPU topology.
Prefer to use the `DeviceAssignment.build()` helper to construct a
`DeviceAssignment`; it is easier if less flexible than constructing a
`DeviceAssignment` directly.
"""
def __init__(self, topology: Topology, core_assignment: np.ndarray):
"""Constructs a `DeviceAssignment` object.
Args:
topology: A `Topology` object that describes the physical TPU topology.
core_assignment: A logical to physical core mapping, represented as a
rank 3 numpy array. See the description of the `core_assignment`
property for more details.
Raises:
ValueError: If `topology` is not `Topology` object.
ValueError: If `core_assignment` is not a rank 3 numpy array.
"""
if not isinstance(topology, Topology):
raise ValueError("topology must be a Topology object, got {}".format(
type(topology)))
core_assignment = numpy_compat.np_asarray(core_assignment, dtype=np.int32)
self._topology = topology
if core_assignment.ndim != 3:
raise ValueError("core_assignment must be a rank 3 numpy array, "
f"got shape {core_assignment.shape}")
self._num_replicas = core_assignment.shape[0]
self._num_cores_per_replica = core_assignment.shape[1]
if core_assignment.shape[-1] != topology.mesh_rank:
raise ValueError(
"core_assignment.shape[-1] must have size equal to topology "
f"rank ({topology.mesh_rank}), got "
f"core_assignment.shape={core_assignment.shape}")
self._core_assignment = core_assignment
self._task_and_cores_to_replicas = _compute_task_and_cores_to_replicas(
self._core_assignment, topology)
@property
def topology(self) -> Topology:
"""A `Topology` that describes the TPU topology."""
return self._topology
@property
def num_cores_per_replica(self) -> int:
"""The number of cores per replica."""
return self._num_cores_per_replica
@property
def num_replicas(self) -> int:
"""The number of replicas of the computation."""
return self._num_replicas
@property
def core_assignment(self) -> np.ndarray:
"""The logical to physical core mapping.
Returns:
An integer numpy array of rank 3, with shape
`[num_replicas, num_cores_per_replica, topology_rank]`. Maps
(replica, logical core) pairs to physical topology coordinates.
"""
return self._core_assignment
def coordinates(self, replica: int, logical_core: int) -> Tuple: # pylint:disable=g-bare-generic
"""Returns the physical topology coordinates of a logical core."""
return tuple(self.core_assignment[replica, logical_core, :])
def lookup_replicas(self, task_id: int, logical_core: int) -> List[int]:
"""Lookup replica ids by task number and logical core.
Args:
task_id: TensorFlow task number.
logical_core: An integer, identifying a logical core.
Returns:
A sorted list of the replicas that are attached to that task and
logical_core.
Raises:
ValueError: If no replica exists in the task which contains the logical
core.
"""
try:
return self._task_and_cores_to_replicas[task_id][logical_core]
except KeyError:
raise ValueError(
"Can not find any replica in task: {} contains logical_core: {} ".
format(task_id, logical_core))
def tpu_ordinal(self, replica: int = 0, logical_core: int = 0) -> int:
"""Returns the ordinal of the TPU device assigned to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.tpu_device_ordinal_at_coordinates(coordinates)
def host_device(self,
replica: int = 0,
logical_core: int = 0,
job: Optional[str] = None) -> str:
"""Returns the CPU device attached to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.cpu_device_name_at_coordinates(coordinates, job=job)
def tpu_device(self,
replica: int = 0,
logical_core: int = 0,
job: Optional[str] = None) -> str:
"""Returns the name of the TPU device assigned to a logical core."""
coordinates = self.coordinates(replica, logical_core)
return self._topology.tpu_device_name_at_coordinates(coordinates, job=job)
@classmethod
def build(
cls,
topology: Topology,
computation_shape: Optional[np.ndarray] = None,
computation_stride: Optional[np.ndarray] = None,
num_replicas: int = 1,
device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO,
) -> "DeviceAssignment":
return device_assignment(
topology=topology,
computation_shape=computation_shape,
computation_stride=computation_stride,
num_replicas=num_replicas,
device_order_mode=device_order_mode,
)
def _open_ring_2d(x_size: int, y_size: int,
z_coord: int) -> List[Tuple[int, int, int]]:
"""Ring-order of a X by Y mesh, with a fixed Z coordinate.
For example, in a 4x4 mesh, this returns the following order.
0 -- 1 -- 2 -- 3
| | | |
15-- 6 -- 5 -- 4
| | | |
14-- 7 -- 8 -- 9
| | | |
13-- 12-- 11-- 10
Note that chip 0 is not included in the output.
Args:
x_size: An integer represents the mesh size in the x-dimension. Must be
larger than 1.
y_size: An integer represents the mesh size in the y-dimension. Must be
larger than 1.
z_coord: An integer represents the z-coordinate to use for the chips in the
ring.
Returns:
A list of (x,y,z) triples in ring order.
"""
ret = []
for i in range(y_size // 2):
for j in range(1, x_size):
ret.append((j, 2 * i, z_coord))
for j in range(x_size - 1, 0, -1):
ret.append((j, 2 * i + 1, z_coord))
for i in range(y_size - 1, 0, -1):
ret.append((0, i, z_coord))
return ret
def _ring_3d(x_size: int, y_size: int,
z_size: int) -> List[Tuple[int, int, int]]:
"""Ring-order of a X by Y by Z mesh.
Constructs the 3d ring from 2d rings that are stacked in the Z dimension and
joined in one corner.
z == 0:
0 -- 1 -- 2 -- 3
| | | |
15 - 6 -- 5 -- 4
| | | |
14 - 7 -- 8 -- 9
| | | |
13 - 12 - 11 - 10
z == 1:
63 - 30 - 29 - 28
| | | |
16 - 25 - 26 - 27
| | | |
17 - 24 - 23 - 22
| | | |
18 - 19 - 20 - 21
z == 2:
62 - 31 - 32 - 33
| | | |
45 - 36 - 35 - 34
| | | |
44 - 37 - 38 - 39
| | | |
43 - 42 - 41 - 40
z == 3:
61 - 60 - 59 - 58
| | | |
46 - 55 - 56 - 57
| | | |
47 - 54 - 53 - 52
| | | |
48 - 49 - 50 - 51
Args:
x_size: An integer represents the mesh size in the x-dimension. Must be
larger than 1.
y_size: An integer represents the mesh size in the y-dimension. Must be
larger than 1.
z_size: An integer represents the mesh size in the z-dimension. Must be
larger than 1. For example, in a 4x4x4 mesh, this returns the following
order.
Returns:
A list of (x,y,z) triples in ring order.
"""
# Handle the case where 2 dimensions are size 1.
if x_size == 1 and y_size == 1:
return [(0, 0, i) for i in range(z_size)]
if x_size == 1 and z_size == 1:
return [(0, i, 0) for i in range(y_size)]
if y_size == 1 and z_size == 1:
return [(i, 0, 0) for i in range(x_size)]
# Handle odd mesh dimensions. This never happens in practice, so we don't
# bother to try building something optimal.
if (x_size > 1 and x_size % 2 != 0) or (y_size > 1 and
y_size % 2 != 0) or (z_size > 1 and
z_size % 2 != 0):
logging.warning("Odd dimension")
ret = []
for z in range(z_size):
for y in range(y_size):
ret.extend((x, y, z) for x in range(x_size))
return ret
# Always start with chip 0.
ret = [(0, 0, 0)]
# Handle the case where one dimension is size 1. We just build a flat, 2d
# ring.
if z_size == 1:
ret.extend(_open_ring_2d(x_size, y_size, 0))
return ret
if y_size == 1:
ret = [(0, 0, 0)]
ret.extend((x, y, z) for (x, z, y) in _open_ring_2d(x_size, z_size, 0))
return ret
if x_size == 1:
ret = [(0, 0, 0)]
ret.extend((x, y, z) for (y, z, x) in _open_ring_2d(y_size, z_size, 0))
return ret
# Handle the case where all dimensions have size > 1 and even.
ret = [(0, 0, 0)]
for i in range(0, z_size):
r = _open_ring_2d(x_size, y_size, i)
if i % 2 == 0:
ret.extend(r)
else:
ret.extend(reversed(r))
for i in range(z_size - 1, 0, -1):
ret.append((0, 0, i))
return ret
def device_assignment(
topology: Topology,
computation_shape: Optional[np.ndarray] = None,
computation_stride: Optional[np.ndarray] = None,
num_replicas: int = 1,
device_order_mode: DeviceOrderMode = DeviceOrderMode.AUTO,
) -> DeviceAssignment:
"""Computes a device_assignment of a computation across a TPU topology.
Attempts to choose a compact grid of cores for locality.
Returns a `DeviceAssignment` that describes the cores in the topology assigned
to each core of each replica.
`computation_shape` and `computation_stride` values should be powers of 2 for
optimal packing.
Args:
topology: A `Topology` object that describes the TPU cluster topology. To
obtain a TPU topology, evaluate the `Tensor` returned by
`initialize_system` using `Session.run`. Either a serialized
`TopologyProto` or a `Topology` object may be passed. Note: you must
evaluate the `Tensor` first; you cannot pass an unevaluated `Tensor`
here.
computation_shape: A rank 1 int32 numpy array with size equal to the
topology rank, describing the shape of the computation's block of cores.
If None, the `computation_shape` is `[1] * topology_rank`.
computation_stride: A rank 1 int32 numpy array of size `topology_rank`,
describing the inter-core spacing of the `computation_shape` cores in the
TPU topology. If None, the `computation_stride` is `[1] * topology_rank`.
num_replicas: The number of computation replicas to run. The replicas will
be packed into the free spaces of the topology.
device_order_mode: An enum of `DeviceOrderMode` class which indicates
whether to assign devices to form rings or meshes, or let the library to
choose.
Returns:
A DeviceAssignment object, which describes the mapping between the logical
cores in each computation replica and the physical cores in the TPU
topology.
Raises:
ValueError: If `topology` is not a valid `Topology` object.
ValueError: If `computation_shape` or `computation_stride` are not 1D int32
numpy arrays with shape [3] where all values are positive.
ValueError: If computation's replicas cannot fit into the TPU topology.
"""
# Deserialize the Topology proto, if it is a string.
if isinstance(topology, bytes):
topology = Topology(serialized=topology)
if not isinstance(topology, Topology):
raise ValueError(
f"`topology` is not a Topology object; got {type(topology)}"
)
topology_rank = len(topology.mesh_shape)
mesh_shape = topology.mesh_shape
if computation_shape is None:
computation_shape = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_shape = numpy_compat.np_asarray(
computation_shape, dtype=np.int32
)
if computation_stride is None:
computation_stride = np.array([1] * topology_rank, dtype=np.int32)
else:
computation_stride = numpy_compat.np_asarray(
computation_stride, dtype=np.int32
)
if computation_shape.shape != (topology_rank,):
raise ValueError(
f"computation_shape must have shape [{topology_rank}]; "
f"got {computation_shape.shape}"
)
if computation_stride.shape != (topology_rank,):
raise ValueError(
f"computation_stride must have shape [{topology_rank}]; "
f"got {computation_stride.shape}"
)
if any(computation_shape < 1):
raise ValueError(
"computation_shape must be positive; got computation_shape={}".format(
computation_shape))
if any(computation_stride < 1):
raise ValueError(
"computation_stride must be positive; got computation_stride={}".format(
computation_stride))
# Computes the physical size of one computation instance.
computation_footprint = computation_shape * computation_stride
if any(computation_footprint > mesh_shape):
raise ValueError(
"computation footprint {} does not fit in TPU topology shape {}".format(
computation_footprint, mesh_shape))
# Computes how many copies of the computation footprint fit in the mesh.
block_counts = mesh_shape // computation_footprint
replica_counts = block_counts * computation_stride
max_replicas = np.prod(replica_counts)
if num_replicas > max_replicas:
raise ValueError(
"requested {} replicas but only {} replicas with shape {} and "
"computation_stride {} fit in a TPU mesh of shape {}".format(
num_replicas, max_replicas, computation_shape, computation_stride,
mesh_shape))
def ceil_of_ratio(n, m):
return (n + m - 1) // m
if topology.missing_devices.size == 0:
replica_shape = [0] * topology_rank
if num_replicas > 0:
remaining_replicas = num_replicas
remaining_dims = topology_rank
# Choose dimensions as close to an equal cube as possible,
# in order of increasing dimension size. By visiting dimensions
# in increasing size, we assign the most constrained dimension
# first, so we won't make infeasible choices.
#
# As a secondary sort order, visit the last dimension (core index) first,
# then the other dimensions in increasing order. This means we try to use
# both cores on the same chip in preference to two cores on different
# chips. We visit the x dimension first, and the z dimension last, so
# that we prefer to arrange adjacent replicas on the same machine when
# possible.
#
# For example, if num_replicas == 4, we prefer to use a replica_shape of
# (2,1,1,2) over (1,1,2,2).
for x, ni in sorted(((x, ((i + 1) % topology_rank))
for (i, x) in enumerate(replica_counts))):
i = (ni + topology_rank - 1) % topology_rank
target_size = int(math.ceil(remaining_replicas**(1.0 / remaining_dims)))
replica_shape[i] = min(target_size, x)
remaining_replicas = ceil_of_ratio(remaining_replicas, replica_shape[i])
remaining_dims -= 1
assert remaining_replicas == 1 and remaining_dims == 0
# Assigns an offset to each replica such that no two replicas overlap.
replica_offsets = np.full([num_replicas, topology_rank], -1, dtype=np.int32)
enable_3d_tiling = (
topology_rank == 4 and
computation_shape[-1] == mesh_shape[-1] # Only handle 3D case.
and np.prod(computation_stride) == 1 # Ensure no stride.
and num_replicas == max_replicas) # Full replication.
if device_order_mode != DeviceOrderMode.AUTO:
if device_order_mode == DeviceOrderMode.RING and not enable_3d_tiling:
raise ValueError(
"device_order_mode=DeviceOrderMode.RING is not compatible with the "
"3D tiling current topology. Try setting "
"device_order_mode=DeviceOrderMode.AUTO"
)
enable_3d_tiling = device_order_mode == DeviceOrderMode.RING
if enable_3d_tiling:
assignment = []
inner_ring = _ring_3d(computation_shape[0], computation_shape[1],
computation_shape[2])
outer_ring = _ring_3d(replica_shape[0], replica_shape[1],
replica_shape[2])
for replica in range(num_replicas):
outer_x, outer_y, outer_z = outer_ring[replica]
per_replica_assignment = []
for index in range(np.prod(computation_shape)):
inner_x, inner_y, inner_z = inner_ring[index // mesh_shape[-1]]
px = outer_x * computation_shape[0] + inner_x
py = outer_y * computation_shape[1] + inner_y
pz = outer_z * computation_shape[2] + inner_z
pi = index % mesh_shape[-1]
per_replica_assignment.append([px, py, pz, pi])
assignment.append(per_replica_assignment)
else:
for replica in range(num_replicas):
# Chooses a replica number in each axis.
t = replica
pos = []
# Visit the core number first.
for dim in np.concatenate([[replica_shape[-1]], replica_shape[:-1]]):
pos.append(t % dim)
t //= dim
replica_pos = np.concatenate([pos[1:], [pos[0]]])
# Determines where that replica starts in each axis.
outer = replica_pos // computation_stride
inner = replica_pos % computation_stride
replica_offsets[replica, :] = outer * computation_footprint + inner
# Computes a logical core -> physical core mapping for each replica.
indices = [
np.arange(0, computation_shape[i] * computation_stride[i],
computation_stride[i]) for i in range(topology_rank)
]
indices = np.concatenate(
[i[..., np.newaxis] for i in np.meshgrid(*indices, indexing="ij")],
axis=-1)
indices = indices.reshape((-1, topology_rank))
assignment = indices + replica_offsets[:, np.newaxis, :]
else:
# We have a slice with missing chips. We define a simple assignment by
# ignoring computation stride. This assignment should enable a consistent
# and correct device assignment on degraded slices. It is optimal when
# weights are not sharded. But this device assignment may be sub-optimal for
# other model parallelism scenarios.
assert np.prod(computation_stride) == 1
# Next, we check if we have sufficient devices.
assert num_replicas * np.prod(
computation_shape) <= topology.num_tasks * topology.num_tpus_per_task
# Map replicas to physical devices in task order.
device_coordinates = topology.device_coordinates
assignment = []
devices_per_replica = np.prod(computation_shape)
for rindex in range(num_replicas):
replica_assignment = []
for index in range(devices_per_replica):
logical_id = rindex * devices_per_replica + index
# Pick logical cores in task order
task = logical_id // topology.num_tpus_per_task
device = logical_id % topology.num_tpus_per_task
# Append physical cores to the replica assignment
replica_assignment.append(device_coordinates[task, device, :])
assignment.append(replica_assignment)
return DeviceAssignment(topology, core_assignment=assignment)
| DeviceAssignment |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock/llama_index/llms/bedrock/base.py | {
"start": 1371,
"end": 15034
} | class ____(LLM):
"""
Bedrock LLM.
Examples:
`pip install llama-index-llms-bedrock`
```python
from llama_index.llms.bedrock import Bedrock
llm = Bedrock(
model="amazon.titan-text-express-v1",
aws_access_key_id="AWS Access Key ID to use",
aws_secret_access_key="AWS Secret Access Key to use",
aws_session_token="AWS Session Token to use",
region_name="AWS Region to use, eg. us-east-1",
)
resp = llm.complete("Paul Graham is ")
print(resp)
```
"""
model: str = Field(description="The modelId of the Bedrock model to use.")
temperature: float = Field(description="The temperature to use for sampling.")
max_tokens: int = Field(description="The maximum number of tokens to generate.")
context_size: int = Field("The maximum number of tokens available for input.")
profile_name: Optional[str] = Field(
description="The name of aws profile to use. If not given, then the default profile is used."
)
aws_access_key_id: Optional[str] = Field(
description="AWS Access Key ID to use", exclude=True
)
aws_secret_access_key: Optional[str] = Field(
description="AWS Secret Access Key to use", exclude=True
)
aws_session_token: Optional[str] = Field(
description="AWS Session Token to use", exclude=True
)
region_name: Optional[str] = Field(
description="AWS region name to use. Uses region configured in AWS CLI if not passed",
exclude=True,
)
botocore_session: Optional[Any] = Field(
description="Use this Botocore session instead of creating a new default one.",
exclude=True,
)
botocore_config: Optional[Any] = Field(
description="Custom configuration object to use instead of the default generated one.",
exclude=True,
)
max_retries: int = Field(
default=10, description="The maximum number of API retries.", gt=0
)
timeout: float = Field(
default=60.0,
description="The timeout for the Bedrock API request in seconds. It will be used for both connect and read timeouts.",
)
guardrail_identifier: Optional[str] = (
Field(
description="The unique identifier of the guardrail that you want to use. If you don’t provide a value, no guardrail is applied to the invocation."
),
)
guardrail_version: Optional[str] = (
Field(
description="The version number for the guardrail. The value can also be DRAFT"
),
)
trace: Optional[str] = (
Field(
description="Specifies whether to enable or disable the Bedrock trace. If enabled, you can see the full Bedrock trace."
),
)
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict,
description="Additional kwargs for the bedrock invokeModel request.",
)
_client: Any = PrivateAttr()
_provider: Provider = PrivateAttr()
def __init__(
    self,
    model: str,
    temperature: Optional[float] = DEFAULT_TEMPERATURE,
    max_tokens: Optional[int] = 512,
    context_size: Optional[int] = None,
    profile_name: Optional[str] = None,
    aws_access_key_id: Optional[str] = None,
    aws_secret_access_key: Optional[str] = None,
    aws_session_token: Optional[str] = None,
    region_name: Optional[str] = None,
    botocore_session: Optional[Any] = None,
    client: Optional[Any] = None,
    timeout: Optional[float] = 60.0,
    max_retries: Optional[int] = 10,
    botocore_config: Optional[Any] = None,
    additional_kwargs: Optional[Dict[str, Any]] = None,
    callback_manager: Optional[CallbackManager] = None,
    system_prompt: Optional[str] = None,
    messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
    completion_to_prompt: Optional[Callable[[str], str]] = None,
    pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
    output_parser: Optional[BaseOutputParser] = None,
    guardrail_identifier: Optional[str] = None,
    guardrail_version: Optional[str] = None,
    trace: Optional[str] = None,
    provider_type: Optional[ProviderType] = None,
    **kwargs: Any,
) -> None:
    """Configure the boto3 client, provider adapter, and prompt formatters.

    :param model: Bedrock model id; for non-foundation (e.g. custom) models a
        ``context_size`` must be given explicitly.
    :raises ValueError: if ``context_size`` is omitted for an unknown model.
    :raises ImportError: if boto3 is not installed.
    """
    # Foundation models ship with a known context window; anything else must
    # declare one explicitly.
    if context_size is None and model not in BEDROCK_FOUNDATION_LLMS:
        raise ValueError(
            "`context_size` argument not provided and"
            " model provided refers to a non-foundation model."
            " Please specify the context_size"
        )
    session_kwargs = {
        "profile_name": profile_name,
        "region_name": region_name,
        "aws_access_key_id": aws_access_key_id,
        "aws_secret_access_key": aws_secret_access_key,
        "aws_session_token": aws_session_token,
        "botocore_session": botocore_session,
    }
    config = None
    try:
        import boto3
        from botocore.config import Config

        # Caller-supplied botocore config wins; otherwise build one carrying
        # the retry/timeout settings plus a client-framework marker.
        config = (
            Config(
                retries={"max_attempts": max_retries, "mode": "standard"},
                connect_timeout=timeout,
                read_timeout=timeout,
                user_agent_extra="x-client-framework:llama_index",
            )
            if botocore_config is None
            else botocore_config
        )
        session = boto3.Session(**session_kwargs)
    except ImportError:
        # BUG FIX: error message previously read "with'pip" (missing space).
        raise ImportError(
            "boto3 package not found, install with 'pip install boto3'"
        )
    additional_kwargs = additional_kwargs or {}
    callback_manager = callback_manager or CallbackManager([])
    context_size = context_size or BEDROCK_FOUNDATION_LLMS[model]
    super().__init__(
        model=model,
        temperature=temperature,
        max_tokens=max_tokens,
        context_size=context_size,
        profile_name=profile_name,
        timeout=timeout,
        max_retries=max_retries,
        botocore_config=config,
        additional_kwargs=additional_kwargs,
        aws_access_key_id=aws_access_key_id,
        aws_secret_access_key=aws_secret_access_key,
        aws_session_token=aws_session_token,
        region_name=region_name,
        botocore_session=botocore_session,
        callback_manager=callback_manager,
        system_prompt=system_prompt,
        messages_to_prompt=messages_to_prompt,
        completion_to_prompt=completion_to_prompt,
        pydantic_program_mode=pydantic_program_mode,
        output_parser=output_parser,
        guardrail_identifier=guardrail_identifier,
        guardrail_version=guardrail_version,
        trace=trace,
    )
    # Resolve the provider adapter either by explicit type or by model name.
    if provider_type is not None:
        self._provider = get_provider_by_type(provider_type)
    else:
        self._provider = get_provider(model)
    # Prefer caller-supplied prompt formatters, then provider defaults, then
    # whatever the base class configured.
    self.messages_to_prompt = (
        messages_to_prompt
        or self._provider.messages_to_prompt
        or self.messages_to_prompt
    )
    self.completion_to_prompt = (
        completion_to_prompt
        or self._provider.completion_to_prompt
        or self.completion_to_prompt
    )
    # Prior to general availability, custom boto3 wheel files were
    # distributed that used the bedrock service to invokeModel.
    # This check prevents any services still using those wheel files
    # from breaking
    if client is not None:
        self._client = client
    elif "bedrock-runtime" in session.get_available_services():
        self._client = session.client("bedrock-runtime", config=config)
    else:
        self._client = session.client("bedrock", config=config)
@classmethod
def class_name(cls) -> str:
    """Identifier used to register and serialize this LLM implementation."""
    name = "Bedrock_LLM"
    return name
@property
def metadata(self) -> LLMMetadata:
    """Describe the capabilities of the configured Bedrock model."""
    chat_only = self.model in CHAT_ONLY_MODELS
    return LLMMetadata(
        context_window=self.context_size,
        num_output=self.max_tokens,
        is_chat_model=chat_only,
        model_name=self.model,
    )
@property
def _model_kwargs(self) -> Dict[str, Any]:
    """Assemble the provider-specific inference parameters for one request."""
    kwargs: Dict[str, Any] = {
        "temperature": self.temperature,
        self._provider.max_tokens_key: self.max_tokens,
    }
    # Anthropic models take the system prompt as a dedicated request field.
    if type(self._provider) is AnthropicProvider and self.system_prompt:
        kwargs["system"] = self.system_prompt
    # Caller-configured extras take precedence over the base values.
    kwargs.update(self.additional_kwargs)
    return kwargs
def _get_all_kwargs(self, **kwargs: Any) -> Dict[str, Any]:
    """Merge per-call kwargs over the model-level defaults (per-call wins)."""
    merged = dict(self._model_kwargs)
    merged.update(kwargs)
    return merged
@llm_completion_callback()
def complete(
    self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
    """Run a single (non-streaming) completion against Bedrock.

    :param prompt: Prompt text; templated via ``completion_to_prompt`` unless
        ``formatted`` is True.
    :param formatted: Set True when the caller has already formatted the prompt.
    """
    if not formatted:
        prompt = self.completion_to_prompt(prompt)
    all_kwargs = self._get_all_kwargs(**kwargs)
    # Each provider has its own request-body schema; delegate construction.
    request_body = self._provider.get_request_body(prompt, all_kwargs)
    request_body_str = json.dumps(request_body)
    response = completion_with_retry(
        client=self._client,
        model=self.model,
        request_body=request_body_str,
        max_retries=self.max_retries,
        guardrail_identifier=self.guardrail_identifier,
        guardrail_version=self.guardrail_version,
        trace=self.trace,
        **all_kwargs,
    )
    response_body = response["body"].read()
    # Token usage is reported via HTTP response headers, not the JSON body.
    response_headers = response["ResponseMetadata"]["HTTPHeaders"]
    response_body = json.loads(response_body)
    return CompletionResponse(
        text=self._provider.get_text_from_response(response_body),
        raw=response_body,
        additional_kwargs=self._get_response_token_counts(response_headers),
    )
@llm_completion_callback()
def stream_complete(
    self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
    """Stream a completion from Bedrock, yielding cumulative text chunks."""
    # Not every foundation model exposes the streaming endpoint.
    if self.model in BEDROCK_FOUNDATION_LLMS and self.model not in STREAMING_MODELS:
        raise ValueError(f"Model {self.model} does not support streaming")
    if not formatted:
        prompt = self.completion_to_prompt(prompt)
    all_kwargs = self._get_all_kwargs(**kwargs)
    request_body = self._provider.get_request_body(prompt, all_kwargs)
    request_body_str = json.dumps(request_body)
    response = completion_with_retry(
        client=self._client,
        model=self.model,
        request_body=request_body_str,
        max_retries=self.max_retries,
        stream=True,
        guardrail_identifier=self.guardrail_identifier,
        guardrail_version=self.guardrail_version,
        trace=self.trace,
        **all_kwargs,
    )
    response_body = response["body"]
    response_headers = response["ResponseMetadata"]["HTTPHeaders"]

    def gen() -> CompletionResponseGen:
        # Accumulate deltas so every yielded response carries the full text
        # generated so far alongside the latest delta.
        content = ""
        for r in response_body:
            r = json.loads(r["chunk"]["bytes"])
            content_delta = self._provider.get_text_from_stream_response(r)
            content += content_delta
            yield CompletionResponse(
                text=content,
                delta=content_delta,
                raw=r,
                additional_kwargs=self._get_response_token_counts(response_headers),
            )

    return gen()
@llm_chat_callback()
def chat(self, messages: Sequence[ChatMessage], **kwargs: Any) -> ChatResponse:
    """Render the chat history into a prompt and run a completion."""
    rendered = self.messages_to_prompt(messages)
    response = self.complete(rendered, formatted=True, **kwargs)
    return completion_response_to_chat_response(response)
def stream_chat(
    self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
    """Render the chat history into a prompt and stream a completion."""
    rendered = self.messages_to_prompt(messages)
    streaming = self.stream_complete(rendered, formatted=True, **kwargs)
    return stream_completion_response_to_chat_response(streaming)
async def achat(
    self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
    """Chat asynchronously."""
    # TODO: do synchronous chat for now
    # NOTE(review): this blocks the event loop while the sync HTTP call runs.
    return self.chat(messages, **kwargs)
async def acomplete(
    self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
    """Async completion is not implemented for this integration."""
    raise NotImplementedError
async def astream_chat(
    self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
    """Async streaming chat is not implemented for this integration."""
    raise NotImplementedError
async def astream_complete(
    self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
    """Async streaming completion is not implemented for this integration."""
    raise NotImplementedError
def _get_response_token_counts(self, headers: Any) -> dict:
"""Get the token usage reported by the response."""
if not isinstance(headers, dict):
return {}
input_tokens = headers.get("x-amzn-bedrock-input-token-count", None)
output_tokens = headers.get("x-amzn-bedrock-output-token-count", None)
# NOTE: other model providers that use the OpenAI client may not report usage
if (input_tokens and output_tokens) is None:
return {}
return {"prompt_tokens": input_tokens, "completion_tokens": output_tokens}
| Bedrock |
python | apache__airflow | providers/dbt/cloud/src/airflow/providers/dbt/cloud/hooks/dbt.py | {
"start": 5702,
"end": 38454
} | class ____(HttpHook):
"""
Interact with dbt Cloud using the V2 (V3 if supported) API.
:param dbt_cloud_conn_id: The ID of the :ref:`dbt Cloud connection <howto/connection:dbt-cloud>`.
:param timeout_seconds: Optional. The timeout in seconds for HTTP requests. If not provided, no timeout is applied.
:param retry_limit: The number of times to retry a request in case of failure.
:param retry_delay: The delay in seconds between retries.
:param retry_args: A dictionary of arguments to pass to the `tenacity.retry` decorator.
"""
# Airflow connection-form wiring: which connection attribute names this hook
# uses and how the connection type is displayed in the UI.
conn_name_attr = "dbt_cloud_conn_id"
default_conn_name = "dbt_cloud_default"
conn_type = "dbt_cloud"
hook_name = "dbt Cloud"
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
    """Customize how the dbt Cloud connection form renders in the Airflow UI."""
    behaviour: dict[str, Any] = {"hidden_fields": ["schema", "port"]}
    behaviour["relabeling"] = {"login": "Account ID", "password": "API Token", "host": "Tenant"}
    behaviour["placeholders"] = {
        "host": "Defaults to 'cloud.getdbt.com'.",
        "extra": "Optional JSON-formatted extra.",
    }
    return behaviour
def __init__(
    self,
    dbt_cloud_conn_id: str = default_conn_name,
    timeout_seconds: int | None = None,
    retry_limit: int = 1,
    retry_delay: float = 1.0,
    retry_args: dict[Any, Any] | None = None,
) -> None:
    # Authenticate every request with the connection's API token.
    super().__init__(auth_type=TokenAuth)
    self.dbt_cloud_conn_id = dbt_cloud_conn_id
    self.timeout_seconds = timeout_seconds
    if retry_limit < 1:
        raise ValueError("Retry limit must be greater than or equal to 1")
    self.retry_limit = retry_limit
    self.retry_delay = retry_delay

    def retry_after_func(retry_state: RetryCallState) -> None:
        # tenacity "after" hook: log each failed attempt with its exception.
        error_msg = str(retry_state.outcome.exception()) if retry_state.outcome else "Unknown error"
        self._log_request_error(retry_state.attempt_number, error_msg)

    if retry_args:
        # Caller-supplied tenacity kwargs: shallow-copy so we never mutate the
        # caller's dict, then pin the predicate/logging/re-raise behaviour.
        self.retry_args = copy.copy(retry_args)
        self.retry_args["retry"] = retry_if_exception(self._retryable_error)
        self.retry_args["after"] = retry_after_func
        self.retry_args["reraise"] = True
    else:
        # Default policy: bounded attempts with exponential backoff.
        self.retry_args = {
            "stop": stop_after_attempt(self.retry_limit),
            "wait": wait_exponential(min=self.retry_delay, max=(2**retry_limit)),
            "retry": retry_if_exception(self._retryable_error),
            "after": retry_after_func,
            "reraise": True,
        }
@staticmethod
def _get_tenant_domain(conn: Connection) -> str:
    """Resolve the tenant domain, defaulting to dbt Cloud's public host."""
    host = conn.host
    return host if host else "cloud.getdbt.com"
@staticmethod
def _get_proxies(conn: Connection) -> dict[str, str] | None:
    """Read the optional ``proxies`` mapping from the connection extras."""
    extras = conn.extra_dejson
    return extras.get("proxies", None)
@staticmethod
def get_request_url_params(
    tenant: str, endpoint: str, include_related: list[str] | None = None, *, api_version: str = "v2"
) -> tuple[str, dict[str, Any]]:
    """
    Build the accounts API URL plus query parameters for a request.

    :param tenant: The tenant domain substituted into the base URL.
    :param endpoint: Endpoint path to append; may be empty.
    :param include_related: Optional. Related fields to pull with the run.
        Valid values are "trigger", "job", "repository", and "environment".
    :param api_version: API version segment of the URL (default ``v2``).
    """
    params: dict[str, Any] = {"include_related": include_related} if include_related else {}
    path = endpoint or ""
    url = f"https://{tenant}/api/{api_version}/accounts/{path}"
    return url, params
async def get_headers_tenants_from_connection(self) -> tuple[dict[str, Any], str]:
    """Get Headers, tenants from the connection details."""
    headers: dict[str, Any] = {}
    tenant = self._get_tenant_domain(self.connection)
    package_name, provider_version = _get_provider_info()
    # Identify the provider package/version to dbt Cloud via the User-Agent.
    headers["User-Agent"] = f"{package_name}-v{provider_version}"
    headers["Content-Type"] = "application/json"
    # The connection's password field carries the dbt Cloud API token.
    headers["Authorization"] = f"Token {self.connection.password}"
    return headers, tenant
def _log_request_error(self, attempt_num: int, error: str) -> None:
    # Invoked by the tenacity "after" hook on every failed request attempt.
    self.log.error("Attempt %s API Request to DBT failed with reason: %s", attempt_num, error)
@staticmethod
def _retryable_error(exception: BaseException) -> bool:
    """Return True for transient failures worth retrying.

    Retryable cases, for both the sync (requests) and async (aiohttp)
    clients: connection errors, timeouts, HTTP 5xx responses, and HTTP 429
    (rate limited).
    """
    if isinstance(exception, requests_exceptions.RequestException):
        if isinstance(exception, (requests_exceptions.ConnectionError, requests_exceptions.Timeout)) or (
            exception.response is not None
            and (exception.response.status_code >= 500 or exception.response.status_code == 429)
        ):
            return True
    if isinstance(exception, aiohttp.ClientResponseError):
        if exception.status >= 500 or exception.status == 429:
            return True
    if isinstance(exception, (aiohttp.ClientConnectorError, TimeoutError)):
        return True
    return False
def _a_get_retry_object(self) -> AsyncRetrying:
    """
    Instantiate an async retry object from the policy built in ``__init__``.

    :return: instance of AsyncRetrying class
    """
    # for compatibility we use reraise to avoid handling request error
    return AsyncRetrying(**self.retry_args)
@provide_account_id
async def get_job_details(
    self, run_id: int, account_id: int | None = None, include_related: list[str] | None = None
) -> Any:
    """
    Use Http async call to retrieve metadata for a specific run of a dbt Cloud job.

    :param run_id: The ID of a dbt Cloud job run.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :param include_related: Optional. List of related fields to pull with the run.
        Valid values are "trigger", "job", "repository", and "environment".
    """
    endpoint = f"{account_id}/runs/{run_id}/"
    headers, tenant = await self.get_headers_tenants_from_connection()
    url, params = self.get_request_url_params(tenant, endpoint, include_related)
    proxies = self._get_proxies(self.connection) or {}
    # aiohttp takes a single proxy URL; pick the one matching the URL scheme.
    proxy = proxies.get("https") if proxies and url.startswith("https") else proxies.get("http")
    extra_request_args = {}
    if proxy:
        extra_request_args["proxy"] = proxy
    timeout = (
        aiohttp.ClientTimeout(total=self.timeout_seconds) if self.timeout_seconds is not None else None
    )
    async with aiohttp.ClientSession(headers=headers, timeout=timeout) as session:
        # Retry transient failures using the same policy as the sync client.
        async for attempt in self._a_get_retry_object():
            with attempt:
                async with session.get(url, params=params, **extra_request_args) as response:  # type: ignore[arg-type]
                    response.raise_for_status()
                    return await response.json()
async def get_job_status(
    self, run_id: int, account_id: int | None = None, include_related: list[str] | None = None
) -> int:
    """
    Look up the current status code for one dbt Cloud job run.

    :param run_id: The ID of a dbt Cloud job run.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :param include_related: Optional. List of related fields to pull with the run.
        Valid values are "trigger", "job", "repository", and "environment".
    """
    self.log.info("Getting the status of job run %s.", run_id)
    details = await self.get_job_details(
        run_id, account_id=account_id, include_related=include_related
    )
    status: int = details["data"]["status"]
    return status
@cached_property
def connection(self) -> Connection:
    # Cached: the Airflow connection is resolved once per hook instance.
    _connection = self.get_connection(self.dbt_cloud_conn_id)
    if not _connection.password:
        # The password field carries the dbt Cloud API token; fail fast without it.
        raise AirflowException("An API token is required to connect to dbt Cloud.")
    return _connection  # type: ignore[return-value]
def get_conn(self, *args, **kwargs) -> Session:
    # Build a requests Session rooted at the tenant with token auth attached.
    tenant = self._get_tenant_domain(self.connection)
    self.base_url = f"https://{tenant}/"
    session = Session()
    session.auth = self.auth_type(self.connection.password)
    return session
def _paginate(
    self, endpoint: str, payload: dict[str, Any] | None = None, proxies: dict[str, str] | None = None
) -> list[Response]:
    """Fetch every page for a list endpoint and return the raw responses.

    The first response reports the page size (``limit``) and total count;
    subsequent requests advance an ``offset`` until all results are fetched.
    """
    extra_options: dict[str, Any] = {}
    if self.timeout_seconds is not None:
        extra_options["timeout"] = self.timeout_seconds
    if proxies is not None:
        extra_options["proxies"] = proxies
    response = self.run_with_advanced_retry(
        _retry_args=self.retry_args, endpoint=endpoint, data=payload, extra_options=extra_options or None
    )
    resp_json = response.json()
    limit = resp_json["extra"]["filters"]["limit"]
    num_total_results = resp_json["extra"]["pagination"]["total_count"]
    num_current_results = resp_json["extra"]["pagination"]["count"]
    results = [response]
    if num_current_results != num_total_results:
        # Copy so the caller's payload dict is never mutated.
        _paginate_payload = payload.copy() if payload else {}
        _paginate_payload["offset"] = limit
        while num_current_results < num_total_results:
            response = self.run_with_advanced_retry(
                _retry_args=self.retry_args,
                endpoint=endpoint,
                data=_paginate_payload,
                extra_options=extra_options,
            )
            resp_json = response.json()
            results.append(response)
            num_current_results += resp_json["extra"]["pagination"]["count"]
            _paginate_payload["offset"] += limit
    return results
def _run_and_get_response(
    self,
    *,
    method: str = "GET",
    endpoint: str | None = None,
    payload: str | dict[str, Any] | None = None,
    paginate: bool = False,
    api_version: str = "v2",
) -> Any:
    """Issue one dbt Cloud API request (optionally paginated) with retries.

    :raises ValueError: if ``paginate`` is requested with a string payload,
        or without an endpoint.
    """
    self.method = method
    full_endpoint = f"api/{api_version}/accounts/{endpoint}" if endpoint else None
    proxies = self._get_proxies(self.connection)
    extra_options: dict[str, Any] = {}
    if self.timeout_seconds is not None:
        extra_options["timeout"] = self.timeout_seconds
    if proxies is not None:
        extra_options["proxies"] = proxies
    if paginate:
        if isinstance(payload, str):
            raise ValueError("Payload cannot be a string to paginate a response.")
        if full_endpoint:
            return self._paginate(endpoint=full_endpoint, payload=payload, proxies=proxies)
        raise ValueError("An endpoint is needed to paginate a response.")
    return self.run_with_advanced_retry(
        _retry_args=self.retry_args,
        endpoint=full_endpoint,
        data=payload,
        extra_options=extra_options or None,
    )
def list_accounts(self) -> list[Response]:
    """
    List every dbt Cloud account the configured API token can access.

    :return: List of request responses.
    """
    # No endpoint -> the request hits the accounts root for this token.
    return self._run_and_get_response()
@fallback_to_default_account
def get_account(self, account_id: int | None = None) -> Response:
    """
    Fetch metadata for one dbt Cloud account.

    :param account_id: Optional. The ID of a dbt Cloud account.
    :return: The request response.
    """
    endpoint = f"{account_id}/"
    return self._run_and_get_response(endpoint=endpoint)
@fallback_to_default_account
def list_projects(
    self, account_id: int | None = None, name_contains: str | None = None
) -> list[Response]:
    """
    Fetch metadata for every project in a dbt Cloud account.

    :param account_id: Optional. The ID of a dbt Cloud account.
    :param name_contains: Optional. Case-insensitive substring filter on the project name.
    :return: List of request responses.
    """
    filters = None
    if name_contains:
        filters = {"name__icontains": name_contains}
    return self._run_and_get_response(
        endpoint=f"{account_id}/projects/",
        payload=filters,
        paginate=True,
        api_version="v3",
    )
@fallback_to_default_account
def get_project(self, project_id: int, account_id: int | None = None) -> Response:
    """
    Fetch metadata for one dbt Cloud project.

    :param project_id: The ID of a dbt Cloud project.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :return: The request response.
    """
    endpoint = f"{account_id}/projects/{project_id}/"
    return self._run_and_get_response(endpoint=endpoint, api_version="v3")
@fallback_to_default_account
def list_environments(
    self, project_id: int, *, name_contains: str | None = None, account_id: int | None = None
) -> list[Response]:
    """
    Fetch metadata for every environment in a dbt Cloud project.

    :param project_id: The ID of a dbt Cloud project.
    :param name_contains: Optional. Case-insensitive substring filter on the environment name.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :return: List of request responses.
    """
    filters = None
    if name_contains:
        filters = {"name__icontains": name_contains}
    return self._run_and_get_response(
        endpoint=f"{account_id}/projects/{project_id}/environments/",
        payload=filters,
        paginate=True,
        api_version="v3",
    )
@fallback_to_default_account
def get_environment(
    self, project_id: int, environment_id: int, *, account_id: int | None = None
) -> Response:
    """
    Fetch metadata for one environment of a dbt Cloud project.

    :param project_id: The ID of a dbt Cloud project.
    :param environment_id: The ID of a dbt Cloud environment.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :return: The request response.
    """
    endpoint = f"{account_id}/projects/{project_id}/environments/{environment_id}/"
    return self._run_and_get_response(endpoint=endpoint, api_version="v3")
@fallback_to_default_account
def list_jobs(
    self,
    account_id: int | None = None,
    order_by: str | None = None,
    project_id: int | None = None,
    environment_id: int | None = None,
    name_contains: str | None = None,
) -> list[Response]:
    """
    Fetch metadata for jobs in a dbt Cloud account, optionally narrowed by
    project, environment, and/or a name substring.

    :param account_id: Optional. The ID of a dbt Cloud account.
    :param order_by: Optional. Field to order the result by. Use '-' to indicate reverse order.
        For example, to use reverse order by the run ID use ``order_by=-id``.
    :param project_id: Optional. The ID of a dbt Cloud project.
    :param environment_id: Optional. The ID of a dbt Cloud environment.
    :param name_contains: Optional. Case-insensitive substring filter on the job name.
    :return: List of request responses.
    """
    filters: dict[str, Any] = {"order_by": order_by, "project_id": project_id}
    if environment_id:
        filters["environment_id"] = environment_id
    if name_contains:
        filters["name__icontains"] = name_contains
    return self._run_and_get_response(
        endpoint=f"{account_id}/jobs/",
        payload=filters,
        paginate=True,
    )
@fallback_to_default_account
def get_job(self, job_id: int, account_id: int | None = None) -> Response:
    """
    Fetch metadata for one dbt Cloud job.

    :param job_id: The ID of a dbt Cloud job.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :return: The request response.
    """
    endpoint = f"{account_id}/jobs/{job_id}"
    return self._run_and_get_response(endpoint=endpoint)
@fallback_to_default_account
def get_job_by_name(
    self, *, project_name: str, environment_name: str, job_name: str, account_id: int | None = None
) -> dict:
    """
    Retrieve metadata for a specific job by combination of project, environment, and job name.

    Raises DbtCloudResourceLookupError if the job is not found or cannot be uniquely identified by provided parameters.

    :param project_name: The name of a dbt Cloud project.
    :param environment_name: The name of a dbt Cloud environment.
    :param job_name: The name of a dbt Cloud job.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :return: The details of a job.
    """
    # get project_id using project_name
    # The API filter is a substring match, so re-filter for an exact name.
    list_projects_responses = self.list_projects(name_contains=project_name, account_id=account_id)
    # flatten & filter the list of responses to find the exact match
    projects = [
        project
        for response in list_projects_responses
        for project in response.json()["data"]
        if project["name"] == project_name
    ]
    if len(projects) != 1:
        raise DbtCloudResourceLookupError(f"Found {len(projects)} projects with name `{project_name}`.")
    project_id = projects[0]["id"]
    # get environment_id using project_id and environment_name
    list_environments_responses = self.list_environments(
        project_id=project_id, name_contains=environment_name, account_id=account_id
    )
    # flatten & filter the list of responses to find the exact match
    environments = [
        env
        for response in list_environments_responses
        for env in response.json()["data"]
        if env["name"] == environment_name
    ]
    if len(environments) != 1:
        raise DbtCloudResourceLookupError(
            f"Found {len(environments)} environments with name `{environment_name}` in project `{project_name}`."
        )
    environment_id = environments[0]["id"]
    # get job using project_id, environment_id and job_name
    list_jobs_responses = self.list_jobs(
        project_id=project_id,
        environment_id=environment_id,
        name_contains=job_name,
        account_id=account_id,
    )
    # flatten & filter the list of responses to find the exact match
    jobs = [
        job
        for response in list_jobs_responses
        for job in response.json()["data"]
        if job["name"] == job_name
    ]
    if len(jobs) != 1:
        raise DbtCloudResourceLookupError(
            f"Found {len(jobs)} jobs with name `{job_name}` in environment `{environment_name}` in project `{project_name}`."
        )
    return jobs[0]
@fallback_to_default_account
def trigger_job_run(
    self,
    job_id: int,
    cause: str,
    account_id: int | None = None,
    steps_override: list[str] | None = None,
    schema_override: str | None = None,
    retry_from_failure: bool = False,
    additional_run_config: dict[str, Any] | None = None,
) -> Response:
    """
    Triggers a run of a dbt Cloud job.

    :param job_id: The ID of a dbt Cloud job.
    :param cause: Description of the reason to trigger the job.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :param steps_override: Optional. List of dbt commands to execute when triggering the job
        instead of those configured in dbt Cloud.
    :param schema_override: Optional. Override the destination schema in the configured target for this
        job.
    :param retry_from_failure: Optional. If set to True and the previous job run has failed, the job
        will be triggered using the "rerun" endpoint. This parameter cannot be used alongside
        steps_override, schema_override, or additional_run_config.
    :param additional_run_config: Optional. Any additional parameters that should be included in the API
        request when triggering the job.
    :return: The request response.
    """
    if additional_run_config is None:
        additional_run_config = {}
    # dbt Cloud rejects over-long cause strings; truncate with a warning.
    if cause is not None and len(cause) > DBT_CAUSE_MAX_LENGTH:
        warnings.warn(
            f"Cause `{cause}` exceeds limit of {DBT_CAUSE_MAX_LENGTH} characters and will be truncated.",
            UserWarning,
            stacklevel=2,
        )
        cause = cause[:DBT_CAUSE_MAX_LENGTH]
    payload = {
        "cause": cause,
        "steps_override": steps_override,
        "schema_override": schema_override,
    }
    payload.update(additional_run_config)
    if retry_from_failure:
        # Only rerun when the most recent run of this job actually failed;
        # otherwise fall through to a normal trigger below.
        latest_run = self.get_job_runs(
            account_id=account_id,
            payload={
                "job_definition_id": job_id,
                "order_by": "-created_at",
                "limit": 1,
            },
        ).json()["data"]
        if latest_run and latest_run[0]["status"] == DbtCloudJobRunStatus.ERROR.value:
            # The rerun endpoint ignores overrides, so warn if any were given.
            if steps_override is not None or schema_override is not None or additional_run_config != {}:
                warnings.warn(
                    "steps_override, schema_override, or additional_run_config will be ignored when"
                    " retry_from_failure is True and previous job run has failed.",
                    UserWarning,
                    stacklevel=2,
                )
            return self.retry_failed_job_run(job_id, account_id)
    return self._run_and_get_response(
        method="POST",
        endpoint=f"{account_id}/jobs/{job_id}/run/",
        payload=json.dumps(payload),
    )
@fallback_to_default_account
def list_job_runs(
    self,
    account_id: int | None = None,
    include_related: list[str] | None = None,
    job_definition_id: int | None = None,
    order_by: str | None = None,
) -> list[Response]:
    """
    Fetch metadata for dbt Cloud job runs in an account, optionally limited
    to a single job definition.

    :param account_id: Optional. The ID of a dbt Cloud account.
    :param include_related: Optional. List of related fields to pull with the run.
        Valid values are "trigger", "job", "repository", and "environment".
    :param job_definition_id: Optional. The dbt Cloud job ID to retrieve run metadata.
    :param order_by: Optional. Field to order the result by. Use '-' to indicate reverse order.
        For example, to use reverse order by the run ID use ``order_by=-id``.
    :return: List of request responses.
    """
    filters = {
        "include_related": include_related,
        "job_definition_id": job_definition_id,
        "order_by": order_by,
    }
    return self._run_and_get_response(
        endpoint=f"{account_id}/runs/",
        payload=filters,
        paginate=True,
    )
@fallback_to_default_account
def get_job_runs(self, account_id: int | None = None, payload: dict[str, Any] | None = None) -> Response:
    """
    Retrieve metadata for a specific run of a dbt Cloud job.

    :param account_id: Optional. The ID of a dbt Cloud account.
    :param payload: Optional. Query Parameters
    :return: The request response.
    """
    return self._run_and_get_response(
        endpoint=f"{account_id}/runs/",
        payload=payload,
    )
@fallback_to_default_account
def get_job_run(
    self, run_id: int, account_id: int | None = None, include_related: list[str] | None = None
) -> Response:
    """
    Fetch metadata for one run of a dbt Cloud job.

    :param run_id: The ID of a dbt Cloud job run.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :param include_related: Optional. List of related fields to pull with the run.
        Valid values are "trigger", "job", "repository", and "environment".
    :return: The request response.
    """
    query = {"include_related": include_related}
    return self._run_and_get_response(
        endpoint=f"{account_id}/runs/{run_id}/",
        payload=query,
    )
def get_job_run_status(self, run_id: int, account_id: int | None = None) -> int:
    """
    Fetch the current status code of one dbt Cloud job run.

    :param run_id: The ID of a dbt Cloud job run.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :return: The status of a dbt Cloud job run.
    """
    self.log.info("Getting the status of job run %s.", run_id)
    run_details = self.get_job_run(account_id=account_id, run_id=run_id).json()
    status = run_details["data"]["status"]
    self.log.info("Current status of job run %s: %s", run_id, DbtCloudJobRunStatus(status).name)
    return status
def wait_for_job_run_status(
    self,
    run_id: int,
    account_id: int | None = None,
    expected_statuses: int | Sequence[int] | set[int] = DbtCloudJobRunStatus.SUCCESS.value,
    check_interval: int = 60,
    timeout: int = 60 * 60 * 24 * 7,
) -> bool:
    """
    Wait for a dbt Cloud job run to match an expected status.

    Polls every ``check_interval`` seconds until the run reaches a terminal
    status or one of ``expected_statuses``.

    :param run_id: The ID of a dbt Cloud job run.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :param expected_statuses: Optional. The desired status(es) to check against a job run's current
        status. Defaults to the success status value.
    :param check_interval: Time in seconds to check on a pipeline run's status.
    :param timeout: Time in seconds to wait for a pipeline to reach a terminal status or the expected
        status.
    :raises DbtCloudJobRunException: if the timeout elapses first.
    :return: Boolean indicating if the job run has reached the ``expected_status``.
    """
    # Normalize a single status code into a tuple and validate all values.
    expected_statuses = (expected_statuses,) if isinstance(expected_statuses, int) else expected_statuses
    DbtCloudJobRunStatus.check_is_valid(expected_statuses)
    job_run_info = JobRunInfo(account_id=account_id, run_id=run_id)
    job_run_status = self.get_job_run_status(**job_run_info)
    start_time = time.monotonic()
    while (
        not DbtCloudJobRunStatus.is_terminal(job_run_status) and job_run_status not in expected_statuses
    ):
        # Check if the job-run duration has exceeded the ``timeout`` configured.
        if start_time + timeout < time.monotonic():
            raise DbtCloudJobRunException(
                f"Job run {run_id} has not reached a terminal status after {timeout} seconds."
            )
        # Wait to check the status of the job run based on the ``check_interval`` configured.
        time.sleep(check_interval)
        job_run_status = self.get_job_run_status(**job_run_info)
    return job_run_status in expected_statuses
@fallback_to_default_account
def cancel_job_run(self, run_id: int, account_id: int | None = None) -> None:
    """
    Request cancellation of one dbt Cloud job run.

    :param run_id: The ID of a dbt Cloud job run.
    :param account_id: Optional. The ID of a dbt Cloud account.
    """
    endpoint = f"{account_id}/runs/{run_id}/cancel/"
    self._run_and_get_response(method="POST", endpoint=endpoint)
@fallback_to_default_account
def list_job_run_artifacts(
    self, run_id: int, account_id: int | None = None, step: int | None = None
) -> list[Response]:
    """
    List artifact files produced by a completed dbt Cloud job run.

    Without ``step`` the artifacts of the run's last step are returned.

    :param run_id: The ID of a dbt Cloud job run.
    :param account_id: Optional. The ID of a dbt Cloud account.
    :param step: Optional. 1-based index of the run step to query; omit for
        the last step.
    :return: List of request responses.
    """
    endpoint = f"{account_id}/runs/{run_id}/artifacts/"
    return self._run_and_get_response(endpoint=endpoint, payload={"step": step})
@fallback_to_default_account
def get_job_run_artifact(
self, run_id: int, path: str, account_id: int | None = None, step: int | None = None
) -> Response:
"""
Retrieve a list of the available artifact files generated for a completed run of a dbt Cloud job.
By default, this returns artifacts from the last step in the run. To
list artifacts from other steps in the run, use the ``step`` parameter.
:param run_id: The ID of a dbt Cloud job run.
:param path: The file path related to the artifact file. Paths are rooted at the target/ directory.
Use "manifest.json", "catalog.json", or "run_results.json" to download dbt-generated artifacts
for the run.
:param account_id: Optional. The ID of a dbt Cloud account.
:param step: Optional. The index of the Step in the Run to query for artifacts. The first step in the
run has the index 1. If the step parameter is omitted, artifacts for the last step in the run will
be returned.
:return: The request response.
"""
return self._run_and_get_response(
endpoint=f"{account_id}/runs/{run_id}/artifacts/{path}", payload={"step": step}
)
@fallback_to_default_account
async def get_job_run_artifacts_concurrently(
self,
run_id: int,
artifacts: list[str],
account_id: int | None = None,
step: int | None = None,
):
"""
Retrieve a list of chosen artifact files generated for a step in completed run of a dbt Cloud job.
By default, this returns artifacts from the last step in the run.
This takes advantage of the asynchronous calls to speed up the retrieval.
:param run_id: The ID of a dbt Cloud job run.
:param step: The index of the Step in the Run to query for artifacts. The first step in the
run has the index 1. If the step parameter is omitted, artifacts for the last step in the run will
be returned.
:param path: The file path related to the artifact file. Paths are rooted at the target/ directory.
Use "manifest.json", "catalog.json", or "run_results.json" to download dbt-generated artifacts
for the run.
:param account_id: Optional. The ID of a dbt Cloud account.
:return: The request response.
"""
tasks = {
artifact: sync_to_async(self.get_job_run_artifact)(
run_id,
path=artifact,
account_id=account_id,
step=step,
)
for artifact in artifacts
}
results = await asyncio.gather(*tasks.values())
return {filename: result.json() for filename, result in zip(tasks.keys(), results)}
@fallback_to_default_account
def retry_failed_job_run(self, job_id: int, account_id: int | None = None) -> Response:
"""
Retry a failed run for a job from the point of failure, if the run failed. Otherwise, trigger a new run.
:param job_id: The ID of a dbt Cloud job.
:param account_id: Optional. The ID of a dbt Cloud account.
:return: The request response.
"""
return self._run_and_get_response(method="POST", endpoint=f"{account_id}/jobs/{job_id}/rerun/")
def test_connection(self) -> tuple[bool, str]:
"""Test dbt Cloud connection."""
try:
self._run_and_get_response()
return True, "Successfully connected to dbt Cloud."
except Exception as e:
return False, str(e)
| DbtCloudHook |
python | django__django | tests/admin_views/admin.py | {
"start": 4339,
"end": 6765
} | class ____(ArticleAdminWithExtraUrl):
list_display = (
"content",
"date",
callable_year,
"model_year",
"modeladmin_year",
"model_year_reversed",
"section",
lambda obj: obj.title,
"order_by_expression",
"model_property_year",
"model_month",
"order_by_f_expression",
"order_by_orderby_expression",
"model_property_is_from_past",
)
list_editable = ("section",)
list_filter = ("date", "section")
autocomplete_fields = ("section",)
view_on_site = False
form = ArticleForm
fieldsets = (
(
"Some fields",
{
"classes": ("collapse",),
"fields": ("title", "content", "extra_form_field"),
},
),
(
"Some other fields",
{"classes": ("wide",), "fields": ("date", "section", "sub_section")},
),
("이름", {"fields": ("another_section",)}),
)
# These orderings aren't particularly useful but show that expressions can
# be used for admin_order_field.
@admin.display(ordering=models.F("date") + datetime.timedelta(days=3))
def order_by_expression(self, obj):
return obj.model_year
@admin.display(ordering=models.F("date"))
def order_by_f_expression(self, obj):
return obj.model_year
@admin.display(ordering=models.F("date").asc(nulls_last=True))
def order_by_orderby_expression(self, obj):
return obj.model_year
def changelist_view(self, request):
return super().changelist_view(request, extra_context={"extra_var": "Hello!"})
@admin.display(ordering="date", description=None)
def modeladmin_year(self, obj):
return obj.date.year
def delete_model(self, request, obj):
EmailMessage(
"Greetings from a deleted object",
"I hereby inform you that some user deleted me",
"from@example.com",
["to@example.com"],
).send()
return super().delete_model(request, obj)
def save_model(self, request, obj, form, change=True):
EmailMessage(
"Greetings from a created object",
"I hereby inform you that some user created me",
"from@example.com",
["to@example.com"],
).send()
return super().save_model(request, obj, form, change)
| ArticleAdmin |
python | pytorch__pytorch | test/xpu/test_conv.py | {
"start": 942,
"end": 51340
} | class ____(NNTestCase):
def run_conv_double_back_test(
self,
kern,
stride,
padding,
chan_in,
chan_out,
batch_size,
inp_size,
dilation,
no_weight,
groups=1,
use_xpu=False,
use_bias=True,
dtype=torch.double,
):
device = torch.device("xpu" if use_xpu else "cpu")
x = torch.randn(
batch_size,
chan_in,
inp_size,
inp_size,
device=device,
dtype=dtype,
requires_grad=True,
)
weight = torch.randn(
chan_out,
chan_in // groups,
kern,
kern,
device=device,
dtype=dtype,
requires_grad=not no_weight,
)
if use_bias:
bias = torch.randn(chan_out, device=device, dtype=dtype, requires_grad=True)
else:
bias = None
def func(*inputs):
if use_bias:
lx, lweight, lbias = inputs
else:
lx, lweight = inputs
lbias = None
out = F.conv2d(lx, lweight, lbias, stride, padding, dilation, groups)
return out
if use_bias:
inputs = x, weight, bias
else:
inputs = x, weight
dummy_out = func(*inputs)
grad_y = torch.randn_like(
dummy_out, device=device, dtype=dtype, requires_grad=True
)
if dtype == torch.float:
(g,) = torch.autograd.grad(dummy_out.sum(), x, create_graph=True)
return g.requires_grad
return gradgradcheck(func, inputs, (grad_y,))
@dtypes(*floating_types_and(torch.half, torch.bfloat16))
def test_Conv2d_large_workspace(self, device, dtype):
sizes = [
(1, 256, 109, 175),
(1, 256, 80, 128),
(1, 256, 120, 192),
]
def run_test(benchmark):
conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1).to(device, dtype)
for size in sizes:
x = torch.randn(size, device=device, dtype=dtype)
out = conv(x.detach().clone().requires_grad_())
out.backward(torch.ones_like(out))
run_test(benchmark=False)
run_test(benchmark=True)
@dtypes(torch.half, torch.float)
def test_ConvTranspose2d_large_output_padding(self, device, dtype):
net1 = torch.nn.ConvTranspose2d(
128, 64, kernel_size=3, stride=2, padding=1, output_padding=1
).to(device=device, dtype=dtype)
net2 = torch.nn.ConvTranspose2d(
64, 32, kernel_size=3, stride=2, padding=1, output_padding=1
).to(device=device, dtype=dtype)
net3 = torch.nn.ConvTranspose2d(
32, 3, kernel_size=3, stride=2, padding=1, output_padding=1
).to(device=device, dtype=dtype)
x = torch.rand(1, 128, 6, 6, device=device, dtype=dtype, requires_grad=True)
x = net1(x)
x = net2(x)
x = net3(x)
x.backward(torch.randn_like(x))
@dtypes(torch.float, torch.double, torch.half)
def test_Conv2d_depthwise_naive_groups(self, device, dtype):
if dtype == torch.half and "xpu" in device:
self.skipTest(
"The accuracy issue of dtype fp16 would be fixed in oneDNN v3.4"
)
for depth_multiplier in [1, 2]:
m = nn.Conv2d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(
device, dtype
)
i = (
torch.randn(2, 2, 6, 6, device=device, dtype=dtype)
.div_(2)
.requires_grad_()
)
output = m(i)
grad_output = (
torch.randn(2, 2 * depth_multiplier, 4, 4, device=device, dtype=dtype)
/ 2
)
output.backward(grad_output)
offset = 1 * depth_multiplier
m1 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m1.weight.data = m.weight.data[:offset].clone()
m1.bias.data = m.bias.data[:offset].clone()
i1 = i.detach()[:, :1].clone().requires_grad_()
output1 = m1(i1)
output1.backward(grad_output[:, :offset].contiguous())
m2 = nn.Conv2d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m2.weight.data.copy_(m.weight.data[offset:])
m2.bias.data.copy_(m.bias.data[offset:])
i2 = i.detach()[:, 1:].clone().requires_grad_()
output2 = m2(i2)
output2.backward(grad_output[:, offset:].contiguous())
self.assertEqual(
output,
torch.cat([output1, output2], 1),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
self.assertEqual(
i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
self.assertEqual(
m.bias.grad.data,
torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
self.assertEqual(
m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
@dtypes(torch.float, torch.double, torch.half)
def test_Conv3d_depthwise_naive_groups(self, device, dtype):
if dtype == torch.half and "xpu" in device:
self.skipTest(
"The accuracy issue of dtype fp16 would be fixed in oneDNN v3.4"
)
for depth_multiplier in [1, 2]:
m = nn.Conv3d(2, 2 * depth_multiplier, kernel_size=3, groups=2).to(
device, dtype
)
i = (
torch.randn(2, 2, 6, 6, 6, device=device, dtype=dtype)
.div_(2)
.requires_grad_()
)
output = m(i)
grad_output = (
torch.randn(
2, 2 * depth_multiplier, 4, 4, 4, device=device, dtype=dtype
)
/ 2
)
output.backward(grad_output)
offset = 1 * depth_multiplier
m1 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m1.weight.data = m.weight.data[:offset].clone()
m1.bias.data = m.bias.data[:offset].clone()
i1 = i.detach()[:, :1].clone().requires_grad_()
output1 = m1(i1)
output1.backward(grad_output[:, :offset].contiguous())
m2 = nn.Conv3d(1, 1 * depth_multiplier, kernel_size=3).to(device, dtype)
m2.weight.data.copy_(m.weight.data[offset:])
m2.bias.data.copy_(m.bias.data[offset:])
i2 = i.detach()[:, 1:].clone().requires_grad_()
output2 = m2(i2)
output2.backward(grad_output[:, offset:].contiguous())
atol, rtol = (3e-4, 3e-2)
self.assertEqual(
output, torch.cat([output1, output2], 1), atol=atol, rtol=rtol
)
self.assertEqual(
i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
self.assertEqual(
m.bias.grad.data,
torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
self.assertEqual(
m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=atol,
rtol=rtol,
)
@dtypes(torch.float, torch.double, torch.half)
def test_noncontig_conv_grad(self, device, dtype):
module = nn.Conv2d(3, 5, kernel_size=3, padding=1).to(device, dtype)
input = torch.randn(
2, 3, 10, 10, dtype=dtype, device=device, requires_grad=True
)
output = module(input)
grad = torch.randn(2, 2, 5, 10, 10, dtype=dtype, device=device)[:, 1]
assert not grad.is_contiguous()
output.backward(grad, retain_graph=True)
self.assertIsNotNone(input.grad)
result = input.grad.data.clone()
input.grad.data.zero_()
output.backward(grad.contiguous())
self.assertEqual(
result, input.grad.data, atol=dtype2prec_DONTUSE[dtype], rtol=0
)
@dtypes(torch.double)
def test_conv_double_backward(self, device, dtype):
with torch.backends.cudnn.flags(enabled=True, deterministic=True):
batch_size = 1
for kern, inp_size, dilations in [(3, 5, [1, 2]), (4, 9, [1])]:
for stride, padding, chan_in, chan_out, dilation in product(
[1], [2], [2], [3], dilations
):
no_weight = stride == 2
result = self.run_conv_double_back_test(
kern,
stride,
padding,
chan_in,
chan_out,
batch_size,
inp_size,
dilation,
no_weight,
use_xpu=True,
dtype=dtype,
)
self.assertTrue(result, "Conv double backward test failed")
def test_conv_double_backward_no_bias(self):
kern, stride = 3, 2
chan_in, chan_out = 2, 4
batch_size, inp_size = 2, 5
padding, dilation = 1, 1
no_weight, use_bias = False, True
result = self.run_conv_double_back_test(
kern,
stride,
padding,
chan_in,
chan_out,
batch_size,
inp_size,
dilation,
no_weight,
use_bias=use_bias,
)
self.assertTrue(result, "Conv double backward test failed")
def test_conv_double_backward_groups(self):
kern, stride, padding = 3, 1, 2
chan_in, chan_out = 2, 4
batch_size, inp_size, dilation = 2, 6, 1
no_weight = False
groups = 2
result = self.run_conv_double_back_test(
kern,
stride,
padding,
chan_in * groups,
chan_out * groups,
batch_size,
inp_size,
dilation,
no_weight,
groups=groups,
)
self.assertTrue(result, "Conv double backward test failed")
def test_conv_double_backward_stride(self):
batch_size = 2
for kern, inp_size, dilations in [(3, 5, [1, 2]), (3, 7, [1])]:
for stride, padding, chan_in, chan_out, dilation in product(
[2], [0, 1], [1], [2], dilations
):
no_weight = False
self.run_conv_double_back_test(
kern,
stride,
padding,
chan_in,
chan_out,
batch_size,
inp_size,
dilation,
no_weight,
)
@dtypes(torch.float)
def test_conv1d_large_input(self, device, dtype):
N, C_in, L = 4, 512, 441
C_out, K, P = 512, 3, 1
torch.manual_seed(42)
conv_cpu = (
nn.Conv1d(C_in, C_out, kernel_size=K, padding=P, bias=True)
.to(torch.float32)
.requires_grad_()
)
x_cpu = torch.randn(N, C_in, L, dtype=torch.float32)
out_cpu = conv_cpu(x_cpu)
conv_dev = nn.Conv1d(C_in, C_out, kernel_size=K, padding=P, bias=True).to(
device, dtype
)
conv_dev.weight.data.copy_(conv_cpu.weight.data.to(dtype))
conv_dev.bias.data.copy_(conv_cpu.bias.data.to(dtype))
x_dev = x_cpu.to(device, dtype).requires_grad_()
out_dev = conv_dev(x_dev)
self.assertEqual(out_cpu, out_dev, atol=1e-5, rtol=1e-5, exact_device=False)
@dtypes(torch.float)
def test_conv1d_same_padding(self, device, dtype):
test_args = [
range(50, 55),
[1, 2, 3, 8],
range(1, 4),
[1],
]
for in_size, k_size, dilation, stride in itertools.product(*test_args):
x = torch.rand(1, 1, in_size, device=device, dtype=dtype)
y = torch.rand(1, 1, k_size, device=device, dtype=dtype)
z = F.conv1d(x, y, padding="same", dilation=dilation, stride=stride)
self.assertEqual(z.size(2), int(math.ceil(in_size / stride)))
x = torch.rand(1, 1, 12, device=device, dtype=dtype)
y = torch.rand(1, 1, 3, device=device, dtype=dtype)
expect = F.conv1d(x, y, padding=1)
actual = F.conv1d(x, y, padding="same")
self.assertEqual(expect, actual)
x = torch.rand(1, 1, 12, device=device, dtype=dtype)
y = torch.rand(1, 1, 4, device=device, dtype=dtype)
expect = F.conv1d(x, y, padding=3, dilation=2)
actual = F.conv1d(x, y, padding="same", dilation=2)
self.assertEqual(expect, actual)
expect = F.conv1d(x, y, padding=5, dilation=3)[..., 1:]
actual = F.conv1d(x, y, padding="same", dilation=3)
self.assertEqual(expect, actual)
@dtypes(torch.float)
def test_conv3d_same_padding(self, device, dtype):
rtol, atol = None, None
x = torch.rand(1, 1, 10, 11, 12, device=device, dtype=dtype)
y = torch.rand(1, 1, 1, 2, 5, device=device, dtype=dtype)
expect = F.conv3d(x, y, padding=(0, 1, 2))[..., :, 1:, :]
actual = F.conv3d(x, y, padding="same")
self.assertEqual(expect, actual, rtol=rtol, atol=atol)
expect = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
actual = F.conv3d(x, y, padding="same", dilation=2)
self.assertEqual(expect, actual, rtol=rtol, atol=atol)
y = torch.rand(1, 1, 4, 4, 4, device=device, dtype=dtype)
expect = F.conv3d(x, y, padding=5, dilation=3)[..., 1:, 1:, 1:]
actual = F.conv3d(x, y, padding="same", dilation=3)
self.assertEqual(expect, actual, rtol=rtol, atol=atol)
@dtypes(torch.float)
def test_conv1d_valid_padding(self, device, dtype):
x = torch.rand(1, 1, 10, device=device, dtype=dtype)
y = torch.rand(1, 1, 4, device=device, dtype=dtype)
expect = F.conv1d(x, y)
actual = F.conv1d(x, y, padding="valid")
self.assertEqual(expect, actual)
@dtypes(torch.float)
def test_conv2d_valid_padding(self, device, dtype):
x = torch.rand(1, 1, 1, 10, device=device, dtype=dtype)
y = torch.rand(1, 1, 1, 4, device=device, dtype=dtype)
expect = F.conv2d(x, y)
actual = F.conv2d(x, y, padding="valid")
self.assertEqual(expect, actual)
@dtypes(torch.float)
def test_conv3d_valid_padding(self, device, dtype):
x = torch.rand(1, 1, 1, 1, 10, dtype=dtype, device=device)
y = torch.rand(1, 1, 1, 1, 4, dtype=dtype, device=device)
expect = F.conv3d(x, y)
actual = F.conv3d(x, y, padding="valid")
self.assertEqual(expect, actual)
@dtypes(torch.float)
def test_conv1d_same_padding_backward(self, device, dtype):
x = torch.rand(1, 1, 12, dtype=dtype, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, dtype=dtype, device=device, requires_grad=True)
z = F.conv1d(x, y, padding=3, dilation=2)
z.sum().abs().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv1d(x, y, padding="same", dilation=2)
z.sum().abs().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
z = F.conv1d(x, y, padding=2)[..., 1:]
z.sum().abs().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv1d(x, y, padding="same")
z.sum().abs().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
@dtypes(torch.float)
def test_conv2d_same_padding_backward(self, device, dtype):
x = torch.rand(1, 1, 10, 11, device=device, dtype=dtype, requires_grad=True)
y = torch.rand(1, 1, 4, 5, device=device, dtype=dtype, requires_grad=True)
z = F.conv2d(x, y, padding=(3, 4), dilation=2)
z.sum().abs().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv2d(x, y, padding="same", dilation=2)
z.sum().abs().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
y = torch.rand(1, 1, 4, 4, device=device, dtype=dtype, requires_grad=True)
z = F.conv2d(x, y, padding=2)[..., 1:, 1:]
z.sum().abs().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv2d(x, y, padding="same")
z.sum().abs().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
@dtypes(torch.double)
def test_conv3d_same_padding_backward(self, device, dtype):
x = torch.rand(1, 1, 1, 11, 12, dtype=dtype, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 2, 5, dtype=dtype, device=device, requires_grad=True)
z = F.conv3d(x, y, padding=(0, 1, 4), dilation=2)
z.sum().abs().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv3d(x, y, padding="same", dilation=2)
z.sum().abs().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
x.grad, y.grad = None, None
gradcheck(
lambda x, y: F.conv3d(x, y, padding="same", dilation=2),
(x, y),
check_forward_ad=True,
nondet_tol=1e-5,
)
gradgradcheck(
lambda x, y: F.conv3d(x, y, padding="same", dilation=2),
(x, y),
check_fwd_over_rev=True,
)
y = torch.rand(1, 1, 1, 4, 4, dtype=dtype, device=device, requires_grad=True)
z = F.conv3d(x, y, padding=2)[..., 1:, 1:]
z.sum().abs().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
z = F.conv3d(x, y, padding="same")
z.sum().abs().backward()
self.assertEqual(gx_expect, x.grad)
self.assertEqual(gy_expect, y.grad)
gradcheck(
lambda x, y: F.conv3d(x, y, padding="same"),
(x, y),
check_forward_ad=True,
nondet_tol=1e-5,
)
gradgradcheck(
lambda x, y: F.conv3d(x, y, padding="same"),
(x, y),
check_fwd_over_rev=True,
)
@dtypes(torch.float)
def test_conv1d_valid_padding_backward(self, device, dtype):
x = torch.rand(1, 1, 10, dtype=dtype, device=device, requires_grad=True)
y = torch.rand(1, 1, 4, dtype=dtype, device=device, requires_grad=True)
F.conv1d(x, y, padding=0).sum().abs().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv1d(x, y, padding="valid").sum().abs().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
@unittest.skipIf(not TEST_SCIPY, "Scipy required for the test.")
@dtypes(torch.float)
@parametrize_test("mode", ("valid", "same"))
def test_conv1d_vs_scipy(self, device, dtype, mode):
t = make_tensor((1, 10), device=device, dtype=dtype)
feat_dim = t.shape[1]
weight_even = make_tensor((1, 1, 4), device=device, dtype=dtype)
weight_odd = make_tensor((1, 1, 5), device=device, dtype=dtype)
def _test(t, weight, mode):
t_a = t.view(-1).cpu().numpy()
w_a = weight.view(-1).cpu().numpy()
expected = scipy.signal.convolve(t_a, w_a, mode=mode)
kwargs = {"padding": mode}
if mode == "same":
p = weight.shape[2] // 2
t = torch.nn.functional.pad(t, (p, p))
kwargs.pop("padding")
weight_flipped = torch.flip(weight, (2,))
actual = torch.nn.functional.conv1d(t, weight_flipped, **kwargs).squeeze(0)
if mode == "same":
actual = actual[:feat_dim]
self.assertEqual(actual, expected, atol=2e-5, rtol=2e-5)
with set_default_dtype(torch.float):
_test(t, weight_even, mode)
_test(t, weight_odd, mode)
@unittest.skipIf(not TEST_SCIPY, "Scipy required for the test.")
@dtypes(torch.float)
@parametrize_test("mode", ("valid", "same"))
def test_conv2d_vs_scipy(self, device, dtype, mode):
t = make_tensor((1, 5, 10), device=device, dtype=dtype)
weight_even = make_tensor((1, 1, 2, 4), device=device, dtype=dtype)
weight_odd = make_tensor((1, 1, 3, 5), device=device, dtype=dtype)
def _test(t, weight, mode):
t_a = t.squeeze(0).cpu().numpy()
w_a = weight.squeeze(0).squeeze(0).cpu().numpy()
expected = scipy.signal.convolve2d(t_a, w_a, mode=mode)
kwargs = {"padding": mode}
if mode == "same":
left_right_pad = weight.shape[3] // 2
top_bottom_pad = weight.shape[2] // 2
p = (left_right_pad, left_right_pad, top_bottom_pad, top_bottom_pad)
t = torch.nn.functional.pad(t, p)
kwargs.pop("padding")
weight_flipped = torch.flip(weight, (2, 3))
actual = torch.nn.functional.conv2d(t, weight_flipped, **kwargs).squeeze(0)
if mode == "same":
actual = actual[:5, :10]
self.assertEqual(actual, expected, rtol=2e-5, atol=5e-6)
with set_default_dtype(torch.float):
_test(t, weight_even, mode)
_test(t, weight_odd, mode)
@unittest.skipIf(not TEST_SCIPY, "Scipy required for the test.")
@dtypes(torch.float)
@parametrize_test("mode", ("valid", "same"))
def test_conv3d_vs_scipy(self, device, dtype, mode):
t = make_tensor((1, 5, 5, 10), device=device, dtype=dtype)
weight_even = make_tensor((1, 1, 2, 2, 4), device=device, dtype=dtype)
weight_odd = make_tensor((1, 1, 2, 3, 5), device=device, dtype=dtype)
def _test(t, weight, mode):
t_a = t.squeeze(0).cpu().numpy()
w_a = weight.squeeze(0).squeeze(0).cpu().numpy()
expected = scipy.signal.convolve(t_a, w_a, mode=mode)
kwargs = {"padding": mode}
if mode == "same":
left_right_pad = weight.shape[4] // 2
top_bottom_pad = weight.shape[3] // 2
front_back_pad = weight.shape[2] // 2
p = (
left_right_pad,
left_right_pad,
top_bottom_pad,
top_bottom_pad,
front_back_pad,
front_back_pad,
)
t = torch.nn.functional.pad(t, p)
kwargs.pop("padding")
weight_flipped = torch.flip(weight, (2, 3, 4))
actual = torch.nn.functional.conv3d(t, weight_flipped, **kwargs).squeeze(0)
if mode == "same":
actual = actual[:5, :5, :10]
self.assertEqual(actual, expected, rtol=2e-5, atol=5e-6)
with set_default_dtype(torch.float):
_test(t, weight_even, mode)
_test(t, weight_odd, mode)
@dtypes(torch.float)
def test_conv2d_valid_padding_backward(self, device, dtype):
x = torch.rand(1, 1, 1, 10, device=device, dtype=dtype, requires_grad=True)
y = torch.rand(1, 1, 1, 4, device=device, dtype=dtype, requires_grad=True)
F.conv2d(x, y, padding=0).sum().abs().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv2d(x, y, padding="valid").sum().abs().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
@dtypes(torch.double)
def test_conv3d_valid_padding_backward(self, device, dtype):
x = torch.rand(1, 1, 1, 1, 10, dtype=dtype, device=device, requires_grad=True)
y = torch.rand(1, 1, 1, 1, 4, dtype=dtype, device=device, requires_grad=True)
F.conv3d(x, y, padding=0).sum().abs().backward()
gx_expect, gy_expect = x.grad, y.grad
x.grad, y.grad = None, None
F.conv3d(x, y, padding="valid").sum().abs().backward()
gx_actual, gy_actual = x.grad, y.grad
self.assertEqual(gx_expect, gx_actual)
self.assertEqual(gy_expect, gy_actual)
gradcheck(
lambda x, y: F.conv3d(x, y, padding="valid"),
(x, y),
check_forward_ad=True,
)
gradgradcheck(
lambda x, y: F.conv3d(x, y, padding="valid"),
(x, y),
check_fwd_over_rev=True,
)
@parametrize_test("N", range(2, 4), name_fn=lambda N: f"ConvTranspose{N}d")
def test_conv_transpose_with_output_size_and_no_batch_dim(self, device, N):
inp = torch.randn((1, 15, 13) if N == 2 else (1, 15, 13, 13), device=device)
output_size = (1, 240, 200) if N == 2 else (1, 240, 200, 200)
ConvTransposeNd = getattr(nn, f"ConvTranspose{N}d")
m = ConvTransposeNd(
1, 1, kernel_size=16, stride=16, padding=7, bias=False, device=device
)
output = m(inp, output_size=output_size)
self.assertEqual(output.shape, output_size)
@dtypes(torch.float)
def test_conv_empty_channel(self, device, dtype):
in_channels = 0
mod = torch.nn.Conv1d(in_channels, 8, 2, stride=2, dtype=dtype).to(device)
inp = torch.randn(2, 0, 15, device=device, dtype=dtype)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
inp = torch.randn(2, 1, 0, device=device, dtype=dtype)
mod(inp)
mod = torch.nn.Conv2d(in_channels, 33, 3, stride=2, dtype=dtype).to(device)
inp = torch.randn(2, 0, 50, 100, device=device, dtype=dtype)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
inp = torch.randn(2, 1, 40, 0, device=device, dtype=dtype)
mod(inp)
mod = torch.nn.Conv3d(in_channels, 33, 3, stride=2, dtype=dtype).to(device)
inp = torch.randn(2, 0, 50, 20, 40, device=device, dtype=dtype)
_test_module_empty_input(self, mod, inp, check_size=False)
with self.assertRaisesRegex(RuntimeError, "Given groups=1, weight"):
inp = torch.randn(2, 1, 50, 0, 40, device=device, dtype=dtype)
mod(inp)
def test_group_conv_empty(self, device):
mod = torch.nn.Conv2d(4, 4, stride=2, kernel_size=3, padding=1, groups=4).to(
device
)
inp = torch.randn(0, 4, 4, 4, device=device)
_test_module_empty_input(self, mod, inp, check_size=False)
def test_group_convTranspose_empty(self, device):
mod = torch.nn.ConvTranspose2d(
4, 4, stride=2, kernel_size=3, padding=1, groups=4
).to(device)
inp = torch.randn(0, 4, 4, 4, device=device)
_test_module_empty_input(self, mod, inp, check_size=False)
def test_convTranspose_empty(self, device):
mod = torch.nn.ConvTranspose2d(4, 4, stride=2, kernel_size=3, padding=1).to(
device
)
inp = torch.randn(0, 4, 4, 4, device=device)
_test_module_empty_input(self, mod, inp, check_size=False)
def test_conv_large_nosplit(self, device):
dtype = torch.half
conv1 = nn.Conv2d(2, 2, 8, 8).to(device).to(dtype)
input_large = torch.randn(1, 2, 1024, 1024 * 1024, dtype=dtype, device=device)
conv1(input_large)
conv2 = torch.nn.Conv2d(1, 1024, 1, 1).to(device).to(dtype)
input_large = torch.randn(1, 1, 2048, 1024, dtype=dtype, device=device)
conv2(input_large)
def test_conv_noncontig_weights(self, device):
for dim in (1, 2, 3):
for grouped in (False, True):
nc = 3
groups = 3 if grouped else 1
w = torch.randn([3] * dim, device=device)
w = w.expand([nc, int(nc / groups)] + list(w.shape))
w = w.detach().requires_grad_()
x = torch.randn(
[1, nc] + ([5] * dim), device=device, requires_grad=True
)
y = getattr(F, f"conv{dim}d")(x, w, groups=groups)
y.sum().backward()
y = getattr(F, f"conv_transpose{dim}d")(x, w, groups=groups)
y.sum().backward()
def test_conv_noncontig_weights_and_bias(self, device):
for bias in [True, False]:
conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=bias).to(
device, torch.float
)
input_nc = torch.randn(
(1, 3, 224, 224, 2), device=device, dtype=torch.float
)[:, :, :, :, 1]
input_c = input_nc.contiguous()
weight_nc = torch.randn((64, 3, 7, 7, 2), device=device, dtype=torch.float)[
:, :, :, :, 1
]
conv1.weight = nn.Parameter(weight_nc)
weight_c = conv1.weight.contiguous()
if bias:
bias_nc = torch.randn((64, 2), device=device, dtype=torch.float)[:, 1]
conv1.bias = nn.Parameter(bias_nc)
bias_c = conv1.bias.contiguous()
out1 = conv1(input_nc)
conv1.weight = nn.Parameter(weight_c)
if bias:
conv1.bias = nn.Parameter(bias_c)
out2 = conv1(input_c)
self.assertEqual(out1, out2)
def test_conv_transposed_large(self, device):
dtype = torch.half if self.device_type == "cuda" else torch.float
conv = nn.ConvTranspose2d(1, 1, 1, 1, bias=False).to(device).to(dtype)
input_large = torch.randn(4096, 1, 512, 1024, dtype=dtype, device=device)
ret = conv(input_large)
maxdiff0 = (
(ret.narrow(0, 0, 1024) - conv(input_large.narrow(0, 0, 1024)))
.abs_()
.max()
.item()
)
maxdiff1 = (
(ret.narrow(0, 1024, 1024) - conv(input_large.narrow(0, 1024, 1024)))
.abs_()
.max()
.item()
)
maxdiff2 = (
(ret.narrow(0, 2048, 1024) - conv(input_large.narrow(0, 2048, 1024)))
.abs_()
.max()
.item()
)
maxdiff3 = (
(ret.narrow(0, 3072, 1024) - conv(input_large.narrow(0, 3072, 1024)))
.abs_()
.max()
.item()
)
self.assertEqual(maxdiff0, 0)
self.assertEqual(maxdiff1, 0)
self.assertEqual(maxdiff2, 0)
self.assertEqual(maxdiff3, 0)
def test_conv_large(self, device):
dtype = torch.half if self.device_type == "cuda" else torch.float
conv = nn.Conv2d(2, 2, 8, 8, bias=False).to(device).to(dtype)
input_large = torch.randn(4097, 2, 512, 512, dtype=dtype, device=device)
ret = conv(input_large)
self.assertEqual(ret[:2048], conv(input_large[:2048]))
self.assertEqual(ret[2048:4096], conv(input_large[2048:4096]))
self.assertEqual(ret[4096:], conv(input_large[4096:]))
conv.zero_grad()
ret.view(4097, -1).max(dim=1).values.sum().backward()
del ret
grad1 = conv.weight.grad.detach().clone()
conv.zero_grad()
conv(input_large[:2048]).view(2048, -1).max(dim=1).values.sum().backward()
conv(input_large[2048:4096]).view(2048, -1).max(dim=1).values.sum().backward()
conv(input_large[4096:]).view(1, -1).max(dim=1).values.sum().backward()
grad2 = conv.weight.grad.detach().clone()
scale = 1 / grad2.abs().mean()
grad1 = grad1 * scale
grad2 = grad2 * scale
self.assertEqual(grad1, grad2, atol=5e-2, rtol=5e-3)
def test_Conv2d_size_1_kernel(self, device):
x_cpu = torch.randn(2, 3, 5, 5)
conv_cpu = torch.nn.Conv2d(3, 3, kernel_size=1)
y_cpu = conv_cpu(x_cpu)
y = torch.rand_like(y_cpu)
y_cpu.backward(y)
with cudnn.flags(enabled=False):
conv_cuda = torch.nn.Conv2d(3, 3, kernel_size=1).to(device)
conv_cuda.bias.data.copy_(conv_cpu.bias.data)
conv_cuda.weight.data.copy_(conv_cpu.weight.data)
y_cuda = conv_cuda(x_cpu.to(device))
y_cuda.backward(y.to(device))
self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(
conv_cpu.bias.grad.data,
conv_cuda.bias.grad.data,
atol=1e-5,
rtol=0,
exact_device=False,
)
self.assertEqual(
conv_cpu.weight.grad.data,
conv_cuda.weight.grad.data,
atol=1e-5,
rtol=0,
exact_device=False,
)
def test_ConvTranspose2d_size_1_kernel(self, device):
x_cpu = torch.randn(2, 3, 5, 5)
conv_cpu = torch.nn.ConvTranspose2d(3, 3, kernel_size=1)
y_cpu = conv_cpu(x_cpu)
y = torch.rand_like(y_cpu)
y_cpu.backward(y)
conv_cuda = torch.nn.ConvTranspose2d(3, 3, kernel_size=1).to(device)
conv_cuda.bias.data.copy_(conv_cpu.bias.data)
conv_cuda.weight.data.copy_(conv_cpu.weight.data)
y_cuda = conv_cuda(x_cpu.to(device))
y_cuda.backward(y.to(device))
self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(
conv_cpu.bias.grad.data,
conv_cuda.bias.grad.data,
atol=1e-5,
rtol=0,
exact_device=False,
)
self.assertEqual(
conv_cpu.weight.grad.data,
conv_cuda.weight.grad.data,
atol=1e-5,
rtol=0,
exact_device=False,
)
# 1x1-kernel CPU-vs-device parity check for ConvTranspose3d; the whole
# test runs with a double default dtype to tighten numerical agreement.
def test_ConvTranspose3d_size_1_kernel(self, device):
with set_default_dtype(torch.double):
x_cpu = torch.randn(2, 3, 3, 5, 5)
conv_cpu = torch.nn.ConvTranspose3d(3, 3, kernel_size=1)
y_cpu = conv_cpu(x_cpu)
y = torch.rand_like(y_cpu)
y_cpu.backward(y)
conv_cuda = torch.nn.ConvTranspose3d(3, 3, kernel_size=1).to(device)
# Copy the CPU parameters so both modules compute the same function.
conv_cuda.bias.data.copy_(conv_cpu.bias.data)
conv_cuda.weight.data.copy_(conv_cpu.weight.data)
y_cuda = conv_cuda(x_cpu.to(device))
y_cuda.backward(y.to(device))
# exact_device=False: CPU reference vs. test-device result.
self.assertEqual(y_cpu, y_cuda, atol=1e-5, rtol=0, exact_device=False)
self.assertEqual(
conv_cpu.bias.grad.data,
conv_cuda.bias.grad.data,
atol=1e-5,
rtol=0,
exact_device=False,
)
self.assertEqual(
conv_cpu.weight.grad.data,
conv_cuda.weight.grad.data,
atol=1e-5,
rtol=0,
exact_device=False,
)
# A grouped Conv2d (4->4 channels, groups=2) must behave like two
# independent 2->2 convs, one per channel half: output and all gradients
# are compared against the concatenation of the per-group results.
@dtypes(torch.float)
def test_Conv2d_naive_groups(self, device, dtype):
m = nn.Conv2d(4, 4, kernel_size=3, groups=2).to(device, dtype)
i = torch.randn(2, 4, 6, 6, device=device, dtype=dtype, requires_grad=True)
output = m(i)
grad_output = torch.randn(2, 4, 4, 4, device=device, dtype=dtype)
output.backward(grad_output)
# Group 1: first half of the channels via an ungrouped 2->2 conv.
m1 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
m1.weight.data.copy_(m.weight.data[:2])
m1.bias.data.copy_(m.bias.data[:2])
i1 = i.data[:, :2].contiguous().requires_grad_(True)
output1 = m1(i1)
output1.backward(grad_output[:, :2].contiguous())
# Group 2: second half of the channels.
m2 = nn.Conv2d(2, 2, kernel_size=3).to(device, dtype)
m2.weight.data.copy_(m.weight.data[2:])
m2.bias.data.copy_(m.bias.data[2:])
i2 = i.data[:, 2:].contiguous().requires_grad_(True)
output2 = m2(i2)
output2.backward(grad_output[:, 2:].contiguous())
self.assertEqual(output, torch.cat([output1, output2], 1))
# Gradients of the grouped conv must equal the concatenated per-group
# gradients; tolerance comes from the per-dtype precision table.
self.assertEqual(
i.grad.data,
torch.cat([i1.grad.data, i2.grad.data], 1),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
self.assertEqual(
m.bias.grad.data,
torch.cat([m1.bias.grad.data, m2.bias.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
self.assertEqual(
m.weight.grad.data,
torch.cat([m1.weight.grad.data, m2.weight.grad.data], 0),
atol=dtype2prec_DONTUSE[dtype],
rtol=0,
)
@dtypes(torch.double)
def test_Conv2d_backward_depthwise(self, device, dtype):
    """Numerically gradcheck a bias-free depthwise conv2d (groups == in
    channels) with an asymmetric stride, in double precision."""
    inp = torch.randn(2, 2, 4, 20, device=device, dtype=dtype, requires_grad=True)
    wgt = torch.randn(2, 1, 3, 5, device=device, dtype=dtype, requires_grad=True)

    def depthwise_conv(inp, wgt):
        # groups=2 with 2 input channels makes this a depthwise conv.
        return torch.nn.functional.conv2d(
            inp, wgt, bias=None, stride=(1, 10), groups=2
        )

    torch.autograd.gradcheck(depthwise_conv, (inp, wgt))
# A channels_last (NHWC) Conv2d on the device must match a
# double-precision, contiguous-layout reference with identical
# parameters, and outputs/gradients must keep their memory formats.
@dtypes(torch.half, torch.float)
def test_conv_cudnn_nhwc(self, device, dtype):
def helper(n, c, h, w, out_channels, kernel_size, groups):
# Small-integer data/params -- presumably to keep results exactly
# representable even in half precision; TODO confirm.
input = torch.randint(-3, 3, (n, c, h, w), dtype=dtype, device=device).to(
memory_format=torch.channels_last
)
input.requires_grad_()
conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups).to(
device=device, dtype=dtype, memory_format=torch.channels_last
)
for p in conv.parameters():
p.data = torch.randint_like(p, -3, 3)
# Reference: same parameters, but double precision and NCHW layout.
ref_input = input.detach().clone().contiguous().double().requires_grad_()
ref_conv = nn.Conv2d(c, out_channels, kernel_size, groups=groups)
ref_conv.load_state_dict(conv.state_dict())
ref_conv = ref_conv.to(
device=device, dtype=torch.double, memory_format=torch.contiguous_format
)
out = conv(input)
ref_out = ref_conv(ref_input)
grad = torch.randint_like(out, -3, 3)
ref_grad = grad.detach().clone().double().contiguous()
out.backward(grad)
ref_out.backward(ref_grad)
# Memory-format propagation: NHWC path stays channels_last,
# reference path stays contiguous.
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(input.grad.is_contiguous(memory_format=torch.channels_last))
self.assertTrue(
conv.weight.grad.is_contiguous(memory_format=torch.channels_last)
)
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ref_input.grad.is_contiguous())
self.assertTrue(ref_conv.weight.grad.is_contiguous())
# exact_dtype=False: dtype-under-test vs. double reference.
self.assertEqual(out, ref_out, exact_dtype=False)
self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
# Cover ungrouped and fully-grouped (depthwise-style) configurations.
helper(2, 8, 4, 4, out_channels=4, kernel_size=3, groups=1)
helper(2, 8, 4, 4, out_channels=8, kernel_size=3, groups=8)
helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=1)
helper(1, 16, 56, 56, out_channels=16, kernel_size=3, groups=16)
# 3-D analogue of test_conv_cudnn_nhwc: channels_last_3d (NDHWC) Conv3d
# vs. a double-precision contiguous reference with identical parameters.
@dtypes(torch.half, torch.float)
def test_conv_cudnn_ndhwc(self, device, dtype):
def helper(n, c, d, h, w, out_channels, kernel_size, groups):
# Small-integer data/params -- presumably so results stay exact
# even in half precision; TODO confirm.
input = torch.randint(
-2, 2, (n, c, d, h, w), dtype=dtype, device=device
).to(memory_format=torch.channels_last_3d)
input.requires_grad_()
conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups).to(
device=device, dtype=dtype, memory_format=torch.channels_last_3d
)
for p in conv.parameters():
p.data = torch.randint_like(p, -2, 2)
# Reference: same parameters, double precision, NCDHW layout.
ref_input = input.detach().clone().contiguous().double().requires_grad_()
ref_conv = nn.Conv3d(c, out_channels, kernel_size, groups=groups)
ref_conv.load_state_dict(conv.state_dict())
ref_conv = ref_conv.to(
device=device, dtype=torch.double, memory_format=torch.contiguous_format
)
out = conv(input)
ref_out = ref_conv(ref_input)
grad = torch.randint_like(out, -2, 2)
ref_grad = grad.detach().clone().double().contiguous()
out.backward(grad)
ref_out.backward(ref_grad)
# Memory-format propagation checks for the NDHWC path.
self.assertTrue(out.is_contiguous(memory_format=torch.channels_last_3d))
self.assertTrue(
input.grad.is_contiguous(memory_format=torch.channels_last_3d)
)
self.assertTrue(
conv.weight.grad.is_contiguous(memory_format=torch.channels_last_3d)
)
self.assertTrue(ref_out.is_contiguous())
self.assertTrue(ref_input.grad.is_contiguous())
self.assertTrue(ref_conv.weight.grad.is_contiguous())
# exact_dtype=False: dtype-under-test vs. double reference.
self.assertEqual(out, ref_out, exact_dtype=False)
self.assertEqual(conv.weight.grad, ref_conv.weight.grad, exact_dtype=False)
self.assertEqual(conv.bias.grad, ref_conv.bias.grad, exact_dtype=False)
self.assertEqual(input.grad, ref_input.grad, exact_dtype=False)
# Cover ungrouped and fully-grouped configurations.
helper(2, 8, 4, 4, 4, out_channels=4, kernel_size=3, groups=1)
helper(2, 8, 4, 4, 4, out_channels=8, kernel_size=3, groups=8)
helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=1)
helper(1, 16, 18, 18, 18, out_channels=16, kernel_size=3, groups=16)
# Helper: build a fresh conv layer from ref_conv's state, force the given
# memory formats onto its weight, input, and incoming grad, run
# forward+backward, and compare outputs/gradients against the reference
# run produced by the caller.
def _run_conv(
self,
layer,
device,
inp,
grad,
ref_conv,
ref_input,
ref_out,
input_format,
weight_format,
grad_format,
output_format,
):
conv = (
layer(inp.size(1), grad.size(1), ref_conv.weight.size(2)).float().to(device)
)
conv.load_state_dict(ref_conv.state_dict())
# resize_(size, memory_format=...) pins the requested physical layout
# on a tensor whose values were already made contiguous in that format.
weight_data = (
conv.weight.detach().clone().contiguous(memory_format=weight_format)
)
conv.weight.data = weight_data.resize_(
weight_data.size(), memory_format=weight_format
)
input = inp.clone().contiguous(memory_format=input_format)
input.resize_(input.size(), memory_format=input_format)
input = input.requires_grad_()
grad = grad.contiguous(memory_format=grad_format)
grad.resize_(grad.size(), memory_format=grad_format)
out = conv(input)
out.backward(grad)
# The caller predicts the expected output layout (output_format).
self.assertTrue(out.is_contiguous(memory_format=output_format))
self.assertEqual(out, ref_out)
self.assertEqual(conv.weight.grad, ref_conv.weight.grad)
self.assertEqual(conv.bias.grad, ref_conv.bias.grad)
self.assertEqual(input.grad, ref_input.grad)
# Sweep every combination of weight / grad-output / input memory format
# (contiguous vs. channels_last) and check each against one contiguous
# reference run, delegating the per-combination check to _run_conv.
def _test_conv_cudnn_nhwc_nchw(self, layer, n, c, h, w, k, filter_size, device):
data = torch.randint(1, 10, (n, c, h, w), dtype=torch.float32, device=device)
ref_input = data.clone().contiguous().requires_grad_(True)
ref_conv = layer(c, k, filter_size).float().to(device)
ref_out = ref_conv(ref_input)
grad = torch.randint(1, 10, ref_out.size(), dtype=torch.float32, device=device)
ref_out.backward(grad)
for w_f in [torch.contiguous_format, torch.channels_last]:
for g_f in [torch.contiguous_format, torch.channels_last]:
for input_format in [torch.contiguous_format, torch.channels_last]:
# Expected output layout: channels_last wins when either the
# input or the weight is channels_last.
output_format = torch.contiguous_format
if input_format == torch.channels_last:
output_format = torch.channels_last
if w_f == torch.channels_last:
output_format = torch.channels_last
self._run_conv(
layer,
device,
data,
grad,
ref_conv,
ref_input,
ref_out,
input_format,
w_f,
g_f,
output_format,
)
@dtypes(torch.float, torch.double)
def test_conv_cudnn_nhwc_support(self, device, dtype):
    """conv2d must accept a channels_last weight alongside a contiguous
    input, emit a channels_last output, and support backward through it."""
    inp = torch.randn(
        (1, 16, 1, 1), dtype=dtype, device=device, requires_grad=True
    )
    wgt = torch.randn(
        (8, 16, 3, 3), dtype=dtype, device=device, requires_grad=True
    ).to(memory_format=torch.channels_last)
    # stride=(2, 1), padding=(1, 1), dilation=(1, 1), groups=1
    result = torch.conv2d(inp, wgt, None, (2, 1), (1, 1), (1, 1), 1)
    self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
    result.sum().backward()
@dtypes(torch.float)
def test_conv2d_no_grad(self, device, dtype):
    """A Conv2d forward run under torch.no_grad() must match the same
    forward run with grad tracking enabled, across batch sizes/groups."""
    for batch_size in (1, 2, 3):
        for num_groups in (1, 2, 4):
            x = torch.rand(batch_size, num_groups, 8, 8, dtype=dtype, device=device)
            layer = nn.Conv2d(
                num_groups,
                8,
                kernel_size=(3, 3),
                groups=num_groups,
                dtype=dtype,
                device=device,
            )
            with torch.no_grad():
                out_no_grad = layer(x)
            out_with_grad = layer(x)
            self.assertEqual(out_with_grad, out_no_grad, rtol=1e-2, atol=1e-5)
@unittest.skipIf(torch.xpu.device_count() < 2, "only one GPU detected")
@dtypes(torch.double, torch.float, torch.half)
def test_conv2d_on_multi_device(self, dtype):
input = torch.randn(3, 256, 224, 224, dtype=dtype, requires_grad=True)
conv = torch.nn.Conv2d(256, 256, kernel_size=3, padding=1, dtype=dtype)
output_grad = torch.randn(3, 256, 224, 224, dtype=dtype)
input_0 = input.to(device="xpu:0")
conv_0 = copy.deepcopy(conv).to(device="xpu:0")
output_0 = conv_0(input_0)
input_1 = input.to(device="xpu:1")
conv_1 = copy.deepcopy(conv).to(device="xpu:1")
output_1 = conv_1(input_1)
self.assertEqual(output_0.cpu(), output_1.cpu())
output_grad_0 = output_grad.to(device="xpu:0")
output_0.backward(output_grad_0)
output_grad_1 = output_grad.to(device="xpu:1")
output_1.backward(output_grad_1)
self.assertEqual(output_grad_0.cpu(), output_grad_1.cpu())
# Smoke-test aten._convolution_double_backward for a strided 1-D
# convolution with 3-D input/weight: only the shapes of the returned
# gradients are validated, not their values.
def test_conv_double_backward_strided_with_3D_input_and_weight(self, device):
input = torch.randn(2, 3, 6, device=device)
weight = torch.randn(3, 3, 3, device=device)
bias = torch.randn(3, device=device)
stride = (2,)
padding = (1,)
dilation = (1,)
transposed = False
output_padding = (0,)
groups = 1
# Forward pass through the raw aten convolution op.
output = torch.ops.aten.convolution(
input,
weight,
bias,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
)
# Gradients-of-gradients for input/weight/bias and an upstream grad.
ggI = torch.randn(input.shape, device=device)
ggW = torch.randn(weight.shape, device=device)
ggB = torch.randn(bias.shape, device=device)
gO = torch.randn(output.shape, device=device)
# Request all three outputs of the double-backward.
output_mask = [True, True, True]
(
grad_grad_output,
grad_input,
grad_weight,
) = torch.ops.aten._convolution_double_backward(
ggI,
ggW,
ggB,
gO,
weight,
input,
stride,
padding,
dilation,
transposed,
output_padding,
groups,
output_mask,
)
self.assertEqual(grad_grad_output.shape, gO.shape)
self.assertEqual(grad_input.shape, input.shape)
self.assertEqual(grad_weight.shape, weight.shape)
# conv2d on channels_last input and weight must produce a channels_last
# (NHWC-strided) output with exactly the expected sizes and strides.
# NOTE(review): "ouput" in the test name is a typo for "output"; renaming
# is deliberately avoided here since test IDs are matched by name in CI
# filters -- fix upstream together with any filter lists.
@onlyXPU
@dtypes(torch.float16, torch.bfloat16, torch.float32, torch.float64)
def test_channels_last_ouput_stride(self, device, dtype):
input = torch.randn(
(2, 3, 16, 16), device=device, dtype=dtype, requires_grad=True
)
weight = torch.randn(
(512, 3, 3, 3), device=device, dtype=dtype, requires_grad=True
)
input = input.to(memory_format=torch.channels_last)
weight = weight.to(memory_format=torch.channels_last)
# stride=(2, 2), padding=(0, 0), dilation=(1, 1), groups=1
out = torch.conv2d(input, weight, None, (2, 2), (0, 0), (1, 1), 1)
# input NHWC, output NHWC
assert_size_stride(out, (2, 512, 7, 7), (25088, 1, 3584, 512))
@onlyXPU
def test_onednn_allow_tf32_get_set(self):
    """The mkldnn backend flags context manager must set and expose the
    allow_tf32 flag for both False and True."""
    for flag_value, check in ((False, self.assertFalse), (True, self.assertTrue)):
        with torch.backends.mkldnn.flags(
            enabled=None, deterministic=None, allow_tf32=flag_value
        ):
            check(torch.backends.mkldnn.allow_tf32)
# Generate per-device variants of the tests above, restricted to the XPU
# backend.
instantiate_device_type_tests(
TestConvolutionNNDeviceType, globals(), only_for="xpu", allow_xpu=True
)
# Standard PyTorch test-suite entry point.
if __name__ == "__main__":
run_tests()
| TestConvolutionNNDeviceType |
python | matplotlib__matplotlib | galleries/examples/images_contours_and_fields/colormap_normalizations.py | {
"start": 3259,
"end": 5402
} | class ____(colors.Normalize):
def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):
self.midpoint = midpoint
super().__init__(vmin, vmax, clip)
def __call__(self, value, clip=None):
# I'm ignoring masked values and all kinds of edge cases to make a
# simple example...
x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]
return np.ma.masked_array(np.interp(value, x, y))
# %%
fig, ax = plt.subplots(2, 1)
pcm = ax[0].pcolormesh(X, Y, Z, cmap='RdBu_r', shading='nearest',
vmin=-np.max(Z))
fig.colorbar(pcm, ax=ax[0], extend='both', label='linear scaling')
pcm = ax[1].pcolormesh(X, Y, Z, cmap='RdBu_r', shading='nearest',
norm=MidpointNormalize(midpoint=0))
fig.colorbar(pcm, ax=ax[1], extend='both', label='Custom norm')
# %%
# BoundaryNorm
# ------------
# For arbitrarily dividing the color scale, the `.BoundaryNorm` may be used; by
# providing the boundaries for colors, this norm puts the first color in between the
# first pair, the second color between the second pair, etc.
fig, ax = plt.subplots(3, 1, layout='constrained')
pcm = ax[0].pcolormesh(X, Y, Z, cmap='RdBu_r', shading='nearest',
vmin=-np.max(Z))
fig.colorbar(pcm, ax=ax[0], extend='both', orientation='vertical',
label='linear scaling')
# Evenly-spaced bounds gives a contour-like effect.
bounds = np.linspace(-2, 2, 11)
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
pcm = ax[1].pcolormesh(X, Y, Z, cmap='RdBu_r', shading='nearest',
norm=norm)
fig.colorbar(pcm, ax=ax[1], extend='both', orientation='vertical',
label='BoundaryNorm\nlinspace(-2, 2, 11)')
# Unevenly-spaced bounds changes the colormapping.
bounds = np.array([-1, -0.5, 0, 2.5, 5])
norm = colors.BoundaryNorm(boundaries=bounds, ncolors=256)
pcm = ax[2].pcolormesh(X, Y, Z, cmap='RdBu_r', shading='nearest',
norm=norm)
fig.colorbar(pcm, ax=ax[2], extend='both', orientation='vertical',
label='BoundaryNorm\n[-1, -0.5, 0, 2.5, 5]')
plt.show()
| MidpointNormalize |
python | pypa__pipenv | pipenv/vendor/click/shell_completion.py | {
"start": 5279,
"end": 9048
} | class ____:
"""Base class for providing shell completion support. A subclass for
a given shell will override attributes and methods to implement the
completion instructions (``source`` and ``complete``).
:param cli: Command being called.
:param prog_name: Name of the executable in the shell.
:param complete_var: Name of the environment variable that holds
the completion instruction.
.. versionadded:: 8.0
"""
name: t.ClassVar[str]
"""Name to register the shell as with :func:`add_completion_class`.
This is used in completion instructions (``{name}_source`` and
``{name}_complete``).
"""
source_template: t.ClassVar[str]
"""Completion script template formatted by :meth:`source`. This must
be provided by subclasses.
"""
def __init__(
self,
cli: BaseCommand,
ctx_args: t.MutableMapping[str, t.Any],
prog_name: str,
complete_var: str,
) -> None:
self.cli = cli
self.ctx_args = ctx_args
self.prog_name = prog_name
self.complete_var = complete_var
@property
def func_name(self) -> str:
"""The name of the shell function defined by the completion
script.
"""
safe_name = re.sub(r"\W*", "", self.prog_name.replace("-", "_"), flags=re.ASCII)
return f"_{safe_name}_completion"
def source_vars(self) -> t.Dict[str, t.Any]:
"""Vars for formatting :attr:`source_template`.
By default this provides ``complete_func``, ``complete_var``,
and ``prog_name``.
"""
return {
"complete_func": self.func_name,
"complete_var": self.complete_var,
"prog_name": self.prog_name,
}
def source(self) -> str:
"""Produce the shell script that defines the completion
function. By default this ``%``-style formats
:attr:`source_template` with the dict returned by
:meth:`source_vars`.
"""
return self.source_template % self.source_vars()
def get_completion_args(self) -> t.Tuple[t.List[str], str]:
"""Use the env vars defined by the shell script to return a
tuple of ``args, incomplete``. This must be implemented by
subclasses.
"""
raise NotImplementedError
def get_completions(
self, args: t.List[str], incomplete: str
) -> t.List[CompletionItem]:
"""Determine the context and last complete command or parameter
from the complete args. Call that object's ``shell_complete``
method to get the completions for the incomplete value.
:param args: List of complete args before the incomplete value.
:param incomplete: Value being completed. May be empty.
"""
ctx = _resolve_context(self.cli, self.ctx_args, self.prog_name, args)
obj, incomplete = _resolve_incomplete(ctx, args, incomplete)
return obj.shell_complete(ctx, incomplete)
def format_completion(self, item: CompletionItem) -> str:
"""Format a completion item into the form recognized by the
shell script. This must be implemented by subclasses.
:param item: Completion item to format.
"""
raise NotImplementedError
def complete(self) -> str:
"""Produce the completion data to send back to the shell.
By default this calls :meth:`get_completion_args`, gets the
completions, then calls :meth:`format_completion` for each
completion.
"""
args, incomplete = self.get_completion_args()
completions = self.get_completions(args, incomplete)
out = [self.format_completion(item) for item in completions]
return "\n".join(out)
| ShellComplete |
python | dask__dask | dask/utils.py | {
"start": 13517,
"end": 19285
} | class ____:
"""Provide getitem syntax for functions
>>> def inc(x):
... return x + 1
>>> I = IndexCallable(inc)
>>> I[3]
4
"""
__slots__ = ("fn",)
def __init__(self, fn):
self.fn = fn
def __getitem__(self, key):
return self.fn(key)
@contextmanager
def filetexts(d, open=open, mode="t", use_tmpdir=True):
"""Dumps a number of textfiles to disk
Parameters
----------
d : dict
a mapping from filename to text like {'a.csv': '1,1\n2,2'}
Since this is meant for use in tests, this context manager will
automatically switch to a temporary current directory, to avoid
race conditions when running tests in parallel.
"""
with tmp_cwd() if use_tmpdir else nullcontext():
for filename, text in d.items():
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
f = open(filename, f"w{mode}")
try:
f.write(text)
finally:
try:
f.close()
except AttributeError:
pass
yield list(d)
for filename in d:
if os.path.exists(filename):
with suppress(OSError):
os.remove(filename)
def concrete(seq):
"""Make nested iterators concrete lists
>>> data = [[1, 2], [3, 4]]
>>> seq = iter(map(iter, data))
>>> concrete(seq)
[[1, 2], [3, 4]]
"""
if isinstance(seq, Iterator):
seq = list(seq)
if isinstance(seq, (tuple, list)):
seq = list(map(concrete, seq))
return seq
def pseudorandom(n: int, p, random_state=None):
"""Pseudorandom array of integer indexes
>>> pseudorandom(5, [0.5, 0.5], random_state=123)
array([1, 0, 0, 1, 1], dtype=int8)
>>> pseudorandom(10, [0.5, 0.2, 0.2, 0.1], random_state=5)
array([0, 2, 0, 3, 0, 1, 2, 1, 0, 0], dtype=int8)
"""
import numpy as np
p = list(p)
cp = np.cumsum([0] + p)
assert np.allclose(1, cp[-1])
assert len(p) < 256
if not isinstance(random_state, np.random.RandomState):
random_state = np.random.RandomState(random_state)
x = random_state.random_sample(n)
out = np.empty(n, dtype="i1")
for i, (low, high) in enumerate(zip(cp[:-1], cp[1:])):
out[(x >= low) & (x < high)] = i
return out
def random_state_data(n: int, random_state=None) -> list:
"""Return a list of arrays that can initialize
``np.random.RandomState``.
Parameters
----------
n : int
Number of arrays to return.
random_state : int or np.random.RandomState, optional
If an int, is used to seed a new ``RandomState``.
"""
import numpy as np
if not all(
hasattr(random_state, attr) for attr in ["normal", "beta", "bytes", "uniform"]
):
random_state = np.random.RandomState(random_state)
random_data = random_state.bytes(624 * n * 4) # `n * 624` 32-bit integers
l = list(np.frombuffer(random_data, dtype="<u4").reshape((n, -1)))
assert len(l) == n
return l
def is_integer(i) -> bool:
"""
>>> is_integer(6)
True
>>> is_integer(42.0)
True
>>> is_integer('abc')
False
"""
return isinstance(i, Integral) or (isinstance(i, float) and i.is_integer())
ONE_ARITY_BUILTINS = {
abs,
all,
any,
ascii,
bool,
bytearray,
bytes,
callable,
chr,
classmethod,
complex,
dict,
dir,
enumerate,
eval,
float,
format,
frozenset,
hash,
hex,
id,
int,
iter,
len,
list,
max,
min,
next,
oct,
open,
ord,
range,
repr,
reversed,
round,
set,
slice,
sorted,
staticmethod,
str,
sum,
tuple,
type,
vars,
zip,
memoryview,
}
MULTI_ARITY_BUILTINS = {
compile,
delattr,
divmod,
filter,
getattr,
hasattr,
isinstance,
issubclass,
map,
pow,
setattr,
}
def getargspec(func):
"""Version of inspect.getargspec that works with partial and warps."""
if isinstance(func, functools.partial):
return getargspec(func.func)
func = getattr(func, "__wrapped__", func)
if isinstance(func, type):
return inspect.getfullargspec(func.__init__)
else:
return inspect.getfullargspec(func)
def takes_multiple_arguments(func, varargs=True):
"""Does this function take multiple arguments?
>>> def f(x, y): pass
>>> takes_multiple_arguments(f)
True
>>> def f(x): pass
>>> takes_multiple_arguments(f)
False
>>> def f(x, y=None): pass
>>> takes_multiple_arguments(f)
False
>>> def f(*args): pass
>>> takes_multiple_arguments(f)
True
>>> class Thing:
... def __init__(self, a): pass
>>> takes_multiple_arguments(Thing)
False
"""
if func in ONE_ARITY_BUILTINS:
return False
elif func in MULTI_ARITY_BUILTINS:
return True
try:
spec = getargspec(func)
except Exception:
return False
try:
is_constructor = spec.args[0] == "self" and isinstance(func, type)
except Exception:
is_constructor = False
if varargs and spec.varargs:
return True
ndefaults = 0 if spec.defaults is None else len(spec.defaults)
return len(spec.args) - ndefaults - is_constructor > 1
def get_named_args(func) -> list[str]:
"""Get all non ``*args/**kwargs`` arguments for a function"""
s = inspect.signature(func)
return [
n
for n, p in s.parameters.items()
if p.kind in [p.POSITIONAL_OR_KEYWORD, p.POSITIONAL_ONLY, p.KEYWORD_ONLY]
]
| IndexCallable |
python | pytorch__pytorch | torch/distributions/constraints.py | {
"start": 10415,
"end": 10599
} | class ____(Constraint):
"""
Constrain to the two values `{0, 1}`.
"""
is_discrete = True
def check(self, value):
return (value == 0) | (value == 1)
| _Boolean |
python | pytorch__pytorch | test/distributed/optim/test_zero_redundancy_optimizer.py | {
"start": 11298,
"end": 52298
} | class ____(TestZeroRedundancyOptimizer):
@property
def world_size(self):
return min(4, max(2, torch.get_device_module(self.device).device_count()))
@property
def context(self):
if requires_ddp_rank(self.device):
return torch.get_device_module(self.device).device(self.rank)
else:
return nullcontext()
def _check_same_model_params(
self,
model_a: torch.nn.Module,
model_b: torch.nn.Module,
message: str = "",
) -> None:
# Check that model parameters match
for p_a, p_b in zip(model_a.parameters(), model_b.parameters()):
torch.testing.assert_close(
p_a,
p_b,
atol=1e-3,
rtol=1e-5,
msg=f"Model parameters differ:\n{p_a} {p_b}\n" + message,
)
# Check that model buffers match
for b_a, b_b in zip(model_a.buffers(), model_b.buffers()):
torch.testing.assert_close(
b_a,
b_b,
msg=f"Model buffers differ:\n{b_a} {b_b}\n" + message,
)
@skip_if_no_gpu
@skip_if_rocm_multiprocess
def test_step(self):
"""Check that ZeroRedundancyOptimizer properly exposes the ``step()``
interface."""
self.create_pg(self.device)
LR = 0.01
with self.context:
x = torch.tensor([float(self.rank + 1)], device=self.device)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[1.0]])
m.bias.data = torch.tensor([2.0])
m = m.to(self.device)
m_zero = copy.deepcopy(m).to(self.device)
o = SGD(m.parameters(), lr=LR)
o_zero = ZeroRedundancyOptimizer(
m_zero.parameters(),
optimizer_class=SGD,
lr=LR,
)
y = m(x)
y.backward(x)
y_zero = m_zero(x)
y_zero.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
o.step()
for p in m_zero.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
o_zero.step()
self.assertEqual(m.weight, m_zero.weight)
self.assertEqual(m.bias, m_zero.bias)
@skip_if_no_gpu
@skip_if_rocm_multiprocess
def test_step_with_closure(self):
"""Check that ZeroRedundancyOptimizer properly exposes the
``step(closure)`` interface."""
self.create_pg(self.device)
with self.context:
for bucket_view in [False, True]:
x_val = self.rank + 1
weight = 1.0
bias = 2.0
error = 1.0
target = torch.tensor(
[x_val * weight + bias + error],
device=self.device,
)
loss_fn = torch.nn.L1Loss()
x = torch.tensor([float(x_val)], device=self.device)
m = torch.nn.Linear(1, 1)
m.weight.data = torch.tensor([[weight]])
m.bias.data = torch.tensor([bias])
m.to(self.device)
o = ZeroRedundancyOptimizer(
m.parameters(),
optimizer_class=SGD,
parameters_as_bucket_view=bucket_view,
lr=0.1,
)
y = m(x)
y.backward(x)
for p in m.parameters():
dist.all_reduce(p.grad.data, op=dist.ReduceOp.SUM)
p.grad.data /= self.world_size
def closure():
o.zero_grad()
output = m(x)
loss = loss_fn(output, target)
loss.backward()
return loss
loss = o.step(closure=closure)
self.assertEqual(loss, torch.tensor(error))
self.assertEqual(m.weight, torch.tensor([[1.1]]))
self.assertEqual(m.bias, torch.tensor([2.1]))
@skip_if_no_gpu
def test_lr_scheduler(self):
"""Check that a normal PyTorch ``lr_scheduler`` is usable with
ZeroRedundancyOptimizer."""
self.create_pg(self.device)
x = torch.tensor([1.0], device=self.device, requires_grad=True)
x2 = torch.tensor([1.0], device=self.device, requires_grad=True)
o = ZeroRedundancyOptimizer([x], optimizer_class=SGD, lr=0.01)
o2 = torch.optim.SGD([x2], lr=0.01)
s = torch.optim.lr_scheduler.StepLR(o, 1)
s2 = torch.optim.lr_scheduler.StepLR(o2, 1)
for _ in range(5):
x.backward()
o.zero_grad()
o.step()
s.step()
x2.backward()
o2.zero_grad()
o2.step()
s2.step()
self.assertEqual(x, x2)
def test_sharding(self):
"""
Check ZeroRedundancyOptimizer's parameter sharding at construction
time.
NOTE: The correctness of this test depends on the ZeRO implementation
using the sorted-greedy partitioning algorithm. For details, see
``ZeroRedundancyOptimizer._partition_parameters()`` in
zero_redundancy_optimizer.py.
"""
self.create_pg(self.device)
LR = 0.01
sizes = [9, 7, 5, 3]
params = []
for size in sizes * self.world_size:
params.append(torch.rand(size, 1))
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
self.assertEqual(
sum(x.numel() for x in o.optim.param_groups[0]["params"]),
sum(sizes),
)
def test_add_param_group(self):
"""Check that ZeroRedundancyOptimizer properly handles adding a new
parameter group a posteriori and that all ranks get a shard of the
contained parameters.
NOTE: The correctness of this test depends on the ZeRO implementation
using the sorted-greedy partitioning algorithm. For details, see
``ZeroRedundancyOptimizer._partition_parameters()`` in
zero_redundancy_optimizer.py.
"""
self.create_pg(self.device)
LR = 0.01
# Test with all parameters trainable to begin with
def all_trainable():
params = []
sizes = [9, 7, 5, 3]
sizes_world = sizes * self.world_size
for size in sizes_world[:-1]:
params.append(torch.rand(size, 1))
# Make sure that the params are trainable so that they are factored
# into the size-based parameter partitioning
for p in params:
p.requires_grad = True
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
self.assertEqual(len(o.param_groups), 1)
o.add_param_group({"params": [torch.rand(3, 1)]})
# Verify that new group is added to the correct partition, making
# all partitions have the same elements
self.assertEqual(len(o.param_groups), 2)
self.assertEqual(
sum(x.numel() for g in o.optim.param_groups for x in g["params"]),
sum(sizes),
)
self.assertEqual(len(o.optim.param_groups), 2)
# Test a pathological config with a first big non-trainable param
def some_trainable():
params = []
for size in [100, 3, 5, 2, 6, 4]:
params.append(torch.rand(size, 1))
# Make sure that all but the first param are trainable so that they
# are factored into the size-based parameter partitioning
for p in params[1:]:
p.requires_grad = True
o = ZeroRedundancyOptimizer(params, optimizer_class=SGD, lr=LR)
self.assertEqual(len(o.param_groups), 1)
o.add_param_group({"params": [torch.rand(3, 1)]})
self.assertEqual(len(o.param_groups), 2)
self.assertEqual(len(o.optim.param_groups), 2)
all_trainable()
some_trainable()
@skip_if_no_gpu
def test_multiple_param_groups(self):
"""
Check parity between constructing ZeRO with multiple parameter groups
upfront versus adding parameter groups to ZeRO after construction
versus a non-sharded optimizer.
"""
self.create_pg(self.device)
BATCH_SIZE, NUM_ITERS = 8, 3
INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM = 5, 10, 5
WD, LR = 0.01, 0.01
model1 = torch.nn.Sequential(
torch.nn.Linear(INPUT_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, OUTPUT_DIM),
)
model2 = copy.deepcopy(model1)
model3 = copy.deepcopy(model1)
model1 = model1.to(self.device)
model2 = model2.to(self.device)
model3 = model3.to(self.device)
inputs = [
torch.randn(BATCH_SIZE, INPUT_DIM).to(self.device) for _ in range(NUM_ITERS)
]
# Construct `optim1` with both parameter groups upfront
optim1 = ZeroRedundancyOptimizer(
[
{"params": [l.weight for l in model1], "weight_decay": 0.0},
{"params": [l.bias for l in model1], "weight_decay": WD},
],
optimizer_class=AdamW,
lr=LR,
)
# Construct `optim2` by adding the second parameter after
optim2 = ZeroRedundancyOptimizer(
[l.weight for l in model2],
optimizer_class=AdamW,
lr=LR,
weight_decay=0.0,
)
optim2.add_param_group({"params": [l.bias for l in model2], "weight_decay": WD})
# Construct `optim3` as a non-sharded optimizer
optim3 = AdamW(
[
{"params": [l.weight for l in model3], "weight_decay": 0.0},
{"params": [l.bias for l in model3], "weight_decay": WD},
],
lr=LR,
)
# Check parity over a few iterations
for input in inputs:
for model, optim in (
(model1, optim1),
(model2, optim2),
(model3, optim3),
):
optim.zero_grad()
out = model(input)
loss = out.sum()
loss.backward()
optim.step()
for layer1, layer2, layer3 in zip(model1, model2, model3):
torch.testing.assert_close(layer1.weight, layer2.weight)
torch.testing.assert_close(layer1.weight, layer3.weight)
torch.testing.assert_close(layer1.bias, layer2.bias)
torch.testing.assert_close(layer1.bias, layer3.bias)
@skip_if_no_gpu
@skip_if_rocm_multiprocess
def test_collect_shards(self):
"""Check the state consolidation mechanism and the state dict exposed
by ZeroRedundancyOptimizer."""
self.create_pg(self.device)
LR = 1e-3
MOMENTUM = 0.99
BATCH_SIZE, INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM = 3, 20, 10, 5
REFERENCE_RANK = 0
target = torch.rand((BATCH_SIZE, OUTPUT_DIM), device=self.device)
inputs = torch.rand((BATCH_SIZE, INPUT_DIM), device=self.device)
model = torch.nn.Sequential(
torch.nn.Linear(INPUT_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, OUTPUT_DIM),
).to(self.device)
loss_fn = torch.nn.L1Loss()
loss_fn.to(self.device)
optimizer = ZeroRedundancyOptimizer(
model.parameters(),
optimizer_class=SGD,
lr=LR,
momentum=MOMENTUM, # ensure there exists state to shard
)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss.backward()
return loss
# Run a dummy step so that the optimizer state dict exists
_ = optimizer.step(closure=closure)
# Get the optimizer state on the reference rank
optimizer.consolidate_state_dict(to=REFERENCE_RANK)
if self.rank == REFERENCE_RANK:
# Check that the state has the correct size
optimizer_state_dict = optimizer.state_dict()
self.assertEqual(
len(optimizer_state_dict["state"]),
len(list(model.parameters())),
)
else:
optimizer_state_dict = {}
# Load the optimizer state on all ranks without any exceptions
optimizer_state_dict = _broadcast_object(
optimizer_state_dict,
src_rank=REFERENCE_RANK,
group=dist.group.WORLD,
device=self.device,
)
optimizer.load_state_dict(optimizer_state_dict)
def test_nondefault_process_group(self):
"""Check that ZeroRedundancyOptimizer works with a non-default process
group consisting only of even ranks."""
# Skip the test if below the minimum world size since then the test is
# trivial
MIN_WORLD_SIZE = 4
if self.world_size < MIN_WORLD_SIZE:
logger.info(
"Skipping `test_nondefault_process_group()` since world size "
"of %s is less than %s",
self.world_size,
MIN_WORLD_SIZE,
)
return
# Use GPU if enough are available, or fall back to CPU otherwise
if torch.get_device_module(self.device).device_count() < self.world_size:
device = torch.device("cpu")
else:
device = torch.device(self.device)
self.create_pg(device.type)
# Create a new process group consisting of the even ranks to exercise
# the case where the global and local ranks do not necessarily match
subgroup_ranks = [r for r in range(self.world_size) if r % 2 == 0]
process_group = dist.new_group(
ranks=subgroup_ranks,
backend=self.backend(device.type),
)
# Ranks not participating in the new process group are no longer needed
if self.rank not in subgroup_ranks:
return
# Set different seeds across ranks so that each rank gets different
# training data and hence the model sync check is meaningful
torch.manual_seed(self.rank)
np.random.seed(self.rank)
EPOCHS, BATCH_SIZE, INPUT_DIM, HIDDEN_DIM, OUTPUT_DIM = 5, 3, 20, 10, 5
LR = 1e-3
MOMENTUM = 0.99
REFERENCE_RANK = 0
assert REFERENCE_RANK in subgroup_ranks, (
"Reference rank must be in the new process group"
)
loss_fn = torch.nn.L1Loss().to(device)
def check(optimizer):
for _ in range(EPOCHS):
target = torch.rand((BATCH_SIZE, OUTPUT_DIM), device=device)
inputs = torch.rand((BATCH_SIZE, INPUT_DIM), device=device)
def closure():
optimizer.zero_grad()
output = model(inputs)
loss = loss_fn(output, target)
loss /= self.world_size
loss.backward()
dist.all_reduce(loss, group=process_group)
return loss
_ = optimizer.step(closure=closure)
# Check that the parameters match across ranks after a step
for pg in optimizer.param_groups:
for p in pg["params"]:
receptacle = (
[p.clone() for _ in subgroup_ranks]
if self.rank == REFERENCE_RANK
else []
)
dist.gather(
p,
receptacle,
dst=REFERENCE_RANK,
group=process_group,
)
if self.rank == REFERENCE_RANK:
reference_param = receptacle[0]
for param in receptacle[1:]:
torch.testing.assert_close(
reference_param,
param,
msg="Models differ between ranks",
)
model = torch.nn.Sequential(
torch.nn.Linear(INPUT_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, OUTPUT_DIM),
).to(device)
optimizer = ZeroRedundancyOptimizer(
model.parameters(),
optimizer_class=SGD,
lr=LR,
momentum=MOMENTUM, # ensure there exists state to shard
process_group=process_group,
)
check(optimizer)
@skip_if_no_gpu
@parametrize(
"optimizer_class_str",
["Adam", "AdamW", "SGD"],
# Use string to appease the internal test name parser
)
@parametrize(
"maximize",
[False, True],
)
def test_local_optimizer_parity(
self,
optimizer_class_str: str,
maximize: bool,
):
"""When combined with DDP, check that a local optimizer gives the same
results as wrapping that optimizer with ZeroRedundancyOptimizer."""
self.create_pg(self.device)
BATCHES = 20
BATCH_SIZE = 64
LR = 1e-3
INPUT_DIM = 2
HIDDEN_DIM = 3
OUTPUT_DIM = 3
torch.manual_seed(self.rank)
np.random.seed(self.rank)
if optimizer_class_str == "Adam":
optimizer_class = torch.optim.Adam
elif optimizer_class_str == "AdamW":
optimizer_class = torch.optim.AdamW
elif optimizer_class_str == "SGD":
optimizer_class = torch.optim.SGD
else:
assert 0, f"Unsupported optimizer class: {optimizer_class_str}"
with self.context:
# Define a base model with a different buffer for each rank
model = torch.nn.Sequential(
torch.nn.Linear(INPUT_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, HIDDEN_DIM),
torch.nn.Linear(HIDDEN_DIM, OUTPUT_DIM),
).to(self.device)
model.test_buffer = torch.nn.Buffer(
torch.ones((1), device=self.device) * self.rank,
)
# Define models/optimizers for DDP with ZeRO and DDP with local
# optimizer
defaults = {"maximize": True} if maximize else {}
sharded_optimizer = ZeroRedundancyOptimizer(
params=model.parameters(),
optimizer_class=optimizer_class,
lr=LR,
**defaults,
)
sharded_ddp_model = DDP(
module=model,
device_ids=[self.rank] if requires_ddp_rank(self.device) else None,
broadcast_buffers=True,
find_unused_parameters=True,
)
local_model = copy.deepcopy(model).to(self.device)
ddp_optimizer = optimizer_class(
local_model.parameters(),
lr=LR,
**defaults,
)
ddp_model = DDP(
local_model,
device_ids=[self.rank] if requires_ddp_rank(self.device) else None,
broadcast_buffers=True,
find_unused_parameters=True,
)
# Check that the model is properly synchronized between ranks
# at construction time
self._check_same_model_params(
sharded_ddp_model,
ddp_model,
"Models differ from the start",
)
def check_step():
input_tensor = torch.rand((BATCH_SIZE, INPUT_DIM)).to(self.device)
def closure_ddp(input_tensor=input_tensor):
ddp_optimizer.zero_grad()
ddp_loss = ddp_model(input_tensor).abs().sum()
ddp_loss.backward()
return ddp_loss
def closure_sharded(input_tensor=input_tensor):
sharded_optimizer.zero_grad()
sharded_loss = sharded_ddp_model(input_tensor).abs().sum()
sharded_loss.backward()
return sharded_loss
loss_ddp = cast(
torch.Tensor,
ddp_optimizer.step(closure=closure_ddp),
)
loss_sharded_optim = cast(
torch.Tensor,
sharded_optimizer.step(closure=closure_sharded),
)
torch.testing.assert_close(
loss_ddp,
loss_sharded_optim,
msg="Losses differ between local optimizer and ZeRO",
)
self._check_same_model_params(
sharded_ddp_model,
ddp_model,
"Models differ after a step",
)
# Check that parity is maintained
for i in range(BATCHES):
check_step()
# For the second half of batches, change the parameter
# trainability to further test parity
if i > BATCHES // 2:
next(ddp_model.parameters()).requires_grad = bool(i % 2)
next(sharded_ddp_model.parameters()).requires_grad = bool(i % 2)
# Check that the `state_dict` checkpoints are compatible between
# the local optimizer and ZeRO
REFERENCE_RANK = 0
# - Get states
ddp_state_dict = ddp_optimizer.state_dict()
sharded_optimizer.consolidate_state_dict(to=REFERENCE_RANK)
sharded_optim_state_dict = [
sharded_optimizer.state_dict() if self.rank == REFERENCE_RANK else {}
]
dist.broadcast_object_list(
sharded_optim_state_dict,
src=REFERENCE_RANK,
group=dist.group.WORLD,
)
sharded_optim_state_dict = sharded_optim_state_dict[0]
# - Cross-load the states
# Run one step and check that the models are still the same
ddp_state_dict_ref = copy.deepcopy(ddp_state_dict)
ddp_optimizer.load_state_dict(sharded_optim_state_dict)
sharded_optimizer.load_state_dict(ddp_state_dict)
check_step()
# - Reload their respective states
# Run one step and check that the models are still the same
ddp_optimizer.load_state_dict(ddp_state_dict_ref)
sharded_optimizer.load_state_dict(sharded_optim_state_dict)
check_step()
def _test_zero_join(self, device):
"""Check that the ZeRO join hook allows training with uneven inputs
when using the given device."""
NUM_INPUTS = 3
NUM_EPOCHS = 2
LR = 0.01
torch.manual_seed(0)
if "cpu" not in device:
torch.get_device_module(device).manual_seed(0)
rank = self.rank
world_size = self.world_size
self.create_pg(device)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Linear(3, 3),
torch.nn.Linear(3, 3),
)
model.to(device)
# DDP ensures correct gradients in data parallel training, so DDP with
# local optimizers on uneven inputs should be equivalent to ZeRO on
# uneven inputs with gradients being manually set
ddp_model = (
DDP(model, device_ids=[rank]) if requires_ddp_rank(device) else DDP(model)
)
local_optim = torch.optim.Adam(ddp_model.parameters(), lr=LR)
zero_model = copy.deepcopy(model)
zero_model.to(device)
zero_optim = ZeroRedundancyOptimizer(
zero_model.parameters(),
torch.optim.Adam,
lr=LR,
)
loss_fn = torch.nn.MSELoss()
# Use uneven inputs: rank i has i extra inputs
inputs = [torch.randn(20, 2).to(device) for _ in range(NUM_INPUTS + rank)]
labels = torch.randn(20, 3).to(device)
# Save the gradients and parameters from DDP as the ground truth; do
# so on the last-joining rank (in this case, the largest rank)
grads_at_each_iter = []
params_at_each_iter = []
with ddp_model.join():
for _ in range(NUM_EPOCHS):
for input in inputs:
output = ddp_model(input)
loss_fn(output, labels).backward()
if rank == world_size - 1:
grads = []
for p in ddp_model.parameters():
grads.append(p.grad.detach().clone().to(device))
local_optim.step()
if rank == world_size - 1:
params = []
for p in ddp_model.parameters():
params.append(p.detach().clone().to(device))
grads_at_each_iter.append(grads)
params_at_each_iter.append(params)
# Broadcast the saved gradients and parameters to all of the other
# ranks (which joined early)
grads_and_params = [grads_at_each_iter, params_at_each_iter]
grads_and_params = _broadcast_object(
grads_and_params,
src_rank=world_size - 1,
group=dist.group.WORLD,
device=device,
)
grads_at_each_iter = grads_and_params[0]
params_at_each_iter = grads_and_params[1]
# TODO: Replace this `_broadcast_object` with `broadcast_object_list`
# once the latter supports loading to the destination device instead
# of the source device
# A process must still set the remaining gradients after joining, so we
# define a join hook to do this before the ZeRO join hook
class _JoinGradInfo:
def __init__(self, grads):
self.grads = grads # remaining gradients to set (in order)
self.index = 0
class _SetGradsJoinHook(JoinHook):
def __init__(self, zero_optim, grads):
zero_optim._join_grad_info = _JoinGradInfo(grads)
self.zero = zero_optim
super().__init__()
def main_hook(self):
join_grad_info = self.zero._join_grad_info
grads = self.zero._join_grad_info.grads[join_grad_info.index]
join_grad_info.index += 1
for p, grad in zip(self.zero._all_params, grads):
p.grad = grad.detach().clone().to(device)
class _GradientSetter(Joinable):
def __init__(self) -> None:
super().__init__()
def join_hook(self, **kwargs):
assert "zero_optim" in kwargs
assert "grads" in kwargs
zero_optim = kwargs["zero_optim"]
grads = kwargs["grads"]
return _SetGradsJoinHook(zero_optim, grads)
@property
def join_device(self):
return device
@property
def join_process_group(self):
return dist.group.WORLD
num_grads_after_joining = NUM_EPOCHS * (world_size - rank - 1)
grads = grads_at_each_iter[-num_grads_after_joining:]
gradient_setter = _GradientSetter()
iter = 0
with Join(
[gradient_setter, zero_optim],
zero_optim=zero_optim,
grads=grads,
):
for _ in range(NUM_EPOCHS):
for _input in inputs:
# Notify join context that this process has not joined
Join.notify_join_context(gradient_setter)
# Set gradients manually
for p, grad in zip(
zero_model.parameters(),
grads_at_each_iter[iter],
):
p.grad = grad.detach().clone().to(device)
# Perform optimizer step and check parity
zero_optim.step()
for p, ddp_p in zip(
zero_model.parameters(),
params_at_each_iter[iter],
):
torch.testing.assert_close(
p,
ddp_p,
msg="Parameters differ between using ZeRO and "
"local optimizer",
)
iter += 1
@requires_accelerator_dist_backend()
@skip_if_no_gpu
def test_zero_join_gpu(self):
"""Check that the ZeRO join hook allows training with uneven inputs
on GPU."""
self._test_zero_join(self.device)
@requires_gloo()
def test_zero_join_cpu(self):
"""Check that the ZeRO join hook allows training with uneven inputs
on CPU."""
self._test_zero_join("cpu")
def _test_zero_model_parallel(self, parameters_as_bucket_view: bool, device: str):
# Use two processes each with two GPUs
assert self.rank < 2
NUM_EPOCHS = 2
NUM_INPUTS = 4
LR = 0.01
torch.manual_seed(0)
if "cpu" not in device:
torch.get_device_module(device).manual_seed(0)
class ModelParallelModel(torch.nn.Module):
def __init__(self, dev0, dev1):
super().__init__()
self.dev0 = dev0
self.dev1 = dev1
self.net0 = torch.nn.Linear(10, 10).to(dev0)
self.relu = torch.nn.ReLU()
self.net1 = torch.nn.Linear(10, 5).to(dev1)
def forward(self, x):
x = x.to(self.dev0)
x = self.relu(self.net0(x))
x = x.to(self.dev1)
return self.net1(x)
class LocalModel(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.net0 = torch.nn.Linear(10, 10)
self.relu = torch.nn.ReLU()
self.net1 = torch.nn.Linear(10, 5)
def forward(self, x):
return self.net1(self.relu(self.net0(x)))
dev0 = torch.device(2 * self.rank)
dev1 = torch.device(2 * self.rank + 1)
mp_model = ModelParallelModel(dev0, dev1)
ddp_model = DDP(mp_model)
local_model = LocalModel().to(dev0)
# Ensure the parameters are the same across the two models
def copy_param(p):
return torch.nn.Parameter(p.detach().clone().to(dev0))
local_model.net0.weight = copy_param(mp_model.net0.weight)
local_model.net0.bias = copy_param(mp_model.net0.bias)
local_model.net1.weight = copy_param(mp_model.net1.weight)
local_model.net1.bias = copy_param(mp_model.net1.bias)
# Compare parity between DDP with model parallelism using ZeRO and
# a local model using a local optimizer
zero_optim = ZeroRedundancyOptimizer(
ddp_model.parameters(),
optimizer_class=torch.optim.Adam,
parameters_as_bucket_view=parameters_as_bucket_view,
lr=LR,
)
local_optim = torch.optim.Adam(local_model.parameters(), lr=LR)
inputs = [torch.randn(20, 10).to(dev0) for _ in range(NUM_INPUTS)]
for _ in range(NUM_EPOCHS):
for input in inputs:
def closure_local():
local_optim.zero_grad()
local_loss = local_model(input).abs().sum()
local_loss.backward()
return local_loss
def closure_ddp():
zero_optim.zero_grad()
ddp_loss = ddp_model(input).abs().sum()
ddp_loss.backward()
return ddp_loss
local_loss = cast(torch.Tensor, local_optim.step(closure=closure_local))
ddp_loss = cast(torch.Tensor, zero_optim.step(closure=closure_ddp))
# Increased tolerances are needed to pass when using TF32
# See: https://github.com/pytorch/pytorch/issues/67764
torch.testing.assert_close(
local_loss.cpu(),
ddp_loss.cpu(),
rtol=1e-03,
atol=1e-08,
msg="Losses differ between local optimizer and ZeRO",
)
for local_p, ddp_p in zip(
local_model.parameters(), ddp_model.parameters()
):
torch.testing.assert_close(
local_p.cpu(),
ddp_p.cpu(),
rtol=1e-03,
atol=1e-04,
msg="Models differ after a step",
)
@skipIfHpu
@skip_if_lt_x_gpu(4)
@parametrize(
"parameters_as_bucket_view",
[False, True],
)
def test_zero_model_parallel(
self,
parameters_as_bucket_view: bool,
):
"""Check that ZeRO works with model parallelism where the model's
layers are assigned to different devices."""
if self.rank >= 2:
return
self.create_pg(self.device, world_size=2)
self._test_zero_model_parallel(parameters_as_bucket_view, self.device)
def _test_ddp_zero_overlap(
self,
device,
hook_constructor,
gradient_as_bucket_view,
static_graph,
**kwargs,
):
SGD_LR = 0.01
SGD_MOMENTUM = 0.9
SGD_WEIGHT_DECAY = 0.001
NUM_INPUTS = 5
torch.manual_seed(0)
if "cpu" not in device:
torch.get_device_module(device).manual_seed(0)
rank = self.rank
models_to_test = [
(
torch.nn.Sequential(
torch.nn.Linear(1000, 2000),
torch.nn.Linear(2000, 500),
),
[torch.randn(1, 1000).to(device) for _ in range(NUM_INPUTS)],
)
]
if HAS_TORCHVISION:
models_to_test.append(
(
torchvision.models.resnet50(),
[torch.randn(1, 3, 3, 1000).to(device) for _ in range(NUM_INPUTS)],
)
)
for model, inputs in models_to_test:
# Select deterministic context based on device
det_ctx = (
torch.backends.cudnn.flags(
enabled=True, deterministic=True, benchmark=False
)
if "cuda" in device
else torch.use_deterministic_algorithms(True)
)
with det_ctx:
device_ids = [rank] if requires_ddp_rank(device) else None
# Set up the DDP model overlapping with ZeRO
ddp_model_overlap = DDP(
copy.deepcopy(model).to(device),
device_ids=device_ids,
gradient_as_bucket_view=gradient_as_bucket_view,
)
if static_graph:
ddp_model_overlap._set_static_graph()
zero_optim = ZeroRedundancyOptimizer(
ddp_model_overlap.parameters(),
optimizer_class=torch.optim.SGD,
overlap_with_ddp=True,
lr=SGD_LR,
momentum=SGD_MOMENTUM,
weight_decay=SGD_WEIGHT_DECAY,
)
ddp_model_overlap.register_comm_hook(
None,
hook_constructor(
allreduce_hook,
ddp_model_overlap,
zero_optim,
**kwargs,
),
)
# Set up the DDP model with local optimizer
ddp_model_local = DDP(
copy.deepcopy(model).to(device),
device_ids=device_ids,
gradient_as_bucket_view=gradient_as_bucket_view,
)
if static_graph:
ddp_model_local._set_static_graph()
local_optim = torch.optim.SGD(
ddp_model_local.parameters(),
lr=SGD_LR,
momentum=SGD_MOMENTUM,
weight_decay=SGD_WEIGHT_DECAY,
)
# Check that the parameters match initially
for p1, p2 in zip(
ddp_model_overlap.parameters(), ddp_model_local.parameters()
):
self.assertEqual(p1, p2)
# Save the parameters to ensure they were updated
init_params_overlap = copy.deepcopy(
list(ddp_model_overlap.parameters())
)
# Ensure that this test runs independently
dist.barrier()
# Run the DDP model overlapping with ZeRO
# NOTE: Overlapping currently requires 2 or 3 warmup iterations
# to ensure DDP buckets have been rebuilt (depending on the
# value of `static_graph`)
num_warmup_inputs = 2 if not static_graph else 3
for input in inputs[:num_warmup_inputs]:
output = ddp_model_overlap(input)
loss = output.sum()
loss.backward()
for input in inputs:
zero_optim.zero_grad()
output = ddp_model_overlap(input)
loss = output.sum()
loss.backward()
# Run the DDP model with local optimizer
for input in inputs:
local_optim.zero_grad()
output = ddp_model_local(input)
loss = output.sum()
loss.backward()
local_optim.step()
dist.barrier()
# Check that the parameters are equal
for p1, p2 in zip(
ddp_model_overlap.parameters(), ddp_model_local.parameters()
):
self.assertEqual(p1, p2)
# Check that the parameters were updated
self.assertNotEqual(
init_params_overlap,
list(ddp_model_overlap.parameters()),
)
# Ensure that this test runs independently
dist.barrier()
# NOTE: The test is skipped if using Windows since functional optimizers
# are not currently supported.
@skip_if_win32()
@requires_accelerator_dist_backend()
@skip_if_no_gpu
@skip_if_rocm_multiprocess
@parametrize(
"use_gpu",
[True],
# Add `False` once the Gloo sync issue causing hangs is fixed
# See: https://github.com/pytorch/pytorch/issues/62300
)
@parametrize(
"use_interleaved_hook",
[False, True],
)
@parametrize(
"gradient_as_bucket_view",
[False, True],
)
@parametrize(
"static_graph",
[False, True],
)
@parametrize(
"shard_buckets",
[False, True],
)
def test_ddp_zero_overlap(
self,
use_gpu: bool,
use_interleaved_hook: bool,
gradient_as_bucket_view: bool,
static_graph: bool,
shard_buckets: bool,
):
"""
Check that overlapping DDP with ZeRO using the given method determined
by ``hook_constructor`` and ``shard_buckets`` and using the given ZeRO
and DDP arguments achieves parity with DDP using a local optimizer.
"""
self.create_pg(self.device)
hook_constructor = (
hook_with_zero_step
if not use_interleaved_hook
else hook_with_zero_step_interleaved
)
self._test_ddp_zero_overlap(
self.device if use_gpu else "cpu",
hook_constructor,
gradient_as_bucket_view,
static_graph,
shard_buckets=shard_buckets,
)
instantiate_parametrized_tests(TestZeroRedundancyOptimizerSingleRank)
instantiate_parametrized_tests(TestZeroRedundancyOptimizerDistributed)
if __name__ == "__main__":
# ! unittest should not be used here, else the tests are not properly registered
run_tests()
| TestZeroRedundancyOptimizerDistributed |
python | getsentry__sentry | tests/sentry/integrations/discord/test_views.py | {
"start": 401,
"end": 950
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
self.guild_id = "guild-id"
self.discord_user_id = "user1234"
self.discord_integration = self.create_integration(
provider="discord",
name="Cool server",
external_id=self.guild_id,
organization=self.organization,
)
self.provider = self.create_identity_provider(integration=self.discord_integration)
@control_silo_test
| DiscordIntegrationLinkIdentityTestBase |
python | getsentry__sentry | src/sentry/profiles/flamegraph.py | {
"start": 1578,
"end": 2330
} | class ____:
project_id: int
profiler_id: str
thread_id: str
start: float
end: float
transaction_id: str | None = None
def as_condition(self) -> Condition:
return And(
conditions=[
Condition(Column("project_id"), Op.EQ, self.project_id),
Condition(Column("profiler_id"), Op.EQ, self.profiler_id),
Condition(
Column("end_timestamp"),
Op.GTE,
resolve_datetime64(self.start),
),
Condition(
Column("start_timestamp"),
Op.LT,
resolve_datetime64(self.end),
),
]
)
| ProfilerMeta |
python | astropy__astropy | astropy/table/tests/test_item_access.py | {
"start": 2916,
"end": 9267
} | class ____(BaseTestItems):
@pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)])
def test_column(self, table_data, idx):
"""Column access returns REFERENCE to data"""
self.t = table_data.Table(table_data.COLS)
self.tc = self.t.columns
a = self.t["a"]
assert a[idx] == 2
a[idx] = 0
assert self.t["a"][idx] == 0
@pytest.mark.parametrize("idx", [1, np.int64(1), np.array(1)])
def test_row(self, table_data, idx):
"""Row access returns REFERENCE to data"""
self.t = table_data.Table(table_data.COLS)
self.tc = self.t.columns
row = self.t[idx]
assert row["a"] == 2
assert row[idx] == 5
assert row.columns["a"].attrs_equal(table_data.COLS[0])
assert row.columns["b"].attrs_equal(table_data.COLS[1])
assert row.columns["c"].attrs_equal(table_data.COLS[2])
# Check that setting by col index sets the table and row value
row[idx] = 0
assert row[idx] == 0
assert row["b"] == 0
assert self.t["b"][idx] == 0
assert self.t[idx]["b"] == 0
# Check that setting by col name sets the table and row value
row["a"] = 0
assert row[0] == 0
assert row["a"] == 0
assert self.t["a"][1] == 0
assert self.t[1]["a"] == 0
def test_empty_iterable_item(self, table_data):
"""
Table item access with [], (), or np.array([]) returns the same table
with no rows.
"""
self.t = table_data.Table(table_data.COLS)
for item in [], (), np.array([]):
t2 = self.t[item]
assert not t2
assert len(t2) == 0
assert t2["a"].attrs_equal(table_data.COLS[0])
assert t2["b"].attrs_equal(table_data.COLS[1])
assert t2["c"].attrs_equal(table_data.COLS[2])
def test_table_slice(self, table_data):
"""Table slice returns REFERENCE to data"""
self.t = table_data.Table(table_data.COLS)
self.tc = self.t.columns
t2 = self.t[1:3]
assert np.all(t2["a"] == table_data.DATA["a"][1:3])
assert t2["a"].attrs_equal(table_data.COLS[0])
assert t2["b"].attrs_equal(table_data.COLS[1])
assert t2["c"].attrs_equal(table_data.COLS[2])
t2["a"][0] = 0
assert np.all(self.t["a"] == np.array([1, 0, 3]))
assert t2.masked == self.t.masked
assert t2._column_class == self.t._column_class
assert isinstance(t2, table_data.Table)
def test_fancy_index_slice(self, table_data):
"""Table fancy slice returns COPY of data"""
self.t = table_data.Table(table_data.COLS)
self.tc = self.t.columns
slice = np.array([0, 2])
t2 = self.t[slice]
assert np.all(t2["a"] == table_data.DATA["a"][slice])
assert t2["a"].attrs_equal(table_data.COLS[0])
assert t2["b"].attrs_equal(table_data.COLS[1])
assert t2["c"].attrs_equal(table_data.COLS[2])
t2["a"][0] = 0
assert np.all(self.t.as_array() == table_data.DATA)
assert np.any(t2["a"] != table_data.DATA["a"][slice])
assert t2.masked == self.t.masked
assert t2._column_class == self.t._column_class
assert isinstance(t2, table_data.Table)
def test_list_index_slice(self, table_data):
"""Table list index slice returns COPY of data"""
self.t = table_data.Table(table_data.COLS)
self.tc = self.t.columns
slice = [0, 2]
t2 = self.t[slice]
assert np.all(t2["a"] == table_data.DATA["a"][slice])
assert t2["a"].attrs_equal(table_data.COLS[0])
assert t2["b"].attrs_equal(table_data.COLS[1])
assert t2["c"].attrs_equal(table_data.COLS[2])
t2["a"][0] = 0
assert np.all(self.t.as_array() == table_data.DATA)
assert np.any(t2["a"] != table_data.DATA["a"][slice])
assert t2.masked == self.t.masked
assert t2._column_class == self.t._column_class
assert isinstance(t2, table_data.Table)
def test_select_columns(self, table_data):
"""Select columns returns COPY of data and all column
attributes"""
self.t = table_data.Table(table_data.COLS)
self.tc = self.t.columns
# try both lists and tuples
for columns in (("a", "c"), ["a", "c"]):
t2 = self.t[columns]
assert np.all(t2["a"] == table_data.DATA["a"])
assert np.all(t2["c"] == table_data.DATA["c"])
assert t2["a"].attrs_equal(table_data.COLS[0])
assert t2["c"].attrs_equal(table_data.COLS[2])
t2["a"][0] = 0
assert np.all(self.t.as_array() == table_data.DATA)
assert np.any(t2["a"] != table_data.DATA["a"])
assert t2.masked == self.t.masked
assert t2._column_class == self.t._column_class
def test_select_columns_fail(self, table_data):
"""Selecting a column that doesn't exist fails"""
self.t = table_data.Table(table_data.COLS)
with pytest.raises(KeyError) as err:
self.t[["xxxx"]]
assert "'xxxx'" in str(err.value)
with pytest.raises(KeyError) as err:
self.t[["xxxx", "yyyy"]]
assert "'xxxx'" in str(err.value)
def test_np_where(self, table_data):
"""Select rows using output of np.where"""
t = table_data.Table(table_data.COLS)
# Select last two rows
rows = np.where(t["a"] > 1.5)
t2 = t[rows]
assert np.all(t2["a"] == [2, 3])
assert np.all(t2["b"] == [5, 6])
assert isinstance(t2, table_data.Table)
# Select no rows
rows = np.where(t["a"] > 100)
t2 = t[rows]
assert len(t2) == 0
assert isinstance(t2, table_data.Table)
def test_np_integers(self, table_data):
"""
Select rows using numpy integers. This is a regression test for a
py 3.3 failure mode
"""
t = table_data.Table(table_data.COLS)
assert type(t[np.int64(0)]) is Row
def test_select_bad_column(self, table_data):
"""Select column name that does not exist"""
self.t = table_data.Table(table_data.COLS)
self.tc = self.t.columns
with pytest.raises(ValueError):
self.t["a", 1]
| TestTableItems |
python | walkccc__LeetCode | solutions/2898. Maximum Linear Stock Score/2898.py | {
"start": 0,
"end": 230
} | class ____:
def maxScore(self, prices: list[int]) -> int:
groupIdToSum = collections.defaultdict(int)
for i, price in enumerate(prices):
groupIdToSum[price - i] += price
return max(groupIdToSum.values())
| Solution |
python | kamyu104__LeetCode-Solutions | Python/minimum-average-of-smallest-and-largest-elements.py | {
"start": 40,
"end": 269
} | class ____(object):
def minimumAverage(self, nums):
"""
:type nums: List[int]
:rtype: float
"""
nums.sort()
return min((nums[i]+nums[~i])/2.0 for i in xrange(len(nums)//2))
| Solution |
python | walkccc__LeetCode | solutions/3394. Check if Grid can be Cut into Sections/3394.py | {
"start": 0,
"end": 567
} | class ____:
def checkValidCuts(self, n: int, rectangles: list[list[int]]) -> bool:
xs = [(startX, endX) for startX, _, endX, _ in rectangles]
ys = [(startY, endY) for _, startY, _, endY in rectangles]
return max(self._countMerged(xs),
self._countMerged(ys)) >= 3
def _countMerged(self, intervals: list[tuple[int, int]]) -> int:
count = 0
prevEnd = 0
for start, end in sorted(intervals):
if start < prevEnd:
prevEnd = max(prevEnd, end)
else:
prevEnd = end
count += 1
return count
| Solution |
python | huggingface__transformers | src/transformers/models/detr/image_processing_detr.py | {
"start": 2162,
"end": 27942
} | class ____(ImagesKwargs, total=False):
r"""
format (`str`, *optional*, defaults to `AnnotationFormat.COCO_DETECTION`):
Data format of the annotations. One of "coco_detection" or "coco_panoptic".
do_convert_annotations (`bool`, *optional*, defaults to `True`):
Controls whether to convert the annotations to the format expected by the DETR model. Converts the
bounding boxes to the format `(center_x, center_y, width, height)` and in the range `[0, 1]`.
Can be overridden by the `do_convert_annotations` parameter in the `preprocess` method.
return_segmentation_masks (`bool`, *optional*, defaults to `False`):
Whether to return segmentation masks.
annotations (`AnnotationType` or `list[AnnotationType]`, *optional*):
Annotations to transform according to the padding that is applied to the images.
masks_path (`str` or `pathlib.Path`, *optional*):
Path to the directory containing the segmentation masks.
"""
format: Union[str, AnnotationFormat]
do_convert_annotations: bool
return_segmentation_masks: bool
annotations: Optional[Union[AnnotationType, list[AnnotationType]]]
masks_path: Optional[Union[str, pathlib.Path]]
# From the original repo: https://github.com/facebookresearch/detr/blob/3af9fa878e73b6894ce3596450a8d9b89d918ca9/datasets/transforms.py#L76
def get_image_size_for_max_height_width(
input_image: np.ndarray,
max_height: int,
max_width: int,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple[int, int]:
"""
Computes the output image size given the input image and the maximum allowed height and width. Keep aspect ratio.
Important, even if image_height < max_height and image_width < max_width, the image will be resized
to at least one of the edges be equal to max_height or max_width.
For example:
- input_size: (100, 200), max_height: 50, max_width: 50 -> output_size: (25, 50)
- input_size: (100, 200), max_height: 200, max_width: 500 -> output_size: (200, 400)
Args:
input_image (`np.ndarray`):
The image to resize.
max_height (`int`):
The maximum allowed height.
max_width (`int`):
The maximum allowed width.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input image.
"""
image_size = get_image_size(input_image, input_data_format)
height, width = image_size
height_scale = max_height / height
width_scale = max_width / width
min_scale = min(height_scale, width_scale)
new_height = int(height * min_scale)
new_width = int(width * min_scale)
return new_height, new_width
def get_resize_output_image_size(
input_image: np.ndarray,
size: Union[int, tuple[int, int], list[int]],
max_size: Optional[int] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> tuple[int, int]:
"""
Computes the output image size given the input image size and the desired output size. If the desired output size
is a tuple or list, the output image size is returned as is. If the desired output size is an integer, the output
image size is computed by keeping the aspect ratio of the input image size.
Args:
input_image (`np.ndarray`):
The image to resize.
size (`int` or `tuple[int, int]` or `list[int]`):
The desired output size.
max_size (`int`, *optional*):
The maximum allowed output size.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred from the input image.
"""
image_size = get_image_size(input_image, input_data_format)
if isinstance(size, (list, tuple)):
return size
return get_size_with_aspect_ratio(image_size, size, max_size)
def safe_squeeze(arr: np.ndarray, axis: Optional[int] = None) -> np.ndarray:
"""
Squeezes an array, but only if the axis specified has dim 1.
"""
if axis is None:
return arr.squeeze()
try:
return arr.squeeze(axis=axis)
except ValueError:
return arr
def normalize_annotation(annotation: dict, image_size: tuple[int, int]) -> dict:
image_height, image_width = image_size
norm_annotation = {}
for key, value in annotation.items():
if key == "boxes":
boxes = value
boxes = corners_to_center_format(boxes)
boxes /= np.asarray([image_width, image_height, image_width, image_height], dtype=np.float32)
norm_annotation[key] = boxes
else:
norm_annotation[key] = value
return norm_annotation
# Copied from transformers.models.vilt.image_processing_vilt.max_across_indices
def max_across_indices(values: Iterable[Any]) -> list[Any]:
"""
Return the maximum value across all indices of an iterable of values.
"""
return [max(values_i) for values_i in zip(*values)]
# Copied from transformers.models.vilt.image_processing_vilt.get_max_height_width
def get_max_height_width(
images: list[np.ndarray], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> list[int]:
"""
Get the maximum height and width across all images in a batch.
"""
if input_data_format is None:
input_data_format = infer_channel_dimension_format(images[0])
if input_data_format == ChannelDimension.FIRST:
_, max_height, max_width = max_across_indices([img.shape for img in images])
elif input_data_format == ChannelDimension.LAST:
max_height, max_width, _ = max_across_indices([img.shape for img in images])
else:
raise ValueError(f"Invalid channel dimension format: {input_data_format}")
return (max_height, max_width)
# Copied from transformers.models.vilt.image_processing_vilt.make_pixel_mask
def make_pixel_mask(
    image: np.ndarray, output_size: tuple[int, int], input_data_format: Optional[Union[str, ChannelDimension]] = None
) -> np.ndarray:
    """
    Make a pixel mask for the image, where 1 indicates a valid pixel and 0 indicates padding.

    Args:
        image (`np.ndarray`):
            Image to make the pixel mask for.
        output_size (`tuple[int, int]`):
            Output size of the mask.
    """
    height, width = get_image_size(image, channel_dim=input_data_format)
    pixel_mask = np.zeros(output_size, dtype=np.int64)
    # Mark the region actually covered by the (un-padded) image as valid.
    pixel_mask[:height, :width] = 1
    return pixel_mask
# inspired by https://github.com/facebookresearch/detr/blob/master/datasets/coco.py#L33
def convert_coco_poly_to_mask(segmentations, height: int, width: int) -> np.ndarray:
    """
    Convert a COCO polygon annotation to a mask.

    Args:
        segmentations (`list[list[float]]`):
            List of polygons, each polygon represented by a list of x-y coordinates.
        height (`int`):
            Height of the mask.
        width (`int`):
            Width of the mask.
    """
    try:
        from pycocotools import mask as coco_mask
    except ImportError:
        raise ImportError("Pycocotools is not installed in your environment.")

    per_object_masks = []
    for polygons in segmentations:
        rles = coco_mask.frPyObjects(polygons, height, width)
        decoded = coco_mask.decode(rles)
        # A single polygon decodes to (h, w); add a trailing per-polygon axis.
        if decoded.ndim < 3:
            decoded = decoded[..., None]
        # Collapse the per-polygon axis into one binary mask per object.
        per_object_masks.append(np.asarray(decoded, dtype=np.uint8).any(axis=2))

    if per_object_masks:
        return np.stack(per_object_masks, axis=0)
    # No objects: return an empty (0, height, width) stack.
    return np.zeros((0, height, width), dtype=np.uint8)
# inspired by https://github.com/facebookresearch/detr/blob/master/datasets/coco.py#L50
def prepare_coco_detection_annotation(
    image,
    target,
    return_segmentation_masks: bool = False,
    input_data_format: Optional[Union[ChannelDimension, str]] = None,
):
    """
    Convert the target in COCO format into the format expected by DETR.

    Args:
        image: the decoded image; only its (height, width) is read.
        target (`dict`): COCO-style dict with "image_id" and "annotations".
        return_segmentation_masks (`bool`): also rasterize polygon
            segmentations into binary masks under the "masks" key.
        input_data_format: channel layout of `image`; inferred when `None`.

    Returns:
        `dict` of numpy arrays: "image_id", "class_labels", "boxes" (corner
        format, clipped to the image), "area", "iscrowd", "orig_size", and
        optionally "keypoints" / "masks".
    """
    image_height, image_width = get_image_size(image, channel_dim=input_data_format)

    image_id = target["image_id"]
    image_id = np.asarray([image_id], dtype=np.int64)

    # Get all COCO annotations for the given image.
    annotations = target["annotations"]
    # Crowd regions (iscrowd == 1) are excluded.
    annotations = [obj for obj in annotations if "iscrowd" not in obj or obj["iscrowd"] == 0]

    classes = [obj["category_id"] for obj in annotations]
    classes = np.asarray(classes, dtype=np.int64)

    # for conversion to coco api
    area = np.asarray([obj["area"] for obj in annotations], dtype=np.float32)
    iscrowd = np.asarray([obj.get("iscrowd", 0) for obj in annotations], dtype=np.int64)

    # COCO "bbox" is [top-left-x, top-left-y, width, height].
    boxes = [obj["bbox"] for obj in annotations]
    # guard against no boxes via resizing
    boxes = np.asarray(boxes, dtype=np.float32).reshape(-1, 4)
    # Convert to [x0, y0, x1, y1] corners and clamp to the image bounds.
    boxes[:, 2:] += boxes[:, :2]
    boxes[:, 0::2] = boxes[:, 0::2].clip(min=0, max=image_width)
    boxes[:, 1::2] = boxes[:, 1::2].clip(min=0, max=image_height)

    # Drop degenerate boxes with non-positive width or height.
    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])

    new_target = {}
    new_target["image_id"] = image_id
    new_target["class_labels"] = classes[keep]
    new_target["boxes"] = boxes[keep]
    new_target["area"] = area[keep]
    new_target["iscrowd"] = iscrowd[keep]
    new_target["orig_size"] = np.asarray([int(image_height), int(image_width)], dtype=np.int64)

    if annotations and "keypoints" in annotations[0]:
        keypoints = [obj["keypoints"] for obj in annotations]
        # Converting the filtered keypoints list to a numpy array
        keypoints = np.asarray(keypoints, dtype=np.float32)
        # Apply the keep mask here to filter the relevant annotations
        keypoints = keypoints[keep]
        num_keypoints = keypoints.shape[0]
        # NOTE(review): flattens all kept annotations into (total_points, 3);
        # presumably (x, y, visibility) triplets -- confirm with consumers.
        keypoints = keypoints.reshape((-1, 3)) if num_keypoints else keypoints
        new_target["keypoints"] = keypoints

    if return_segmentation_masks:
        segmentation_masks = [obj["segmentation"] for obj in annotations]
        masks = convert_coco_poly_to_mask(segmentation_masks, image_height, image_width)
        new_target["masks"] = masks[keep]

    return new_target
def masks_to_boxes(masks: np.ndarray) -> np.ndarray:
    """
    Compute the bounding boxes around the provided panoptic segmentation masks.

    Args:
        masks: masks in format `[number_masks, height, width]` where N is the number of masks

    Returns:
        boxes: bounding boxes in format `[number_masks, 4]` in xyxy format
    """
    if masks.size == 0:
        return np.zeros((0, 4))

    height, width = masks.shape[-2:]
    row_coords = np.arange(0, height, dtype=np.float32)
    col_coords = np.arange(0, width, dtype=np.float32)
    # see https://github.com/pytorch/pytorch/issues/50276
    grid_y, grid_x = np.meshgrid(row_coords, col_coords, indexing="ij")

    bool_masks = np.array(masks, dtype=bool)

    # Max coordinate per mask: zeroing everything outside the mask is enough.
    x_weighted = masks * np.expand_dims(grid_x, axis=0)
    x_max = x_weighted.reshape(x_weighted.shape[0], -1).max(-1)
    # Min coordinate per mask: fill the outside with a huge value instead.
    x_min = (
        np.ma.array(x_weighted, mask=~bool_masks)
        .filled(fill_value=1e8)
        .reshape(x_weighted.shape[0], -1)
        .min(-1)
    )

    y_weighted = masks * np.expand_dims(grid_y, axis=0)
    y_max = y_weighted.reshape(y_weighted.shape[0], -1).max(-1)
    y_min = (
        np.ma.array(y_weighted, mask=~bool_masks)
        .filled(fill_value=1e8)
        .reshape(y_weighted.shape[0], -1)
        .min(-1)
    )

    return np.stack([x_min, y_min, x_max, y_max], 1)
def prepare_coco_panoptic_annotation(
    image: np.ndarray,
    target: dict,
    masks_path: Union[str, pathlib.Path],
    return_masks: bool = True,
    input_data_format: Union[ChannelDimension, str] = None,
) -> dict:
    """
    Prepare a coco panoptic annotation for DETR.

    Reads the panoptic PNG named by ``target["file_name"]`` under
    ``masks_path``, decodes its RGB-encoded segment ids, and builds per-segment
    masks, boxes, class labels, iscrowd flags and areas.
    """
    image_height, image_width = get_image_size(image, channel_dim=input_data_format)
    # The panoptic PNG with per-pixel segment ids lives under masks_path.
    annotation_path = pathlib.Path(masks_path) / target["file_name"]

    new_target = {}
    # Some annotation dumps use "id" instead of "image_id".
    new_target["image_id"] = np.asarray([target["image_id"] if "image_id" in target else target["id"]], dtype=np.int64)
    new_target["size"] = np.asarray([image_height, image_width], dtype=np.int64)
    new_target["orig_size"] = np.asarray([image_height, image_width], dtype=np.int64)

    if "segments_info" in target:
        # Decode the RGB-encoded per-pixel segment ids back to integers.
        masks = np.asarray(PIL.Image.open(annotation_path), dtype=np.uint32)
        masks = rgb_to_id(masks)

        # One binary (uint8) mask per segment id, shape (num_segments, H, W).
        ids = np.array([segment_info["id"] for segment_info in target["segments_info"]])
        masks = masks == ids[:, None, None]
        masks = masks.astype(np.uint8)
        if return_masks:
            new_target["masks"] = masks
        new_target["boxes"] = masks_to_boxes(masks)
        new_target["class_labels"] = np.array(
            [segment_info["category_id"] for segment_info in target["segments_info"]], dtype=np.int64
        )
        new_target["iscrowd"] = np.asarray(
            [segment_info["iscrowd"] for segment_info in target["segments_info"]], dtype=np.int64
        )
        new_target["area"] = np.asarray(
            [segment_info["area"] for segment_info in target["segments_info"]], dtype=np.float32
        )

    return new_target
def get_segmentation_image(
    masks: np.ndarray, input_size: tuple, target_size: tuple, stuff_equiv_classes, deduplicate=False
):
    """Turn per-query mask scores into an RGB-encoded segmentation image.

    Args:
        masks: flattened per-query mask scores; assumed compatible with a
            (height * width) grid of ``input_size`` -- TODO confirm callers.
        input_size: (height, width) of the mask grid.
        target_size: (height, width) the resulting RGB image is resized to.
        stuff_equiv_classes: maps a stuff class to the query ids predicting it.
        deduplicate: merge all queries of the same stuff class into one id.
    """
    h, w = input_size
    final_h, final_w = target_size

    # NOTE(review): for a 2-D numpy array, transpose(0, 1) is the identity
    # permutation; if a torch-style axis swap was intended here, the softmax
    # axis differs from that intent -- confirm before relying on this path.
    m_id = scipy.special.softmax(masks.transpose(0, 1), -1)

    if m_id.shape[-1] == 0:
        # We didn't detect any mask :(
        m_id = np.zeros((h, w), dtype=np.int64)
    else:
        m_id = m_id.argmax(-1).reshape(h, w)

    if deduplicate:
        # Merge the masks corresponding to the same stuff class
        for equiv in stuff_equiv_classes.values():
            for eq_id in equiv:
                m_id[m_id == eq_id] = equiv[0]

    # Encode integer ids as RGB so the id map survives image resizing.
    seg_img = id_to_rgb(m_id)
    seg_img = resize(seg_img, (final_w, final_h), resample=PILImageResampling.NEAREST)
    return seg_img
def get_mask_area(seg_img: np.ndarray, target_size: tuple[int, int], n_classes: int) -> np.ndarray:
    """Count, for each segment id < ``n_classes``, its pixel area in ``seg_img``.

    ``seg_img`` is an RGB-encoded id map; it is decoded back to integer ids
    before counting. Returns a list of per-id pixel counts.
    """
    final_h, final_w = target_size
    segment_ids = rgb_to_id(seg_img.astype(np.uint8).reshape(final_h, final_w, 3))
    return [(segment_ids == class_idx).sum() for class_idx in range(n_classes)]
def score_labels_from_class_probabilities(logits: np.ndarray) -> tuple[np.ndarray, np.ndarray]:
    """Return (scores, labels): the max softmax probability and its class index
    along the last axis of ``logits``."""
    probs = scipy.special.softmax(logits, axis=-1)
    labels = probs.argmax(axis=-1)
    # Gather the winning class probability for every entry, then drop the
    # gathered axis so scores and labels share the same shape.
    scores = np.take_along_axis(probs, labels[..., None], axis=-1).squeeze(-1)
    return scores, labels
def post_process_panoptic_sample(
    out_logits: np.ndarray,
    masks: np.ndarray,
    boxes: np.ndarray,
    processed_size: tuple[int, int],
    target_size: tuple[int, int],
    is_thing_map: dict,
    threshold=0.85,
) -> dict:
    """
    Converts the output of [`DetrForSegmentation`] into panoptic segmentation predictions for a single sample.

    Args:
        out_logits (`torch.Tensor`):
            The logits for this sample.
        masks (`torch.Tensor`):
            The predicted segmentation masks for this sample.
        boxes (`torch.Tensor`):
            The predicted bounding boxes for this sample. The boxes are in the normalized format `(center_x, center_y,
            width, height)` and values between `[0, 1]`, relative to the size the image (disregarding padding).
        processed_size (`tuple[int, int]`):
            The processed size of the image `(height, width)`, as returned by the preprocessing step i.e. the size
            after data augmentation but before batching.
        target_size (`tuple[int, int]`):
            The target size of the image, `(height, width)` corresponding to the requested final size of the
            prediction.
        is_thing_map (`Dict`):
            A dictionary mapping class indices to a boolean value indicating whether the class is a thing or not.
        threshold (`float`, *optional*, defaults to 0.85):
            The threshold used to binarize the segmentation masks.
    """
    # we filter empty queries and detection below threshold
    scores, labels = score_labels_from_class_probabilities(out_logits)
    # The last logit column is the "no object" class.
    keep = (labels != out_logits.shape[-1] - 1) & (scores > threshold)

    cur_scores = scores[keep]
    cur_classes = labels[keep]
    cur_boxes = center_to_corners_format(boxes[keep])

    if len(cur_boxes) != len(cur_classes):
        raise ValueError("Not as many boxes as there are classes")

    cur_masks = masks[keep]
    cur_masks = resize(cur_masks[:, None], processed_size, resample=PILImageResampling.BILINEAR)
    cur_masks = safe_squeeze(cur_masks, 1)
    b, h, w = cur_masks.shape

    # It may be that we have several predicted masks for the same stuff class.
    # In the following, we track the list of masks ids for each stuff class (they are merged later on)
    cur_masks = cur_masks.reshape(b, -1)
    stuff_equiv_classes = defaultdict(list)
    for k, label in enumerate(cur_classes):
        if not is_thing_map[label]:
            stuff_equiv_classes[label].append(k)

    seg_img = get_segmentation_image(cur_masks, processed_size, target_size, stuff_equiv_classes, deduplicate=True)
    # NOTE(review): the loop below recomputes areas from `seg_img`; passing
    # `cur_masks` here looks inconsistent with that -- confirm the intent.
    area = get_mask_area(cur_masks, processed_size, n_classes=len(cur_scores))

    # We filter out any mask that is too small
    # Fix: `ndarray.size` is an int attribute, not a method; `cur_classes.size()`
    # raised `TypeError: 'int' object is not callable` on every non-trivial call.
    if cur_classes.size > 0:
        # We know filter empty masks as long as we find some
        filtered_small = np.array([a <= 4 for a in area], dtype=bool)
        while filtered_small.any():
            cur_masks = cur_masks[~filtered_small]
            cur_scores = cur_scores[~filtered_small]
            cur_classes = cur_classes[~filtered_small]
            seg_img = get_segmentation_image(cur_masks, (h, w), target_size, stuff_equiv_classes, deduplicate=True)
            area = get_mask_area(seg_img, target_size, n_classes=len(cur_scores))
            filtered_small = np.array([a <= 4 for a in area], dtype=bool)
    else:
        cur_classes = np.ones((1, 1), dtype=np.int64)

    segments_info = [
        {"id": i, "isthing": is_thing_map[cat], "category_id": int(cat), "area": a}
        for i, (cat, a) in enumerate(zip(cur_classes, area))
    ]
    del cur_classes

    with io.BytesIO() as out:
        # Serialize the RGB id map as a PNG byte string.
        PIL.Image.fromarray(seg_img).save(out, format="PNG")
        predictions = {"png_string": out.getvalue(), "segments_info": segments_info}
    return predictions
def resize_annotation(
    annotation: dict[str, Any],
    orig_size: tuple[int, int],
    target_size: tuple[int, int],
    threshold: float = 0.5,
    resample: PILImageResampling = PILImageResampling.NEAREST,
):
    """
    Resizes an annotation to a target size.

    Args:
        annotation (`dict[str, Any]`):
            The annotation dictionary.
        orig_size (`tuple[int, int]`):
            The original size of the input image.
        target_size (`tuple[int, int]`):
            The target size of the image, as returned by the preprocessing `resize` step.
        threshold (`float`, *optional*, defaults to 0.5):
            The threshold used to binarize the segmentation masks.
        resample (`PILImageResampling`, defaults to `PILImageResampling.NEAREST`):
            The resampling filter to use when resizing the masks.
    """
    ratio_height, ratio_width = (
        float(new_dim) / float(old_dim) for new_dim, old_dim in zip(target_size, orig_size)
    )

    resized = {"size": target_size}
    for key, value in annotation.items():
        if key == "boxes":
            # Scale corner coordinates by the per-axis resize ratios.
            resized["boxes"] = value * np.asarray(
                [ratio_width, ratio_height, ratio_width, ratio_height], dtype=np.float32
            )
        elif key == "area":
            # Areas scale by the product of both ratios.
            resized["area"] = value * (ratio_width * ratio_height)
        elif key == "masks":
            # Resize each mask individually, then re-binarize with `threshold`.
            stacked = np.array([resize(mask, target_size, resample=resample) for mask in value[:, None]])
            resized["masks"] = stacked.astype(np.float32)[:, 0] > threshold
        elif key == "size":
            resized["size"] = target_size
        else:
            resized[key] = value
    return resized
def binary_mask_to_rle(mask):
    """
    Converts given binary mask of shape `(height, width)` to the run-length encoding (RLE) format.

    Args:
        mask (`torch.Tensor` or `numpy.array`):
            A binary mask tensor of shape `(height, width)` where 0 denotes background and 1 denotes the target
            segment_id or class_id.
    Returns:
        `List`: Run-length encoded list of the binary mask. Refer to COCO API for more information about the RLE
        format.
    """
    if is_torch_tensor(mask):
        mask = mask.numpy()

    # Pad with a zero on both ends so every run has a detectable start/stop.
    flat = np.concatenate([[0], mask.flatten(), [0]])
    # Indices where consecutive values differ mark run boundaries.
    boundaries = np.where(flat[1:] != flat[:-1])[0] + 1
    # Convert absolute stop positions into run lengths (start, length, ...).
    boundaries[1::2] -= boundaries[::2]
    return list(boundaries)
def convert_segmentation_to_rle(segmentation):
    """
    Converts given segmentation map of shape `(height, width)` to the run-length encoding (RLE) format.

    Args:
        segmentation (`torch.Tensor` or `numpy.array`):
            A segmentation map of shape `(height, width)` where each value denotes a segment or class id.
    Returns:
        `list[List]`: A list of lists, where each list is the run-length encoding of a segment / class id.
    """
    # One RLE per distinct segment id: binarize the map against each id in turn.
    return [
        binary_mask_to_rle(torch.where(segmentation == segment_id, 1, 0))
        for segment_id in torch.unique(segmentation)
    ]
def remove_low_and_no_objects(masks, scores, labels, object_mask_threshold, num_labels):
    """
    Binarize the given masks using `object_mask_threshold`, it returns the associated values of `masks`, `scores` and
    `labels`.

    Args:
        masks (`torch.Tensor`):
            A tensor of shape `(num_queries, height, width)`.
        scores (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        labels (`torch.Tensor`):
            A tensor of shape `(num_queries)`.
        object_mask_threshold (`float`):
            A number between 0 and 1 used to binarize the masks.
    Raises:
        `ValueError`: Raised when the first dimension doesn't match in all input tensors.
    Returns:
        `tuple[`torch.Tensor`, `torch.Tensor`, `torch.Tensor`]`: The `masks`, `scores` and `labels` without the region
        < `object_mask_threshold`.
    """
    num_queries = masks.shape[0]
    if scores.shape[0] != num_queries or labels.shape[0] != num_queries:
        raise ValueError("mask, scores and labels must have the same shape!")

    # Keep queries that are confident enough and not the "no object" class.
    is_object = labels.ne(num_labels)
    is_confident = scores > object_mask_threshold
    to_keep = is_object & is_confident
    return masks[to_keep], scores[to_keep], labels[to_keep]
def check_segment_validity(mask_labels, mask_probs, k, mask_threshold=0.5, overlap_mask_area_threshold=0.8):
    """Decide whether query ``k`` produced a valid segment.

    Returns ``(mask_exists, mask_k)`` where ``mask_k`` is the boolean map of
    pixels the per-pixel argmax assigned to query ``k``.
    """
    # Pixels actually won by query k in the argmax competition.
    mask_k = mask_labels == k
    assigned_area = mask_k.sum()

    # Area the query claims on its own, before the per-pixel competition.
    original_area = (mask_probs[k] >= mask_threshold).sum()
    mask_exists = assigned_area > 0 and original_area > 0

    # Eliminate disconnected tiny segments
    if mask_exists:
        # Drop segments that kept too small a fraction of their original mask.
        if not (assigned_area / original_area).item() > overlap_mask_area_threshold:
            mask_exists = False

    return mask_exists, mask_k
def compute_segments(
    mask_probs,
    pred_scores,
    pred_labels,
    mask_threshold: float = 0.5,
    overlap_mask_area_threshold: float = 0.8,
    label_ids_to_fuse: Optional[set[int]] = None,
    target_size: Optional[tuple[int, int]] = None,
):
    """Fuse per-query masks into one segmentation id map plus segment metadata.

    Args:
        mask_probs: per-query mask probabilities, shape (num_queries, height, width).
        pred_scores: per-query confidence scores, shape (num_queries,).
        pred_labels: per-query class ids, shape (num_queries,).
        mask_threshold: binarization threshold used when validating segments.
        overlap_mask_area_threshold: minimum kept-area ratio for a segment.
        label_ids_to_fuse: class ids whose instances are merged into one segment;
            `None` now means "fuse nothing" instead of crashing.
        target_size: optional (height, width) to bilinearly resize masks to.

    Returns:
        (segmentation, segments): an int32 id map of shape (height, width), and
        a list of dicts with "id", "label_id", "was_fused" and "score".
    """
    # Bug fix: `pred_class in None` raised TypeError whenever the caller left
    # `label_ids_to_fuse` at its default; treat None as the empty set.
    if label_ids_to_fuse is None:
        label_ids_to_fuse = set()

    height = mask_probs.shape[1] if target_size is None else target_size[0]
    width = mask_probs.shape[2] if target_size is None else target_size[1]

    segmentation = torch.zeros((height, width), dtype=torch.int32, device=mask_probs.device)
    segments: list[dict] = []

    if target_size is not None:
        mask_probs = nn.functional.interpolate(
            mask_probs.unsqueeze(0), size=target_size, mode="bilinear", align_corners=False
        )[0]

    current_segment_id = 0

    # Weigh each mask by its prediction score
    mask_probs *= pred_scores.view(-1, 1, 1)
    mask_labels = mask_probs.argmax(0)  # [height, width]

    # Keep track of instances of each class
    stuff_memory_list: dict[str, int] = {}
    for k in range(pred_labels.shape[0]):
        pred_class = pred_labels[k].item()
        should_fuse = pred_class in label_ids_to_fuse

        # Check if mask exists and large enough to be a segment
        mask_exists, mask_k = check_segment_validity(
            mask_labels, mask_probs, k, mask_threshold, overlap_mask_area_threshold
        )

        if mask_exists:
            # Fused classes reuse the segment id of their first instance.
            if pred_class in stuff_memory_list:
                current_segment_id = stuff_memory_list[pred_class]
            else:
                current_segment_id += 1

            # Add current object segment to final segmentation map
            segmentation[mask_k] = current_segment_id
            segment_score = round(pred_scores[k].item(), 6)
            segments.append(
                {
                    "id": current_segment_id,
                    "label_id": pred_class,
                    "was_fused": should_fuse,
                    "score": segment_score,
                }
            )
            if should_fuse:
                stuff_memory_list[pred_class] = current_segment_id

    return segmentation, segments
@requires(backends=("vision",))
| DetrImageProcessorKwargs |
python | milvus-io__pymilvus | pymilvus/milvus_client/async_milvus_client.py | {
"start": 824,
"end": 41455
} | class ____(BaseMilvusClient):
"""AsyncMilvusClient is an EXPERIMENTAL class
which only provides part of MilvusClient's methods"""
def __init__(
    self,
    uri: str = "http://localhost:19530",
    user: str = "",
    password: str = "",
    db_name: str = "",
    token: str = "",
    timeout: Optional[float] = None,
    **kwargs,
) -> None:
    """Create an async client bound to one Milvus connection.

    Args:
        uri: Milvus endpoint, e.g. "http://localhost:19530".
        user / password or token: authentication credentials.
        db_name: database to use for this connection.
        timeout: default RPC timeout in seconds; None means no timeout.
        **kwargs: forwarded to the connection factory.
    """
    # Register (or reuse) an async connection and remember its alias.
    self._using = create_connection(
        uri,
        token,
        db_name,
        use_async=True,
        user=user,
        password=password,
        timeout=timeout,
        **kwargs,
    )
    # Some behavior differs between self-hosted Milvus and hosted services.
    self.is_self_hosted = bool(self.get_server_type() == "milvus")
async def create_collection(
    self,
    collection_name: str,
    dimension: Optional[int] = None,
    primary_field_name: str = "id",  # default is "id"
    id_type: str = "int",  # or "string",
    vector_field_name: str = "vector",  # default is "vector"
    metric_type: str = "COSINE",
    auto_id: bool = False,
    timeout: Optional[float] = None,
    schema: Optional[CollectionSchema] = None,
    index_params: Optional[IndexParams] = None,
    **kwargs,
):
    """Create a collection from an explicit schema or via the quick-setup path.

    With ``schema`` given, the schema (and optional ``index_params``) drive the
    creation; otherwise a simple id+vector collection of ``dimension`` is made.
    """
    if schema is not None:
        return await self._create_collection_with_schema(
            collection_name, schema, index_params, timeout=timeout, **kwargs
        )
    return await self._fast_create_collection(
        collection_name,
        dimension,
        primary_field_name=primary_field_name,
        id_type=id_type,
        vector_field_name=vector_field_name,
        metric_type=metric_type,
        auto_id=auto_id,
        timeout=timeout,
        **kwargs,
    )
async def _fast_create_collection(
    self,
    collection_name: str,
    dimension: int,
    primary_field_name: str = "id",  # default is "id"
    id_type: Union[DataType, str] = DataType.INT64,  # or "string",
    vector_field_name: str = "vector",  # default is "vector"
    metric_type: str = "COSINE",
    auto_id: bool = False,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Quick-setup path: build an id+vector schema, create, index and load it.

    Raises:
        TypeError: if ``dimension`` is missing.
        PrimaryKeyException: if ``id_type`` is not an int/string type.
    """
    if dimension is None:
        # Fix: corrected typo in the error message ("requried" -> "required").
        msg = "missing required argument: 'dimension'"
        raise TypeError(msg)
    if "enable_dynamic_field" not in kwargs:
        kwargs["enable_dynamic_field"] = True

    schema = self.create_schema(auto_id=auto_id, **kwargs)

    # Map the user-facing id type onto the schema's primary-key DataType.
    if id_type in ("int", DataType.INT64):
        pk_data_type = DataType.INT64
    elif id_type in ("string", "str", DataType.VARCHAR):
        pk_data_type = DataType.VARCHAR
    else:
        raise PrimaryKeyException(message=ExceptionsMessage.PrimaryFieldType)

    pk_args = {}
    if "max_length" in kwargs and pk_data_type == DataType.VARCHAR:
        pk_args["max_length"] = kwargs["max_length"]

    schema.add_field(primary_field_name, pk_data_type, is_primary=True, **pk_args)
    vector_type = DataType.FLOAT_VECTOR
    schema.add_field(vector_field_name, vector_type, dim=dimension)
    schema.verify()

    conn = self._get_connection()
    if "consistency_level" not in kwargs:
        kwargs["consistency_level"] = DEFAULT_CONSISTENCY_LEVEL
    await conn.create_collection(collection_name, schema, timeout=timeout, **kwargs)

    # Quick-setup collections always get an AUTOINDEX on the vector field and
    # are loaded immediately so they are ready for search.
    index_params = IndexParams()
    index_params.add_index(vector_field_name, index_type="AUTOINDEX", metric_type=metric_type)
    await self.create_index(collection_name, index_params, timeout=timeout)
    await self.load_collection(collection_name, timeout=timeout)
async def _create_collection_with_schema(
    self,
    collection_name: str,
    schema: CollectionSchema,
    index_params: IndexParams,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Create ``collection_name`` from ``schema``, build indexes if any, then load it."""
    schema.verify()

    kwargs.setdefault("consistency_level", DEFAULT_CONSISTENCY_LEVEL)
    conn = self._get_connection()
    await conn.create_collection(collection_name, schema, timeout=timeout, **kwargs)

    if index_params:
        await self.create_index(collection_name, index_params, timeout=timeout)
    await self.load_collection(collection_name, timeout=timeout)
async def drop_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
    """Drop the collection and all of its data."""
    await self._get_connection().drop_collection(collection_name, timeout=timeout, **kwargs)
async def rename_collection(
    self,
    old_name: str,
    new_name: str,
    target_db: Optional[str] = "",
    timeout: Optional[float] = None,
    **kwargs,
):
    """Rename a collection, optionally moving it into ``target_db``."""
    await self._get_connection().rename_collection(
        old_name, new_name, target_db, timeout=timeout, **kwargs
    )
async def load_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
    """Load the collection into memory so it can be searched/queried."""
    await self._get_connection().load_collection(collection_name, timeout=timeout, **kwargs)
async def release_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
    """Release the collection from memory."""
    await self._get_connection().release_collection(collection_name, timeout=timeout, **kwargs)
async def create_index(
    self,
    collection_name: str,
    index_params: IndexParams,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Create every index described in ``index_params`` on the collection.

    Raises:
        ParamError: if ``index_params`` is empty.
    """
    validate_param("collection_name", collection_name, str)
    validate_param("index_params", index_params, IndexParams)
    if not len(index_params):
        raise ParamError(message="IndexParams is empty, no index can be created")

    # IndexParams is iterable; indexes are created one at a time.
    for single_index in index_params:
        await self._create_index(collection_name, single_index, timeout=timeout, **kwargs)
async def _create_index(
    self,
    collection_name: str,
    index_param: IndexParam,
    timeout: Optional[float] = None,
    **kwargs,
):
    """Create a single index from ``index_param`` (field, configs, index name)."""
    await self._get_connection().create_index(
        collection_name,
        index_param.field_name,
        index_param.get_index_configs(),
        timeout=timeout,
        index_name=index_param.index_name,
        **kwargs,
    )
async def drop_index(self, collection_name: str, index_name: str, timeout: Optional[float] = None, **kwargs):
    """Drop the named index; the empty string selects no specific field."""
    await self._get_connection().drop_index(collection_name, "", index_name, timeout=timeout, **kwargs)
async def create_partition(self, collection_name: str, partition_name: str, timeout: Optional[float] = None, **kwargs):
    """Create a partition inside the collection."""
    await self._get_connection().create_partition(collection_name, partition_name, timeout=timeout, **kwargs)
async def drop_partition(self, collection_name: str, partition_name: str, timeout: Optional[float] = None, **kwargs):
    """Drop a partition and its data from the collection."""
    await self._get_connection().drop_partition(collection_name, partition_name, timeout=timeout, **kwargs)
async def load_partitions(
    self,
    collection_name: str,
    partition_names: Union[str, List[str]],
    timeout: Optional[float] = None,
    **kwargs,
):
    """Load one or more partitions; a bare string means a single partition."""
    names = [partition_names] if isinstance(partition_names, str) else partition_names
    await self._get_connection().load_partitions(collection_name, names, timeout=timeout, **kwargs)
async def release_partitions(
    self,
    collection_name: str,
    partition_names: Union[str, List[str]],
    timeout: Optional[float] = None,
    **kwargs,
):
    """Release one or more partitions; a bare string means a single partition."""
    names = [partition_names] if isinstance(partition_names, str) else partition_names
    await self._get_connection().release_partitions(collection_name, names, timeout=timeout, **kwargs)
async def has_partition(
    self, collection_name: str, partition_name: str, timeout: Optional[float] = None, **kwargs
) -> bool:
    """Return True when ``partition_name`` exists in the collection."""
    return await self._get_connection().has_partition(
        collection_name, partition_name, timeout=timeout, **kwargs
    )
async def list_partitions(self, collection_name: str, timeout: Optional[float] = None, **kwargs) -> List[str]:
    """Return the names of all partitions in the collection."""
    return await self._get_connection().list_partitions(collection_name, timeout=timeout, **kwargs)
async def insert(
    self,
    collection_name: str,
    data: Union[Dict, List[Dict]],
    timeout: Optional[float] = None,
    partition_name: Optional[str] = "",
    **kwargs,
) -> Dict:
    """Insert rows into ``collection_name``; returns count, ids and cost."""
    # A single row dict is accepted as a convenience.
    if isinstance(data, Dict):
        data = [data]
    if not isinstance(data, List):
        msg = (
            "wrong type of argument 'data',"
            f"expected 'Dict' or list of 'Dict', got '{type(data).__name__}'"
        )
        raise TypeError(msg)
    if not data:
        # Nothing to do for an empty batch.
        return {"insert_count": 0, "ids": []}

    result = await self._get_connection().insert_rows(
        collection_name, data, partition_name=partition_name, timeout=timeout, **kwargs
    )
    return OmitZeroDict(
        {
            "insert_count": result.insert_count,
            "ids": result.primary_keys,
            "cost": result.cost,
        }
    )
async def upsert(
    self,
    collection_name: str,
    data: Union[Dict, List[Dict]],
    timeout: Optional[float] = None,
    partition_name: Optional[str] = "",
    **kwargs,
) -> Dict:
    """Upsert data into the collection asynchronously.

    Args:
        collection_name (str): Name of the collection to upsert into.
        data (List[Dict[str, any]]): A list of dicts to pass in. If list not provided, will
            cast to list.
        timeout (float, optional): The timeout to use, will override init timeout. Defaults
            to None.
        partition_name (str, optional): Name of the partition to upsert into.
        **kwargs (dict): Extra keyword arguments.
            * *partial_update* (bool, optional): Whether this is a partial update operation.
                If True, only the specified fields will be updated while others remain unchanged
                Default is False.

    Raises:
        DataNotMatchException: If the data has missing fields an exception will be thrown.
        MilvusException: General Milvus error on upsert.

    Returns:
        Dict: Number of rows that were upserted.
    """
    # A single row dict is accepted as a convenience, mirroring `insert`.
    if isinstance(data, Dict):
        data = [data]
    msg = "wrong type of argument 'data',"
    msg += f"expected 'Dict' or list of 'Dict', got '{type(data).__name__}'"
    if not isinstance(data, List):
        raise TypeError(msg)
    if len(data) == 0:
        return {"upsert_count": 0, "ids": []}

    conn = self._get_connection()
    # The previous `try: ... except Exception as ex: raise ex from ex` wrapper
    # was a no-op re-raise; exceptions now propagate unchanged (same as insert).
    res = await conn.upsert_rows(
        collection_name, data, partition_name=partition_name, timeout=timeout, **kwargs
    )
    return OmitZeroDict(
        {
            "upsert_count": res.upsert_count,
            "cost": res.cost,
            "ids": res.primary_keys,
        }
    )
async def hybrid_search(
    self,
    collection_name: str,
    reqs: List[AnnSearchRequest],
    ranker: Union[BaseRanker, Function],
    limit: int = 10,
    output_fields: Optional[List[str]] = None,
    timeout: Optional[float] = None,
    partition_names: Optional[List[str]] = None,
    **kwargs,
) -> List[List[dict]]:
    """Run several ANN sub-searches and fuse their results with ``ranker``."""
    return await self._get_connection().hybrid_search(
        collection_name,
        reqs,
        ranker,
        limit=limit,
        partition_names=partition_names,
        output_fields=output_fields,
        timeout=timeout,
        **kwargs,
    )
async def search(
    self,
    collection_name: str,
    data: Union[List[list], list],
    filter: str = "",
    limit: int = 10,
    output_fields: Optional[List[str]] = None,
    search_params: Optional[dict] = None,
    timeout: Optional[float] = None,
    partition_names: Optional[List[str]] = None,
    anns_field: Optional[str] = None,
    ranker: Optional[Union[Function, FunctionScore]] = None,
    **kwargs,
) -> List[List[dict]]:
    """Vector search on ``collection_name`` with an optional boolean ``filter``."""
    # Pull template params for the filter expression out of kwargs first.
    filter_params = kwargs.pop("filter_params", {})
    return await self._get_connection().search(
        collection_name,
        data,
        anns_field or "",
        search_params or {},
        expression=filter,
        limit=limit,
        output_fields=output_fields,
        partition_names=partition_names,
        expr_params=filter_params,
        timeout=timeout,
        ranker=ranker,
        **kwargs,
    )
async def query(
    self,
    collection_name: str,
    filter: str = "",
    output_fields: Optional[List[str]] = None,
    timeout: Optional[float] = None,
    ids: Optional[Union[List, str, int]] = None,
    partition_names: Optional[List[str]] = None,
    **kwargs,
) -> List[dict]:
    """Scalar query over the collection.

    Exactly one of ``filter`` and ``ids`` may be supplied; ``ids`` is turned
    into a primary-key filter expression using the collection schema.

    Raises:
        DataTypeNotMatchException: if ``filter`` is not a string.
        ParamError: if both ``filter`` and ``ids`` are given.
    """
    if filter and not isinstance(filter, str):
        raise DataTypeNotMatchException(message=ExceptionsMessage.ExprType % type(filter))

    if filter and ids is not None:
        raise ParamError(message=ExceptionsMessage.AmbiguousQueryFilterParam)

    if isinstance(ids, (int, str)):
        ids = [ids]

    conn = self._get_connection()
    if ids:
        # Need the schema to know the primary-key field before building the
        # expression. The previous `except Exception as ex: raise ex from ex`
        # wrapper was a no-op re-raise and has been removed.
        schema_dict, _ = await conn._get_schema_from_cache_or_remote(
            collection_name, timeout=timeout
        )
        filter = self._pack_pks_expr(schema_dict, ids)

    if not output_fields:
        output_fields = ["*"]

    return await conn.query(
        collection_name,
        expr=filter,
        output_fields=output_fields,
        partition_names=partition_names,
        timeout=timeout,
        expr_params=kwargs.pop("filter_params", {}),
        **kwargs,
    )
async def get(
    self,
    collection_name: str,
    ids: Union[list, str, int],
    output_fields: Optional[List[str]] = None,
    timeout: Optional[float] = None,
    partition_names: Optional[List[str]] = None,
    **kwargs,
) -> List[dict]:
    """Fetch rows by primary key(s); returns an empty list for empty ``ids``."""
    if not isinstance(ids, list):
        ids = [ids]

    if len(ids) == 0:
        return []

    conn = self._get_connection()
    # The previous `except Exception as ex: raise ex from ex` wrapper was a
    # no-op re-raise and has been removed; errors propagate unchanged.
    schema_dict, _ = await conn._get_schema_from_cache_or_remote(
        collection_name, timeout=timeout
    )

    if not output_fields:
        output_fields = ["*"]

    # Build a "pk in [...]" expression from the schema's primary-key field.
    expr = self._pack_pks_expr(schema_dict, ids)
    return await conn.query(
        collection_name,
        expr=expr,
        output_fields=output_fields,
        partition_names=partition_names,
        timeout=timeout,
        **kwargs,
    )
async def delete(
    self,
    collection_name: str,
    ids: Optional[Union[list, str, int]] = None,
    timeout: Optional[float] = None,
    filter: Optional[str] = None,
    partition_name: Optional[str] = None,
    **kwargs,
) -> Dict[str, int]:
    """Delete rows by primary keys (``ids`` / legacy ``pks``) or by ``filter``.

    Returns:
        Dict with "delete_count" (and "cost"); for servers that report them,
        the list of deleted primary keys is returned instead.

    Raises:
        TypeError: if any id/pk is not an int or str.
        ParamError: if both primary keys and ``filter`` are supplied.
        DataTypeNotMatchException: if neither keys nor a string filter is given.
    """
    pks = kwargs.get("pks", [])
    if isinstance(pks, (int, str)):
        pks = [pks]

    for pk in pks:
        if not isinstance(pk, (int, str)):
            msg = f"wrong type of argument pks, expect list, int or str, got '{type(pk).__name__}'"
            raise TypeError(msg)

    if ids is not None:
        if isinstance(ids, (int, str)):
            pks.append(ids)
        elif isinstance(ids, list):
            # Renamed loop variable from `id` to avoid shadowing the builtin.
            for each_id in ids:
                if not isinstance(each_id, (int, str)):
                    msg = f"wrong type of argument ids, expect list, int or str, got '{type(each_id).__name__}'"
                    raise TypeError(msg)
            pks.extend(ids)
        else:
            msg = f"wrong type of argument ids, expect list, int or str, got '{type(ids).__name__}'"
            raise TypeError(msg)

    # validate ambiguous delete filter param before describe collection rpc
    if filter and len(pks) > 0:
        raise ParamError(message=ExceptionsMessage.AmbiguousDeleteFilterParam)

    conn = self._get_connection()
    if len(pks) > 0:
        # Build "pk in [...]" from the schema's primary-key field. The previous
        # `except Exception as ex: raise ex from ex` wrapper was a no-op.
        schema_dict, _ = await conn._get_schema_from_cache_or_remote(
            collection_name, timeout=timeout
        )
        expr = self._pack_pks_expr(schema_dict, pks)
    else:
        if not isinstance(filter, str):
            raise DataTypeNotMatchException(message=ExceptionsMessage.ExprType % type(filter))
        expr = filter

    res = await conn.delete(
        collection_name=collection_name,
        expression=expr,
        partition_name=partition_name,
        expr_params=kwargs.pop("filter_params", {}),
        timeout=timeout,
        **kwargs,
    )
    # compatible with deletions that returns primary keys
    if res.primary_keys:
        return list(res.primary_keys)
    return OmitZeroDict({"delete_count": res.delete_count, "cost": res.cost})
async def describe_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs) -> dict:
    """Describe the collection, flattening struct-array fields into ``fields``."""
    description = await self._get_connection().describe_collection(
        collection_name, timeout=timeout, **kwargs
    )
    # Convert internal struct_array_fields to user-friendly format
    if isinstance(description, dict) and "struct_array_fields" in description:
        description["fields"].extend(
            convert_struct_fields_to_user_format(description["struct_array_fields"])
        )
        # Remove internal struct_array_fields from user-facing response
        description.pop("struct_array_fields")
    return description
async def has_collection(self, collection_name: str, timeout: Optional[float] = None, **kwargs) -> bool:
    """Return True when ``collection_name`` exists."""
    return await self._get_connection().has_collection(collection_name, timeout=timeout, **kwargs)
async def list_collections(self, timeout: Optional[float] = None, **kwargs) -> List[str]:
    """Return the names of all collections in the current database."""
    return await self._get_connection().list_collections(timeout=timeout, **kwargs)
async def get_collection_stats(
self, collection_name: str, timeout: Optional[float] = None, **kwargs
) -> Dict:
conn = self._get_connection()
stats = await conn.get_collection_stats(collection_name, timeout=timeout, **kwargs)
result = {stat.key: stat.value for stat in stats}
if "row_count" in result:
result["row_count"] = int(result["row_count"])
return result
async def get_partition_stats(
self, collection_name: str, partition_name: str, timeout: Optional[float] = None, **kwargs
) -> Dict:
conn = self._get_connection()
stats = await conn.get_partition_stats(
collection_name, partition_name, timeout=timeout, **kwargs
)
result = {stat.key: stat.value for stat in stats}
if "row_count" in result:
result["row_count"] = int(result["row_count"])
return result
async def get_load_state(
self,
collection_name: str,
partition_names: Optional[List[str]] = None,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
state = await conn.get_load_state(
collection_name, partition_names, timeout=timeout, **kwargs
)
ret = {"state": state}
if state == LoadState.Loading:
progress = await conn.get_loading_progress(
collection_name, partition_names, timeout=timeout
)
ret["progress"] = progress
return ret
async def refresh_load(
self,
collection_name: str,
partition_names: Optional[List[str]] = None,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
return await conn.refresh_load(collection_name, partition_names, timeout=timeout, **kwargs)
async def get_server_version(self, timeout: Optional[float] = None, **kwargs) -> str:
conn = self._get_connection()
return await conn.get_server_version(timeout=timeout, **kwargs)
async def describe_replica(
self, collection_name: str, timeout: Optional[float] = None, **kwargs
):
conn = self._get_connection()
return await conn.describe_replica(collection_name, timeout=timeout, **kwargs)
async def alter_collection_properties(
self, collection_name: str, properties: dict, timeout: Optional[float] = None, **kwargs
):
conn = self._get_connection()
await conn.alter_collection_properties(
collection_name,
properties=properties,
timeout=timeout,
**kwargs,
)
async def drop_collection_properties(
self,
collection_name: str,
property_keys: List[str],
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.drop_collection_properties(
collection_name, property_keys=property_keys, timeout=timeout, **kwargs
)
async def alter_collection_field(
self,
collection_name: str,
field_name: str,
field_params: dict,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.alter_collection_field(
collection_name,
field_name=field_name,
field_params=field_params,
timeout=timeout,
**kwargs,
)
async def add_collection_field(
self,
collection_name: str,
field_name: str,
data_type: DataType,
desc: str = "",
timeout: Optional[float] = None,
**kwargs,
):
field_schema = self.create_field_schema(field_name, data_type, desc, **kwargs)
conn = self._get_connection()
await conn.add_collection_field(
collection_name,
field_schema,
timeout=timeout,
**kwargs,
)
async def close(self):
await connections.async_remove_connection(self._using)
async def list_indexes(self, collection_name: str, field_name: Optional[str] = "", **kwargs):
conn = self._get_connection()
indexes = await conn.list_indexes(collection_name, **kwargs)
index_name_list = []
for index in indexes:
if not index:
continue
if not field_name or index.field_name == field_name:
index_name_list.append(index.index_name)
return index_name_list
async def describe_index(
self, collection_name: str, index_name: str, timeout: Optional[float] = None, **kwargs
) -> Dict:
conn = self._get_connection()
return await conn.describe_index(collection_name, index_name, timeout=timeout, **kwargs)
async def alter_index_properties(
self,
collection_name: str,
index_name: str,
properties: dict,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.alter_index_properties(
collection_name, index_name, properties=properties, timeout=timeout, **kwargs
)
async def drop_index_properties(
self,
collection_name: str,
index_name: str,
property_keys: List[str],
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.drop_index_properties(
collection_name, index_name, property_keys=property_keys, timeout=timeout, **kwargs
)
async def create_alias(
self, collection_name: str, alias: str, timeout: Optional[float] = None, **kwargs
):
conn = self._get_connection()
await conn.create_alias(collection_name, alias, timeout=timeout, **kwargs)
async def drop_alias(self, alias: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
await conn.drop_alias(alias, timeout=timeout, **kwargs)
async def alter_alias(
self, collection_name: str, alias: str, timeout: Optional[float] = None, **kwargs
):
conn = self._get_connection()
await conn.alter_alias(collection_name, alias, timeout=timeout, **kwargs)
async def describe_alias(self, alias: str, timeout: Optional[float] = None, **kwargs) -> Dict:
conn = self._get_connection()
return await conn.describe_alias(alias, timeout=timeout, **kwargs)
async def list_aliases(
self, collection_name: str = "", timeout: Optional[float] = None, **kwargs
) -> List[str]:
conn = self._get_connection()
return await conn.list_aliases(collection_name, timeout=timeout, **kwargs)
def using_database(self, db_name: str, **kwargs):
conn = self._get_connection()
conn.reset_db_name(db_name)
def use_database(self, db_name: str, **kwargs):
conn = self._get_connection()
conn.reset_db_name(db_name)
async def create_database(
self,
db_name: str,
properties: Optional[dict] = None,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.create_database(
db_name=db_name, properties=properties, timeout=timeout, **kwargs
)
async def drop_database(self, db_name: str, **kwargs):
conn = self._get_connection()
await conn.drop_database(db_name, **kwargs)
async def list_databases(self, timeout: Optional[float] = None, **kwargs) -> List[str]:
conn = self._get_connection()
return await conn.list_database(timeout=timeout, **kwargs)
async def describe_database(self, db_name: str, **kwargs) -> dict:
conn = self._get_connection()
return await conn.describe_database(db_name, **kwargs)
async def alter_database_properties(self, db_name: str, properties: dict, **kwargs):
conn = self._get_connection()
await conn.alter_database(db_name, properties, **kwargs)
async def drop_database_properties(self, db_name: str, property_keys: List[str], **kwargs):
conn = self._get_connection()
await conn.drop_database_properties(db_name, property_keys, **kwargs)
async def create_user(
self, user_name: str, password: str, timeout: Optional[float] = None, **kwargs
):
conn = self._get_connection()
await conn.create_user(user_name, password, timeout=timeout, **kwargs)
async def drop_user(self, user_name: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
await conn.drop_user(user_name, timeout=timeout, **kwargs)
async def update_password(
self,
user_name: str,
old_password: str,
new_password: str,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.update_password(user_name, old_password, new_password, timeout=timeout, **kwargs)
async def list_users(self, timeout: Optional[float] = None, **kwargs) -> List[str]:
conn = self._get_connection()
return await conn.list_users(timeout=timeout, **kwargs)
async def describe_user(
self, user_name: str, timeout: Optional[float] = None, **kwargs
) -> dict:
conn = self._get_connection()
res = await conn.describe_user(user_name, True, timeout=timeout, **kwargs)
if hasattr(res, "results") and res.results:
user_info = UserInfo(res.results)
if user_info.groups:
item = user_info.groups[0]
return {"user_name": user_name, "roles": item.roles}
return {}
async def create_privilege_group(
self,
group_name: str,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.create_privilege_group(group_name, timeout=timeout, **kwargs)
async def drop_privilege_group(
self,
group_name: str,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.drop_privilege_group(group_name, timeout=timeout, **kwargs)
async def list_privilege_groups(
self,
timeout: Optional[float] = None,
**kwargs,
) -> List[Dict[str, Union[str, List[str]]]]:
conn = self._get_connection()
res = await conn.list_privilege_groups(timeout=timeout, **kwargs)
ret = []
for g in res:
privileges = []
for p in g.privileges:
privileges.append(p.name)
ret.append({"privilege_group": g.group_name, "privileges": privileges})
return ret
async def add_privileges_to_group(
self,
group_name: str,
privileges: List[str],
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.add_privileges_to_group(group_name, privileges, timeout=timeout, **kwargs)
async def remove_privileges_from_group(
self,
group_name: str,
privileges: List[str],
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.remove_privileges_from_group(group_name, privileges, timeout=timeout, **kwargs)
async def create_role(self, role_name: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
await conn.create_role(role_name, timeout=timeout, **kwargs)
async def drop_role(
self, role_name: str, force_drop: bool = False, timeout: Optional[float] = None, **kwargs
):
conn = self._get_connection()
await conn.drop_role(role_name, force_drop=force_drop, timeout=timeout, **kwargs)
async def grant_role(
self, user_name: str, role_name: str, timeout: Optional[float] = None, **kwargs
):
conn = self._get_connection()
await conn.grant_role(user_name, role_name, timeout=timeout, **kwargs)
async def revoke_role(
self, user_name: str, role_name: str, timeout: Optional[float] = None, **kwargs
):
conn = self._get_connection()
await conn.revoke_role(user_name, role_name, timeout=timeout, **kwargs)
async def grant_privilege(
self,
role_name: str,
object_type: str,
privilege: str,
object_name: str,
db_name: Optional[str] = "",
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.grant_privilege(
role_name, object_type, object_name, privilege, db_name, timeout=timeout, **kwargs
)
async def revoke_privilege(
self,
role_name: str,
object_type: str,
privilege: str,
object_name: str,
db_name: Optional[str] = "",
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.revoke_privilege(
role_name, object_type, object_name, privilege, db_name, timeout=timeout, **kwargs
)
async def grant_privilege_v2(
self,
role_name: str,
privilege: str,
collection_name: str,
db_name: Optional[str] = None,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.grant_privilege_v2(
role_name,
privilege,
collection_name,
db_name=db_name,
timeout=timeout,
**kwargs,
)
async def revoke_privilege_v2(
self,
role_name: str,
privilege: str,
collection_name: str,
db_name: Optional[str] = None,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.revoke_privilege_v2(
role_name,
privilege,
collection_name,
db_name=db_name,
timeout=timeout,
**kwargs,
)
async def describe_role(
self, role_name: str, timeout: Optional[float] = None, **kwargs
) -> Dict:
conn = self._get_connection()
db_name = kwargs.pop("db_name", "")
res = await conn.select_grant_for_one_role(role_name, db_name, timeout=timeout, **kwargs)
ret = {}
ret["role"] = role_name
ret["privileges"] = [dict(i) for i in res.groups]
return ret
async def list_roles(self, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
res = await conn.list_roles(False, timeout=timeout, **kwargs)
role_info = RoleInfo(res)
return [g.role_name for g in role_info.groups]
async def create_resource_group(self, name: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
await conn.create_resource_group(name, timeout=timeout, **kwargs)
async def drop_resource_group(self, name: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
await conn.drop_resource_group(name, timeout=timeout, **kwargs)
async def update_resource_groups(
self, configs: Dict[str, ResourceGroupConfig], timeout: Optional[float] = None, **kwargs
):
conn = self._get_connection()
await conn.update_resource_groups(configs, timeout=timeout, **kwargs)
async def describe_resource_group(self, name: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
return await conn.describe_resource_group(name, timeout=timeout, **kwargs)
async def list_resource_groups(self, timeout: Optional[float] = None, **kwargs) -> List[str]:
conn = self._get_connection()
return await conn.list_resource_groups(timeout=timeout, **kwargs)
async def transfer_replica(
self,
source: str,
target: str,
collection_name: str,
num_replica: int,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
await conn.transfer_replica(
source, target, collection_name, num_replica, timeout=timeout, **kwargs
)
async def flush(self, collection_name: str, timeout: Optional[float] = None, **kwargs):
conn = self._get_connection()
await conn.flush([collection_name], timeout=timeout, **kwargs)
async def flush_all(self, timeout: Optional[float] = None, **kwargs) -> None:
"""Flush all collections.
Args:
timeout (Optional[float]): An optional duration of time in seconds to allow for the RPC.
**kwargs: Additional arguments.
"""
conn = self._get_connection()
await conn.flush_all(timeout=timeout, **kwargs)
async def get_flush_all_state(self, timeout: Optional[float] = None, **kwargs) -> bool:
"""Get the flush all state.
Args:
timeout (Optional[float]): An optional duration of time in seconds to allow for the RPC.
**kwargs: Additional arguments.
Returns:
bool: True if flush all operation is completed, False otherwise.
"""
conn = self._get_connection()
return await conn.get_flush_all_state(timeout=timeout, **kwargs)
async def compact(
self,
collection_name: str,
is_clustering: Optional[bool] = False,
is_l0: Optional[bool] = False,
timeout: Optional[float] = None,
**kwargs,
) -> int:
conn = self._get_connection()
return await conn.compact(
collection_name, is_clustering=is_clustering, is_l0=is_l0, timeout=timeout, **kwargs
)
async def get_compaction_state(
self, job_id: int, timeout: Optional[float] = None, **kwargs
) -> str:
conn = self._get_connection()
result = await conn.get_compaction_state(job_id, timeout=timeout, **kwargs)
return result.state_name
async def get_compaction_plans(
self,
job_id: int,
timeout: Optional[float] = None,
**kwargs,
):
"""Get compaction plans for a specific job.
Args:
job_id (int): The ID of the compaction job.
timeout (Optional[float]): An optional duration of time in seconds to allow for the RPC.
**kwargs: Additional arguments.
Returns:
CompactionPlans: The compaction plans for the specified job.
"""
conn = self._get_connection()
return await conn.get_compaction_plans(job_id, timeout=timeout, **kwargs)
async def run_analyzer(
self,
texts: Union[str, List[str]],
analyzer_params: Optional[Union[str, Dict]] = None,
with_hash: bool = False,
with_detail: bool = False,
collection_name: Optional[str] = None,
field_name: Optional[str] = None,
analyzer_names: Optional[Union[str, List[str]]] = None,
timeout: Optional[float] = None,
**kwargs,
):
conn = self._get_connection()
return await conn.run_analyzer(
texts,
analyzer_params=analyzer_params,
with_hash=with_hash,
with_detail=with_detail,
collection_name=collection_name,
field_name=field_name,
analyzer_names=analyzer_names,
timeout=timeout,
**kwargs,
)
async def update_replicate_configuration(
self,
clusters: Optional[List[Dict]] = None,
cross_cluster_topology: Optional[List[Dict]] = None,
timeout: Optional[float] = None,
**kwargs,
):
"""
Update replication configuration across Milvus clusters.
Args:
clusters (List[Dict], optional): List of cluster configurations.
Each dict should contain:
- cluster_id (str): Unique identifier for the cluster
- connection_param (Dict): Connection parameters with 'uri' and 'token'
- pchannels (List[str], optional): Physical channels for the cluster
cross_cluster_topology (List[Dict], optional): List of replication relationships.
Each dict should contain:
- source_cluster_id (str): ID of the source cluster
- target_cluster_id (str): ID of the target cluster
timeout (float, optional): An optional duration of time in seconds to allow for the RPC
**kwargs: Additional arguments
Returns:
Status: The status of the operation
Raises:
ParamError: If neither clusters nor cross_cluster_topology is provided
MilvusException: If the operation fails
"""
conn = self._get_connection()
return await conn.update_replicate_configuration(
clusters=clusters,
cross_cluster_topology=cross_cluster_topology,
timeout=timeout,
**kwargs,
)
| AsyncMilvusClient |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/strategies/_internal/numbers.py | {
"start": 4905,
"end": 20473
} | class ____(SearchStrategy[float]):
"""A strategy for floating point numbers."""
def __init__(
self,
*,
min_value: float,
max_value: float,
allow_nan: bool,
# The smallest nonzero number we can represent is usually a subnormal, but may
# be the smallest normal if we're running in unsafe denormals-are-zero mode.
# While that's usually an explicit error, we do need to handle the case where
# the user passes allow_subnormal=False.
smallest_nonzero_magnitude: float = SMALLEST_SUBNORMAL,
):
super().__init__()
assert isinstance(allow_nan, bool)
assert smallest_nonzero_magnitude >= 0.0, "programmer error if this is negative"
if smallest_nonzero_magnitude == 0.0: # pragma: no cover
raise FloatingPointError(
"Got allow_subnormal=True, but we can't represent subnormal floats "
"right now, in violation of the IEEE-754 floating-point "
"specification. This is usually because something was compiled with "
"-ffast-math or a similar option, which sets global processor state. "
"See https://simonbyrne.github.io/notes/fastmath/ for a more detailed "
"writeup - and good luck!"
)
self.min_value = min_value
self.max_value = max_value
self.allow_nan = allow_nan
self.smallest_nonzero_magnitude = smallest_nonzero_magnitude
def __repr__(self) -> str:
return (
f"{self.__class__.__name__}({self.min_value=}, {self.max_value=}, "
f"{self.allow_nan=}, {self.smallest_nonzero_magnitude=})"
).replace("self.", "")
def do_draw(self, data: ConjectureData) -> float:
return data.draw_float(
min_value=self.min_value,
max_value=self.max_value,
allow_nan=self.allow_nan,
smallest_nonzero_magnitude=self.smallest_nonzero_magnitude,
)
def filter(self, condition):
# Handle a few specific weird cases.
if condition is math.isfinite:
return FloatStrategy(
min_value=max(self.min_value, next_up(float("-inf"))),
max_value=min(self.max_value, next_down(float("inf"))),
allow_nan=False,
smallest_nonzero_magnitude=self.smallest_nonzero_magnitude,
)
if condition is math.isinf:
if permitted_infs := [
x
for x in (-math.inf, math.inf)
if self.min_value <= x <= self.max_value
]:
return SampledFromStrategy(permitted_infs)
return nothing()
if condition is math.isnan:
if not self.allow_nan:
return nothing()
return NanStrategy()
constraints, pred = get_float_predicate_bounds(condition)
if not constraints:
return super().filter(pred)
min_bound = max(constraints.get("min_value", -math.inf), self.min_value)
max_bound = min(constraints.get("max_value", math.inf), self.max_value)
# Adjustments for allow_subnormal=False, if any need to be made
if -self.smallest_nonzero_magnitude < min_bound < 0:
min_bound = -0.0
elif 0 < min_bound < self.smallest_nonzero_magnitude:
min_bound = self.smallest_nonzero_magnitude
if -self.smallest_nonzero_magnitude < max_bound < 0:
max_bound = -self.smallest_nonzero_magnitude
elif 0 < max_bound < self.smallest_nonzero_magnitude:
max_bound = 0.0
if min_bound > max_bound:
return nothing()
if (
min_bound > self.min_value
or self.max_value > max_bound
or (self.allow_nan and (-math.inf < min_bound or max_bound < math.inf))
):
self = type(self)(
min_value=min_bound,
max_value=max_bound,
allow_nan=False,
smallest_nonzero_magnitude=self.smallest_nonzero_magnitude,
)
if pred is None:
return self
return super().filter(pred)
@cacheable
@defines_strategy(force_reusable_values=True)
def floats(
min_value: Real | None = None,
max_value: Real | None = None,
*,
allow_nan: bool | None = None,
allow_infinity: bool | None = None,
allow_subnormal: bool | None = None,
width: Literal[16, 32, 64] = 64,
exclude_min: bool = False,
exclude_max: bool = False,
) -> SearchStrategy[float]:
"""Returns a strategy which generates floats.
- If min_value is not None, all values will be ``>= min_value``
(or ``> min_value`` if ``exclude_min``).
- If max_value is not None, all values will be ``<= max_value``
(or ``< max_value`` if ``exclude_max``).
- If min_value or max_value is not None, it is an error to enable
allow_nan.
- If both min_value and max_value are not None, it is an error to enable
allow_infinity.
- If inferred values range does not include subnormal values, it is an error
to enable allow_subnormal.
Where not explicitly ruled out by the bounds,
:wikipedia:`subnormals <Subnormal_number>`, infinities, and NaNs are possible
values generated by this strategy.
The width argument specifies the maximum number of bits of precision
required to represent the generated float. Valid values are 16, 32, or 64.
Passing ``width=32`` will still use the builtin 64-bit :class:`~python:float` class,
but always for values which can be exactly represented as a 32-bit float.
The exclude_min and exclude_max argument can be used to generate numbers
from open or half-open intervals, by excluding the respective endpoints.
Excluding either signed zero will also exclude the other.
Attempting to exclude an endpoint which is None will raise an error;
use ``allow_infinity=False`` to generate finite floats. You can however
use e.g. ``min_value=-math.inf, exclude_min=True`` to exclude only
one infinite endpoint.
Examples from this strategy have a complicated and hard to explain
shrinking behaviour, but it tries to improve "human readability". Finite
numbers will be preferred to infinity and infinity will be preferred to
NaN.
"""
check_type(bool, exclude_min, "exclude_min")
check_type(bool, exclude_max, "exclude_max")
if allow_nan is None:
allow_nan = bool(min_value is None and max_value is None)
elif allow_nan and (min_value is not None or max_value is not None):
raise InvalidArgument(f"Cannot have {allow_nan=}, with min_value or max_value")
if width not in (16, 32, 64):
raise InvalidArgument(
f"Got {width=}, but the only valid values "
"are the integers 16, 32, and 64."
)
# Literal[16] accepts both 16 and 16.0. Normalize to the int 16 here, mainly
# for mypyc. We want to support width=16.0 to make e.g. width=mywidth / 2 for
# mywidth=32 easy.
width = cast(Literal[16, 32, 64], int(width))
check_valid_bound(min_value, "min_value")
check_valid_bound(max_value, "max_value")
if math.copysign(1.0, -0.0) == 1.0: # pragma: no cover
raise FloatingPointError(
"Your Python install can't represent -0.0, which is required by the "
"IEEE-754 floating-point specification. This is probably because it was "
"compiled with an unsafe option like -ffast-math; for a more detailed "
"explanation see https://simonbyrne.github.io/notes/fastmath/"
)
if allow_subnormal and next_up(0.0, width=width) == 0: # pragma: no cover
# Not worth having separate CI envs and dependencies just to cover this branch;
# discussion in https://github.com/HypothesisWorks/hypothesis/issues/3092
#
# Erroring out here ensures that the database contents are interpreted
# consistently - which matters for such a foundational strategy, even if it's
# not always true for all user-composed strategies further up the stack.
from _hypothesis_ftz_detector import identify_ftz_culprits
try:
ftz_pkg = identify_ftz_culprits()
except Exception:
ftz_pkg = None
if ftz_pkg:
ftz_msg = (
f"This seems to be because the `{ftz_pkg}` package was compiled with "
f"-ffast-math or a similar option, which sets global processor state "
f"- see https://simonbyrne.github.io/notes/fastmath/ for details. "
f"If you don't know why {ftz_pkg} is installed, `pipdeptree -rp "
f"{ftz_pkg}` will show which packages depend on it."
)
else:
ftz_msg = (
"This is usually because something was compiled with -ffast-math "
"or a similar option, which sets global processor state. See "
"https://simonbyrne.github.io/notes/fastmath/ for a more detailed "
"writeup - and good luck!"
)
raise FloatingPointError(
f"Got {allow_subnormal=}, but we can't represent "
f"subnormal floats right now, in violation of the IEEE-754 floating-point "
f"specification. {ftz_msg}"
)
min_arg, max_arg = min_value, max_value
if min_value is not None:
min_value = float_of(min_value, width)
assert isinstance(min_value, float)
if max_value is not None:
max_value = float_of(max_value, width)
assert isinstance(max_value, float)
if min_value != min_arg:
raise InvalidArgument(
f"min_value={min_arg!r} cannot be exactly represented as a float "
f"of width {width} - use {min_value=} instead."
)
if max_value != max_arg:
raise InvalidArgument(
f"max_value={max_arg!r} cannot be exactly represented as a float "
f"of width {width} - use {max_value=} instead."
)
if exclude_min and (min_value is None or min_value == math.inf):
raise InvalidArgument(f"Cannot exclude {min_value=}")
if exclude_max and (max_value is None or max_value == -math.inf):
raise InvalidArgument(f"Cannot exclude {max_value=}")
assumed_allow_subnormal = allow_subnormal is None or allow_subnormal
if min_value is not None and (
exclude_min or (min_arg is not None and min_value < min_arg)
):
min_value = next_up_normal(
min_value, width, allow_subnormal=assumed_allow_subnormal
)
if min_value == min_arg:
assert min_value == min_arg == 0
assert is_negative(min_arg)
assert not is_negative(min_value)
min_value = next_up_normal(
min_value, width, allow_subnormal=assumed_allow_subnormal
)
assert min_value > min_arg # type: ignore
if max_value is not None and (
exclude_max or (max_arg is not None and max_value > max_arg)
):
max_value = next_down_normal(
max_value, width, allow_subnormal=assumed_allow_subnormal
)
if max_value == max_arg:
assert max_value == max_arg == 0
assert is_negative(max_value)
assert not is_negative(max_arg)
max_value = next_down_normal(
max_value, width, allow_subnormal=assumed_allow_subnormal
)
assert max_value < max_arg # type: ignore
if min_value == -math.inf:
min_value = None
if max_value == math.inf:
max_value = None
bad_zero_bounds = (
min_value == max_value == 0
and is_negative(max_value)
and not is_negative(min_value)
)
if (
min_value is not None
and max_value is not None
and (min_value > max_value or bad_zero_bounds)
):
# This is a custom alternative to check_valid_interval, because we want
# to include the bit-width and exclusion information in the message.
msg = (
f"There are no {width}-bit floating-point values between "
f"min_value={min_arg!r} and max_value={max_arg!r}"
)
if exclude_min or exclude_max:
msg += f", {exclude_min=} and {exclude_max=}"
raise InvalidArgument(msg)
if allow_infinity is None:
allow_infinity = bool(min_value is None or max_value is None)
elif allow_infinity:
if min_value is not None and max_value is not None:
raise InvalidArgument(
f"Cannot have {allow_infinity=}, with both min_value and max_value"
)
elif min_value == math.inf:
if min_arg == math.inf:
raise InvalidArgument("allow_infinity=False excludes min_value=inf")
raise InvalidArgument(
f"exclude_min=True turns min_value={min_arg!r} into inf, "
"but allow_infinity=False"
)
elif max_value == -math.inf:
if max_arg == -math.inf:
raise InvalidArgument("allow_infinity=False excludes max_value=-inf")
raise InvalidArgument(
f"exclude_max=True turns max_value={max_arg!r} into -inf, "
"but allow_infinity=False"
)
smallest_normal = width_smallest_normals[width]
if allow_subnormal is None:
if min_value is not None and max_value is not None:
if min_value == max_value:
allow_subnormal = -smallest_normal < min_value < smallest_normal
else:
allow_subnormal = (
min_value < smallest_normal and max_value > -smallest_normal
)
elif min_value is not None:
allow_subnormal = min_value < smallest_normal
elif max_value is not None:
allow_subnormal = max_value > -smallest_normal
else:
allow_subnormal = True
if allow_subnormal:
if min_value is not None and min_value >= smallest_normal:
raise InvalidArgument(
f"allow_subnormal=True, but minimum value {min_value} "
f"excludes values below float{width}'s "
f"smallest positive normal {smallest_normal}"
)
if max_value is not None and max_value <= -smallest_normal:
raise InvalidArgument(
f"allow_subnormal=True, but maximum value {max_value} "
f"excludes values above float{width}'s "
f"smallest negative normal {-smallest_normal}"
)
if min_value is None:
min_value = float("-inf")
if max_value is None:
max_value = float("inf")
if not allow_infinity:
min_value = max(min_value, next_up(float("-inf")))
max_value = min(max_value, next_down(float("inf")))
assert isinstance(min_value, float)
assert isinstance(max_value, float)
smallest_nonzero_magnitude = (
SMALLEST_SUBNORMAL if allow_subnormal else smallest_normal
)
result: SearchStrategy = FloatStrategy(
min_value=min_value,
max_value=max_value,
allow_nan=allow_nan,
smallest_nonzero_magnitude=smallest_nonzero_magnitude,
)
if width < 64:
def downcast(x: float) -> float:
try:
return float_of(x, width)
except OverflowError: # pragma: no cover
reject()
result = result.map(downcast)
return result
| FloatStrategy |
python | doocs__leetcode | solution/0700-0799/0745.Prefix and Suffix Search/Solution.py | {
"start": 0,
"end": 524
} | class ____:
def __init__(self, words: List[str]):
self.d = {}
for k, w in enumerate(words):
n = len(w)
for i in range(n + 1):
a = w[:i]
for j in range(n + 1):
b = w[j:]
self.d[(a, b)] = k
def f(self, pref: str, suff: str) -> int:
return self.d.get((pref, suff), -1)
# Your WordFilter object will be instantiated and called as such:
# obj = WordFilter(words)
# param_1 = obj.f(pref,suff)
| WordFilter |
python | pytorch__pytorch | torch/ao/nn/quantized/modules/__init__.py | {
"start": 1623,
"end": 3833
} | class ____(torch.nn.Module):
r"""Quantizes an incoming tensor
Args:
`scale`: scale of the output Quantized Tensor
`zero_point`: zero_point of output Quantized Tensor
`dtype`: data type of output Quantized Tensor
`factory_kwargs`: Dictionary of kwargs used for configuring initialization
of internal buffers. Currently, `device` and `dtype` are supported.
Example: `factory_kwargs={'device': 'cuda', 'dtype': torch.float64}`
will initialize internal buffers as type `torch.float64` on the current CUDA device.
Note that `dtype` only applies to floating-point buffers.
Examples::
>>> t = torch.tensor([[1., -1.], [1., -1.]])
>>> scale, zero_point, dtype = 1.0, 2, torch.qint8
>>> qm = Quantize(scale, zero_point, dtype)
>>> # xdoctest: +SKIP
>>> qt = qm(t)
>>> print(qt)
tensor([[ 1., -1.],
[ 1., -1.]], size=(2, 2), dtype=torch.qint8, scale=1.0, zero_point=2)
"""
scale: torch.Tensor
zero_point: torch.Tensor
def __init__(self, scale, zero_point, dtype, factory_kwargs=None):
factory_kwargs = torch.nn.factory_kwargs(factory_kwargs)
super().__init__()
self.register_buffer("scale", torch.tensor([scale], **factory_kwargs))
self.register_buffer(
"zero_point",
torch.tensor(
[zero_point],
dtype=torch.long,
**{k: v for k, v in factory_kwargs.items() if k != "dtype"},
),
)
self.dtype = dtype
def forward(self, X):
return torch.quantize_per_tensor(
X, float(self.scale), int(self.zero_point), self.dtype
)
@staticmethod
def from_float(mod, use_precomputed_fake_quant=False):
assert hasattr(mod, "activation_post_process")
scale, zero_point = mod.activation_post_process.calculate_qparams()
return Quantize(
scale.float().item(),
zero_point.long().item(),
mod.activation_post_process.dtype,
)
def extra_repr(self):
return f"scale={self.scale}, zero_point={self.zero_point}, dtype={self.dtype}"
| Quantize |
python | pandas-dev__pandas | pandas/core/arrays/arrow/extension_types.py | {
"start": 1612,
"end": 5476
} | class ____(pyarrow.ExtensionType):
def __init__(self, subtype, closed: IntervalClosedType) -> None:
# attributes need to be set first before calling
# super init (as that calls serialize)
assert closed in VALID_CLOSED
self._closed: IntervalClosedType = closed
if not isinstance(subtype, pyarrow.DataType):
subtype = pyarrow.type_for_alias(str(subtype))
self._subtype = subtype
storage_type = pyarrow.struct([("left", subtype), ("right", subtype)])
pyarrow.ExtensionType.__init__(self, storage_type, "pandas.interval")
@property
def subtype(self):
return self._subtype
@property
def closed(self) -> IntervalClosedType:
return self._closed
def __arrow_ext_serialize__(self) -> bytes:
metadata = {"subtype": str(self.subtype), "closed": self.closed}
return json.dumps(metadata).encode()
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType:
metadata = json.loads(serialized.decode())
subtype = pyarrow.type_for_alias(metadata["subtype"])
closed = metadata["closed"]
return ArrowIntervalType(subtype, closed)
def __eq__(self, other):
if isinstance(other, pyarrow.BaseExtensionType):
return (
type(self) == type(other)
and self.subtype == other.subtype
and self.closed == other.closed
)
else:
return NotImplemented
def __ne__(self, other) -> bool:
return not self == other
def __hash__(self) -> int:
return hash((str(self), str(self.subtype), self.closed))
def to_pandas_dtype(self) -> IntervalDtype:
return IntervalDtype(self.subtype.to_pandas_dtype(), self.closed)
# register the type with a dummy instance
_interval_type = ArrowIntervalType(pyarrow.int64(), "left")
pyarrow.register_extension_type(_interval_type)
_ERROR_MSG = """\
Disallowed deserialization of 'arrow.py_extension_type':
storage_type = {storage_type}
serialized = {serialized}
pickle disassembly:\n{pickle_disassembly}
Reading of untrusted Parquet or Feather files with a PyExtensionType column
allows arbitrary code execution.
If you trust this file, you can enable reading the extension type by one of:
- upgrading to pyarrow >= 14.0.1, and call `pa.PyExtensionType.set_auto_load(True)`
- install pyarrow-hotfix (`pip install pyarrow-hotfix`) and disable it by running
`import pyarrow_hotfix; pyarrow_hotfix.uninstall()`
We strongly recommend updating your Parquet/Feather files to use extension types
derived from `pyarrow.ExtensionType` instead, and register this type explicitly.
"""
def patch_pyarrow() -> None:
# starting from pyarrow 14.0.1, it has its own mechanism
if not pa_version_under14p1:
return
# if https://github.com/pitrou/pyarrow-hotfix was installed and enabled
if getattr(pyarrow, "_hotfix_installed", False):
return
class ForbiddenExtensionType(pyarrow.ExtensionType):
def __arrow_ext_serialize__(self) -> bytes:
return b""
@classmethod
def __arrow_ext_deserialize__(cls, storage_type, serialized):
import io
import pickletools
out = io.StringIO()
pickletools.dis(serialized, out)
raise RuntimeError(
_ERROR_MSG.format(
storage_type=storage_type,
serialized=serialized,
pickle_disassembly=out.getvalue(),
)
)
pyarrow.unregister_extension_type("arrow.py_extension_type")
pyarrow.register_extension_type(
ForbiddenExtensionType(pyarrow.null(), "arrow.py_extension_type")
)
pyarrow._hotfix_installed = True
patch_pyarrow()
| ArrowIntervalType |
python | pyca__cryptography | src/cryptography/hazmat/primitives/serialization/pkcs7.py | {
"start": 1673,
"end": 6165
} | class ____:
def __init__(
self,
data: utils.Buffer | None = None,
signers: list[
tuple[
x509.Certificate,
PKCS7PrivateKeyTypes,
PKCS7HashTypes,
padding.PSS | padding.PKCS1v15 | None,
]
] = [],
additional_certs: list[x509.Certificate] = [],
):
self._data = data
self._signers = signers
self._additional_certs = additional_certs
def set_data(self, data: utils.Buffer) -> PKCS7SignatureBuilder:
_check_byteslike("data", data)
if self._data is not None:
raise ValueError("data may only be set once")
return PKCS7SignatureBuilder(data, self._signers)
def add_signer(
self,
certificate: x509.Certificate,
private_key: PKCS7PrivateKeyTypes,
hash_algorithm: PKCS7HashTypes,
*,
rsa_padding: padding.PSS | padding.PKCS1v15 | None = None,
) -> PKCS7SignatureBuilder:
if not isinstance(
hash_algorithm,
(
hashes.SHA224,
hashes.SHA256,
hashes.SHA384,
hashes.SHA512,
),
):
raise TypeError(
"hash_algorithm must be one of hashes.SHA224, "
"SHA256, SHA384, or SHA512"
)
if not isinstance(certificate, x509.Certificate):
raise TypeError("certificate must be a x509.Certificate")
if not isinstance(
private_key, (rsa.RSAPrivateKey, ec.EllipticCurvePrivateKey)
):
raise TypeError("Only RSA & EC keys are supported at this time.")
if rsa_padding is not None:
if not isinstance(rsa_padding, (padding.PSS, padding.PKCS1v15)):
raise TypeError("Padding must be PSS or PKCS1v15")
if not isinstance(private_key, rsa.RSAPrivateKey):
raise TypeError("Padding is only supported for RSA keys")
return PKCS7SignatureBuilder(
self._data,
[
*self._signers,
(certificate, private_key, hash_algorithm, rsa_padding),
],
)
def add_certificate(
self, certificate: x509.Certificate
) -> PKCS7SignatureBuilder:
if not isinstance(certificate, x509.Certificate):
raise TypeError("certificate must be a x509.Certificate")
return PKCS7SignatureBuilder(
self._data, self._signers, [*self._additional_certs, certificate]
)
def sign(
self,
encoding: serialization.Encoding,
options: Iterable[PKCS7Options],
backend: typing.Any = None,
) -> bytes:
if len(self._signers) == 0:
raise ValueError("Must have at least one signer")
if self._data is None:
raise ValueError("You must add data to sign")
options = list(options)
if not all(isinstance(x, PKCS7Options) for x in options):
raise ValueError("options must be from the PKCS7Options enum")
if encoding not in (
serialization.Encoding.PEM,
serialization.Encoding.DER,
serialization.Encoding.SMIME,
):
raise ValueError(
"Must be PEM, DER, or SMIME from the Encoding enum"
)
# Text is a meaningless option unless it is accompanied by
# DetachedSignature
if (
PKCS7Options.Text in options
and PKCS7Options.DetachedSignature not in options
):
raise ValueError(
"When passing the Text option you must also pass "
"DetachedSignature"
)
if PKCS7Options.Text in options and encoding in (
serialization.Encoding.DER,
serialization.Encoding.PEM,
):
raise ValueError(
"The Text option is only available for SMIME serialization"
)
# No attributes implies no capabilities so we'll error if you try to
# pass both.
if (
PKCS7Options.NoAttributes in options
and PKCS7Options.NoCapabilities in options
):
raise ValueError(
"NoAttributes is a superset of NoCapabilities. Do not pass "
"both values."
)
return rust_pkcs7.sign_and_serialize(self, encoding, options)
| PKCS7SignatureBuilder |
python | keon__algorithms | algorithms/compression/huffman_coding.py | {
"start": 360,
"end": 973
} | class ____:
def __init__(self, frequency=0, sign=None, left=None, right=None):
self.frequency = frequency
self.sign = sign
self.left = left
self.right = right
def __lt__(self, other):
return self.frequency < other.frequency
def __gt__(self, other):
return self.frequency > other.frequency
def __eq__(self, other):
return self.frequency == other.frequency
def __str__(self):
return "<ch: {0}: {1}>".format(self.sign, self.frequency)
def __repr__(self):
return "<ch: {0}: {1}>".format(self.sign, self.frequency)
| Node |
python | getsentry__sentry-python | sentry_sdk/integrations/bottle.py | {
"start": 1215,
"end": 4444
} | class ____(Integration):
identifier = "bottle"
origin = f"auto.http.{identifier}"
transaction_style = ""
def __init__(
self,
transaction_style="endpoint", # type: str
*,
failed_request_status_codes=_DEFAULT_FAILED_REQUEST_STATUS_CODES, # type: Set[int]
):
# type: (...) -> None
if transaction_style not in TRANSACTION_STYLE_VALUES:
raise ValueError(
"Invalid value for transaction_style: %s (must be in %s)"
% (transaction_style, TRANSACTION_STYLE_VALUES)
)
self.transaction_style = transaction_style
self.failed_request_status_codes = failed_request_status_codes
@staticmethod
def setup_once():
# type: () -> None
version = parse_version(BOTTLE_VERSION)
_check_minimum_version(BottleIntegration, version)
old_app = Bottle.__call__
@ensure_integration_enabled(BottleIntegration, old_app)
def sentry_patched_wsgi_app(self, environ, start_response):
# type: (Any, Dict[str, str], Callable[..., Any]) -> _ScopedResponse
middleware = SentryWsgiMiddleware(
lambda *a, **kw: old_app(self, *a, **kw),
span_origin=BottleIntegration.origin,
)
return middleware(environ, start_response)
Bottle.__call__ = sentry_patched_wsgi_app
old_handle = Bottle._handle
@functools.wraps(old_handle)
def _patched_handle(self, environ):
# type: (Bottle, Dict[str, Any]) -> Any
integration = sentry_sdk.get_client().get_integration(BottleIntegration)
if integration is None:
return old_handle(self, environ)
scope = sentry_sdk.get_isolation_scope()
scope._name = "bottle"
scope.add_event_processor(
_make_request_event_processor(self, bottle_request, integration)
)
res = old_handle(self, environ)
return res
Bottle._handle = _patched_handle
old_make_callback = Route._make_callback
@functools.wraps(old_make_callback)
def patched_make_callback(self, *args, **kwargs):
# type: (Route, *object, **object) -> Any
prepared_callback = old_make_callback(self, *args, **kwargs)
integration = sentry_sdk.get_client().get_integration(BottleIntegration)
if integration is None:
return prepared_callback
def wrapped_callback(*args, **kwargs):
# type: (*object, **object) -> Any
try:
res = prepared_callback(*args, **kwargs)
except Exception as exception:
_capture_exception(exception, handled=False)
raise exception
if (
isinstance(res, HTTPResponse)
and res.status_code in integration.failed_request_status_codes
):
_capture_exception(res, handled=True)
return res
return wrapped_callback
Route._make_callback = patched_make_callback
| BottleIntegration |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 811,
"end": 1290
} | class ____(ValidationError, GreatExpectationsError):
def __init__(self, message, validation_error=None) -> None:
self.message = message
self.messages: Union[List[str], List[Any], Dict] = []
if validation_error is not None:
self.messages = validation_error.messages
@override
def __str__(self) -> str:
if self.message is None:
return str(self.messages)
return self.message
| GreatExpectationsValidationError |
python | kamyu104__LeetCode-Solutions | Python/maximum-total-area-occupied-by-pistons.py | {
"start": 933,
"end": 1741
} | class ____(object):
def maxArea(self, height, positions, directions):
"""
:type height: int
:type positions: List[int]
:type directions: str
:rtype: int
"""
diff = collections.defaultdict(int)
for d, i in itertools.izip(directions, positions):
if d == 'U':
diff[height-i] -= 1
diff[(height-i)+height] += 1
else:
diff[i] += 1
diff[i+height] -= 1
result = total = sum(positions)
cnt = directions.count('U')
prev = 0
for t, d in sorted(diff.iteritems()):
total += (t-prev)*(-(len(directions)-cnt)+cnt)
result = max(result, total)
cnt += d
prev = t
return result
| Solution2 |
python | spyder-ide__spyder | external-deps/python-lsp-server/test/plugins/test_jedi_rename.py | {
"start": 300,
"end": 3236
} | class ____(Test1):
pass
"""
DOC_NAME_EXTRA = "test2.py"
DOC_EXTRA = """from test1 import Test1
x = Test1()
"""
DOC_NAME_SIMPLE = "test3.py"
DOC_SIMPLE = "foo = 12"
@pytest.fixture
def tmp_workspace(temp_workspace_factory):
return temp_workspace_factory(
{DOC_NAME: DOC, DOC_NAME_EXTRA: DOC_EXTRA, DOC_NAME_SIMPLE: DOC_SIMPLE}
)
def test_jedi_rename(tmp_workspace, config) -> None:
# rename the `Test1` class
position = {"line": 0, "character": 6}
DOC_URI = uris.from_fs_path(os.path.join(tmp_workspace.root_path, DOC_NAME))
doc = Document(DOC_URI, tmp_workspace)
result = pylsp_rename(config, tmp_workspace, doc, position, "ShouldBeRenamed")
assert len(result.keys()) == 1
changes = result.get("documentChanges")
assert len(changes) == 2
assert changes[0]["textDocument"]["uri"] == doc.uri
assert changes[0]["textDocument"]["version"] == doc.version
assert changes[0].get("edits") == [
{
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 5, "character": 0},
},
"newText": "class ShouldBeRenamed():\n pass\n\nclass Test2(ShouldBeRenamed):\n pass\n",
}
]
path = os.path.join(tmp_workspace.root_path, DOC_NAME_EXTRA)
uri_extra = uris.from_fs_path(path)
assert changes[1]["textDocument"]["uri"] == uri_extra
# This also checks whether documents not yet added via textDocument/didOpen
# but that do need to be renamed in the project have a `null` version
# number.
assert changes[1]["textDocument"]["version"] is None
expected = "from test1 import ShouldBeRenamed\nx = ShouldBeRenamed()\n"
if os.name == "nt":
# The .write method in the temp_workspace_factory functions writes
# Windows-style line-endings.
expected = expected.replace("\n", "\r\n")
assert changes[1].get("edits") == [
{
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 2, "character": 0},
},
"newText": expected,
}
]
# Regression test for issue python-lsp/python-lsp-server#413
# rename foo
position = {"line": 0, "character": 0}
DOC_URI = uris.from_fs_path(os.path.join(tmp_workspace.root_path, DOC_NAME_SIMPLE))
doc = Document(DOC_URI, tmp_workspace)
result = pylsp_rename(config, tmp_workspace, doc, position, "bar")
assert len(result.keys()) == 1
changes = result.get("documentChanges")
assert len(changes) == 1
assert changes[0]["textDocument"]["uri"] == doc.uri
assert changes[0]["textDocument"]["version"] == doc.version
assert changes[0].get("edits") == [
{
"range": {
"start": {"line": 0, "character": 0},
"end": {"line": 0, "character": 0},
},
"newText": "bar = 12",
}
]
| Test2 |
python | pytorch__pytorch | torch/fx/experimental/_backward_state.py | {
"start": 18,
"end": 967
} | class ____:
"""
BackwardState is used to pass Python hooks from the forwards pass
into the backwards pass in Dynamo+Compiled Autograd.
It is created by TorchDynamo and has special handling there.
Dynamo will pass an empty BackwardState to the forwards, then populate
members on it (via setattr) only after the forwards graph is finished.
Later on, in CompileAutograd we will inline and add the needed guards
on the BackwardState.
BackwardState is identified and has special handling in AOTAutograd.
During AOTAutograd:
1) BackwardState is an input to the forwards graph
2) It must only be used in the backwards
3) It will be empty in the forwards
4) In the forwards we add a wrapper to save it
5) In the backwards it becomes an input
6) There can only be one per graph
BackwardState requires CompiledAutograd.
"""
proxy: torch.fx.Proxy
| BackwardState |
python | ansible__ansible | lib/ansible/plugins/strategy/free.py | {
"start": 1892,
"end": 15590
} | class ____(StrategyBase):
# This strategy manages throttling on its own, so we don't want it done in queue_task
ALLOW_BASE_THROTTLING = False
def __init__(self, tqm):
super(StrategyModule, self).__init__(tqm)
self._host_pinned = False
def run(self, iterator, play_context):
"""
The "free" strategy is a bit more complex, in that it allows tasks to
be sent to hosts as quickly as they can be processed. This means that
some hosts may finish very quickly if run tasks result in little or no
work being done versus other systems.
The algorithm used here also tries to be more "fair" when iterating
through hosts by remembering the last host in the list to be given a task
and starting the search from there as opposed to the top of the hosts
list again, which would end up favoring hosts near the beginning of the
list.
"""
# the last host to be given a task
last_host = 0
result = self._tqm.RUN_OK
# start with all workers being counted as being free
workers_free = len(self._workers)
self._set_hosts_cache(iterator._play)
if iterator._play.max_fail_percentage is not None:
display.warning("Using max_fail_percentage with the free strategy is not supported, as tasks are executed independently on each host")
work_to_do = True
while work_to_do and not self._tqm._terminated:
hosts_left = self.get_hosts_left(iterator)
if len(hosts_left) == 0:
self._tqm.send_callback('v2_playbook_on_no_hosts_remaining')
result = False
break
work_to_do = False # assume we have no more work to do
starting_host = last_host # save current position so we know when we've looped back around and need to break
# try and find an unblocked host with a task to run
host_results = []
meta_task_dummy_results_count = 0
while True:
host = hosts_left[last_host]
display.debug("next free host: %s" % host)
host_name = host.get_name()
# peek at the next task for the host, to see if there's
# anything to do do for this host
(state, task) = iterator.get_next_task_for_host(host, peek=True)
display.debug("free host state: %s" % state, host=host_name)
display.debug("free host task: %s" % task, host=host_name)
# check if there is work to do, either there is a task or the host is still blocked which could
# mean that it is processing an include task and after its result is processed there might be
# more tasks to run
if (task or self._blocked_hosts.get(host_name, False)) and not self._tqm._unreachable_hosts.get(host_name, False):
display.debug("this host has work to do", host=host_name)
# set the flag so the outer loop knows we've still found
# some work which needs to be done
work_to_do = True
if not self._tqm._unreachable_hosts.get(host_name, False) and task:
# check to see if this host is blocked (still executing a previous task)
if not self._blocked_hosts.get(host_name, False):
display.debug("getting variables", host=host_name)
task_vars = self._variable_manager.get_vars(play=iterator._play, host=host, task=task,
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all)
self.add_tqm_variables(task_vars, play=iterator._play)
templar = TemplateEngine(loader=self._loader, variables=task_vars)
display.debug("done getting variables", host=host_name)
try:
throttle = int(templar.template(task.throttle))
except Exception as ex:
raise AnsibleError("Failed to convert the throttle value to an integer.", obj=task.throttle) from ex
if throttle > 0:
same_tasks = 0
for worker in self._workers:
if worker and worker.is_alive() and worker._task._uuid == task._uuid:
same_tasks += 1
display.debug("task: %s, same_tasks: %d" % (task.get_name(), same_tasks))
if same_tasks >= throttle:
break
# advance the host, mark the host blocked, and queue it
self._blocked_hosts[host_name] = True
iterator.set_state_for_host(host.name, state)
if isinstance(task, Handler):
task.remove_host(host)
try:
action = action_loader.get(task.action, class_only=True, collection_list=task.collections)
except KeyError:
# we don't care here, because the action may simply not have a
# corresponding action plugin
action = None
task.post_validate_attribute("name", templar=templar)
run_once = templar.template(task.run_once) or action and getattr(action, 'BYPASS_HOST_LOOP', False)
if run_once:
if action and getattr(action, 'BYPASS_HOST_LOOP', False):
raise AnsibleError("The '%s' module bypasses the host loop, which is currently not supported in the free strategy "
"and would instead execute for every host in the inventory list." % task.action, obj=task._ds)
else:
display.warning("Using run_once with the free strategy is not currently supported. This task will still be "
"executed for every host in the inventory list.")
if task.action in C._ACTION_META:
if self._host_pinned:
meta_task_dummy_results_count += 1
workers_free -= 1
self._execute_meta(task, play_context, iterator, target_host=host)
self._blocked_hosts[host_name] = False
else:
# handle step if needed, skip meta actions as they are used internally
if not self._step or self._take_step(task, host_name):
if task.any_errors_fatal:
display.warning("Using any_errors_fatal with the free strategy is not supported, "
"as tasks are executed independently on each host")
if isinstance(task, Handler):
self._tqm.send_callback('v2_playbook_on_handler_task_start', task)
else:
self._tqm.send_callback('v2_playbook_on_task_start', task, is_conditional=False)
self._queue_task(host, task, task_vars, play_context)
# each task is counted as a worker being busy
workers_free -= 1
del task_vars
else:
display.debug("%s is blocked, skipping for now" % host_name)
# all workers have tasks to do (and the current host isn't done with the play).
# loop back to starting host and break out
if self._host_pinned and workers_free == 0 and work_to_do:
last_host = starting_host
break
# move on to the next host and make sure we
# haven't gone past the end of our hosts list
last_host += 1
if last_host > len(hosts_left) - 1:
last_host = 0
# if we've looped around back to the start, break out
if last_host == starting_host:
break
results = self._process_pending_results(iterator)
host_results.extend(results)
# each result is counted as a worker being free again
workers_free += len(results) + meta_task_dummy_results_count
self.update_active_connections(results)
included_files = IncludedFile.process_include_results(
host_results,
iterator=iterator,
loader=self._loader,
variable_manager=self._variable_manager
)
if len(included_files) > 0:
all_blocks = dict((host, []) for host in hosts_left)
failed_includes_hosts = set()
for included_file in included_files:
display.debug("collecting new blocks for %s" % included_file)
is_handler = False
try:
if included_file._is_role:
new_ir = self._copy_included_file(included_file)
new_blocks, handler_blocks = new_ir.get_block_list(
play=iterator._play,
variable_manager=self._variable_manager,
loader=self._loader,
)
else:
is_handler = isinstance(included_file._task, Handler)
new_blocks = self._load_included_file(
included_file,
iterator=iterator,
is_handler=is_handler,
)
# let PlayIterator know about any new handlers included via include_role or
# import_role within include_role/include_taks
iterator.handlers = [h for b in iterator._play.handlers for h in b.block]
except AnsibleParserError:
raise
except AnsibleError as ex:
# FIXME: send the error to the callback; don't directly write to display here
display.error(ex)
for r in included_file._results:
r._return_data['failed'] = True
r._return_data['reason'] = str(ex)
self._tqm._stats.increment('failures', r.host.name)
self._tqm.send_callback('v2_runner_on_failed', r)
failed_includes_hosts.add(r.host)
continue
else:
# since we skip incrementing the stats when the task result is
# first processed, we do so now for each host in the list
for host in included_file._hosts:
self._tqm._stats.increment('ok', host.name)
self._tqm.send_callback('v2_playbook_on_include', included_file)
for new_block in new_blocks:
if is_handler:
for task in new_block.block:
task.notified_hosts = included_file._hosts[:]
final_block = new_block
else:
task_vars = self._variable_manager.get_vars(
play=iterator._play,
task=new_block.get_first_parent_include(),
_hosts=self._hosts_cache,
_hosts_all=self._hosts_cache_all,
)
final_block = new_block.filter_tagged_tasks(task_vars)
for host in hosts_left:
if host in included_file._hosts:
all_blocks[host].append(final_block)
display.debug("done collecting new blocks for %s" % included_file)
for host in failed_includes_hosts:
self._tqm._failed_hosts[host.name] = True
iterator.mark_host_failed(host)
display.debug("adding all collected blocks from %d included file(s) to iterator" % len(included_files))
for host in hosts_left:
iterator.add_tasks(host, all_blocks[host])
display.debug("done adding collected blocks to iterator")
# pause briefly so we don't spin lock
time.sleep(C.DEFAULT_INTERNAL_POLL_INTERVAL)
# collect all the final results
results = self._wait_on_pending_results(iterator)
# run the base class run() method, which executes the cleanup function
# and runs any outstanding handlers which have been triggered
return super(StrategyModule, self).run(iterator, play_context, result)
| StrategyModule |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 3286,
"end": 3920
} | class ____(PrefectBaseModel, OperatorMixin):
"""Filter by `FlowRun.tags`."""
all_: Optional[List[str]] = Field(
default=None,
examples=[["tag-1", "tag-2"]],
description=(
"A list of tags. Flow runs will be returned only if their tags are a"
" superset of the list"
),
)
any_: Optional[List[str]] = Field(
default=None,
examples=[["tag-1", "tag-2"]],
description="A list of tags to include",
)
is_null_: Optional[bool] = Field(
default=None, description="If true, only include flow runs without tags"
)
| FlowRunFilterTags |
python | aimacode__aima-python | text.py | {
"start": 8326,
"end": 10856
} | class ____:
"""Metadata for a document: title and url; maybe add others later."""
def __init__(self, title, url, nwords):
self.title = title
self.url = url
self.nwords = nwords
def words(text, reg=re.compile('[a-z0-9]+')):
"""Return a list of the words in text, ignoring punctuation and
converting everything to lowercase (to canonicalize).
>>> words("``EGAD!'' Edgar cried.")
['egad', 'edgar', 'cried']
"""
return reg.findall(text.lower())
def canonicalize(text):
"""Return a canonical text: only lowercase letters and blanks.
>>> canonicalize("``EGAD!'' Edgar cried.")
'egad edgar cried'
"""
return ' '.join(words(text))
# ______________________________________________________________________________
# Example application (not in book): decode a cipher.
# A cipher is a code that substitutes one character for another.
# A shift cipher is a rotation of the letters in the alphabet,
# such as the famous rot13, which maps A to N, B to M, etc.
alphabet = 'abcdefghijklmnopqrstuvwxyz'
# Encoding
def shift_encode(plaintext, n):
"""Encode text with a shift cipher that moves each letter up by n letters.
>>> shift_encode('abc z', 1)
'bcd a'
"""
return encode(plaintext, alphabet[n:] + alphabet[:n])
def rot13(plaintext):
"""Encode text by rotating letters by 13 spaces in the alphabet.
>>> rot13('hello')
'uryyb'
>>> rot13(rot13('hello'))
'hello'
"""
return shift_encode(plaintext, 13)
def translate(plaintext, function):
"""Translate chars of a plaintext with the given function."""
result = ""
for char in plaintext:
result += function(char)
return result
def maketrans(from_, to_):
"""Create a translation table and return the proper function."""
trans_table = {}
for n, char in enumerate(from_):
trans_table[char] = to_[n]
return lambda char: trans_table.get(char, char)
def encode(plaintext, code):
"""Encode text using a code which is a permutation of the alphabet."""
trans = maketrans(alphabet + alphabet.upper(), code + code.upper())
return translate(plaintext, trans)
def bigrams(text):
"""Return a list of pairs in text (a sequence of letters or words).
>>> bigrams('this')
['th', 'hi', 'is']
>>> bigrams(['this', 'is', 'a', 'test'])
[['this', 'is'], ['is', 'a'], ['a', 'test']]
"""
return [text[i:i + 2] for i in range(len(text) - 1)]
# Decoding a Shift (or Caesar) Cipher
| Document |
python | Textualize__textual | src/textual/widgets/_option_list.py | {
"start": 3009,
"end": 34228
} | class ____(ScrollView, can_focus=True):
"""A navigable list of options."""
ALLOW_SELECT = False
BINDINGS: ClassVar[list[BindingType]] = [
Binding("down", "cursor_down", "Down", show=False),
Binding("end", "last", "Last", show=False),
Binding("enter", "select", "Select", show=False),
Binding("home", "first", "First", show=False),
Binding("pagedown", "page_down", "Page Down", show=False),
Binding("pageup", "page_up", "Page Up", show=False),
Binding("up", "cursor_up", "Up", show=False),
]
"""
| Key(s) | Description |
| :- | :- |
| down | Move the highlight down. |
| end | Move the highlight to the last option. |
| enter | Select the current option. |
| home | Move the highlight to the first option. |
| pagedown | Move the highlight down a page of options. |
| pageup | Move the highlight up a page of options. |
| up | Move the highlight up. |
"""
DEFAULT_CSS = """
OptionList {
height: auto;
max-height: 100%;
color: $foreground;
overflow-x: hidden;
border: tall $border-blurred;
padding: 0 1;
background: $surface;
&.-textual-compact {
border: none !important;
padding: 0;
& > .option-list--option {
padding: 0;
}
}
& > .option-list--option-highlighted {
color: $block-cursor-blurred-foreground;
background: $block-cursor-blurred-background;
text-style: $block-cursor-blurred-text-style;
}
&:focus {
border: tall $border;
background-tint: $foreground 5%;
& > .option-list--option-highlighted {
color: $block-cursor-foreground;
background: $block-cursor-background;
text-style: $block-cursor-text-style;
}
}
& > .option-list--separator {
color: $foreground 15%;
}
& > .option-list--option-highlighted {
color: $foreground;
background: $block-cursor-blurred-background;
}
& > .option-list--option-disabled {
color: $text-disabled;
}
& > .option-list--option-hover {
background: $block-hover-background;
}
}
"""
COMPONENT_CLASSES: ClassVar[set[str]] = {
"option-list--option",
"option-list--option-disabled",
"option-list--option-highlighted",
"option-list--option-hover",
"option-list--separator",
}
"""
| Class | Description |
| :- | :- |
| `option-list--option` | Target options that are not disabled, highlighted or have the mouse over them. |
| `option-list--option-disabled` | Target disabled options. |
| `option-list--option-highlighted` | Target the highlighted option. |
| `option-list--option-hover` | Target an option that has the mouse over it. |
| `option-list--separator` | Target the separators. |
"""
highlighted: reactive[int | None] = reactive(None)
"""The index of the currently-highlighted option, or `None` if no option is highlighted."""
_mouse_hovering_over: reactive[int | None] = reactive(None)
"""The index of the option under the mouse or `None`."""
compact: reactive[bool] = reactive(False, toggle_class="-textual-compact")
"""Enable compact display?"""
class OptionMessage(Message):
"""Base class for all option messages."""
def __init__(self, option_list: OptionList, option: Option, index: int) -> None:
"""Initialise the option message.
Args:
option_list: The option list that owns the option.
index: The index of the option that the message relates to.
"""
super().__init__()
self.option_list: OptionList = option_list
"""The option list that sent the message."""
self.option: Option = option
"""The highlighted option."""
self.option_id: str | None = option.id
"""The ID of the option that the message relates to."""
self.option_index: int = index
"""The index of the option that the message relates to."""
@property
def control(self) -> OptionList:
"""The option list that sent the message.
This is an alias for [`OptionMessage.option_list`][textual.widgets.OptionList.OptionMessage.option_list]
and is used by the [`on`][textual.on] decorator.
"""
return self.option_list
def __rich_repr__(self) -> rich.repr.Result:
try:
yield "option_list", self.option_list
yield "option", self.option
yield "option_id", self.option_id
yield "option_index", self.option_index
except AttributeError:
return
class OptionHighlighted(OptionMessage):
"""Message sent when an option is highlighted.
Can be handled using `on_option_list_option_highlighted` in a subclass of
`OptionList` or in a parent node in the DOM.
"""
class OptionSelected(OptionMessage):
"""Message sent when an option is selected.
Can be handled using `on_option_list_option_selected` in a subclass of
`OptionList` or in a parent node in the DOM.
"""
def __init__(
self,
*content: OptionListContent,
name: str | None = None,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
markup: bool = True,
compact: bool = False,
):
"""Initialize an OptionList.
Args:
*content: Positional arguments become the options.
name: Name of the OptionList.
id: The ID of the OptionList in the DOM.
classes: Initial CSS classes.
disabled: Disable the widget?
markup: Strips should be rendered as content markup if `True`, or plain text if `False`.
compact: Enable compact style?
"""
super().__init__(name=name, id=id, classes=classes, disabled=disabled)
self._markup = markup
self.compact = compact
self._options: list[Option] = []
"""List of options."""
self._id_to_option: dict[str, Option] = {}
"""Maps an Options's ID on to the option itself."""
self._option_to_index: dict[Option, int] = {}
"""Maps an Option to its index in self._options."""
self._option_render_cache: LRUCache[tuple[Option, Style, Spacing], list[Strip]]
self._option_render_cache = LRUCache(maxsize=1024 * 2)
"""Caches rendered options."""
self._line_cache = _LineCache()
"""Used to cache additional information that can be recomputed."""
self.add_options(content)
if self._options:
# TODO: Inherited from previous version. Do we always want this?
self.action_first()
@property
def options(self) -> Sequence[Option]:
    """Sequence of options in the OptionList.

    !!! note "This is read-only"
    """
    # Returns the internal list directly; callers must not mutate it.
    return self._options
@property
def option_count(self) -> int:
    """The number of options."""
    # Dividers are flags on options, not separate entries, so they are
    # not counted here.
    return len(self._options)
@property
def highlighted_option(self) -> Option | None:
    """The currently highlighted option, or `None` if no option is highlighted.

    Returns:
        An Option, or `None`.
    """
    index = self.highlighted
    return None if index is None else self.options[index]
def clear_options(self) -> Self:
    """Clear the content of the option list.

    Returns:
        The `OptionList` instance.
    """
    self._options.clear()
    self._line_cache.clear()
    self._option_render_cache.clear()
    self._id_to_option.clear()
    self._option_to_index.clear()
    self.highlighted = None
    self.refresh()
    # Reset the scroll position and rebuild the (now empty) line metrics.
    self.scroll_y = 0
    self._update_lines()
    return self
def set_options(self, options: Iterable[OptionListContent]) -> Self:
    """Set options, potentially clearing existing options.

    Args:
        options: Options to set.

    Returns:
        The `OptionList` instance.
    """
    # NOTE(review): this duplicates most of `clear_options`, minus its
    # `refresh()` / `_update_lines()` calls (`add_options` below handles the
    # refresh here); consider consolidating, mindful of that difference.
    self._options.clear()
    self._line_cache.clear()
    self._option_render_cache.clear()
    self._id_to_option.clear()
    self._option_to_index.clear()
    self.highlighted = None
    self.scroll_y = 0
    self.add_options(options)
    return self
def add_options(self, new_options: Iterable[OptionListContent]) -> Self:
    """Add new options.

    Args:
        new_options: Content of new options; a `None` entry marks a divider
            after the preceding option.

    Returns:
        The `OptionList` instance.

    Raises:
        DuplicateID: If any new option's ID duplicates another new option's ID,
            or the ID of an option already in the list. Raised *before* any
            state is modified, so a failed call leaves the list unchanged.
    """
    new_options = list(new_options)
    option_ids = [
        option._id
        for option in new_options
        if isinstance(option, Option) and option._id is not None
    ]
    if len(option_ids) != len(set(option_ids)):
        raise DuplicateID(
            "New options contain duplicated IDs; Ensure that the IDs are unique."
        )
    # Validate against pre-existing IDs up front: previously this error was
    # raised mid-loop, after some options had already been appended, leaving
    # the list partially updated.
    for new_option in new_options:
        if (
            isinstance(new_option, Option)
            and new_option._id is not None
            and new_option._id in self._id_to_option
        ):
            raise DuplicateID(f"Unable to add {new_option!r} due to duplicate ID")
    if not new_options:
        return self
    if new_options[0] is None:
        # Handle the case where the first new option is None,
        # which would update the previous option.
        # This is sub-optimal, but hopefully not a common occurrence
        self._clear_caches()
    options = self._options
    add_option = self._options.append
    for prompt in new_options:
        if isinstance(prompt, Option):
            option = prompt
        elif prompt is None:
            if options:
                options[-1]._divider = True
            continue
        else:
            option = Option(prompt)
        self._option_to_index[option] = len(options)
        if option._id is not None:
            # IDs were pre-validated above; safe to record unconditionally.
            self._id_to_option[option._id] = option
        add_option(option)
    if self.is_mounted:
        self.refresh(layout=self.styles.auto_dimensions)
    self._update_lines()
    return self
def add_option(self, option: Option | VisualType | None = None) -> Self:
    """Add a new option to the end of the option list.

    Args:
        option: New option to add, or `None` for a separator.

    Returns:
        The `OptionList` instance.

    Raises:
        DuplicateID: If there is an attempt to use a duplicate ID.
    """
    # Delegate to the batch method; `add_options` already returns `self`.
    return self.add_options([option])
def get_option(self, option_id: str) -> Option:
    """Get the option with the given ID.

    Args:
        option_id: The ID of the option to get.

    Returns:
        The option with the ID.

    Raises:
        OptionDoesNotExist: If no option has the given ID.
    """
    options_by_id = self._id_to_option
    if option_id not in options_by_id:
        raise OptionDoesNotExist(
            f"There is no option with an ID of {option_id!r}"
        ) from None
    return options_by_id[option_id]
def get_option_index(self, option_id: str) -> int:
    """Get the index (offset in `self.options`) of the option with the given ID.

    Args:
        option_id: The ID of the option to get the index of.

    Returns:
        The index of the item with the given ID.

    Raises:
        OptionDoesNotExist: If no option has the given ID.
    """
    # `get_option` performs the ID lookup (and raises for unknown IDs);
    # the reverse map then yields the option's position.
    return self._option_to_index[self.get_option(option_id)]
def get_option_at_index(self, index: int) -> Option:
    """Get the option at the given index.

    Args:
        index: The index of the option to get.

    Returns:
        The option at that index.

    Raises:
        OptionDoesNotExist: If there is no option with the given index.
    """
    try:
        # Note: negative indices index from the end (plain list semantics),
        # so only out-of-range values raise.
        return self._options[index]
    except IndexError:
        raise OptionDoesNotExist(
            f"There is no option with an index of {index}"
        ) from None
def _set_option_disabled(self, index: int, disabled: bool) -> Self:
    """Set the disabled state of an option in the list.

    Args:
        index: The index of the option to set the disabled state of.
        disabled: The disabled state to set.

    Returns:
        The `OptionList` instance.
    """
    self._options[index].disabled = disabled
    if index == self.highlighted:
        # NOTE(review): this moves the highlight even when *enabling* the
        # currently highlighted option — confirm that is intended.
        self.highlighted = _widget_navigation.find_next_enabled(
            self._options, anchor=index, direction=1
        )
    # TODO: Refresh only if the affected option is visible.
    self.refresh()
    return self
def enable_option_at_index(self, index: int) -> Self:
    """Enable the option at the given index.

    Args:
        index: The index of the option to enable.

    Returns:
        The `OptionList` instance.

    Raises:
        OptionDoesNotExist: If there is no option with the given index.
    """
    try:
        return self._set_option_disabled(index, False)
    except IndexError:
        raise OptionDoesNotExist(
            f"There is no option with an index of {index}"
        ) from None
def disable_option_at_index(self, index: int) -> Self:
    """Disable the option at the given index.

    Args:
        index: The index of the option to disable.

    Returns:
        The `OptionList` instance.

    Raises:
        OptionDoesNotExist: If there is no option with the given index.
    """
    try:
        return self._set_option_disabled(index, True)
    except IndexError:
        raise OptionDoesNotExist(
            f"There is no option with an index of {index}"
        ) from None
def enable_option(self, option_id: str) -> Self:
    """Enable the option with the given ID.

    Args:
        option_id: The ID of the option to enable.

    Returns:
        The `OptionList` instance.

    Raises:
        OptionDoesNotExist: If no option has the given ID.
    """
    index = self.get_option_index(option_id)
    return self.enable_option_at_index(index)
def disable_option(self, option_id: str) -> Self:
    """Disable the option with the given ID.

    Args:
        option_id: The ID of the option to disable.

    Returns:
        The `OptionList` instance.

    Raises:
        OptionDoesNotExist: If no option has the given ID.
    """
    index = self.get_option_index(option_id)
    return self.disable_option_at_index(index)
def _remove_option(self, option: Option) -> Self:
    """Remove the given option from the list.

    Args:
        option: The Option to remove.

    Returns:
        The `OptionList` instance.
    """
    index = self._option_to_index[option]
    self._mouse_hovering_over = None
    self._pre_remove_option(option, index)
    # Shift the recorded index of every option after the removed one.
    for later_option in self.options[index + 1 :]:
        self._option_to_index[later_option] -= 1
    removed = self._options.pop(index)
    if removed._id is not None:
        del self._id_to_option[removed._id]
    del self._option_to_index[removed]
    # Reassigning runs the reactive validator, clamping the highlight to
    # the shortened list.
    self.highlighted = self.highlighted
    self._clear_caches()
    return self
def _pre_remove_option(self, option: Option, index: int) -> None:
    """Hook called prior to removing an option.

    Args:
        option: Option being removed.
        index: Index of option being removed.
    """
    # Intentionally a no-op; a hook point for subclasses.
def remove_option(self, option_id: str) -> Self:
    """Remove the option with the given ID.

    Args:
        option_id: The ID of the option to remove.

    Returns:
        The `OptionList` instance.

    Raises:
        OptionDoesNotExist: If no option has the given ID.
    """
    # Resolve the ID first (may raise OptionDoesNotExist), then remove.
    return self._remove_option(self.get_option(option_id))
def remove_option_at_index(self, index: int) -> Self:
    """Remove the option at the given index.

    Args:
        index: The index of the option to remove.

    Returns:
        The `OptionList` instance.

    Raises:
        OptionDoesNotExist: If there is no option with the given index.
    """
    try:
        # Negative indices are accepted here (plain list semantics).
        option = self._options[index]
    except IndexError:
        raise OptionDoesNotExist(
            f"Unable to remove; there is no option at index {index}"
        ) from None
    return self._remove_option(option)
def _replace_option_prompt(self, index: int, prompt: VisualType) -> None:
    """Replace the prompt of an option in the list.

    Args:
        index: The index of the option to replace the prompt of.
        prompt: The new prompt for the option.

    Raises:
        OptionDoesNotExist: If there is no option with the given index.
    """
    option = self.get_option_at_index(index)
    option._set_prompt(prompt)
    # Cached renders and line metrics are stale once a prompt changes.
    self._clear_caches()
def replace_option_prompt(self, option_id: str, prompt: VisualType) -> Self:
    """Replace the prompt of the option with the given ID.

    Args:
        option_id: The ID of the option to replace the prompt of.
        prompt: The new prompt for the option.

    Returns:
        The `OptionList` instance.

    Raises:
        OptionDoesNotExist: If no option has the given ID.
    """
    index = self.get_option_index(option_id)
    self._replace_option_prompt(index, prompt)
    return self
def replace_option_prompt_at_index(self, index: int, prompt: VisualType) -> Self:
    """Replace the prompt of the option at the given index.

    Args:
        index: The index of the option to replace the prompt of.
        prompt: The new prompt for the option.

    Returns:
        The `OptionList` instance.

    Raises:
        OptionDoesNotExist: If there is no option with the given index.
    """
    # Delegates to the private helper, which also invalidates render caches.
    self._replace_option_prompt(index, prompt)
    return self
@property
def _lines(self) -> Sequence[tuple[int, int]]:
    """A sequence of pairs of ints for each line, used internally.

    The first int is the index of the option, and second is the line offset.

    !!! note "This is read-only"

    Returns:
        A sequence of tuples.
    """
    # Lazily (re)build the line metrics before exposing them.
    self._update_lines()
    return self._line_cache.lines
@property
def _heights(self) -> dict[int, int]:
    """Map of option index to its rendered height in lines (divider included)."""
    self._update_lines()
    return self._line_cache.heights
@property
def _index_to_line(self) -> dict[int, int]:
    """Map of option index to the y offset of the option's first line."""
    self._update_lines()
    return self._line_cache.index_to_line
def _clear_caches(self) -> None:
    """Invalidate cached renders and line metrics, then request a repaint."""
    self._option_render_cache.clear()
    self._line_cache.clear()
    self.refresh()
def notify_style_update(self) -> None:
    """Request a repaint when styles change."""
    self.refresh()
    super().notify_style_update()
def _on_resize(self) -> None:
    # Rendered strips and line metrics depend on the available width, so
    # they must be rebuilt after a resize.
    self._clear_caches()
def on_show(self) -> None:
    # Bring the highlighted option (if any) into view once visible.
    self.scroll_to_highlight()
def on_mount(self) -> None:
    # Build the initial line metrics once the widget is mounted.
    self._update_lines()
async def _on_click(self, event: events.Click) -> None:
    """React to the mouse being clicked on an item.

    Args:
        event: The click event.
    """
    # The option index is embedded in each rendered strip's style meta
    # (see `_get_option_render`), so the click target is read back from it.
    clicked_option: int | None = event.style.meta.get("option")
    if clicked_option is not None and not self._options[clicked_option].disabled:
        self.highlighted = clicked_option
        self.action_select()
def _get_left_gutter_width(self) -> int:
    """Returns the size of any left gutter that should be taken into account.

    Returns:
        The width of the left gutter.
    """
    # Always zero here; presumably an override point for subclasses that
    # draw a left gutter — confirm.
    return 0
def _on_mouse_move(self, event: events.MouseMove) -> None:
    """React to the mouse moving.

    Args:
        event: The mouse movement event.
    """
    # `None` when the pointer is not over an option.
    self._mouse_hovering_over = event.style.meta.get("option")
def _on_leave(self, _: events.Leave) -> None:
    """React to the mouse leaving the widget."""
    # Drop any hover highlight.
    self._mouse_hovering_over = None
def _get_visual(self, option: Option) -> Visual:
    """Get (and lazily cache) the visual for the given option.

    Args:
        option: An option.

    Returns:
        A Visual.
    """
    visual = option._visual
    if visual is None:
        # First request for this option: build the visual and memoize it
        # on the option itself.
        visual = visualize(self, option.prompt, markup=self._markup)
        option._visual = visual
    return visual
def _get_visual_from_index(self, index: int) -> Visual:
    """Get a visual from the given index.

    Args:
        index: An index (offset in self.options).

    Returns:
        A Visual.
    """
    return self._get_visual(self.get_option_at_index(index))
def _get_option_render(self, option: Option, style: Style) -> list[Strip]:
    """Get rendered option with a given style.

    Args:
        option: An option.
        style: Style of render.

    Returns:
        A list of strips.
    """
    padding = self.get_component_styles("option-list--option").padding
    render_width = self.scrollable_content_region.width
    width = render_width - self._get_left_gutter_width()
    # Padding participates in the cache key because it affects the render.
    cache_key = (option, style, padding)
    if (strips := self._option_render_cache.get(cache_key)) is None:
        visual = self._get_visual(option)
        if padding:
            visual = Padding(visual, padding)
        strips = visual.to_strips(self, visual, width, None, style)
        # Embed the option's index in the strips' meta so mouse handlers
        # can map a screen position back to an option.
        meta = {"option": self._option_to_index[option]}
        strips = [
            strip.extend_cell_length(width, style.rich_style).apply_meta(meta)
            for strip in strips
        ]
        if option._divider:
            # A divider renders as one extra horizontal-rule line
            # (note: `style` is deliberately rebound here).
            style = self.get_visual_style("option-list--separator")
            rule_segments = [Segment("─" * width, style.rich_style)]
            strips.append(Strip(rule_segments, width))
        self._option_render_cache[cache_key] = strips

    return strips
def _update_lines(self) -> None:
    """Update internal structures when new lines are added."""
    if not self.scrollable_content_region:
        # No usable content area yet (e.g. before layout); nothing to measure.
        return
    line_cache = self._line_cache
    lines = line_cache.lines
    # Resume from the first option not yet measured; the cache is built
    # incrementally and only fully rebuilt after `_clear_caches`.
    next_index = lines[-1][0] + 1 if lines else 0
    get_visual = self._get_visual
    width = self.scrollable_content_region.width - self._get_left_gutter_width()
    if next_index < len(self.options):
        padding = self.get_component_styles("option-list--option").padding
        for index, option in enumerate(self.options[next_index:], next_index):
            line_cache.index_to_line[index] = len(line_cache.lines)
            # A divider contributes one extra line to the option's height
            # (bool `_divider` is added as 0/1).
            line_count = (
                get_visual(option).get_height(self.styles, width - padding.width)
                + option._divider
            )
            line_cache.heights[index] = line_count
            line_cache.lines.extend(
                [(index, line_no) for line_no in range(0, line_count)]
            )

    # A divider on the last option is not shown, so exclude it from the
    # virtual height. (`lines` aliases `line_cache.lines` and thus reflects
    # the entries added above.)
    last_divider = self.options and self.options[-1]._divider
    virtual_size = Size(width, len(lines) - (1 if last_divider else 0))
    if virtual_size != self.virtual_size:
        self.virtual_size = virtual_size
        self._scroll_update(virtual_size)
def get_content_width(self, container: Size, viewport: Size) -> int:
    """Get maximum width of options."""
    if not self.options:
        return 0
    styles = self.styles
    get_visual_from_index = self._get_visual_from_index
    padding = self.get_component_styles("option-list--option").padding
    gutter_width = self._get_left_gutter_width()
    container_width = container.width
    # Width of the widest option, plus option padding and any left gutter.
    width = (
        max(
            get_visual_from_index(index).get_optimal_width(styles, container_width)
            for index in range(len(self.options))
        )
        + padding.width
        + gutter_width
    )
    return width
def get_content_height(self, container: Size, viewport: Size, width: int) -> int:
    """Get height for the given width."""
    styles = self.styles
    rules = cast(RulesMap, styles)
    padding_width = self.get_component_styles("option-list--option").padding.width
    get_visual = self._get_visual
    # Sum of option heights; each divider adds a line, except on the last
    # option whose divider is not displayed.
    height = sum(
        (
            get_visual(option).get_height(rules, width - padding_width)
            + (1 if option._divider and not last else 0)
        )
        for last, option in loop_last(self.options)
    )
    return height
def _get_line(self, style: Style, y: int) -> Strip:
    """Get the strip for content line `y`, rendered with `style`."""
    index, line_offset = self._lines[y]
    option = self.get_option_at_index(index)
    strips = self._get_option_render(option, style)
    return strips[line_offset]
def render_lines(self, crop: Region) -> list[Strip]:
    # Ensure line metrics are current before the standard render pass.
    self._update_lines()
    return super().render_lines(crop)
def render_line(self, y: int) -> Strip:
    """Get the strip for line `y` of the visible area."""

    def blank_line() -> Strip:
        # Shared fallback for any `y` that maps outside rendered content.
        return Strip.blank(
            self.scrollable_content_region.width,
            self.get_visual_style("option-list--option").rich_style,
        )

    line_number = self.scroll_offset.y + y
    try:
        option_index, line_offset = self._lines[line_number]
        option = self.options[option_index]
    except IndexError:
        return blank_line()
    # Pick the component class for the option's current interaction state.
    component_class = ""
    if option.disabled:
        component_class = "option-list--option-disabled"
    elif self.highlighted == option_index:
        component_class = "option-list--option-highlighted"
    elif self._mouse_hovering_over == option_index:
        component_class = "option-list--option-hover"
    if component_class:
        style = self.get_visual_style("option-list--option", component_class)
    else:
        style = self.get_visual_style("option-list--option")
    strips = self._get_option_render(option, style)
    try:
        return strips[line_offset]
    except IndexError:
        return blank_line()
def validate_highlighted(self, highlighted: int | None) -> int | None:
    """Validate the `highlighted` property value on access."""
    if highlighted is None or not self.options:
        return None
    # Clamp into the valid index range.
    return max(0, min(highlighted, len(self.options) - 1))
def watch_highlighted(self, highlighted: int | None) -> None:
    """React to the highlighted option having changed."""
    if highlighted is None:
        return
    if not self._options[highlighted].disabled:
        # Only enabled options are scrolled to and announced; a highlight
        # left on a disabled option is kept silently.
        self.scroll_to_highlight()
        self.post_message(
            self.OptionHighlighted(self, self.options[highlighted], highlighted)
        )
def scroll_to_highlight(self, top: bool = False) -> None:
    """Scroll to the highlighted option.

    Args:
        top: Ensure highlighted option is at the top of the widget.
    """
    highlighted = self.highlighted
    if highlighted is None or not self.is_mounted:
        return
    self._update_lines()
    try:
        y = self._index_to_line[highlighted]
    except KeyError:
        # Line metrics may not cover this option yet (e.g. no usable
        # content region when `_update_lines` ran).
        return
    height = self._heights[highlighted]
    self.scroll_to_region(
        Region(0, y, self.scrollable_content_region.width, height),
        force=True,
        animate=False,
        top=top,
        immediate=True,
    )
def action_cursor_up(self) -> None:
    """Move the highlight up to the previous enabled option."""
    # Disabled options are skipped by the navigation helper.
    self.highlighted = _widget_navigation.find_next_enabled(
        self.options,
        anchor=self.highlighted,
        direction=-1,
    )
def action_cursor_down(self) -> None:
    """Move the highlight down to the next enabled option."""
    # Disabled options are skipped by the navigation helper.
    self.highlighted = _widget_navigation.find_next_enabled(
        self.options,
        anchor=self.highlighted,
        direction=1,
    )
def action_first(self) -> None:
    """Move the highlight to the first enabled option."""
    # Presumably `None` when every option is disabled — confirm helper contract.
    self.highlighted = _widget_navigation.find_first_enabled(self.options)
def action_last(self) -> None:
    """Move the highlight to the last enabled option."""
    # Mirror of `action_first` for the other end of the list.
    self.highlighted = _widget_navigation.find_last_enabled(self.options)
def _move_page(self, direction: _widget_navigation.Direction) -> None:
    """Move the height roughly by one page in the given direction.

    This method will attempt to avoid selecting a disabled option.

    Args:
        direction: `-1` to move up a page, `1` to move down a page.
    """
    if not self._options:
        return

    height = self.scrollable_content_region.height
    # Jump one viewport height from the current highlight's first line,
    # clamped into the valid line range, then map back to an option index.
    # (`or 0` also maps a highlight of 0 to 0, so the idiom is safe here.)
    y = clamp(
        self._index_to_line[self.highlighted or 0] + direction * height,
        0,
        len(self._lines) - 1,
    )
    option_index = self._lines[y][0]
    self.highlighted = _widget_navigation.find_next_enabled_no_wrap(
        candidates=self._options,
        anchor=option_index,
        direction=direction,
        with_anchor=True,
    )
def action_page_up(self) -> None:
    """Move the highlight up one page."""
    if self.highlighted is None:
        # Nothing highlighted yet: start from the first enabled option.
        self.action_first()
    else:
        self._move_page(-1)
def action_page_down(self) -> None:
    """Move the highlight down one page."""
    if self.highlighted is None:
        # Nothing highlighted yet: start from the last enabled option.
        self.action_last()
    else:
        self._move_page(1)
def action_select(self) -> None:
    """Select the currently highlighted option.

    If an option is selected then a
    [OptionList.OptionSelected][textual.widgets.OptionList.OptionSelected] will be posted.
    """
    highlighted = self.highlighted
    if highlighted is None:
        return
    option = self._options[highlighted]
    if option.disabled:
        # Disabled options can be highlighted but never selected.
        return
    self.post_message(self.OptionSelected(self, option, highlighted))
| OptionList |
python | getsentry__sentry | src/sentry/integrations/bitbucket_server/client.py | {
"start": 3321,
"end": 9360
class ____(ApiClient, RepositoryClient):  # NOTE(review): class name masked in this extract.
    """
    Contains the BitBucket Server specifics in order to communicate with bitbucket

    You can find BitBucket REST API docs here:
    https://developer.atlassian.com/server/bitbucket/reference/rest-api/
    """

    integration_name = IntegrationProviderSlug.BITBUCKET_SERVER.value

    def __init__(
        self,
        integration: RpcIntegration | Integration,
        identity: RpcIdentity,
    ):
        """Build a client from the integration's metadata and an OAuth1 identity."""
        self.base_url = integration.metadata["base_url"]
        self.identity = identity

        super().__init__(
            verify_ssl=integration.metadata["verify_ssl"],
            integration_id=integration.id,
            logging_context=None,
        )

    def finalize_request(self, prepared_request: PreparedRequest) -> PreparedRequest:
        """Sign the outgoing request (presumably invoked by the base ApiClient
        pipeline just before sending — confirm against ApiClient)."""
        return self.authorize_request(prepared_request=prepared_request)

    def authorize_request(self, prepared_request: PreparedRequest):
        """Bitbucket Server authorizes with RSA-signed OAuth1 scheme"""
        if not self.identity:
            # No identity configured: the request is sent unsigned.
            return prepared_request
        auth_scheme = OAuth1(
            client_key=self.identity.data["consumer_key"],
            rsa_key=self.identity.data["private_key"],
            resource_owner_key=self.identity.data["access_token"],
            resource_owner_secret=self.identity.data["access_token_secret"],
            signature_method=SIGNATURE_RSA,
            signature_type="auth_header",
            decoding=None,
        )
        prepared_request.prepare_auth(auth=auth_scheme)
        return prepared_request

    def get_repos(self):
        """List repositories the user administers (single request, up to 250).

        NOTE(review): unlike `_get_values`, this does not paginate, so results
        beyond 250 are dropped — confirm that is intended.
        """
        return self.get(
            BitbucketServerAPIPath.repositories,
            params={"limit": 250, "permission": "REPO_ADMIN"},
        )

    def search_repositories(self, query_string):
        """Search administered repositories by name (single request, up to 250)."""
        return self.get(
            BitbucketServerAPIPath.repositories,
            params={"limit": 250, "permission": "REPO_ADMIN", "name": query_string},
        )

    def get_repo(self, project, repo):
        """Fetch a single repository by project key and repository slug."""
        return self.get(
            BitbucketServerAPIPath.repository.format(project=project, repo=repo),
        )

    def create_hook(self, project, repo, data):
        """Create a webhook on the given repository."""
        return self.post(
            BitbucketServerAPIPath.repository_hooks.format(project=project, repo=repo),
            data=data,
        )

    def delete_hook(self, project, repo, webhook_id):
        """Delete a repository webhook by ID."""
        return self.delete(
            BitbucketServerAPIPath.repository_hook.format(
                project=project, repo=repo, id=webhook_id
            ),
        )

    def get_commits(self, project, repo, from_hash, to_hash, limit=1000):
        """Return all commits between two hashes, merge commits excluded.

        Paginates via `_get_values`; `limit` is the per-page size.
        """
        logger.info(
            "load.commits",
            extra={
                "bitbucket_repo": repo,
                "bitbucket_project": project,
                "bitbucket_from_hash": from_hash,
                "bitbucket_to_hash": to_hash,
            },
        )

        return self._get_values(
            BitbucketServerAPIPath.repository_commits.format(project=project, repo=repo),
            {"limit": limit, "since": from_hash, "until": to_hash, "merges": "exclude"},
        )

    def get_last_commits(self, project, repo, limit=10):
        """Return the most recent non-merge commits (single page of `limit`)."""
        return self.get(
            BitbucketServerAPIPath.repository_commits.format(project=project, repo=repo),
            params={"merges": "exclude", "limit": limit},
        )["values"]

    def get_commit_filechanges(self, project, repo, commit, limit=1000):
        """Return all file changes for a commit (paginated; `limit` is page size)."""
        logger.info(
            "load.filechanges",
            extra={
                "bitbucket_repo": repo,
                "bitbucket_project": project,
                "bitbucket_commit": commit,
            },
        )

        return self._get_values(
            BitbucketServerAPIPath.commit_changes.format(project=project, repo=repo, commit=commit),
            {"limit": limit},
        )

    def _get_values(self, uri, params, max_pages=1000000):
        """Accumulate `values` across Bitbucket's paged responses.

        Follows the `isLastPage`/`nextPageStart` paging protocol until the
        last page, or until `max_pages` requests have been made (in which
        case a warning is logged and the partial result is returned).
        """
        values = []
        start = 0

        logger.info(
            "load.paginated_uri",
            extra={
                "bitbucket_uri": uri,
                "bitbucket_max_pages": max_pages,
                "bitbucket_params": params,
            },
        )

        for i in range(max_pages):
            new_params = dict.copy(params)
            new_params["start"] = start
            logger.debug(
                "Loading values for paginated uri starting from %s",
                start,
                extra={"uri": uri, "params": new_params},
            )
            data = self.get(uri, params=new_params)
            logger.debug(
                "%s values loaded", len(data["values"]), extra={"uri": uri, "params": new_params}
            )

            values += data["values"]

            # Absence of `isLastPage` is treated the same as the last page.
            if "isLastPage" not in data or data["isLastPage"]:
                logger.debug("Reached last page for paginated uri", extra={"uri": uri})
                return values
            else:
                start = data["nextPageStart"]

        logger.warning(
            "load.paginated_uri.max_pages",
            extra={
                "bitbucket_uri": uri,
                "bitbucket_params": params,
                "bitbucket_max_pages": max_pages,
            },
        )

        return values

    def check_file(self, repo: Repository, path: str, version: str | None) -> object | None:
        """HEAD the file at `path`/`version` (cached); presumably returns the
        response when the file exists — confirm against `head_cached`."""
        return self.head_cached(
            path=BitbucketServerAPIPath.build_source(
                project=repo.config["project"],
                repo=repo.config["repo"],
                path=path,
                sha=version,
            ),
        )

    def get_file(
        self, repo: Repository, path: str, ref: str | None, codeowners: bool = False
    ) -> str:
        """Fetch the raw contents of a file at the given ref, as text.

        NOTE(review): `codeowners` is accepted but unused here — presumably
        part of the shared `RepositoryClient` interface.
        """
        response = self.get_cached(
            path=BitbucketServerAPIPath.build_raw(
                project=repo.config["project"],
                repo=repo.config["repo"],
                path=path,
                sha=ref,
            ),
            raw_response=True,
        )
        return response.text
| BitbucketServerClient |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/type_api.py | {
"start": 2713,
"end": 2819
class ____(Protocol[_T_con]):  # NOTE(review): class name masked in this extract.
    """Protocol for a bind-parameter processor callable.

    Receives an optional in-Python value of the bound type and returns the
    value to hand on — presumably the DBAPI-facing representation; confirm
    against SQLAlchemy's ``TypeEngine.bind_processor`` documentation.
    """

    def __call__(self, value: Optional[_T_con]) -> Any: ...
| _BindProcessorType |
python | getsentry__sentry | tests/sentry/uptime/endpoints/test_detector.py | {
"start": 8859,
"end": 13409
class ____(APITestCase):  # NOTE(review): class name masked in this extract.
    """POST tests for the organization detector index endpoint, covering
    creation and validation of uptime (`UptimeDomainCheckFailure`) detectors."""

    endpoint = "sentry-api-0-organization-detector-index"
    method = "post"

    def setUp(self):
        super().setUp()
        self.login_as(user=self.user)

    def test_create_detector_validation_error(self):
        # `timeout_ms` above the 60000 maximum must be rejected.
        invalid_data = _get_valid_data(
            self.project.id, self.environment.name, dataSources=[{"timeout_ms": 80000}]
        )
        response = self.get_error_response(
            self.organization.slug,
            **invalid_data,
            status_code=status.HTTP_400_BAD_REQUEST,
        )
        assert "dataSources" in response.data
        assert "Ensure this value is less than or equal to 60000" in str(
            response.data["dataSources"]
        )

    def test_create_detector(self):
        valid_data = _get_valid_data(
            self.project.id,
            self.environment.name,
        )
        response = self.get_success_response(
            self.organization.slug,
            **valid_data,
            status_code=status.HTTP_201_CREATED,
        )

        detector: Detector = Detector.objects.get(id=response.data["id"])
        created_sub: UptimeSubscription = get_uptime_subscription(detector)
        assert detector.name == "Test Uptime Detector"
        assert detector.type == UptimeDomainCheckFailure.slug
        assert detector.project_id == self.project.id
        assert created_sub.timeout_ms == 30000
        assert created_sub.url == "https://www.google.com"
        assert created_sub.interval_seconds == UptimeSubscription.IntervalSeconds.ONE_MINUTE

    def test_create_detector_optional_fields(self):
        # All optional subscription fields should be persisted verbatim.
        valid_data = _get_valid_data(
            self.project.id,
            self.environment.name,
            dataSources=[
                {
                    "timeout_ms": 30000,
                    "name": "Test Uptime Detector",
                    "url": "https://www.google.com",
                    "interval_seconds": UptimeSubscription.IntervalSeconds.ONE_MINUTE,
                    "method": "PUT",
                    "headers": [["key", "value"]],
                    "body": "<html/>",
                    "trace_sampling": True,
                }
            ],
        )
        response = self.get_success_response(
            self.organization.slug,
            **valid_data,
            status_code=status.HTTP_201_CREATED,
        )

        detector: Detector = Detector.objects.get(id=response.data["id"])
        created_sub: UptimeSubscription = get_uptime_subscription(detector)
        assert detector.name == "Test Uptime Detector"
        assert detector.type == UptimeDomainCheckFailure.slug
        assert detector.project_id == self.project.id
        assert created_sub.timeout_ms == 30000
        assert created_sub.url == "https://www.google.com"
        assert created_sub.interval_seconds == UptimeSubscription.IntervalSeconds.ONE_MINUTE
        assert created_sub.method == "PUT"
        assert created_sub.headers == [["key", "value"]]
        assert created_sub.body == "<html/>"
        assert created_sub.trace_sampling is True

    def test_create_detector_missing_config_property(self):
        # Config lacking `downtime_threshold` must fail schema validation.
        invalid_data = _get_valid_data(
            self.project.id,
            self.environment.name,
            config={
                "environment": self.environment.name,
                "mode": UptimeMonitorMode.MANUAL.value,
                "recovery_threshold": 1,
            },
        )
        response = self.get_error_response(
            self.organization.slug,
            **invalid_data,
            status_code=status.HTTP_400_BAD_REQUEST,
        )
        assert "config" in response.data
        assert "downtime_threshold" in str(response.data["config"])

    def test_create_detector_non_superuser_cannot_set_auto_detected_mode(self):
        """Integration test: non-superuser cannot create with AUTO_DETECTED mode via API."""
        invalid_data = _get_valid_data(
            self.project.id,
            self.environment.name,
            config={
                "environment": self.environment.name,
                "mode": UptimeMonitorMode.AUTO_DETECTED_ACTIVE.value,
                "recovery_threshold": 1,
                "downtime_threshold": 1,
            },
        )
        response = self.get_error_response(
            self.organization.slug,
            **invalid_data,
            status_code=status.HTTP_400_BAD_REQUEST,
        )
        assert response.data["config"] == ["Only superusers can modify `mode`"]
| OrganizationDetectorIndexPostTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/assets/definition/cacheable_assets_definition.py | {
"start": 1203,
"end": 6069
class ____(  # NOTE(review): class name masked in this extract.
    NamedTuple(
        "_AssetsDefinitionCacheableData",
        [
            ("keys_by_input_name", Optional[Mapping[str, AssetKey]]),
            ("keys_by_output_name", Optional[Mapping[str, AssetKey]]),
            ("internal_asset_deps", Optional[Mapping[str, AbstractSet[AssetKey]]]),
            ("group_name", Optional[str]),
            ("metadata_by_output_name", Optional[Mapping[str, RawMetadataMapping]]),
            ("key_prefix", Optional[CoercibleToAssetKeyPrefix]),
            ("can_subset", bool),
            ("extra_metadata", Optional[Mapping[Any, Any]]),
            (
                "legacy_freshness_policies_by_output_name",
                Optional[Mapping[str, LegacyFreshnessPolicy]],
            ),
            (
                "auto_materialize_policies_by_output_name",
                Optional[Mapping[str, AutoMaterializePolicy]],
            ),
            ("backfill_policy", Optional[BackfillPolicy]),
        ],
    )
):
    """Data representing cacheable metadata about assets, which can be used to generate
    AssetsDefinition objects in other processes.
    """

    def __new__(
        cls,
        keys_by_input_name: Optional[Mapping[str, AssetKey]] = None,
        keys_by_output_name: Optional[Mapping[str, AssetKey]] = None,
        internal_asset_deps: Optional[Mapping[str, AbstractSet[AssetKey]]] = None,
        group_name: Optional[str] = None,
        metadata_by_output_name: Optional[Mapping[str, RawMetadataMapping]] = None,
        key_prefix: Optional[CoercibleToAssetKeyPrefix] = None,
        can_subset: bool = False,
        extra_metadata: Optional[Mapping[Any, Any]] = None,
        legacy_freshness_policies_by_output_name: Optional[
            Mapping[str, LegacyFreshnessPolicy]
        ] = None,
        auto_materialize_policies_by_output_name: Optional[
            Mapping[str, AutoMaterializePolicy]
        ] = None,
        backfill_policy: Optional[BackfillPolicy] = None,
    ):
        """Validate and normalize all fields; `extra_metadata` must survive serdes."""
        extra_metadata = check.opt_nullable_mapping_param(extra_metadata, "extra_metadata")
        try:
            # check that the value can pass through the serdes layer
            serialize_value(extra_metadata)
        except SerializationError:
            check.failed("Value for `extra_metadata` is not JSON serializable.")

        return super().__new__(
            cls,
            keys_by_input_name=check.opt_nullable_mapping_param(
                keys_by_input_name, "keys_by_input_name", key_type=str, value_type=AssetKey
            ),
            keys_by_output_name=check.opt_nullable_mapping_param(
                keys_by_output_name, "keys_by_output_name", key_type=str, value_type=AssetKey
            ),
            internal_asset_deps=check.opt_nullable_mapping_param(
                internal_asset_deps,
                "internal_asset_deps",
                key_type=str,
                value_type=(set, frozenset),
            ),
            group_name=check.opt_str_param(group_name, "group_name"),
            metadata_by_output_name=check.opt_nullable_mapping_param(
                metadata_by_output_name, "metadata_by_output_name", key_type=str
            ),
            # Normalize a bare string prefix to a single-element list.
            key_prefix=(
                [key_prefix]
                if isinstance(key_prefix, str)
                else check.opt_list_param(key_prefix, "key_prefix", of_type=str)
            ),
            can_subset=check.opt_bool_param(can_subset, "can_subset", default=False),
            extra_metadata=extra_metadata,
            legacy_freshness_policies_by_output_name=check.opt_nullable_mapping_param(
                legacy_freshness_policies_by_output_name,
                "legacy_freshness_policies_by_output_name",
                key_type=str,
                value_type=LegacyFreshnessPolicy,
            ),
            auto_materialize_policies_by_output_name=check.opt_nullable_mapping_param(
                auto_materialize_policies_by_output_name,
                "auto_materialize_policies_by_output_name",
                key_type=str,
                value_type=AutoMaterializePolicy,
            ),
            backfill_policy=check.opt_inst_param(
                backfill_policy, "backfill_policy", BackfillPolicy
            ),
        )

    # Allow this to be hashed for use in `lru_cache`. This is needed because:
    # - `ReconstructableJob` uses `lru_cache`
    # - `ReconstructableJob` has a `ReconstructableRepository` attribute
    # - `ReconstructableRepository` has a `RepositoryLoadData` attribute
    # - `RepositoryLoadData` has a `Mapping` attribute containing `AssetsDefinitionCacheableData`
    # - `AssetsDefinitionCacheableData` has collection attributes that are unhashable by default
    def __hash__(self) -> int:
        # Storing `_hash` works because this subclass (unlike the generated
        # NamedTuple base, which has `__slots__ = ()`) has an instance
        # `__dict__`. A concurrent first call may compute the hash twice,
        # which is benign since the instance is immutable.
        if not hasattr(self, "_hash"):
            self._hash = hash_collection(self)
        return self._hash
| AssetsDefinitionCacheableData |
python | mlflow__mlflow | mlflow/store/tracking/sqlalchemy_store.py | {
"start": 5471,
"end": 212771
} | class ____(AbstractStore):
"""
SQLAlchemy compliant backend store for tracking meta data for MLflow entities. MLflow
supports the database dialects ``mysql``, ``mssql``, ``sqlite``, and ``postgresql``.
As specified in the
`SQLAlchemy docs <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_ ,
the database URI is expected in the format
``<dialect>+<driver>://<username>:<password>@<host>:<port>/<database>``. If you do not
specify a driver, SQLAlchemy uses a dialect's default driver.
This store interacts with SQL store using SQLAlchemy abstractions defined for MLflow entities.
:py:class:`mlflow.store.dbmodels.models.SqlExperiment`,
:py:class:`mlflow.store.dbmodels.models.SqlRun`,
:py:class:`mlflow.store.dbmodels.models.SqlTag`,
:py:class:`mlflow.store.dbmodels.models.SqlMetric`, and
:py:class:`mlflow.store.dbmodels.models.SqlParam`.
Run artifacts are stored in a separate location using artifact stores conforming to
:py:class:`mlflow.store.artifact_repo.ArtifactRepository`. Default artifact locations for
user experiments are stored in the database along with metadata. Each run artifact location
is recorded in :py:class:`mlflow.store.dbmodels.models.SqlRun` and stored in the backend DB.
"""
ARTIFACTS_FOLDER_NAME = "artifacts"
MODELS_FOLDER_NAME = "models"
TRACE_FOLDER_NAME = "traces"
DEFAULT_EXPERIMENT_ID = "0"
EVALUATION_DATASET_ID_PREFIX = "d-"
_db_uri_sql_alchemy_engine_map = {}
_db_uri_sql_alchemy_engine_map_lock = threading.Lock()
    def __init__(self, db_uri, default_artifact_root):
        """
        Create a database backed store.

        Args:
            db_uri: The SQLAlchemy database URI string to connect to the database. See
                the `SQLAlchemy docs
                <https://docs.sqlalchemy.org/en/latest/core/engines.html#database-urls>`_
                for format specifications. MLflow supports the dialects ``mysql``,
                ``mssql``, ``sqlite``, and ``postgresql``.
            default_artifact_root: Path/URI to location suitable for large data (such as a blob
                store object, DBFS path, or shared NFS file system).
        """
        super().__init__()
        self.db_uri = db_uri
        self.db_type = extract_db_type_from_uri(db_uri)
        self.artifact_root_uri = resolve_uri_if_local(default_artifact_root)
        # Quick check to see if the respective SQLAlchemy database engine has already been created.
        if db_uri not in SqlAlchemyStore._db_uri_sql_alchemy_engine_map:
            with SqlAlchemyStore._db_uri_sql_alchemy_engine_map_lock:
                # Repeat check to prevent race conditions where one thread checks for an existing
                # engine while another is creating the respective one, resulting in multiple
                # engines being created. It isn't combined with the above check to prevent
                # inefficiency from multiple threads waiting for the lock to check for engine
                # existence if it has already been created.
                if db_uri not in SqlAlchemyStore._db_uri_sql_alchemy_engine_map:
                    SqlAlchemyStore._db_uri_sql_alchemy_engine_map[db_uri] = (
                        mlflow.store.db.utils.create_sqlalchemy_engine_with_retry(db_uri)
                    )
        self.engine = SqlAlchemyStore._db_uri_sql_alchemy_engine_map[db_uri]
        # On a completely fresh MLflow installation against an empty database (verify database
        # emptiness by checking that 'experiments' etc aren't in the list of table names), run all
        # DB migrations
        if not mlflow.store.db.utils._all_tables_exist(self.engine):
            mlflow.store.db.utils._initialize_tables(self.engine)
        SessionMaker = sqlalchemy.orm.sessionmaker(bind=self.engine)
        self.ManagedSessionMaker = mlflow.store.db.utils._get_managed_session_maker(
            SessionMaker, self.db_type
        )
        mlflow.store.db.utils._verify_schema(self.engine)
        # Ensure the local artifact root directory exists when the root is a local path.
        if is_local_uri(default_artifact_root):
            mkdir(local_file_uri_to_path(default_artifact_root))
        # Check if default experiment exists (not just if any experiments exist)
        # This is important for databases that persist across test runs
        try:
            self.get_experiment(str(self.DEFAULT_EXPERIMENT_ID))
        except MlflowException:
            # Default experiment doesn't exist, create it
            with self.ManagedSessionMaker() as session:
                self._create_default_experiment(session)
def _get_dialect(self):
return self.engine.dialect.name
    def _dispose_engine(self):
        """Dispose of the engine's connection pool, closing all pooled connections."""
        self.engine.dispose()
def _set_zero_value_insertion_for_autoincrement_column(self, session):
if self.db_type == MYSQL:
# config letting MySQL override default
# to allow 0 value for experiment ID (auto increment column)
session.execute(sql.text("SET @@SESSION.sql_mode='NO_AUTO_VALUE_ON_ZERO';"))
if self.db_type == MSSQL:
# config letting MSSQL override default
# to allow any manual value inserted into IDENTITY column
session.execute(sql.text("SET IDENTITY_INSERT experiments ON;"))
# DB helper methods to allow zero values for columns with auto increments
def _unset_zero_value_insertion_for_autoincrement_column(self, session):
if self.db_type == MYSQL:
session.execute(sql.text("SET @@SESSION.sql_mode='';"))
if self.db_type == MSSQL:
session.execute(sql.text("SET IDENTITY_INSERT experiments OFF;"))
    def _create_default_experiment(self, session):
        """
        MLflow UI and client code expects a default experiment with ID 0.
        This method uses SQL insert statement to create the default experiment as a hack, since
        experiment table uses 'experiment_id' column is a PK and is also set to auto increment.
        MySQL and other implementation do not allow value '0' for such cases.

        ToDo: Identify a less hacky mechanism to create default experiment 0
        """
        table = SqlExperiment.__tablename__
        creation_time = get_current_time_millis()
        default_experiment = {
            SqlExperiment.experiment_id.name: int(SqlAlchemyStore.DEFAULT_EXPERIMENT_ID),
            SqlExperiment.name.name: Experiment.DEFAULT_EXPERIMENT_NAME,
            SqlExperiment.artifact_location.name: str(self._get_artifact_location(0)),
            SqlExperiment.lifecycle_stage.name: LifecycleStage.ACTIVE,
            SqlExperiment.creation_time.name: creation_time,
            SqlExperiment.last_update_time.name: creation_time,
        }

        def decorate(s):
            # Render string values as quoted SQL literals; everything else verbatim.
            if is_string_type(s):
                return repr(s)
            else:
                return str(s)

        # Get a list of keys to ensure we have a deterministic ordering
        columns = list(default_experiment.keys())
        values = ", ".join([decorate(default_experiment.get(c)) for c in columns])
        try:
            self._set_zero_value_insertion_for_autoincrement_column(session)
            # NOTE: string-built SQL is tolerable here only because every interpolated
            # value is an internal constant — no user input reaches this statement.
            session.execute(
                sql.text(f"INSERT INTO {table} ({', '.join(columns)}) VALUES ({values});")
            )
        finally:
            # Always restore the session settings, even if the insert fails.
            self._unset_zero_value_insertion_for_autoincrement_column(session)
def _get_or_create(self, session, model, **kwargs):
instance = session.query(model).filter_by(**kwargs).first()
created = False
if instance:
return instance, created
else:
instance = model(**kwargs)
session.add(instance)
created = True
return instance, created
def _get_artifact_location(self, experiment_id):
return append_to_uri_path(self.artifact_root_uri, str(experiment_id))
    def create_experiment(self, name, artifact_location=None, tags=None):
        """Create a new experiment and return its id as a string.

        Args:
            name: Experiment name; must be unique among all experiments.
            artifact_location: Optional artifact root URI; when omitted, a default
                location under the store's artifact root is derived from the
                autoincremented experiment id.
            tags: Optional list of tag entities to attach at creation time.

        Raises:
            MlflowException: With ``RESOURCE_ALREADY_EXISTS`` if an experiment with
                the same name exists (detected via the DB uniqueness constraint).
        """
        _validate_experiment_name(name)
        if artifact_location:
            artifact_location = resolve_uri_if_local(artifact_location)
        _validate_experiment_artifact_location_length(artifact_location)
        with self.ManagedSessionMaker() as session:
            try:
                creation_time = get_current_time_millis()
                experiment = SqlExperiment(
                    name=name,
                    lifecycle_stage=LifecycleStage.ACTIVE,
                    artifact_location=artifact_location,
                    creation_time=creation_time,
                    last_update_time=creation_time,
                )
                experiment.tags = (
                    [SqlExperimentTag(key=tag.key, value=tag.value) for tag in tags] if tags else []
                )
                session.add(experiment)
                if not artifact_location:
                    # this requires a double write. The first one to generate an autoincrement-ed ID
                    eid = session.query(SqlExperiment).filter_by(name=name).first().experiment_id
                    experiment.artifact_location = self._get_artifact_location(eid)
                session.flush()
            except sqlalchemy.exc.IntegrityError as e:
                raise MlflowException(
                    f"Experiment(name={name}) already exists. Error: {e}",
                    RESOURCE_ALREADY_EXISTS,
                )
            return str(experiment.experiment_id)
    def _search_experiments(
        self,
        view_type,
        max_results,
        filter_string,
        order_by,
        page_token,
    ):
        """Core experiment search: returns ``(experiments, next_page_token)``.

        Fetches ``max_results + 1`` rows so the presence of an extra row signals
        that another page exists.
        """

        def compute_next_token(current_size):
            # A full page plus one sentinel row means more results remain.
            next_token = None
            if max_results + 1 == current_size:
                final_offset = offset + max_results
                next_token = SearchExperimentsUtils.create_page_token(final_offset)
            return next_token

        self._validate_max_results_param(max_results)
        with self.ManagedSessionMaker() as session:
            parsed_filters = SearchExperimentsUtils.parse_search_filter(filter_string)
            attribute_filters, non_attribute_filters = _get_search_experiments_filter_clauses(
                parsed_filters, self._get_dialect()
            )
            order_by_clauses = _get_search_experiments_order_by_clauses(order_by)
            offset = SearchUtils.parse_start_offset_from_page_token(page_token)
            lifecycle_stags = set(LifecycleStage.view_type_to_stages(view_type))
            # Non-attribute filters (e.g. tag filters) are applied as successive joins.
            stmt = (
                reduce(lambda s, f: s.join(f), non_attribute_filters, select(SqlExperiment))
                .options(*self._get_eager_experiment_query_options())
                .filter(
                    *attribute_filters,
                    SqlExperiment.lifecycle_stage.in_(lifecycle_stags),
                )
                .order_by(*order_by_clauses)
                .offset(offset)
                .limit(max_results + 1)
            )
            queried_experiments = session.execute(stmt).scalars(SqlExperiment).all()
            experiments = [e.to_mlflow_entity() for e in queried_experiments]
            next_page_token = compute_next_token(len(experiments))
        return experiments[:max_results], next_page_token
def search_experiments(
self,
view_type=ViewType.ACTIVE_ONLY,
max_results=SEARCH_MAX_RESULTS_DEFAULT,
filter_string=None,
order_by=None,
page_token=None,
):
experiments, next_page_token = self._search_experiments(
view_type, max_results, filter_string, order_by, page_token
)
return PagedList(experiments, next_page_token)
    def _get_experiment(self, session, experiment_id, view_type, eager=False):
        """Fetch a single ``SqlExperiment`` row by id, scoped to ``view_type`` stages.

        Args:
            session: Active SQLAlchemy session.
            experiment_id: Experiment id; falls back to the default experiment id
                when falsy.
            view_type: ``ViewType`` controlling which lifecycle stages match.
            eager: If ``True``, eagerly loads the experiments's tags. If ``False``, these tags
                are not eagerly loaded and will be loaded if/when their corresponding
                object properties are accessed from the resulting ``SqlExperiment`` object.

        Raises:
            MlflowException: With ``RESOURCE_DOES_NOT_EXIST`` when no matching row exists.
        """
        experiment_id = experiment_id or SqlAlchemyStore.DEFAULT_EXPERIMENT_ID
        stages = LifecycleStage.view_type_to_stages(view_type)
        query_options = self._get_eager_experiment_query_options() if eager else []
        experiment = (
            session.query(SqlExperiment)
            .options(*query_options)
            .filter(
                SqlExperiment.experiment_id == int(experiment_id),
                SqlExperiment.lifecycle_stage.in_(stages),
            )
            .one_or_none()
        )
        if experiment is None:
            raise MlflowException(
                f"No Experiment with id={experiment_id} exists", RESOURCE_DOES_NOT_EXIST
            )
        return experiment
@staticmethod
def _get_eager_experiment_query_options():
"""
A list of SQLAlchemy query options that can be used to eagerly load the following
experiment attributes when fetching an experiment: ``tags``.
"""
return [
# Use a subquery load rather than a joined load in order to minimize the memory overhead
# of the eager loading procedure. For more information about relationship loading
# techniques, see https://docs.sqlalchemy.org/en/13/orm/
# loading_relationships.html#relationship-loading-techniques
sqlalchemy.orm.subqueryload(SqlExperiment.tags),
]
def get_experiment(self, experiment_id):
with self.ManagedSessionMaker() as session:
return self._get_experiment(
session, experiment_id, ViewType.ALL, eager=True
).to_mlflow_entity()
def get_experiment_by_name(self, experiment_name):
"""
Specialized implementation for SQL backed store.
"""
with self.ManagedSessionMaker() as session:
stages = LifecycleStage.view_type_to_stages(ViewType.ALL)
experiment = (
session.query(SqlExperiment)
.options(*self._get_eager_experiment_query_options())
.filter(
SqlExperiment.name == experiment_name,
SqlExperiment.lifecycle_stage.in_(stages),
)
.one_or_none()
)
return experiment.to_mlflow_entity() if experiment is not None else None
def delete_experiment(self, experiment_id):
with self.ManagedSessionMaker() as session:
experiment = self._get_experiment(session, experiment_id, ViewType.ACTIVE_ONLY)
experiment.lifecycle_stage = LifecycleStage.DELETED
experiment.last_update_time = get_current_time_millis()
runs = self._list_run_infos(session, experiment_id)
for run in runs:
self._mark_run_deleted(session, run)
session.add(experiment)
def _hard_delete_experiment(self, experiment_id):
"""
Permanently delete a experiment (metadata and metrics, tags, parameters).
This is used by the ``mlflow gc`` command line and is not intended to be used elsewhere.
"""
with self.ManagedSessionMaker() as session:
experiment = self._get_experiment(
experiment_id=experiment_id,
session=session,
view_type=ViewType.DELETED_ONLY,
)
session.delete(experiment)
def _mark_run_deleted(self, session, run):
run.lifecycle_stage = LifecycleStage.DELETED
run.deleted_time = get_current_time_millis()
session.add(run)
def _mark_run_active(self, session, run):
run.lifecycle_stage = LifecycleStage.ACTIVE
run.deleted_time = None
session.add(run)
def _list_run_infos(self, session, experiment_id):
return session.query(SqlRun).filter(SqlRun.experiment_id == int(experiment_id)).all()
def restore_experiment(self, experiment_id):
with self.ManagedSessionMaker() as session:
experiment = self._get_experiment(session, experiment_id, ViewType.DELETED_ONLY)
experiment.lifecycle_stage = LifecycleStage.ACTIVE
experiment.last_update_time = get_current_time_millis()
runs = self._list_run_infos(session, experiment_id)
for run in runs:
self._mark_run_active(session, run)
session.add(experiment)
def rename_experiment(self, experiment_id, new_name):
with self.ManagedSessionMaker() as session:
experiment = self._get_experiment(session, experiment_id, ViewType.ALL)
if experiment.lifecycle_stage != LifecycleStage.ACTIVE:
raise MlflowException("Cannot rename a non-active experiment.", INVALID_STATE)
experiment.name = new_name
experiment.last_update_time = get_current_time_millis()
session.add(experiment)
    def create_run(self, experiment_id, user_id, start_time, tags, run_name):
        """Create a new run under an active experiment and return the ``Run`` entity.

        Args:
            experiment_id: Id of the (active) parent experiment.
            user_id: User recorded as the run creator.
            start_time: Run start time in milliseconds since the epoch.
            tags: Optional list of ``RunTag`` entities; ``mlflow.runName`` may set the name.
            run_name: Optional explicit run name; must agree with the ``mlflow.runName``
                tag when both are provided.

        Raises:
            MlflowException: With ``INVALID_PARAMETER_VALUE`` when ``run_name`` and the
                ``mlflow.runName`` tag conflict.
        """
        with self.ManagedSessionMaker() as session:
            experiment = self.get_experiment(experiment_id)
            self._check_experiment_is_active(experiment)
            # Note: we need to ensure the generated "run_id" only contains digits and lower
            # case letters, because some query filters contain "IN" clause, and in MYSQL the
            # "IN" clause is case-insensitive, we use a trick that filters out comparison values
            # containing upper case letters when parsing "IN" clause inside query filter.
            run_id = uuid.uuid4().hex
            artifact_location = append_to_uri_path(
                experiment.artifact_location,
                run_id,
                SqlAlchemyStore.ARTIFACTS_FOLDER_NAME,
            )
            tags = tags.copy() if tags else []
            run_name_tag = _get_run_name_from_tags(tags)
            if run_name and run_name_tag and (run_name != run_name_tag):
                raise MlflowException(
                    "Both 'run_name' argument and 'mlflow.runName' tag are specified, but with "
                    f"different values (run_name='{run_name}', mlflow.runName='{run_name_tag}').",
                    INVALID_PARAMETER_VALUE,
                )
            # Fall back to the tag value, then to a generated name.
            run_name = run_name or run_name_tag or _generate_random_name()
            if not run_name_tag:
                tags.append(RunTag(key=MLFLOW_RUN_NAME, value=run_name))
            run = SqlRun(
                name=run_name,
                artifact_uri=artifact_location,
                run_uuid=run_id,
                experiment_id=experiment_id,
                source_type=SourceType.to_string(SourceType.UNKNOWN),
                source_name="",
                entry_point_name="",
                user_id=user_id,
                status=RunStatus.to_string(RunStatus.RUNNING),
                start_time=start_time,
                end_time=None,
                deleted_time=None,
                source_version="",
                lifecycle_stage=LifecycleStage.ACTIVE,
            )
            run.tags = [SqlTag(key=tag.key, value=tag.value) for tag in tags]
            session.add(run)
            run = run.to_mlflow_entity()
            # A freshly created run has no dataset inputs unless logged elsewhere.
            inputs_list = self._get_run_inputs(session, [run_id])
            dataset_inputs = inputs_list[0] if inputs_list else []
            return Run(run.info, run.data, RunInputs(dataset_inputs=dataset_inputs))
    def _get_run(self, session, run_uuid, eager=False):
        """Fetch a single ``SqlRun`` row by id.

        Args:
            session: Active SQLAlchemy session.
            run_uuid: Id of the run to fetch.
            eager: If ``True``, eagerly loads the run's summary metrics (``latest_metrics``),
                params, and tags when fetching the run. If ``False``, these attributes
                are not eagerly loaded and will be loaded when their corresponding
                object properties are accessed from the resulting ``SqlRun`` object.

        Raises:
            MlflowException: ``RESOURCE_DOES_NOT_EXIST`` when the run is missing, or
                ``INVALID_STATE`` when multiple rows share the id.
        """
        query_options = self._get_eager_run_query_options() if eager else []
        runs = (
            session.query(SqlRun).options(*query_options).filter(SqlRun.run_uuid == run_uuid).all()
        )
        if len(runs) == 0:
            raise MlflowException(f"Run with id={run_uuid} not found", RESOURCE_DOES_NOT_EXIST)
        if len(runs) > 1:
            raise MlflowException(
                f"Expected only 1 run with id={run_uuid}. Found {len(runs)}.",
                INVALID_STATE,
            )
        return runs[0]
    def _get_run_inputs(self, session, run_uuids):
        """Fetch the dataset inputs (with their tags) for each run in ``run_uuids``.

        Returns:
            A list parallel to ``run_uuids``; each element is the list of
            ``DatasetInput`` entities for the corresponding run (empty when none).
        """
        # One joined query for all runs: inputs joined to their datasets, with tags
        # attached via a left outer join so inputs without tags are still returned.
        datasets_with_tags = (
            session.query(
                SqlInput.input_uuid,
                SqlInput.destination_id.label("run_uuid"),
                SqlDataset,
                SqlInputTag,
            )
            .select_from(SqlInput)
            .join(SqlDataset, SqlInput.source_id == SqlDataset.dataset_uuid)
            .outerjoin(SqlInputTag, SqlInputTag.input_uuid == SqlInput.input_uuid)
            .filter(SqlInput.destination_type == "RUN", SqlInput.destination_id.in_(run_uuids))
            .order_by("run_uuid")
        ).all()
        # Group rows by run, deduplicating datasets and accumulating their tags.
        dataset_inputs_per_run = defaultdict(dict)
        for input_uuid, run_uuid, dataset_sql, tag_sql in datasets_with_tags:
            dataset_inputs = dataset_inputs_per_run[run_uuid]
            dataset_uuid = dataset_sql.dataset_uuid
            dataset_input = dataset_inputs.get(dataset_uuid)
            if dataset_input is None:
                dataset_entity = dataset_sql.to_mlflow_entity()
                dataset_input = DatasetInput(dataset=dataset_entity, tags=[])
                dataset_inputs[dataset_uuid] = dataset_input
            if tag_sql is not None:
                dataset_input.tags.append(tag_sql.to_mlflow_entity())
        return [list(dataset_inputs_per_run[run_uuid].values()) for run_uuid in run_uuids]
@staticmethod
def _get_eager_run_query_options():
"""
A list of SQLAlchemy query options that can be used to eagerly load the following
run attributes when fetching a run: ``latest_metrics``, ``params``, and ``tags``.
"""
return [
# Use a select in load rather than a joined load in order to minimize the memory
# overhead of the eager loading procedure. For more information about relationship
# loading techniques, see https://docs.sqlalchemy.org/en/13/orm/
# loading_relationships.html#relationship-loading-techniques
sqlalchemy.orm.selectinload(SqlRun.latest_metrics),
sqlalchemy.orm.selectinload(SqlRun.params),
sqlalchemy.orm.selectinload(SqlRun.tags),
]
def _check_run_is_active(self, run):
if run.lifecycle_stage != LifecycleStage.ACTIVE:
raise MlflowException(
(
f"The run {run.run_uuid} must be in the 'active' state. "
f"Current state is {run.lifecycle_stage}."
),
INVALID_PARAMETER_VALUE,
)
def _check_experiment_is_active(self, experiment):
if experiment.lifecycle_stage != LifecycleStage.ACTIVE:
raise MlflowException(
(
f"The experiment {experiment.experiment_id} must be in the 'active' state. "
f"Current state is {experiment.lifecycle_stage}."
),
INVALID_PARAMETER_VALUE,
)
    def update_run_info(self, run_id, run_status, end_time, run_name):
        """Update status, end time, and/or name of an active run; returns its ``RunInfo``.

        Each of ``run_status``, ``end_time``, and ``run_name`` is applied only when
        provided (non-None / truthy); the others are left untouched.
        """
        with self.ManagedSessionMaker() as session:
            run = self._get_run(run_uuid=run_id, session=session)
            self._check_run_is_active(run)
            if run_status is not None:
                run.status = RunStatus.to_string(run_status)
            if end_time is not None:
                run.end_time = end_time
            if run_name:
                run.name = run_name
                # Keep the mlflow.runName tag in sync with the run's name column.
                run_name_tag = self._try_get_run_tag(session, run_id, MLFLOW_RUN_NAME)
                if run_name_tag is None:
                    run.tags.append(SqlTag(key=MLFLOW_RUN_NAME, value=run_name))
                else:
                    run_name_tag.value = run_name
            session.add(run)
            run = run.to_mlflow_entity()
            return run.info
def _try_get_run_tag(self, session, run_id, tagKey, eager=False):
query_options = self._get_eager_run_query_options() if eager else []
return (
session.query(SqlTag)
.options(*query_options)
.filter(SqlTag.run_uuid == run_id, SqlTag.key == tagKey)
.one_or_none()
)
    def get_run(self, run_id):
        """Fetch a run with its data, dataset inputs, and model inputs/outputs."""
        with self.ManagedSessionMaker() as session:
            # Load the run with the specified id and eagerly load its summary metrics, params, and
            # tags. These attributes are referenced during the invocation of
            # ``run.to_mlflow_entity()``, so eager loading helps avoid additional database queries
            # that are otherwise executed at attribute access time under a lazy loading model.
            run = self._get_run(run_uuid=run_id, session=session, eager=True)
            mlflow_run = run.to_mlflow_entity()
            # Get the run inputs and add to the run
            inputs = self._get_run_inputs(run_uuids=[run_id], session=session)[0]
            model_inputs = self._get_model_inputs(run_id, session)
            model_outputs = self._get_model_outputs(run_id, session)
            return Run(
                mlflow_run.info,
                mlflow_run.data,
                RunInputs(dataset_inputs=inputs, model_inputs=model_inputs),
                RunOutputs(model_outputs),
            )
def restore_run(self, run_id):
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
run.lifecycle_stage = LifecycleStage.ACTIVE
run.deleted_time = None
session.add(run)
def delete_run(self, run_id):
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
run.lifecycle_stage = LifecycleStage.DELETED
run.deleted_time = get_current_time_millis()
session.add(run)
def _hard_delete_run(self, run_id):
"""
Permanently delete a run (metadata and metrics, tags, parameters).
This is used by the ``mlflow gc`` command line and is not intended to be used elsewhere.
"""
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
session.delete(run)
def _get_deleted_runs(self, older_than=0):
"""
Get all deleted run ids.
Args:
older_than: get runs that is older than this variable in number of milliseconds.
defaults to 0 ms to get all deleted runs.
"""
current_time = get_current_time_millis()
with self.ManagedSessionMaker() as session:
runs = (
session.query(SqlRun)
.filter(
SqlRun.lifecycle_stage == LifecycleStage.DELETED,
SqlRun.deleted_time <= (current_time - older_than),
)
.all()
)
return [run.run_uuid for run in runs]
def log_metric(self, run_id, metric):
# simply call _log_metrics and let it handle the rest
if metric.model_id is not None:
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
experiment_id = run.experiment_id
self._log_model_metrics(run_id, [metric], experiment_id=experiment_id)
self._log_metrics(run_id, [metric])
def sanitize_metric_value(self, metric_value: float) -> tuple[bool, float]:
"""
Returns a tuple of two values:
- A boolean indicating whether the metric is NaN.
- The metric value, which is set to 0 if the metric is NaN.
"""
is_nan = math.isnan(metric_value)
if is_nan:
value = 0
elif math.isinf(metric_value):
# NB: Sql can not represent Infs = > We replace +/- Inf with max/min 64b float
# value
value = 1.7976931348623157e308 if metric_value > 0 else -1.7976931348623157e308
else:
value = metric_value
return is_nan, value
    def _log_metrics(self, run_id, metrics):
        """Insert metric rows for an active run, updating latest-metric summaries.

        Duplicate entries within ``metrics`` are dropped, and a primary-key
        violation (re-logging an identical metric) is recovered by re-inserting
        only the rows not already present.
        """
        # Duplicate metric values are eliminated here to maintain
        # the same behavior in log_metric
        metric_instances = []
        seen = set()
        is_single_metric = len(metrics) == 1
        for idx, metric in enumerate(metrics):
            _validate_metric(
                metric.key,
                metric.value,
                metric.timestamp,
                metric.step,
                path="" if is_single_metric else f"metrics[{idx}]",
            )
            if metric not in seen:
                is_nan, value = self.sanitize_metric_value(metric.value)
                metric_instances.append(
                    SqlMetric(
                        run_uuid=run_id,
                        key=metric.key,
                        value=value,
                        timestamp=metric.timestamp,
                        step=metric.step,
                        is_nan=is_nan,
                    )
                )
                seen.add(metric)
        with self.ManagedSessionMaker() as session:
            run = self._get_run(run_uuid=run_id, session=session)
            self._check_run_is_active(run)

            def _insert_metrics(metric_instances):
                session.add_all(metric_instances)
                self._update_latest_metrics_if_necessary(metric_instances, session)
                session.commit()

            try:
                _insert_metrics(metric_instances)
            except sqlalchemy.exc.IntegrityError:
                # Primary key can be violated if it is tried to log a metric with same value,
                # timestamp, step, and key within the same run.
                # Roll back the current session to make it usable for further transactions. In
                # the event of an error during "commit", a rollback is required in order to
                # continue using the session. In this case, we re-use the session to query
                # SqlMetric
                session.rollback()
                # Divide metric keys into batches of 100 to avoid loading too much metric
                # history data into memory at once
                metric_keys = [m.key for m in metric_instances]
                metric_key_batches = [
                    metric_keys[i : i + 100] for i in range(0, len(metric_keys), 100)
                ]
                for metric_key_batch in metric_key_batches:
                    # obtain the metric history corresponding to the given metrics
                    metric_history = (
                        session.query(SqlMetric)
                        .filter(
                            SqlMetric.run_uuid == run_id,
                            SqlMetric.key.in_(metric_key_batch),
                        )
                        .all()
                    )
                    # convert to a set of Metric instance to take advantage of its hashable
                    # and then obtain the metrics that were not logged earlier within this
                    # run_id
                    metric_history = {m.to_mlflow_entity() for m in metric_history}
                    non_existing_metrics = [
                        m for m in metric_instances if m.to_mlflow_entity() not in metric_history
                    ]
                    # if there exist metrics that were tried to be logged & rolled back even
                    # though they were not violating the PK, log them
                    _insert_metrics(non_existing_metrics)
    def _log_model_metrics(
        self,
        run_id: str,
        metrics: list[Metric],
        experiment_id: str,
        dataset_uuid: str | None = None,
    ) -> None:
        """Insert logged-model metric rows for metrics that carry a ``model_id``.

        Metrics without a ``model_id`` and duplicates are skipped. A primary-key
        violation is recovered by re-inserting only the rows not already present,
        mirroring ``_log_metrics``.
        """
        if not metrics:
            return
        is_single_metric = len(metrics) == 1
        seen: set[Metric] = set()
        sanitized_metrics: list[tuple[Metric, float]] = []
        for idx, metric in enumerate(metrics):
            if metric.model_id is None:
                continue
            if metric in seen:
                continue
            seen.add(metric)
            _validate_metric(
                metric.key,
                metric.value,
                metric.timestamp,
                metric.step,
                path="" if is_single_metric else f"metrics[{idx}]",
            )
            _, value = self.sanitize_metric_value(metric.value)
            sanitized_metrics.append((metric, value))
        if not sanitized_metrics:
            return
        with self.ManagedSessionMaker() as session:
            metric_instances = [
                SqlLoggedModelMetric(
                    model_id=metric.model_id,
                    metric_name=metric.key,
                    metric_timestamp_ms=metric.timestamp,
                    metric_step=metric.step,
                    metric_value=value,
                    experiment_id=experiment_id,
                    run_id=run_id,
                    dataset_uuid=dataset_uuid,
                    dataset_name=metric.dataset_name,
                    dataset_digest=metric.dataset_digest,
                )
                for metric, value in sanitized_metrics
            ]
            try:
                session.add_all(metric_instances)
                session.commit()
            except sqlalchemy.exc.IntegrityError:
                # Primary key can be violated if it is tried to log a metric with same value,
                # timestamp, step, and key within the same run.
                session.rollback()
                # Batch key lookups to bound the amount of history loaded at once.
                metric_keys = [m.metric_name for m in metric_instances]
                metric_key_batches = (
                    metric_keys[i : i + 100] for i in range(0, len(metric_keys), 100)
                )
                for batch in metric_key_batches:
                    existing_metrics = (
                        session.query(SqlLoggedModelMetric)
                        .filter(
                            SqlLoggedModelMetric.run_id == run_id,
                            SqlLoggedModelMetric.metric_name.in_(batch),
                        )
                        .all()
                    )
                    existing_metrics = {m.to_mlflow_entity() for m in existing_metrics}
                    non_existing_metrics = [
                        m for m in metric_instances if m.to_mlflow_entity() not in existing_metrics
                    ]
                    session.add_all(non_existing_metrics)
    def _update_latest_metrics_if_necessary(self, logged_metrics, session):
        """Refresh ``SqlLatestMetric`` rows so each metric key reflects its most
        recent logged value (ordered by step, then timestamp, then value).

        Rows for keys that already exist are locked ``FOR UPDATE`` to keep the
        comparison-and-overwrite isolated from concurrent writers.
        """

        def _compare_metrics(metric_a, metric_b):
            """
            Returns:
                True if ``metric_a`` is strictly more recent than ``metric_b``, as determined
                by ``step``, ``timestamp``, and ``value``. False otherwise.
            """
            return (metric_a.step, metric_a.timestamp, metric_a.value) > (
                metric_b.step,
                metric_b.timestamp,
                metric_b.value,
            )

        def _overwrite_metric(new_metric, old_metric):
            """
            Writes content of new_metric over old_metric. The content are `value`, `step`,
            `timestamp`, and `is_nan`.

            Returns:
                old_metric with its content updated.
            """
            old_metric.value = new_metric.value
            old_metric.step = new_metric.step
            old_metric.timestamp = new_metric.timestamp
            old_metric.is_nan = new_metric.is_nan
            return old_metric

        if not logged_metrics:
            return
        # Fetch the latest metric value corresponding to the specified run_id and metric keys and
        # lock their associated rows for the remainder of the transaction in order to ensure
        # isolation
        latest_metrics = {}
        metric_keys = [m.key for m in logged_metrics]
        # Divide metric keys into batches of 500 to avoid binding too many parameters to the SQL
        # query, which may produce limit exceeded errors or poor performance on certain database
        # platforms
        metric_key_batches = [metric_keys[i : i + 500] for i in range(0, len(metric_keys), 500)]
        for metric_key_batch in metric_key_batches:
            # First, determine which metric keys are present in the database
            latest_metrics_key_records_from_db = (
                session.query(SqlLatestMetric.key)
                .filter(
                    SqlLatestMetric.run_uuid == logged_metrics[0].run_uuid,
                    SqlLatestMetric.key.in_(metric_key_batch),
                )
                .all()
            )
            # Then, take a write lock on the rows corresponding to metric keys that are present,
            # ensuring that they aren't modified by another transaction until they can be
            # compared to the metric values logged by this transaction while avoiding gap locking
            # and next-key locking which may otherwise occur when issuing a `SELECT FOR UPDATE`
            # against nonexistent rows
            if len(latest_metrics_key_records_from_db) > 0:
                latest_metric_keys_from_db = [
                    record[0] for record in latest_metrics_key_records_from_db
                ]
                latest_metrics_batch = (
                    session.query(SqlLatestMetric)
                    .filter(
                        SqlLatestMetric.run_uuid == logged_metrics[0].run_uuid,
                        SqlLatestMetric.key.in_(latest_metric_keys_from_db),
                    )
                    # Order by the metric run ID and key to ensure a consistent locking order
                    # across transactions, reducing deadlock likelihood
                    .order_by(SqlLatestMetric.run_uuid, SqlLatestMetric.key)
                    .with_for_update()
                    .all()
                )
                latest_metrics.update({m.key: m for m in latest_metrics_batch})
        # iterate over all logged metrics and compare them with corresponding
        # SqlLatestMetric entries
        # if there's no SqlLatestMetric entry for the current metric key,
        # create a new SqlLatestMetric instance and put it in
        # new_latest_metric_dict so that they can be saved later.
        new_latest_metric_dict = {}
        for logged_metric in logged_metrics:
            latest_metric = latest_metrics.get(logged_metric.key)
            # a metric key can be passed more then once within logged metrics
            # with different step/timestamp/value. However SqlLatestMetric
            # entries are inserted after this loop is completed.
            # so, retrieve the instances they were just created and use them
            # for comparison.
            new_latest_metric = new_latest_metric_dict.get(logged_metric.key)
            # just create a new SqlLatestMetric instance since both
            # latest_metric row or recently created instance does not exist
            if not latest_metric and not new_latest_metric:
                new_latest_metric = SqlLatestMetric(
                    run_uuid=logged_metric.run_uuid,
                    key=logged_metric.key,
                    value=logged_metric.value,
                    timestamp=logged_metric.timestamp,
                    step=logged_metric.step,
                    is_nan=logged_metric.is_nan,
                )
                new_latest_metric_dict[logged_metric.key] = new_latest_metric
            # there's no row but a new instance is recently created.
            # so, update the recent instance in new_latest_metric_dict if
            # metric comparison is successful.
            elif not latest_metric and new_latest_metric:
                if _compare_metrics(logged_metric, new_latest_metric):
                    new_latest_metric = _overwrite_metric(logged_metric, new_latest_metric)
                    new_latest_metric_dict[logged_metric.key] = new_latest_metric
            # compare with the row
            elif _compare_metrics(logged_metric, latest_metric):
                # editing the attributes of latest_metric, which is a
                # SqlLatestMetric instance will result in UPDATE in DB side.
                latest_metric = _overwrite_metric(logged_metric, latest_metric)
        if new_latest_metric_dict:
            session.add_all(new_latest_metric_dict.values())
    def get_metric_history(self, run_id, metric_key, max_results=None, page_token=None):
        """
        Return all logged values for a given metric.

        Args:
            run_id: Unique identifier for run.
            metric_key: Metric name within the run.
            max_results: An indicator for paginated results.
            page_token: Token indicating the page of metric history to fetch.

        Returns:
            A :py:class:`mlflow.store.entities.paged_list.PagedList` of
            :py:class:`mlflow.entities.Metric` entities if ``metric_key`` values
            have been logged to the ``run_id``, else an empty list.
        """
        with self.ManagedSessionMaker() as session:
            query = session.query(SqlMetric).filter_by(run_uuid=run_id, key=metric_key)

            # Parse offset from page_token for pagination
            offset = SearchUtils.parse_start_offset_from_page_token(page_token)

            # Add ORDER BY clause to satisfy MSSQL requirement for OFFSET
            query = query.order_by(SqlMetric.timestamp, SqlMetric.step, SqlMetric.value)
            query = query.offset(offset)
            if max_results is not None:
                # Fetch one extra row to detect whether another page exists.
                query = query.limit(max_results + 1)

            metrics = query.all()

            # Compute next token if more results are available
            next_token = None
            if max_results is not None and len(metrics) == max_results + 1:
                final_offset = offset + max_results
                next_token = SearchUtils.create_page_token(final_offset)
                metrics = metrics[:max_results]

            return PagedList([metric.to_mlflow_entity() for metric in metrics], next_token)
    def get_metric_history_bulk(self, run_ids, metric_key, max_results):
        """
        Return all logged values for a given metric.

        Args:
            run_ids: Unique identifiers of the runs from which to fetch the metric histories for
                the specified key.
            metric_key: Metric name within the runs.
            max_results: The maximum number of results to return.

        Returns:
            A List of SqlAlchemyStore.MetricWithRunId objects if metric_key values have been logged
            to one or more of the specified run_ids, else an empty list. Results are sorted by run
            ID in lexicographically ascending order, followed by timestamp, step, and value in
            numerically ascending order.
        """
        # NB: This API does not support pagination; results are simply capped at
        # ``max_results`` rows across all requested runs.
        with self.ManagedSessionMaker() as session:
            metrics = (
                session.query(SqlMetric)
                .filter(
                    SqlMetric.key == metric_key,
                    SqlMetric.run_uuid.in_(run_ids),
                )
                .order_by(
                    SqlMetric.run_uuid,
                    SqlMetric.timestamp,
                    SqlMetric.step,
                    SqlMetric.value,
                )
                .limit(max_results)
                .all()
            )
            return [
                MetricWithRunId(
                    run_id=metric.run_uuid,
                    metric=metric.to_mlflow_entity(),
                )
                for metric in metrics
            ]
def get_max_step_for_metric(self, run_id, metric_key):
with self.ManagedSessionMaker() as session:
max_step = (
session.query(func.max(SqlMetric.step))
.filter(SqlMetric.run_uuid == run_id, SqlMetric.key == metric_key)
.scalar()
)
return max_step or 0
def get_metric_history_bulk_interval_from_steps(self, run_id, metric_key, steps, max_results):
with self.ManagedSessionMaker() as session:
metrics = (
session.query(SqlMetric)
.filter(
SqlMetric.key == metric_key,
SqlMetric.run_uuid == run_id,
SqlMetric.step.in_(steps),
)
.order_by(
SqlMetric.run_uuid,
SqlMetric.step,
SqlMetric.timestamp,
SqlMetric.value,
)
.limit(max_results)
.all()
)
return [
MetricWithRunId(
run_id=metric.run_uuid,
metric=metric.to_mlflow_entity(),
)
for metric in metrics
]
    def _search_datasets(self, experiment_ids):
        """
        Return all dataset summaries associated to the given experiments.
        Args:
            experiment_ids: List of experiment ids to scope the search
        Returns:
            A List of :py:class:`SqlAlchemyStore.DatasetSummary` entities.
        """
        # Hard cap on the number of distinct summary rows returned to the caller.
        MAX_DATASET_SUMMARIES_RESULTS = 1000
        experiment_ids = [int(e) for e in experiment_ids]
        with self.ManagedSessionMaker() as session:
            # Note that the join with the input tag table is a left join. This is required so if an
            # input does not have the MLFLOW_DATASET_CONTEXT tag, we still return that entry as part
            # of the final result with the context set to None.
            summaries = (
                session.query(
                    SqlDataset.experiment_id,
                    SqlDataset.name,
                    SqlDataset.digest,
                    SqlInputTag.value,
                )
                .select_from(SqlDataset)
                .distinct()
                # Only datasets that are actually linked to something via an input edge.
                .join(SqlInput, SqlInput.source_id == SqlDataset.dataset_uuid)
                .join(
                    SqlInputTag,
                    and_(
                        SqlInput.input_uuid == SqlInputTag.input_uuid,
                        SqlInputTag.name == MLFLOW_DATASET_CONTEXT,
                    ),
                    isouter=True,
                )
                .filter(SqlDataset.experiment_id.in_(experiment_ids))
                .limit(MAX_DATASET_SUMMARIES_RESULTS)
                .all()
            )
            # `summary.value` is the MLFLOW_DATASET_CONTEXT tag value, or None (left join).
            return [
                _DatasetSummary(
                    experiment_id=str(summary.experiment_id),
                    name=summary.name,
                    digest=summary.digest,
                    context=summary.value,
                )
                for summary in summaries
            ]
    def log_param(self, run_id, param):
        """Log a single param for a run, rejecting changes to an already-logged value.
        Args:
            run_id: String ID of the run.
            param: Param instance (with ``key`` and ``value``) to log.
        Raises:
            MlflowException: With INVALID_PARAMETER_VALUE if the key was already logged
                with a different value; re-raises the original IntegrityError otherwise.
        """
        param = _validate_param(param.key, param.value)
        with self.ManagedSessionMaker() as session:
            run = self._get_run(run_uuid=run_id, session=session)
            self._check_run_is_active(run)
            # if we try to update the value of an existing param this will fail
            # because it will try to create it with same run_uuid, param key
            try:
                # This will check for various integrity checks for params table.
                # ToDo: Consider prior checks for null, type, param name validations, ... etc.
                self._get_or_create(
                    model=SqlParam,
                    session=session,
                    run_uuid=run_id,
                    key=param.key,
                    value=param.value,
                )
                # Explicitly commit the session in order to catch potential integrity errors
                # while maintaining the current managed session scope ("commit" checks that
                # a transaction satisfies uniqueness constraints and throws integrity errors
                # when they are violated; "get_or_create()" does not perform these checks). It is
                # important that we maintain the same session scope because, in the case of
                # an integrity error, we want to examine the uniqueness of parameter values using
                # the same database state that the session uses during "commit". Creating a new
                # session synchronizes the state with the database. As a result, if the conflicting
                # parameter value were to be removed prior to the creation of a new session,
                # we would be unable to determine the cause of failure for the first session's
                # "commit" operation.
                session.commit()
            except sqlalchemy.exc.IntegrityError:
                # Roll back the current session to make it usable for further transactions. In the
                # event of an error during "commit", a rollback is required in order to continue
                # using the session. In this case, we re-use the session because the SqlRun, `run`,
                # is lazily evaluated during the invocation of `run.params`.
                session.rollback()
                existing_params = [p.value for p in run.params if p.key == param.key]
                if len(existing_params) > 0:
                    old_value = existing_params[0]
                    if old_value != param.value:
                        raise MlflowException(
                            "Changing param values is not allowed. Param with key='{}' was already"
                            " logged with value='{}' for run ID='{}'. Attempted logging new value"
                            " '{}'.".format(param.key, old_value, run_id, param.value),
                            INVALID_PARAMETER_VALUE,
                        )
                else:
                    # IntegrityError was not caused by this param key; surface it unchanged.
                    raise
def _log_params(self, run_id, params):
if not params:
return
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
existing_params = {p.key: p.value for p in run.params}
new_params = []
non_matching_params = []
for param in params:
if param.key in existing_params:
if param.value != existing_params[param.key]:
non_matching_params.append(
{
"key": param.key,
"old_value": existing_params[param.key],
"new_value": param.value,
}
)
continue
new_params.append(SqlParam(run_uuid=run_id, key=param.key, value=param.value))
if non_matching_params:
raise MlflowException(
"Changing param values is not allowed. Params were already"
f" logged='{non_matching_params}' for run ID='{run_id}'.",
INVALID_PARAMETER_VALUE,
)
if not new_params:
return
session.add_all(new_params)
def set_experiment_tag(self, experiment_id, tag):
"""
Set a tag for the specified experiment
Args:
experiment_id: String ID of the experiment
tag: ExperimentRunTag instance to log
"""
_validate_experiment_tag(tag.key, tag.value)
with self.ManagedSessionMaker() as session:
tag = _validate_tag(tag.key, tag.value)
experiment = self._get_experiment(
session, experiment_id, ViewType.ALL
).to_mlflow_entity()
self._check_experiment_is_active(experiment)
session.merge(
SqlExperimentTag(experiment_id=experiment_id, key=tag.key, value=tag.value)
)
def delete_experiment_tag(self, experiment_id, key):
"""
Delete a tag from the specified experiment
Args:
experiment_id: String ID of the experiment
key: String name of the tag to be deleted
"""
with self.ManagedSessionMaker() as session:
experiment = self._get_experiment(
session, experiment_id, ViewType.ALL
).to_mlflow_entity()
self._check_experiment_is_active(experiment)
filtered_tags = (
session.query(SqlExperimentTag)
.filter_by(experiment_id=int(experiment_id), key=key)
.all()
)
if len(filtered_tags) == 0:
raise MlflowException(
f"No tag with name: {key} in experiment with id {experiment_id}",
error_code=RESOURCE_DOES_NOT_EXIST,
)
elif len(filtered_tags) > 1:
raise MlflowException(
"Bad data in database - tags for a specific experiment must have "
"a single unique value. "
"See https://mlflow.org/docs/latest/ml/getting-started/logging-first-model/step3-create-experiment/#notes-on-tags-vs-experiments",
error_code=INVALID_STATE,
)
session.delete(filtered_tags[0])
def set_tag(self, run_id, tag):
"""
Set a tag on a run.
Args:
run_id: String ID of the run.
tag: RunTag instance to log.
"""
with self.ManagedSessionMaker() as session:
tag = _validate_tag(tag.key, tag.value)
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
if tag.key == MLFLOW_RUN_NAME:
run_status = RunStatus.from_string(run.status)
self.update_run_info(run_id, run_status, run.end_time, tag.value)
else:
# NB: Updating the run_info will set the tag. No need to do it twice.
session.merge(SqlTag(run_uuid=run_id, key=tag.key, value=tag.value))
def _set_tags(self, run_id, tags):
"""
Set multiple tags on a run
Args:
run_id: String ID of the run
tags: List of RunTag instances to log
path: current json path for error messages
"""
if not tags:
return
tags = [_validate_tag(t.key, t.value, path=f"tags[{idx}]") for (idx, t) in enumerate(tags)]
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
def _try_insert_tags(attempt_number, max_retries):
try:
current_tags = (
session.query(SqlTag)
.filter(
SqlTag.run_uuid == run_id,
SqlTag.key.in_([t.key for t in tags]),
)
.all()
)
current_tags = {t.key: t for t in current_tags}
new_tag_dict = {}
for tag in tags:
# NB: If the run name tag is explicitly set, update the run info attribute
# and do not resubmit the tag for overwrite as the tag will be set within
# `set_tag()` with a call to `update_run_info()`
if tag.key == MLFLOW_RUN_NAME:
self.set_tag(run_id, tag)
else:
current_tag = current_tags.get(tag.key)
new_tag = new_tag_dict.get(tag.key)
# update the SqlTag if it is already present in DB
if current_tag:
current_tag.value = tag.value
continue
# if a SqlTag instance is already present in `new_tag_dict`,
# this means that multiple tags with the same key were passed to
# `set_tags`.
# In this case, we resolve potential conflicts by updating the value
# of the existing instance to the value of `tag`
if new_tag:
new_tag.value = tag.value
# otherwise, put it into the dict
else:
new_tag = SqlTag(run_uuid=run_id, key=tag.key, value=tag.value)
new_tag_dict[tag.key] = new_tag
# finally, save new entries to DB.
session.add_all(new_tag_dict.values())
session.commit()
except sqlalchemy.exc.IntegrityError:
session.rollback()
# two concurrent operations may try to attempt to insert tags.
# apply retry here.
if attempt_number > max_retries:
raise MlflowException(
"Failed to set tags with given within {} retries. Keys: {}".format(
max_retries, [t.key for t in tags]
)
)
sleep_duration = (2**attempt_number) - 1
sleep_duration += random.uniform(0, 1)
time.sleep(sleep_duration)
_try_insert_tags(attempt_number + 1, max_retries=max_retries)
_try_insert_tags(attempt_number=0, max_retries=3)
def delete_tag(self, run_id, key):
"""
Delete a tag from a run. This is irreversible.
Args:
run_id: String ID of the run
key: Name of the tag
"""
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
filtered_tags = session.query(SqlTag).filter_by(run_uuid=run_id, key=key).all()
if len(filtered_tags) == 0:
raise MlflowException(
f"No tag with name: {key} in run with id {run_id}",
error_code=RESOURCE_DOES_NOT_EXIST,
)
elif len(filtered_tags) > 1:
raise MlflowException(
"Bad data in database - tags for a specific run must have "
"a single unique value. "
"See https://mlflow.org/docs/latest/tracking.html#adding-tags-to-runs",
error_code=INVALID_STATE,
)
session.delete(filtered_tags[0])
    def _search_runs(
        self,
        experiment_ids,
        filter_string,
        run_view_type,
        max_results,
        order_by,
        page_token,
    ):
        """Search runs across experiments with filtering, ordering, and offset pagination.
        Returns a tuple of (list of Run entities with inputs/outputs attached,
        next page token or None). A next token is produced only when a full page
        of `max_results` runs was returned, signalling more results may exist.
        """
        # Closure over `offset` (assigned below, before this is called).
        def compute_next_token(current_size):
            next_token = None
            if max_results == current_size:
                final_offset = offset + max_results
                next_token = SearchUtils.create_page_token(final_offset)
            return next_token
        self._validate_max_results_param(max_results, allow_null=True)
        stages = set(LifecycleStage.view_type_to_stages(run_view_type))
        with self.ManagedSessionMaker() as session:
            # Fetch the appropriate runs and eagerly load their summary metrics, params, and
            # tags. These run attributes are referenced during the invocation of
            # ``run.to_mlflow_entity()``, so eager loading helps avoid additional database queries
            # that are otherwise executed at attribute access time under a lazy loading model.
            parsed_filters = SearchUtils.parse_search_filter(filter_string)
            cases_orderby, parsed_orderby, sorting_joins = _get_orderby_clauses(order_by, session)
            stmt = select(SqlRun, *cases_orderby)
            (
                attribute_filters,
                non_attribute_filters,
                dataset_filters,
            ) = _get_sqlalchemy_filter_clauses(parsed_filters, session, self._get_dialect())
            for non_attr_filter in non_attribute_filters:
                stmt = stmt.join(non_attr_filter)
            for idx, dataset_filter in enumerate(dataset_filters):
                # need to reference the anon table in the join condition
                anon_table_name = f"anon_{idx + 1}"
                stmt = stmt.join(
                    dataset_filter,
                    text(f"runs.run_uuid = {anon_table_name}.destination_id"),
                )
            # using an outer join is necessary here because we want to be able to sort
            # on a column (tag, metric or param) without removing the lines that
            # do not have a value for this column (which is what inner join would do)
            for j in sorting_joins:
                stmt = stmt.outerjoin(j)
            offset = SearchUtils.parse_start_offset_from_page_token(page_token)
            experiment_ids = [int(e) for e in experiment_ids]
            stmt = (
                stmt.distinct()
                .options(*self._get_eager_run_query_options())
                .filter(
                    SqlRun.experiment_id.in_(experiment_ids),
                    SqlRun.lifecycle_stage.in_(stages),
                    *attribute_filters,
                )
                .order_by(*parsed_orderby)
                .offset(offset)
                .limit(max_results)
            )
            queried_runs = session.execute(stmt).scalars(SqlRun).all()
            runs = [run.to_mlflow_entity() for run in queried_runs]
            run_ids = [run.info.run_id for run in runs]
            # add inputs and outputs to runs
            inputs = self._get_run_inputs(run_uuids=run_ids, session=session)
            model_outputs_map = self._get_model_outputs_bulk(run_ids=run_ids, session=session)
            runs_with_inputs_outputs = []
            for i, run in enumerate(runs):
                runs_with_inputs_outputs.append(
                    Run(
                        run.info,
                        run.data,
                        RunInputs(dataset_inputs=inputs[i]),
                        RunOutputs(model_outputs_map[run.info.run_id]),
                    )
                )
            next_page_token = compute_next_token(len(runs_with_inputs_outputs))
            return runs_with_inputs_outputs, next_page_token
def log_batch(self, run_id, metrics, params, tags):
_validate_run_id(run_id)
metrics, params, tags = _validate_batch_log_data(metrics, params, tags)
_validate_batch_log_limits(metrics, params, tags)
_validate_param_keys_unique(params)
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
try:
self._log_params(run_id, params)
self._log_metrics(run_id, metrics)
self._log_model_metrics(run_id, metrics, experiment_id=run.experiment_id)
self._set_tags(run_id, tags)
except MlflowException as e:
raise e
except Exception as e:
raise MlflowException(e, INTERNAL_ERROR)
def record_logged_model(self, run_id, mlflow_model):
from mlflow.models import Model
if not isinstance(mlflow_model, Model):
raise TypeError(
f"Argument 'mlflow_model' should be mlflow.models.Model, got '{type(mlflow_model)}'"
)
model_dict = mlflow_model.get_tags_dict()
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
if previous_tag := [t for t in run.tags if t.key == MLFLOW_LOGGED_MODELS]:
value = json.dumps(json.loads(previous_tag[0].value) + [model_dict])
else:
value = json.dumps([model_dict])
_validate_tag(MLFLOW_LOGGED_MODELS, value)
session.merge(SqlTag(key=MLFLOW_LOGGED_MODELS, value=value, run_uuid=run_id))
def log_inputs(
self,
run_id: str,
datasets: list[DatasetInput] | None = None,
models: list[LoggedModelInput] | None = None,
):
"""
Log inputs, such as datasets, to the specified run.
Args:
run_id: String id for the run
datasets: List of :py:class:`mlflow.entities.DatasetInput` instances to log
as inputs to the run.
models: List of :py:class:`mlflow.entities.LoggedModelInput` instances to log
as inputs to the run.
Returns:
None.
"""
_validate_run_id(run_id)
if datasets is not None:
if not isinstance(datasets, list):
raise TypeError(f"Argument 'datasets' should be a list, got '{type(datasets)}'")
_validate_dataset_inputs(datasets)
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
experiment_id = run.experiment_id
self._check_run_is_active(run)
try:
self._log_inputs_impl(experiment_id, run_id, datasets, models)
except MlflowException as e:
raise e
except Exception as e:
raise MlflowException(e, INTERNAL_ERROR)
def _log_inputs_impl(
self,
experiment_id,
run_id,
dataset_inputs: list[DatasetInput] | None = None,
models: list[LoggedModelInput] | None = None,
):
dataset_inputs = dataset_inputs or []
for dataset_input in dataset_inputs:
if dataset_input.dataset is None:
raise MlflowException(
"Dataset input must have a dataset associated with it.",
INTERNAL_ERROR,
)
# dedup dataset_inputs list if two dataset inputs have the same name and digest
# keeping the first occurrence
name_digest_keys = {}
for dataset_input in dataset_inputs:
key = (dataset_input.dataset.name, dataset_input.dataset.digest)
if key not in name_digest_keys:
name_digest_keys[key] = dataset_input
dataset_inputs = list(name_digest_keys.values())
with self.ManagedSessionMaker() as session:
dataset_names_to_check = [
dataset_input.dataset.name for dataset_input in dataset_inputs
]
dataset_digests_to_check = [
dataset_input.dataset.digest for dataset_input in dataset_inputs
]
# find all datasets with the same name and digest
# if the dataset already exists, use the existing dataset uuid
existing_datasets = (
session.query(SqlDataset)
.filter(SqlDataset.name.in_(dataset_names_to_check))
.filter(SqlDataset.digest.in_(dataset_digests_to_check))
.all()
)
dataset_uuids = {}
for existing_dataset in existing_datasets:
dataset_uuids[(existing_dataset.name, existing_dataset.digest)] = (
existing_dataset.dataset_uuid
)
# collect all objects to write to DB in a single list
objs_to_write = []
# add datasets to objs_to_write
for dataset_input in dataset_inputs:
if (
dataset_input.dataset.name,
dataset_input.dataset.digest,
) not in dataset_uuids:
new_dataset_uuid = uuid.uuid4().hex
dataset_uuids[(dataset_input.dataset.name, dataset_input.dataset.digest)] = (
new_dataset_uuid
)
objs_to_write.append(
SqlDataset(
dataset_uuid=new_dataset_uuid,
experiment_id=experiment_id,
name=dataset_input.dataset.name,
digest=dataset_input.dataset.digest,
dataset_source_type=dataset_input.dataset.source_type,
dataset_source=dataset_input.dataset.source,
dataset_schema=dataset_input.dataset.schema,
dataset_profile=dataset_input.dataset.profile,
)
)
# find all inputs with the same source_id and destination_id
# if the input already exists, use the existing input uuid
existing_inputs = (
session.query(SqlInput)
.filter(SqlInput.source_type == "DATASET")
.filter(SqlInput.source_id.in_(dataset_uuids.values()))
.filter(SqlInput.destination_type == "RUN")
.filter(SqlInput.destination_id == run_id)
.all()
)
input_uuids = {}
for existing_input in existing_inputs:
input_uuids[(existing_input.source_id, existing_input.destination_id)] = (
existing_input.input_uuid
)
# add input edges to objs_to_write
for dataset_input in dataset_inputs:
dataset_uuid = dataset_uuids[
(dataset_input.dataset.name, dataset_input.dataset.digest)
]
if (dataset_uuid, run_id) not in input_uuids:
new_input_uuid = uuid.uuid4().hex
input_uuids[(dataset_input.dataset.name, dataset_input.dataset.digest)] = (
new_input_uuid
)
objs_to_write.append(
SqlInput(
input_uuid=new_input_uuid,
source_type="DATASET",
source_id=dataset_uuid,
destination_type="RUN",
destination_id=run_id,
)
)
# add input tags to objs_to_write
objs_to_write.extend(
SqlInputTag(
input_uuid=new_input_uuid,
name=input_tag.key,
value=input_tag.value,
)
for input_tag in dataset_input.tags
)
if models:
for model in models:
session.merge(
SqlInput(
input_uuid=uuid.uuid4().hex,
source_type="RUN_INPUT",
source_id=run_id,
destination_type="MODEL_INPUT",
destination_id=model.model_id,
)
)
session.add_all(objs_to_write)
def log_outputs(self, run_id: str, models: list[LoggedModelOutput]):
with self.ManagedSessionMaker() as session:
run = self._get_run(run_uuid=run_id, session=session)
self._check_run_is_active(run)
session.add_all(
SqlInput(
input_uuid=uuid.uuid4().hex,
source_type="RUN_OUTPUT",
source_id=run_id,
destination_type="MODEL_OUTPUT",
destination_id=model.model_id,
step=model.step,
)
for model in models
)
    def _get_model_inputs(
        self,
        run_id: str,
        session: sqlalchemy.orm.Session | None = None,
    ) -> list[LoggedModelInput]:
        """Return models recorded as inputs to *run_id* via RUN_INPUT -> MODEL_INPUT edges.
        NOTE(review): `session` defaults to None but is used unconditionally below,
        so callers must always pass an open session; invoking this with the default
        would raise AttributeError. Consider making the parameter required.
        """
        return [
            LoggedModelInput(model_id=input.destination_id)
            for input in (
                session.query(SqlInput)
                .filter(
                    SqlInput.source_type == "RUN_INPUT",
                    SqlInput.source_id == run_id,
                    SqlInput.destination_type == "MODEL_INPUT",
                )
                .all()
            )
        ]
def _get_model_outputs(
self,
run_id: str,
session: sqlalchemy.orm.Session,
) -> list[LoggedModelOutput]:
return [
LoggedModelOutput(model_id=output.destination_id, step=output.step)
for output in session.query(SqlInput)
.filter(
SqlInput.source_type == "RUN_OUTPUT",
SqlInput.source_id == run_id,
SqlInput.destination_type == "MODEL_OUTPUT",
)
.all()
]
def _get_model_outputs_bulk(
self,
run_ids: list[str],
session: sqlalchemy.orm.Session,
) -> dict[str, list[LoggedModelOutput]]:
"""
Fetch model outputs for multiple runs in a single query.
Returns a dict mapping run_id to list of LoggedModelOutput.
"""
outputs = (
session.query(SqlInput)
.filter(
SqlInput.source_type == "RUN_OUTPUT",
SqlInput.source_id.in_(run_ids),
SqlInput.destination_type == "MODEL_OUTPUT",
)
.all()
)
outputs_per_run = defaultdict(list)
for output in outputs:
outputs_per_run[output.source_id].append(
LoggedModelOutput(model_id=output.destination_id, step=output.step)
)
# Ensure all run_ids are present in the result, even if they have no outputs
return {run_id: outputs_per_run.get(run_id, []) for run_id in run_ids}
#######################################################################################
# Logged models
#######################################################################################
    def create_logged_model(
        self,
        experiment_id: str,
        name: str | None = None,
        source_run_id: str | None = None,
        tags: list[LoggedModelTag] | None = None,
        params: list[LoggedModelParameter] | None = None,
        model_type: str | None = None,
    ) -> LoggedModel:
        """Create a new logged model under the given experiment.
        A random name is generated when *name* is None. The model starts in PENDING
        status with an artifact location derived from the experiment's location.
        Returns the created LoggedModel entity.
        """
        _validate_logged_model_name(name)
        with self.ManagedSessionMaker() as session:
            experiment = self.get_experiment(experiment_id)
            self._check_experiment_is_active(experiment)
            # Model IDs carry an "m-" prefix followed by a dash-less UUID4.
            model_id = f"m-{str(uuid.uuid4()).replace('-', '')}"
            artifact_location = append_to_uri_path(
                experiment.artifact_location,
                SqlAlchemyStore.MODELS_FOLDER_NAME,
                model_id,
                SqlAlchemyStore.ARTIFACTS_FOLDER_NAME,
            )
            name = name or _generate_random_name()
            creation_timestamp = get_current_time_millis()
            logged_model = SqlLoggedModel(
                model_id=model_id,
                experiment_id=experiment_id,
                name=name,
                artifact_location=artifact_location,
                creation_timestamp_ms=creation_timestamp,
                last_updated_timestamp_ms=creation_timestamp,
                model_type=model_type,
                status=LoggedModelStatus.PENDING.to_int(),
                lifecycle_stage=LifecycleStage.ACTIVE,
                source_run_id=source_run_id,
            )
            session.add(logged_model)
            if params:
                session.add_all(
                    SqlLoggedModelParam(
                        model_id=logged_model.model_id,
                        experiment_id=experiment_id,
                        param_key=param.key,
                        param_value=param.value,
                    )
                    for param in params
                )
            if tags:
                session.add_all(
                    SqlLoggedModelTag(
                        model_id=logged_model.model_id,
                        experiment_id=experiment_id,
                        tag_key=tag.key,
                        tag_value=tag.value,
                    )
                    for tag in tags
                )
            # Commit before converting so the entity reflects the persisted row.
            session.commit()
            return logged_model.to_mlflow_entity()
def log_logged_model_params(self, model_id: str, params: list[LoggedModelParameter]):
with self.ManagedSessionMaker() as session:
logged_model = session.get(SqlLoggedModel, model_id)
if not logged_model:
self._raise_model_not_found(model_id)
session.add_all(
SqlLoggedModelParam(
model_id=model_id,
experiment_id=logged_model.experiment_id,
param_key=param.key,
param_value=param.value,
)
for param in params
)
def _raise_model_not_found(self, model_id: str):
raise MlflowException(
f"Logged model with ID '{model_id}' not found.",
RESOURCE_DOES_NOT_EXIST,
)
def get_logged_model(self, model_id: str, allow_deleted: bool = False) -> LoggedModel:
with self.ManagedSessionMaker() as session:
query = session.query(SqlLoggedModel).filter(SqlLoggedModel.model_id == model_id)
if not allow_deleted:
query = query.filter(SqlLoggedModel.lifecycle_stage != LifecycleStage.DELETED)
logged_model = query.first()
if not logged_model:
self._raise_model_not_found(model_id)
return logged_model.to_mlflow_entity()
def delete_logged_model(self, model_id):
with self.ManagedSessionMaker() as session:
logged_model = session.get(SqlLoggedModel, model_id)
if not logged_model:
self._raise_model_not_found(model_id)
logged_model.lifecycle_stage = LifecycleStage.DELETED
logged_model.last_updated_timestamp_ms = get_current_time_millis()
session.commit()
def _hard_delete_logged_model(self, model_id):
with self.ManagedSessionMaker() as session:
logged_model = session.get(SqlLoggedModel, model_id)
if not logged_model:
self._raise_model_not_found(model_id)
session.delete(logged_model)
def _get_deleted_logged_models(self, older_than=0):
current_time = get_current_time_millis()
with self.ManagedSessionMaker() as session:
models = (
session.query(SqlLoggedModel)
.filter(
SqlLoggedModel.lifecycle_stage == LifecycleStage.DELETED,
SqlLoggedModel.last_updated_timestamp_ms <= (current_time - older_than),
)
.all()
)
return [m.model_id for m in models]
def finalize_logged_model(self, model_id: str, status: LoggedModelStatus) -> LoggedModel:
with self.ManagedSessionMaker() as session:
logged_model = session.get(SqlLoggedModel, model_id)
if not logged_model:
self._raise_model_not_found(model_id)
logged_model.status = status.to_int()
logged_model.last_updated_timestamp_ms = get_current_time_millis()
session.commit()
return logged_model.to_mlflow_entity()
def set_logged_model_tags(self, model_id: str, tags: list[LoggedModelTag]) -> None:
with self.ManagedSessionMaker() as session:
logged_model = session.get(SqlLoggedModel, model_id)
if not logged_model:
self._raise_model_not_found(model_id)
# TODO: Consider upserting tags in a single transaction for performance
for tag in tags:
session.merge(
SqlLoggedModelTag(
model_id=model_id,
experiment_id=logged_model.experiment_id,
tag_key=tag.key,
tag_value=tag.value,
)
)
def delete_logged_model_tag(self, model_id: str, key: str) -> None:
with self.ManagedSessionMaker() as session:
logged_model = session.get(SqlLoggedModel, model_id)
if not logged_model:
self._raise_model_not_found(model_id)
count = (
session.query(SqlLoggedModelTag)
.filter(
SqlLoggedModelTag.model_id == model_id,
SqlLoggedModelTag.tag_key == key,
)
.delete()
)
if count == 0:
raise MlflowException(
f"No tag with key {key!r} found for model with ID {model_id!r}.",
RESOURCE_DOES_NOT_EXIST,
)
    def register_scorer(
        self, experiment_id: str, name: str, serialized_scorer: str
    ) -> ScorerVersion:
        """
        Register a scorer for an experiment.
        Args:
            experiment_id: The experiment ID.
            name: The scorer name.
            serialized_scorer: The serialized scorer string (JSON).
        Returns:
            mlflow.entities.ScorerVersion: The newly registered scorer version with scorer_id.
        """
        with self.ManagedSessionMaker() as session:
            # Validate experiment exists and is active
            experiment = self.get_experiment(experiment_id)
            self._check_experiment_is_active(experiment)
            # First, check if the scorer exists in the scorers table
            scorer = (
                session.query(SqlScorer)
                .filter(
                    SqlScorer.experiment_id == experiment_id,
                    SqlScorer.scorer_name == name,
                )
                .first()
            )
            if scorer is None:
                # Create the scorer record with a new UUID
                scorer_id = str(uuid.uuid4())
                scorer = SqlScorer(
                    experiment_id=experiment_id,
                    scorer_name=name,
                    scorer_id=scorer_id,
                )
                session.add(scorer)
                # Flush so the scorer row is persisted before the version row references it.
                session.flush()  # Flush to get the scorer record
            # Find the maximum version for this scorer
            max_version = (
                session.query(func.max(SqlScorerVersion.scorer_version))
                .filter(SqlScorerVersion.scorer_id == scorer.scorer_id)
                .scalar()
            )
            # Set new version (1 if no existing scorer, otherwise max + 1)
            new_version = 1 if max_version is None else max_version + 1
            # Create and save the new scorer version record
            sql_scorer_version = SqlScorerVersion(
                scorer_id=scorer.scorer_id,
                scorer_version=new_version,
                serialized_scorer=serialized_scorer,
            )
            session.add(sql_scorer_version)
            # Flush before conversion so generated columns are populated on the row.
            session.flush()
            return sql_scorer_version.to_mlflow_entity()
    def list_scorers(self, experiment_id) -> list[ScorerVersion]:
        """
        List all scorers for an experiment.
        Args:
            experiment_id: The experiment ID.
        Returns:
            List of mlflow.entities.scorer.ScorerVersion objects
            (latest version for each scorer name).
        """
        with self.ManagedSessionMaker() as session:
            # Validate experiment exists and is active
            experiment = self.get_experiment(experiment_id)
            self._check_experiment_is_active(experiment)
            # First, get all scorer_ids for this experiment
            scorer_ids = [
                scorer.scorer_id
                for scorer in session.query(SqlScorer.scorer_id)
                .filter(SqlScorer.experiment_id == experiment.experiment_id)
                .all()
            ]
            if not scorer_ids:
                return []
            # Query the latest version for each scorer_id
            # (a GROUP BY subquery mapping scorer_id -> max version).
            latest_versions = (
                session.query(
                    SqlScorerVersion.scorer_id,
                    func.max(SqlScorerVersion.scorer_version).label("max_version"),
                )
                .filter(SqlScorerVersion.scorer_id.in_(scorer_ids))
                .group_by(SqlScorerVersion.scorer_id)
                .subquery()
            )
            # Query the actual scorer version records with the latest versions
            sql_scorer_versions = (
                session.query(SqlScorerVersion)
                .join(
                    latest_versions,
                    (SqlScorerVersion.scorer_id == latest_versions.c.scorer_id)
                    & (SqlScorerVersion.scorer_version == latest_versions.c.max_version),
                )
                .join(SqlScorer, SqlScorerVersion.scorer_id == SqlScorer.scorer_id)
                .order_by(SqlScorer.scorer_name)
                .all()
            )
            return [
                sql_scorer_version.to_mlflow_entity() for sql_scorer_version in sql_scorer_versions
            ]
def get_scorer(self, experiment_id, name, version=None) -> ScorerVersion:
"""
Get a specific scorer for an experiment.
Args:
experiment_id: The experiment ID.
name: The scorer name.
version: The scorer version. If None, returns the scorer with
maximum version.
Returns:
A ScorerVersion entity object.
Raises:
MlflowException: If scorer is not found.
"""
with self.ManagedSessionMaker() as session:
# Validate experiment exists and is active
experiment = self.get_experiment(experiment_id)
self._check_experiment_is_active(experiment)
# First, get the scorer record
scorer = (
session.query(SqlScorer)
.filter(
SqlScorer.experiment_id == experiment.experiment_id,
SqlScorer.scorer_name == name,
)
.first()
)
if scorer is None:
raise MlflowException(
f"Scorer with name '{name}' not found for experiment {experiment_id}.",
RESOURCE_DOES_NOT_EXIST,
)
# Build query for scorer versions
query = session.query(SqlScorerVersion).filter(
SqlScorerVersion.scorer_id == scorer.scorer_id
)
if version is not None:
# Get specific version
sql_scorer_version = query.filter(
SqlScorerVersion.scorer_version == version
).first()
if sql_scorer_version is None:
raise MlflowException(
f"Scorer with name '{name}' and version {version} not found for "
f"experiment {experiment_id}.",
RESOURCE_DOES_NOT_EXIST,
)
else:
# Get maximum version
sql_scorer_version = query.order_by(SqlScorerVersion.scorer_version.desc()).first()
if sql_scorer_version is None:
raise MlflowException(
f"Scorer with name '{name}' not found for experiment {experiment_id}.",
RESOURCE_DOES_NOT_EXIST,
)
return sql_scorer_version.to_mlflow_entity()
def delete_scorer(self, experiment_id, name, version=None) -> None:
"""
Delete a scorer for an experiment.
Args:
experiment_id: The experiment ID.
name: The scorer name.
version: The scorer version to delete. If None, deletes all versions.
Raises:
MlflowException: If scorer is not found.
"""
with self.ManagedSessionMaker() as session:
# Validate experiment exists and is active
experiment = self.get_experiment(experiment_id)
self._check_experiment_is_active(experiment)
# First, get the scorer record
scorer = (
session.query(SqlScorer)
.filter(
SqlScorer.experiment_id == experiment.experiment_id,
SqlScorer.scorer_name == name,
)
.first()
)
if scorer is None:
raise MlflowException(
f"Scorer with name '{name}' not found for experiment {experiment_id}.",
RESOURCE_DOES_NOT_EXIST,
)
# Build the query for scorer versions
query = session.query(SqlScorerVersion).filter(
SqlScorerVersion.scorer_id == scorer.scorer_id
)
# If version is specified, filter by version
if version is not None:
query = query.filter(SqlScorerVersion.scorer_version == version)
sql_scorer_versions = query.all()
if not sql_scorer_versions:
if version is not None:
raise MlflowException(
f"Scorer with name '{name}' and version {version} not found for"
f" experiment {experiment_id}.",
RESOURCE_DOES_NOT_EXIST,
)
else:
raise MlflowException(
f"Scorer with name '{name}' not found for experiment {experiment_id}.",
RESOURCE_DOES_NOT_EXIST,
)
# Delete the scorer versions
for sql_scorer_version in sql_scorer_versions:
session.delete(sql_scorer_version)
# If we're deleting all versions, also delete the scorer record
if version is None:
session.delete(scorer)
    def list_scorer_versions(self, experiment_id, name) -> list[ScorerVersion]:
        """
        List all versions of a specific scorer for an experiment.

        Args:
            experiment_id: The experiment ID.
            name: The scorer name.

        Returns:
            List of mlflow.entities.scorer.ScorerVersion objects for all versions of the scorer,
            ordered by ascending version number.

        Raises:
            MlflowException: If scorer is not found.
        """
        with self.ManagedSessionMaker() as session:
            # Validate experiment exists and is active
            experiment = self.get_experiment(experiment_id)
            self._check_experiment_is_active(experiment)
            # First, get the scorer record
            scorer = (
                session.query(SqlScorer)
                .filter(
                    SqlScorer.experiment_id == experiment.experiment_id,
                    SqlScorer.scorer_name == name,
                )
                .first()
            )
            if scorer is None:
                raise MlflowException(
                    f"Scorer with name '{name}' not found for experiment {experiment_id}.",
                    RESOURCE_DOES_NOT_EXIST,
                )
            # Query for all versions of the scorer
            sql_scorer_versions = (
                session.query(SqlScorerVersion)
                .filter(SqlScorerVersion.scorer_id == scorer.scorer_id)
                .order_by(SqlScorerVersion.scorer_version.asc())
                .all()
            )
            # A scorer row with zero versions is treated the same as a missing scorer.
            if not sql_scorer_versions:
                raise MlflowException(
                    f"Scorer with name '{name}' not found for experiment {experiment_id}.",
                    RESOURCE_DOES_NOT_EXIST,
                )
            # Convert to mlflow.entities.scorer.ScorerVersion objects
            return [
                sql_scorer_version.to_mlflow_entity() for sql_scorer_version in sql_scorer_versions
            ]
    def _apply_order_by_search_logged_models(
        self,
        models: sqlalchemy.orm.Query,
        session: sqlalchemy.orm.Session,
        order_by: list[dict[str, Any]] | None = None,
    ) -> sqlalchemy.orm.Query:
        """
        Apply ORDER BY clauses to a logged-models query.

        Each ``order_by`` entry is a dict with ``field_name`` (an attribute name or
        ``metrics.<name>``), optional ``ascending`` (default True), and — for metric
        fields — optional ``dataset_name`` / ``dataset_digest`` to scope which metric
        values are considered. NULL values always sort last, and a descending
        creation-timestamp tiebreaker is appended unless the caller already ordered
        by creation timestamp.
        """
        order_by_clauses = []
        has_creation_timestamp = False
        for ob in order_by or []:
            field_name = ob.get("field_name")
            ascending = ob.get("ascending", True)
            if "." not in field_name:
                # Plain attribute ordering on a SqlLoggedModel column.
                name = SqlLoggedModel.ALIASES.get(field_name, field_name)
                if name == "creation_timestamp_ms":
                    has_creation_timestamp = True
                try:
                    col = getattr(SqlLoggedModel, name)
                except AttributeError:
                    raise MlflowException.invalid_parameter_value(
                        f"Invalid order by field name: {field_name}"
                    )
                # Why not use `nulls_last`? Because it's not supported by all dialects (e.g., MySQL)
                order_by_clauses.extend(
                    [
                        # Sort nulls last
                        sqlalchemy.case((col.is_(None), 1), else_=0).asc(),
                        col.asc() if ascending else col.desc(),
                    ]
                )
                continue
            entity, name = field_name.split(".", 1)
            # TODO: Support filtering by other entities such as params if needed
            if entity != "metrics":
                raise MlflowException.invalid_parameter_value(
                    f"Invalid order by field name: {field_name}. Only metrics are supported."
                )
            # Sub query to get the latest metrics value for each (model_id, metric_name) pair
            dataset_filter = []
            if dataset_name := ob.get("dataset_name"):
                dataset_filter.append(SqlLoggedModelMetric.dataset_name == dataset_name)
            if dataset_digest := ob.get("dataset_digest"):
                dataset_filter.append(SqlLoggedModelMetric.dataset_digest == dataset_digest)
            # rank() over (timestamp desc, step desc) selects the most recent
            # value per (model_id, metric_name); rank == 1 rows are kept below.
            subquery = (
                session.query(
                    SqlLoggedModelMetric.model_id,
                    SqlLoggedModelMetric.metric_value,
                    func.rank()
                    .over(
                        partition_by=[
                            SqlLoggedModelMetric.model_id,
                            SqlLoggedModelMetric.metric_name,
                        ],
                        order_by=[
                            SqlLoggedModelMetric.metric_timestamp_ms.desc(),
                            SqlLoggedModelMetric.metric_step.desc(),
                        ],
                    )
                    .label("rank"),
                )
                .filter(
                    SqlLoggedModelMetric.metric_name == name,
                    *dataset_filter,
                )
                .subquery()
            )
            subquery = select(subquery.c).where(subquery.c.rank == 1).subquery()
            # NOTE(review): no explicit ON clause — SQLAlchemy infers the join
            # condition (presumably on model_id); confirm if the schema changes.
            models = models.outerjoin(subquery)
            # Why not use `nulls_last`? Because it's not supported by all dialects (e.g., MySQL)
            order_by_clauses.extend(
                [
                    # Sort nulls last
                    sqlalchemy.case((subquery.c.metric_value.is_(None), 1), else_=0).asc(),
                    subquery.c.metric_value.asc() if ascending else subquery.c.metric_value.desc(),
                ]
            )
        # Stable tiebreaker so pagination is deterministic.
        if not has_creation_timestamp:
            order_by_clauses.append(SqlLoggedModel.creation_timestamp_ms.desc())
        return models.order_by(*order_by_clauses)
    def _apply_filter_string_datasets_search_logged_models(
        self,
        models: sqlalchemy.orm.Query,
        session: sqlalchemy.orm.Session,
        experiment_ids: list[str],
        filter_string: str | None,
        datasets: list[dict[str, Any]] | None,
    ):
        """
        Apply filter-string and dataset constraints to a logged-models query.

        Attribute comparisons become WHERE clauses on SqlLoggedModel; metric, param,
        and tag comparisons each become an inner join against a subquery. When
        ``datasets`` is given, metric comparisons are restricted to those datasets;
        if there are dataset constraints but no metric comparisons, models are
        required to have at least one metric on one of the datasets. Deleted models
        are always excluded.
        """
        from mlflow.utils.search_logged_model_utils import EntityType, parse_filter_string

        comparisons = parse_filter_string(filter_string)
        dialect = self._get_dialect()
        attr_filters: list[sqlalchemy.BinaryExpression] = []
        non_attr_filters: list[sqlalchemy.BinaryExpression] = []

        # OR-combined dataset constraints shared by all metric comparisons below.
        dataset_filters = []
        if datasets:
            for dataset in datasets:
                dataset_filter = SqlLoggedModelMetric.dataset_name == dataset["dataset_name"]
                if "dataset_digest" in dataset:
                    dataset_filter = dataset_filter & (
                        SqlLoggedModelMetric.dataset_digest == dataset["dataset_digest"]
                    )
                dataset_filters.append(dataset_filter)

        has_metric_filters = False
        for comp in comparisons:
            comp_func = SearchUtils.get_sql_comparison_func(comp.op, dialect)
            if comp.entity.type == EntityType.ATTRIBUTE:
                attr_filters.append(comp_func(getattr(SqlLoggedModel, comp.entity.key), comp.value))
            elif comp.entity.type == EntityType.METRIC:
                has_metric_filters = True
                metric_filters = [
                    SqlLoggedModelMetric.metric_name == comp.entity.key,
                    comp_func(SqlLoggedModelMetric.metric_value, comp.value),
                ]
                if dataset_filters:
                    metric_filters.append(sqlalchemy.or_(*dataset_filters))
                non_attr_filters.append(
                    session.query(SqlLoggedModelMetric).filter(*metric_filters).subquery()
                )
            elif comp.entity.type == EntityType.PARAM:
                non_attr_filters.append(
                    session.query(SqlLoggedModelParam)
                    .filter(
                        SqlLoggedModelParam.param_key == comp.entity.key,
                        comp_func(SqlLoggedModelParam.param_value, comp.value),
                    )
                    .subquery()
                )
            elif comp.entity.type == EntityType.TAG:
                non_attr_filters.append(
                    session.query(SqlLoggedModelTag)
                    .filter(
                        SqlLoggedModelTag.tag_key == comp.entity.key,
                        comp_func(SqlLoggedModelTag.tag_value, comp.value),
                    )
                    .subquery()
                )

        # Inner joins: each subquery existence-constrains the model set.
        for f in non_attr_filters:
            models = models.join(f)

        # If there are dataset filters but no metric filters,
        # filter for models that have any metrics on the datasets
        if dataset_filters and not has_metric_filters:
            subquery = (
                session.query(SqlLoggedModelMetric.model_id)
                .filter(sqlalchemy.or_(*dataset_filters))
                .distinct()
                .subquery()
            )
            models = models.join(subquery)

        experiment_ids = [int(e) for e in experiment_ids]
        return models.filter(
            SqlLoggedModel.lifecycle_stage != LifecycleStage.DELETED,
            SqlLoggedModel.experiment_id.in_(experiment_ids),
            *attr_filters,
        )
    def search_logged_models(
        self,
        experiment_ids: list[str],
        filter_string: str | None = None,
        datasets: list[DatasetFilter] | None = None,
        max_results: int | None = None,
        order_by: list[dict[str, Any]] | None = None,
        page_token: str | None = None,
    ) -> PagedList[LoggedModel]:
        """
        Search logged models in the given experiments.

        Args:
            experiment_ids: Experiment IDs to search over.
            filter_string: Optional filter in the search_logged_models syntax.
            datasets: Optional dataset filters; each entry must include
                ``dataset_name`` (``dataset_digest`` is optional).
            max_results: Page size; defaults to
                SEARCH_LOGGED_MODEL_MAX_RESULTS_DEFAULT.
            order_by: Optional list of order-by dicts (see
                ``_apply_order_by_search_logged_models``).
            page_token: Opaque token from a previous call; encodes the offset and
                is validated against the same experiment_ids/filter/order_by.

        Returns:
            A PagedList of LoggedModel entities with a token when more results exist.
        """
        if datasets and not all(d.get("dataset_name") for d in datasets):
            raise MlflowException(
                "`dataset_name` in the `datasets` clause must be specified.",
                INVALID_PARAMETER_VALUE,
            )

        if page_token:
            token = SearchLoggedModelsPaginationToken.decode(page_token)
            # Reject tokens minted for a different query.
            token.validate(experiment_ids, filter_string, order_by)
            offset = token.offset
        else:
            offset = 0

        max_results = max_results or SEARCH_LOGGED_MODEL_MAX_RESULTS_DEFAULT

        with self.ManagedSessionMaker() as session:
            models = session.query(SqlLoggedModel)
            models = self._apply_filter_string_datasets_search_logged_models(
                models, session, experiment_ids, filter_string, datasets
            )
            models = self._apply_order_by_search_logged_models(models, session, order_by)
            # Fetch one extra row to detect whether another page exists.
            models = models.offset(offset).limit(max_results + 1).all()

            if len(models) > max_results:
                token = SearchLoggedModelsPaginationToken(
                    offset=offset + max_results,
                    experiment_ids=experiment_ids,
                    filter_string=filter_string,
                    order_by=order_by,
                ).encode()
            else:
                token = None

            return PagedList([lm.to_mlflow_entity() for lm in models[:max_results]], token=token)
#######################################################################################
# Below are Tracing APIs. We may refactor them to be in a separate class in the future.
#######################################################################################
def _get_trace_artifact_location_tag(self, experiment, trace_id: str) -> SqlTraceTag:
# Trace data is stored as file artifacts regardless of the tracking backend choice.
# We use subdirectory "/traces" under the experiment's artifact location to isolate
# them from run artifacts.
artifact_uri = append_to_uri_path(
experiment.artifact_location,
SqlAlchemyStore.TRACE_FOLDER_NAME,
trace_id,
SqlAlchemyStore.ARTIFACTS_FOLDER_NAME,
)
return SqlTraceTag(request_id=trace_id, key=MLFLOW_ARTIFACT_LOCATION, value=artifact_uri)
    def start_trace(self, trace_info: "TraceInfo") -> TraceInfo:
        """
        Create a trace using the V3 API format with a complete Trace object.

        Args:
            trace_info: The TraceInfo object to create in the backend.

        Returns:
            The created TraceInfo object from the backend.
        """
        with self.ManagedSessionMaker() as session:
            experiment = self.get_experiment(trace_info.experiment_id)
            self._check_experiment_is_active(experiment)

            # Use the provided trace_id
            trace_id = trace_info.trace_id

            # Create SqlTraceInfo with V3 fields directly
            sql_trace_info = SqlTraceInfo(
                request_id=trace_id,
                experiment_id=trace_info.experiment_id,
                timestamp_ms=trace_info.request_time,
                execution_time_ms=trace_info.execution_duration,
                status=trace_info.state.value,
                client_request_id=trace_info.client_request_id,
                request_preview=trace_info.request_preview,
                response_preview=trace_info.response_preview,
            )

            # Caller-provided tags plus two reserved tags: the artifact location
            # and a marker that spans are stored in the tracking store.
            tags = [
                SqlTraceTag(request_id=trace_id, key=k, value=v) for k, v in trace_info.tags.items()
            ] + [
                self._get_trace_artifact_location_tag(experiment, trace_id),
                SqlTraceTag(
                    request_id=trace_id,
                    key=TraceTagKey.SPANS_LOCATION,
                    value=SpansLocation.TRACKING_STORE.value,
                ),
            ]
            sql_trace_info.tags = tags

            sql_trace_info.request_metadata = [
                SqlTraceMetadata(request_id=trace_id, key=k, value=v)
                for k, v in trace_info.trace_metadata.items()
            ]
            sql_trace_info.assessments = [
                SqlAssessments.from_mlflow_entity(a) for a in trace_info.assessments
            ]
            try:
                session.add(sql_trace_info)
                session.flush()
            except IntegrityError:
                # Trace already exists (likely created by log_spans())
                # Use merge to update with start_trace() data, preserving any logged spans
                session.rollback()
                session.merge(sql_trace_info)
                session.flush()
            return sql_trace_info.to_mlflow_entity()
def get_trace_info(self, trace_id: str) -> TraceInfo:
"""
Fetch the trace info for the given trace id.
Args:
trace_id: Unique string identifier of the trace.
Returns:
The TraceInfo object.
"""
with self.ManagedSessionMaker() as session:
sql_trace_info = self._get_sql_trace_info(session, trace_id)
return sql_trace_info.to_mlflow_entity()
def _get_sql_trace_info(self, session, trace_id) -> SqlTraceInfo:
sql_trace_info = (
session.query(SqlTraceInfo).filter(SqlTraceInfo.request_id == trace_id).one_or_none()
)
if sql_trace_info is None:
raise MlflowException(
f"Trace with ID '{trace_id}' not found.",
RESOURCE_DOES_NOT_EXIST,
)
return sql_trace_info
    def search_traces(
        self,
        experiment_ids: list[str] | None = None,
        filter_string: str | None = None,
        max_results: int = SEARCH_TRACES_DEFAULT_MAX_RESULTS,
        order_by: list[str] | None = None,
        page_token: str | None = None,
        model_id: str | None = None,
        locations: list[str] | None = None,
    ) -> tuple[list[TraceInfo], str | None]:
        """
        Return traces that match the given list of search expressions within the experiments.

        Args:
            experiment_ids: List of experiment ids to scope the search.
            filter_string: A search filter string.
            max_results: Maximum number of traces desired.
            order_by: List of order_by clauses.
            page_token: Token specifying the next page of results. It should be obtained from
                a ``search_traces`` call.
            model_id: If specified, search traces associated with the given model ID.
                NOTE(review): this parameter is not referenced in this implementation —
                confirm whether model filtering is intentionally unsupported here.
            locations: A list of locations to search over. To search over experiments, provide
                a list of experiment IDs.

        Returns:
            A tuple of a list of :py:class:`TraceInfo <mlflow.entities.TraceInfo>` objects that
            satisfy the search expressions and a pagination token for the next page of results.
        """
        locations = _resolve_experiment_ids_and_locations(experiment_ids, locations)
        self._validate_max_results_param(max_results)

        with self.ManagedSessionMaker() as session:
            cases_orderby, parsed_orderby, sorting_joins = _get_orderby_clauses_for_search_traces(
                order_by or [], session
            )
            stmt = select(SqlTraceInfo, *cases_orderby)

            attribute_filters, non_attribute_filters, span_filters, run_id_filter = (
                _get_filter_clauses_for_search_traces(filter_string, session, self._get_dialect())
            )

            # Apply non-attribute filters (tags and metadata)
            for non_attr_filter in non_attribute_filters:
                stmt = stmt.join(non_attr_filter)

            # Apply span filters with explicit join condition
            for span_filter in span_filters:
                stmt = stmt.join(span_filter, SqlTraceInfo.request_id == span_filter.c.request_id)

            # If run_id filter is present, we need to handle it specially to include linked traces
            if run_id_filter:
                # Create a subquery to check if a trace is linked to the run via entity associations
                linked_trace_exists = exists().where(
                    (SqlEntityAssociation.source_id == SqlTraceInfo.request_id)
                    & (SqlEntityAssociation.source_type == EntityAssociationType.TRACE)
                    & (SqlEntityAssociation.destination_type == EntityAssociationType.RUN)
                    & (SqlEntityAssociation.destination_id == run_id_filter)
                )
                # Create a subquery to check if trace has run_id in metadata
                metadata_exists = exists().where(
                    (SqlTraceMetadata.request_id == SqlTraceInfo.request_id)
                    & (SqlTraceMetadata.key == TraceMetadataKey.SOURCE_RUN)
                    & (SqlTraceMetadata.value == run_id_filter)
                )

            # using an outer join is necessary here because we want to be able to sort
            # on a column (tag, metric or param) without removing the lines that
            # do not have a value for this column (which is what inner join would do)
            for j in sorting_joins:
                stmt = stmt.outerjoin(j)

            offset = SearchTraceUtils.parse_start_offset_from_page_token(page_token)
            locations = [int(e) for e in locations]

            # Build the filter conditions
            filter_conditions = [
                SqlTraceInfo.experiment_id.in_(locations),
                *attribute_filters,
            ]

            # If run_id filter is present, add OR condition for linked traces
            # (both EXISTS expressions above are only defined in that branch).
            if run_id_filter:
                filter_conditions.append(
                    or_(
                        linked_trace_exists,  # Trace is linked via entity associations
                        metadata_exists,  # Trace has run_id in metadata
                    )
                )

            stmt = (
                # NB: We don't need to distinct the results of joins because of the fact that
                # the right tables of the joins are unique on the join key, trace_id.
                # This is because the subquery that is joined on the right side is conditioned
                # by a key and value pair of tags/metadata, and the combination of key and
                # trace_id is unique in those tables.
                # Be careful when changing the query building logic, as it may break this
                # uniqueness property and require deduplication, which can be expensive.
                stmt.filter(*filter_conditions)
                .order_by(*parsed_orderby)
                .offset(offset)
                .limit(max_results)
            )

            queried_traces = session.execute(stmt).scalars(SqlTraceInfo).all()
            trace_infos = [t.to_mlflow_entity() for t in queried_traces]

            # Compute next search token: a full page implies more results may exist.
            if max_results == len(trace_infos):
                final_offset = offset + max_results
                next_token = SearchTraceUtils.create_page_token(final_offset)
            else:
                next_token = None

            return trace_infos, next_token
def _validate_max_results_param(self, max_results: int, allow_null=False):
if (not allow_null and max_results is None) or max_results < 1:
raise MlflowException(
f"Invalid value {max_results} for parameter 'max_results' supplied. It must be "
f"a positive integer",
INVALID_PARAMETER_VALUE,
)
if max_results > SEARCH_MAX_RESULTS_THRESHOLD:
raise MlflowException(
f"Invalid value {max_results} for parameter 'max_results' supplied. It must be at "
f"most {SEARCH_MAX_RESULTS_THRESHOLD}",
INVALID_PARAMETER_VALUE,
)
def set_trace_tag(self, trace_id: str, key: str, value: str):
"""
Set a tag on the trace with the given trace_id.
Args:
trace_id: The ID of the trace.
key: The string key of the tag.
value: The string value of the tag.
"""
with self.ManagedSessionMaker() as session:
key, value = _validate_trace_tag(key, value)
session.merge(SqlTraceTag(request_id=trace_id, key=key, value=value))
def delete_trace_tag(self, trace_id: str, key: str):
"""
Delete a tag on the trace with the given trace_id.
Args:
trace_id: The ID of the trace.
key: The string key of the tag.
"""
with self.ManagedSessionMaker() as session:
tags = session.query(SqlTraceTag).filter_by(request_id=trace_id, key=key)
if tags.count() == 0:
raise MlflowException(
f"No trace tag with key '{key}' for trace with ID '{trace_id}'",
RESOURCE_DOES_NOT_EXIST,
)
tags.delete()
    def _delete_traces(
        self,
        experiment_id: str,
        max_timestamp_millis: int | None = None,
        max_traces: int | None = None,
        trace_ids: list[str] | None = None,
    ) -> int:
        """
        Delete traces based on the specified criteria.

        Args:
            experiment_id: ID of the associated experiment.
            max_timestamp_millis: The maximum timestamp in milliseconds since the UNIX epoch for
                deleting traces. Traces older than or equal to this timestamp will be deleted.
            max_traces: The maximum number of traces to delete.
            trace_ids: A set of request IDs to delete.

        Returns:
            The number of traces deleted.
        """
        with self.ManagedSessionMaker() as session:
            filters = [SqlTraceInfo.experiment_id == int(experiment_id)]
            if max_timestamp_millis:
                filters.append(SqlTraceInfo.timestamp_ms <= max_timestamp_millis)
            if trace_ids:
                filters.append(SqlTraceInfo.request_id.in_(trace_ids))
            if max_traces:
                # NB: the subquery applies the filters accumulated SO FAR, then the
                # extra IN-clause it produces is appended to `filters` — the order of
                # these appends is load-bearing.
                filters.append(
                    SqlTraceInfo.request_id.in_(
                        session.query(SqlTraceInfo.request_id)
                        .filter(*filters)
                        # Delete the oldest traces first
                        .order_by(SqlTraceInfo.timestamp_ms)
                        .limit(max_traces)
                        .subquery()
                    )
                )

            # synchronize_session="fetch" keeps in-session objects consistent with
            # the bulk delete.
            return (
                session.query(SqlTraceInfo)
                .filter(and_(*filters))
                .delete(synchronize_session="fetch")
            )
def create_assessment(self, assessment: Assessment) -> Assessment:
"""
Create a new assessment in the database.
If the assessment has an 'overrides' field set, this will also mark the
overridden assessment as invalid.
Args:
assessment: The Assessment object to create (without assessment_id).
Returns:
The created Assessment object with backend-generated metadata.
"""
with self.ManagedSessionMaker() as session:
self._get_sql_trace_info(session, assessment.trace_id)
sql_assessment = SqlAssessments.from_mlflow_entity(assessment)
if sql_assessment.overrides:
update_count = (
session.query(SqlAssessments)
.filter(
SqlAssessments.trace_id == sql_assessment.trace_id,
SqlAssessments.assessment_id == sql_assessment.overrides,
)
.update({"valid": False})
)
if update_count == 0:
raise MlflowException(
f"Assessment with ID '{sql_assessment.overrides}' not found "
"for trace '{trace_id}'",
RESOURCE_DOES_NOT_EXIST,
)
session.add(sql_assessment)
return sql_assessment.to_mlflow_entity()
def get_assessment(self, trace_id: str, assessment_id: str) -> Assessment:
"""
Fetch the assessment for the given trace_id and assessment_id.
Args:
trace_id: The ID of the trace containing the assessment.
assessment_id: The ID of the assessment to retrieve.
Returns:
The Assessment object.
"""
with self.ManagedSessionMaker() as session:
sql_assessment = self._get_sql_assessment(session, trace_id, assessment_id)
return sql_assessment.to_mlflow_entity()
    def update_assessment(
        self,
        trace_id: str,
        assessment_id: str,
        name: str | None = None,
        expectation: ExpectationValue | None = None,
        feedback: FeedbackValue | None = None,
        rationale: str | None = None,
        metadata: dict[str, str] | None = None,
    ) -> Assessment:
        """
        Updates an existing assessment with new values while preserving immutable fields.

        Only source and span_id are immutable.
        The last_update_time_ms will always be updated to the current timestamp.
        Metadata will be merged with the new metadata taking precedence.

        Args:
            trace_id: The unique identifier of the trace containing the assessment.
            assessment_id: The unique identifier of the assessment to update.
            name: The updated name of the assessment. If None, preserves existing name.
            expectation: Updated expectation value for expectation assessments.
            feedback: Updated feedback value for feedback assessments.
            rationale: Updated rationale text. If None, preserves existing rationale.
            metadata: Updated metadata dict. Will be merged with existing metadata.

        Returns:
            Assessment: The updated assessment object with new last_update_time_ms.

        Raises:
            MlflowException: If the assessment doesn't exist, if immutable fields have
                changed, or if there's an error saving the assessment.
        """
        with self.ManagedSessionMaker() as session:
            existing_sql = self._get_sql_assessment(session, trace_id, assessment_id)
            existing = existing_sql.to_mlflow_entity()

            # Validate that the update kind matches the stored assessment kind.
            if expectation is not None and feedback is not None:
                raise MlflowException.invalid_parameter_value(
                    "Cannot specify both `expectation` and `feedback` parameters."
                )
            if expectation is not None and not isinstance(existing, Expectation):
                raise MlflowException.invalid_parameter_value(
                    "Cannot update expectation value on a Feedback assessment."
                )
            if feedback is not None and not isinstance(existing, Feedback):
                raise MlflowException.invalid_parameter_value(
                    "Cannot update feedback value on an Expectation assessment."
                )

            # Merge metadata; new keys win over existing ones.
            merged_metadata = None
            if existing.metadata or metadata:
                merged_metadata = (existing.metadata or {}).copy()
                if metadata:
                    merged_metadata.update(metadata)

            updated_timestamp = get_current_time_millis()

            # Rebuild the full entity so the returned object reflects the update
            # without a second DB round-trip.
            if isinstance(existing, Expectation):
                new_value = expectation.value if expectation is not None else existing.value
                updated_assessment = Expectation(
                    name=name if name is not None else existing.name,
                    value=new_value,
                    source=existing.source,
                    trace_id=trace_id,
                    metadata=merged_metadata,
                    span_id=existing.span_id,
                    create_time_ms=existing.create_time_ms,
                    last_update_time_ms=updated_timestamp,
                )
            else:
                if feedback is not None:
                    new_value = feedback.value
                    new_error = feedback.error
                else:
                    new_value = existing.value
                    new_error = existing.error

                updated_assessment = Feedback(
                    name=name if name is not None else existing.name,
                    value=new_value,
                    error=new_error,
                    source=existing.source,
                    trace_id=trace_id,
                    metadata=merged_metadata,
                    span_id=existing.span_id,
                    create_time_ms=existing.create_time_ms,
                    last_update_time_ms=updated_timestamp,
                    rationale=rationale if rationale is not None else existing.rationale,
                )

            # Carry over fields not settable via the constructors.
            updated_assessment.assessment_id = existing.assessment_id
            updated_assessment.valid = existing.valid
            updated_assessment.overrides = existing.overrides
            if hasattr(existing, "run_id"):
                updated_assessment.run_id = existing.run_id

            # Serialize the value/error for storage. NOTE(review): assumes every
            # assessment exposes either `feedback` or `expectation`; otherwise
            # `value_json` would be unbound below — confirm the entity invariant.
            if updated_assessment.feedback is not None:
                value_json = json.dumps(updated_assessment.feedback.value)
                error_json = (
                    json.dumps(updated_assessment.feedback.error.to_dictionary())
                    if updated_assessment.feedback.error
                    else None
                )
            elif updated_assessment.expectation is not None:
                value_json = json.dumps(updated_assessment.expectation.value)
                error_json = None

            metadata_json = (
                json.dumps(updated_assessment.metadata) if updated_assessment.metadata else None
            )

            session.query(SqlAssessments).filter(
                SqlAssessments.trace_id == trace_id, SqlAssessments.assessment_id == assessment_id
            ).update(
                {
                    "name": updated_assessment.name,
                    "value": value_json,
                    "error": error_json,
                    "last_updated_timestamp": updated_timestamp,
                    "rationale": updated_assessment.rationale,
                    "assessment_metadata": metadata_json,
                }
            )

            return updated_assessment
def delete_assessment(self, trace_id: str, assessment_id: str) -> None:
"""
Delete an assessment from a trace.
If the deleted assessment was overriding another assessment, the overridden
assessment will be restored to valid=True.
Args:
trace_id: The ID of the trace containing the assessment.
assessment_id: The ID of the assessment to delete.
"""
with self.ManagedSessionMaker() as session:
assessment_to_delete = (
session.query(SqlAssessments)
.filter_by(trace_id=trace_id, assessment_id=assessment_id)
.first()
)
if assessment_to_delete is None:
# Assessment doesn't exist - this is idempotent, so just return
return
# If this assessment was overriding another assessment, restore the original
if assessment_to_delete.overrides:
session.query(SqlAssessments).filter_by(
assessment_id=assessment_to_delete.overrides
).update({"valid": True})
session.delete(assessment_to_delete)
session.commit()
def _get_sql_assessment(self, session, trace_id: str, assessment_id: str) -> SqlAssessments:
"""Helper method to get SqlAssessments object."""
sql_assessment = (
session.query(SqlAssessments)
.filter(
SqlAssessments.trace_id == trace_id, SqlAssessments.assessment_id == assessment_id
)
.one_or_none()
)
if sql_assessment is None:
trace_exists = (
session.query(SqlTraceInfo).filter(SqlTraceInfo.request_id == trace_id).first()
is not None
)
if not trace_exists:
raise MlflowException(
f"Trace with request_id '{trace_id}' not found",
RESOURCE_DOES_NOT_EXIST,
)
else:
raise MlflowException(
f"Assessment with ID '{assessment_id}' not found for trace '{trace_id}'",
RESOURCE_DOES_NOT_EXIST,
)
return sql_assessment
def link_traces_to_run(self, trace_ids: list[str], run_id: str) -> None:
"""
Link multiple traces to a run by creating entity associations.
Args:
trace_ids: List of trace IDs to link to the run. Maximum 100 traces allowed.
run_id: ID of the run to link traces to.
Raises:
MlflowException: If more than 100 traces are provided.
"""
MAX_TRACES_PER_REQUEST = 100
if not trace_ids:
return
if len(trace_ids) > MAX_TRACES_PER_REQUEST:
raise MlflowException(
f"Cannot link more than {MAX_TRACES_PER_REQUEST} traces to a run in "
f"a single request. Provided {len(trace_ids)} traces.",
error_code=INVALID_PARAMETER_VALUE,
)
with self.ManagedSessionMaker() as session:
existing_associations = (
session.query(SqlEntityAssociation)
.filter(
SqlEntityAssociation.source_type == EntityAssociationType.TRACE,
SqlEntityAssociation.source_id.in_(trace_ids),
SqlEntityAssociation.destination_type == EntityAssociationType.RUN,
SqlEntityAssociation.destination_id == run_id,
)
.all()
)
existing_trace_ids = [association.source_id for association in existing_associations]
trace_ids_to_add = [
trace_id for trace_id in trace_ids if trace_id not in existing_trace_ids
]
session.add_all(
SqlEntityAssociation(
association_id=uuid.uuid4().hex,
source_type=EntityAssociationType.TRACE,
source_id=trace_id,
destination_type=EntityAssociationType.RUN,
destination_id=run_id,
)
for trace_id in trace_ids_to_add
)
    def calculate_trace_filter_correlation(
        self,
        experiment_ids: list[str],
        filter_string1: str,
        filter_string2: str,
        base_filter: str | None = None,
    ) -> TraceFilterCorrelationResult:
        """
        Calculate correlation between two trace filter conditions using NPMI
        (normalized pointwise mutual information).

        Args:
            experiment_ids: List of experiment_ids to search over
            filter_string1: First filter condition in search_traces filter syntax
            filter_string2: Second filter condition in search_traces filter syntax
            base_filter: Optional base filter that both filter1 and filter2 are tested on top of
                (e.g. 'request_time > ... and request_time < ...' for time windows)

        Returns:
            TraceFilterCorrelationResult which contains the NPMI analytics data.
        """
        with self.ManagedSessionMaker() as session:
            # Each filter is evaluated on top of the base filter (when given) by
            # textually AND-ing the filter strings before parsing.
            filter1_combined = (
                f"{base_filter} and {filter_string1}" if base_filter else filter_string1
            )
            filter2_combined = (
                f"{base_filter} and {filter_string2}" if base_filter else filter_string2
            )

            filter1_subquery = self._build_trace_filter_subquery(
                session, experiment_ids, filter1_combined
            )
            filter2_subquery = self._build_trace_filter_subquery(
                session, experiment_ids, filter2_combined
            )

            # Single round-trip for total / per-filter / joint counts.
            counts = self._get_trace_correlation_counts(
                session, experiment_ids, filter1_subquery, filter2_subquery, base_filter
            )

            npmi_result = trace_correlation.calculate_npmi_from_counts(
                counts.joint_count,
                counts.filter1_count,
                counts.filter2_count,
                counts.total_count,
            )

            return TraceFilterCorrelationResult(
                npmi=npmi_result.npmi,
                npmi_smoothed=npmi_result.npmi_smoothed,
                filter1_count=counts.filter1_count,
                filter2_count=counts.filter2_count,
                joint_count=counts.joint_count,
                total_count=counts.total_count,
            )
    def _build_trace_filter_subquery(self, session, experiment_ids: list[str], filter_string: str):
        """Build a subquery for traces that match a given filter in the specified experiments.

        Returns a SELECT of matching trace request_ids. NOTE(review): the
        run_id component returned by the filter parser is ignored here —
        confirm run-scoped correlation is intentionally unsupported.
        """
        stmt = select(SqlTraceInfo.request_id).where(SqlTraceInfo.experiment_id.in_(experiment_ids))

        if filter_string:
            attribute_filters, non_attribute_filters, span_filters, run_id_filter = (
                _get_filter_clauses_for_search_traces(filter_string, session, self._get_dialect())
            )

            # Tag/metadata filters constrain via inner joins.
            for non_attr_filter in non_attribute_filters:
                stmt = stmt.join(non_attr_filter)

            for span_filter in span_filters:
                stmt = stmt.join(span_filter, SqlTraceInfo.request_id == span_filter.c.request_id)

            # Attribute filters constrain via WHERE clauses.
            for attr_filter in attribute_filters:
                stmt = stmt.where(attr_filter)

        return stmt
    def _get_trace_correlation_counts(
        self,
        session,
        experiment_ids: list[str],
        filter1_subquery,
        filter2_subquery,
        base_filter: str | None = None,
    ) -> trace_correlation.TraceCorrelationCounts:
        """
        Get trace counts for correlation analysis using a single SQL query.

        This method efficiently calculates all necessary counts for NPMI calculation
        in a single database round-trip using LEFT JOINs instead of EXISTS subqueries
        for MSSQL compatibility.

        When base_filter is provided, the total count refers to traces matching the base filter.
        """
        f1_subq = filter1_subquery.subquery()
        f2_subq = filter2_subquery.subquery()

        filter1_alias = aliased(f1_subq)
        filter2_alias = aliased(f2_subq)

        # If base_filter is provided, use traces matching the base filter as the universe
        # Otherwise, use all traces in the experiments
        if base_filter:
            base_subquery = self._build_trace_filter_subquery(session, experiment_ids, base_filter)
            base_subq = base_subquery.subquery()
            base_table = aliased(base_subq)
            base_request_id = base_table.c.request_id
        else:
            base_table = SqlTraceInfo
            base_request_id = SqlTraceInfo.request_id

        # NB: MSSQL does not support exists queries within subjoins so a slightly
        # less efficient subquery LEFT JOIN is used to support all backends.
        # COUNT over a LEFT-JOINed column counts only non-NULL rows, i.e. rows
        # that matched the corresponding filter; the CASE counts rows matching both.
        query = (
            session.query(
                func.count(base_request_id).label("total"),
                func.count(filter1_alias.c.request_id).label("filter1"),
                func.count(filter2_alias.c.request_id).label("filter2"),
                func.count(
                    case(
                        (
                            (filter1_alias.c.request_id.isnot(None))
                            & (filter2_alias.c.request_id.isnot(None)),
                            base_request_id,
                        ),
                        else_=None,
                    )
                ).label("joint"),
            )
            .select_from(base_table)
            .outerjoin(filter1_alias, base_request_id == filter1_alias.c.request_id)
            .outerjoin(filter2_alias, base_request_id == filter2_alias.c.request_id)
        )

        # Only add experiment filter if we're using SqlTraceInfo directly (no base_filter)
        if not base_filter:
            query = query.filter(SqlTraceInfo.experiment_id.in_(experiment_ids))

        result = query.one()

        # Cast to int (some databases return Decimal)
        # Handle None values from empty result sets
        total_count = int(result.total or 0)
        filter1_count = int(result.filter1 or 0)
        filter2_count = int(result.filter2 or 0)
        joint_count = int(result.joint or 0)

        return trace_correlation.TraceCorrelationCounts(
            total_count=total_count,
            filter1_count=filter1_count,
            filter2_count=filter2_count,
            joint_count=joint_count,
        )
    def log_spans(self, location: str, spans: list[Span], tracking_uri=None) -> list[Span]:
        """
        Log multiple span entities to the tracking store.

        Creates the trace row on first write (racing writers are reconciled via
        IntegrityError handling), upserts each span, and atomically widens the
        trace's time range / status / token-usage metadata in the database.

        Args:
            location: The location to log spans to. It should be experiment ID of an MLflow
                experiment.
            spans: List of Span entities to log. All spans must belong to the same trace.
            tracking_uri: The tracking URI to use. Default to None.
                NOTE: accepted for interface compatibility; not referenced in this body.

        Returns:
            List of logged Span entities.

        Raises:
            MlflowException: If spans belong to different traces.
        """
        if not spans:
            return []
        # Validate all spans belong to the same trace
        trace_ids = {span.trace_id for span in spans}
        if len(trace_ids) > 1:
            raise MlflowException(
                f"All spans must belong to the same trace. Found trace IDs: {trace_ids}",
                error_code=INVALID_PARAMETER_VALUE,
            )
        trace_id = next(iter(trace_ids))
        # Calculate trace time bounds from spans (ns -> ms)
        min_start_ms = min(span.start_time_ns for span in spans) // 1_000_000
        # If no spans have ended, max_end_time should be None (trace still in progress)
        end_times = [span.end_time_ns for span in spans if span.end_time_ns is not None]
        max_end_ms = (max(end_times) // 1_000_000) if end_times else None
        # Determine trace status from root span if available
        root_span_status = self._get_trace_status_from_root_span(spans)
        trace_status = root_span_status or TraceState.IN_PROGRESS.value
        with self.ManagedSessionMaker() as session:
            # Try to get the trace info to check if trace exists
            sql_trace_info = (
                session.query(SqlTraceInfo)
                .filter(SqlTraceInfo.request_id == trace_id)
                .one_or_none()
            )
            # If trace doesn't exist, create it
            if sql_trace_info is None:
                # Get experiment to add artifact location tag
                experiment = self.get_experiment(location)
                # Create trace info for this new trace. We need to establish the trace
                # before we can add spans to it, as spans have a foreign key to trace_info.
                sql_trace_info = SqlTraceInfo(
                    request_id=trace_id,
                    experiment_id=location,
                    timestamp_ms=min_start_ms,
                    execution_time_ms=((max_end_ms - min_start_ms) if max_end_ms else None),
                    status=trace_status,
                    client_request_id=None,
                )
                # Add the artifact location tag that's required for search_traces to work
                tags = [
                    SqlTraceTag(
                        key=TraceTagKey.SPANS_LOCATION,
                        value=SpansLocation.TRACKING_STORE.value,
                        request_id=trace_id,
                    ),
                    self._get_trace_artifact_location_tag(experiment, trace_id),
                ]
                sql_trace_info.tags = tags
                session.add(sql_trace_info)
                try:
                    session.flush()
                except IntegrityError:
                    # IntegrityError indicates a race condition: another process/thread
                    # created the trace between our initial check and insert attempt.
                    # This is expected in concurrent scenarios. We rollback and fetch
                    # the trace that was created by the other process.
                    session.rollback()
                    sql_trace_info = (
                        session.query(SqlTraceInfo)
                        .filter(SqlTraceInfo.request_id == trace_id)
                        .one()
                    )
            # Atomic update of trace time range using SQLAlchemy's case expressions.
            # This is necessary to handle concurrent span additions from multiple processes/threads
            # without race conditions. The database performs the min/max comparisons atomically,
            # ensuring the trace always reflects the earliest start and latest end times across
            # all spans, even when multiple log_spans calls happen simultaneously.
            timestamp_update_expr = case(
                (SqlTraceInfo.timestamp_ms > min_start_ms, min_start_ms),
                else_=SqlTraceInfo.timestamp_ms,
            )
            update_dict = {
                SqlTraceInfo.timestamp_ms: timestamp_update_expr,
            }
            # Only attempt to update execution_time_ms if we have at least one ended span
            if max_end_ms is not None:
                # new execution time = max(existing end, max_end_ms) - new start time
                update_dict[SqlTraceInfo.execution_time_ms] = (
                    case(
                        (
                            (SqlTraceInfo.timestamp_ms + SqlTraceInfo.execution_time_ms)
                            > max_end_ms,
                            SqlTraceInfo.timestamp_ms + SqlTraceInfo.execution_time_ms,
                        ),
                        else_=max_end_ms,
                    )
                    - timestamp_update_expr
                )
            # If trace status is IN_PROGRESS or unspecified, check for root span to update it
            if sql_trace_info.status in (
                TraceState.IN_PROGRESS.value,
                TraceState.STATE_UNSPECIFIED.value,
            ):
                if root_span_status:
                    update_dict[SqlTraceInfo.status] = root_span_status
            aggregated_token_usage = {}
            for span in spans:
                span_dict = translate_span_when_storing(span)
                # Accumulate per-span token usage so it can be merged into trace metadata.
                if span_token_usage := span_dict.get("attributes", {}).get(
                    SpanAttributeKey.CHAT_USAGE
                ):
                    aggregated_token_usage = update_token_usage(
                        aggregated_token_usage, span_token_usage
                    )
                content_json = json.dumps(span_dict, cls=TraceJSONEncoder)
                sql_span = SqlSpan(
                    trace_id=span.trace_id,
                    experiment_id=sql_trace_info.experiment_id,
                    span_id=span.span_id,
                    parent_span_id=span.parent_id,
                    name=span.name,
                    type=span.span_type,
                    status=span.status.status_code,
                    start_time_unix_nano=span.start_time_ns,
                    end_time_unix_nano=span.end_time_ns,
                    content=content_json,
                )
                # merge() acts as an upsert: re-logging an existing span updates it.
                session.merge(sql_span)
                if span.parent_id is None:
                    # Root span: derive request/response previews for the trace row.
                    update_dict.update(
                        self._update_trace_info_attributes(sql_trace_info, span_dict)
                    )
            trace_token_usage = (
                session.query(SqlTraceMetadata)
                .filter(
                    SqlTraceMetadata.request_id == trace_id,
                    SqlTraceMetadata.key == TraceMetadataKey.TOKEN_USAGE,
                )
                .one_or_none()
            )
            if aggregated_token_usage:
                # NOTE(review): trace_token_usage.value is the raw stored column value;
                # presumably update_token_usage accepts it as-is — confirm it does not
                # require json.loads() here.
                trace_token_usage = update_token_usage(
                    trace_token_usage.value if trace_token_usage else {}, aggregated_token_usage
                )
                session.merge(
                    SqlTraceMetadata(
                        request_id=trace_id,
                        key=TraceMetadataKey.TOKEN_USAGE,
                        value=json.dumps(trace_token_usage),
                    )
                )
            session.query(SqlTraceInfo).filter(SqlTraceInfo.request_id == trace_id).update(
                update_dict,
                # Skip session synchronization for performance - we don't use the object afterward
                synchronize_session=False,
            )
        return spans
def _update_trace_info_attributes(
self, sql_trace_info: SqlTraceInfo, span_dict: dict[str, Any]
) -> dict[str, Any]:
"""
Update trace info attributes based on span dictionary.
Args:
sql_trace_info: SqlTraceInfo object
span_dict: Dictionary of span
Returns:
Dictionary of update attributes
"""
update_dict = {}
try:
if sql_trace_info.request_preview is None and (
trace_inputs := span_dict.get("attributes", {}).get(SpanAttributeKey.INPUTS)
):
update_dict[SqlTraceInfo.request_preview] = _get_truncated_preview(
trace_inputs,
role="user",
)
if sql_trace_info.response_preview is None and (
trace_outputs := span_dict.get("attributes", {}).get(SpanAttributeKey.OUTPUTS)
):
update_dict[SqlTraceInfo.response_preview] = _get_truncated_preview(
trace_outputs,
role="assistant",
)
except Exception:
_logger.debug(f"Failed to update trace info attributes: {span_dict}", exc_info=True)
return update_dict
async def log_spans_async(self, location: str, spans: list[Span]) -> list[Span]:
"""
Asynchronously log multiple span entities to the tracking store.
Args:
location: The location to log spans to. It should be experiment ID of an MLflow
experiment.
spans: List of Span entities to log. All spans must belong to the same trace.
Returns:
List of logged Span entities.
Raises:
MlflowException: If spans belong to different traces.
"""
# TODO: Implement proper async support
return self.log_spans(location, spans)
def _get_trace_status_from_root_span(self, spans: list[Span]) -> str | None:
"""
Infer trace status from root span if present.
Returns the mapped trace status string or None if no root span found.
"""
for span in spans:
if span.parent_id is None: # Found root span (no parent)
# Map span status to trace status
span_status = span.status.status_code
if span_status == SpanStatusCode.ERROR:
return TraceState.ERROR.value
else:
# Beyond ERROR, the only other valid span statuses are OK and UNSET.
# For both OK and UNSET span statuses, return OK trace status.
# UNSET is unexpected in production but we handle it gracefully.
return TraceState.OK.value
return None
def get_trace(self, trace_id: str, *, allow_partial: bool = False) -> Trace:
if not allow_partial:
for retry_count in range(3):
# only retry if the spans are not fully exported
if trace := self._get_trace(trace_id, allow_partial):
return trace
elif retry_count < 2:
time.sleep(2**retry_count)
raise MlflowException(
message=f"Trace with ID {trace_id} is not fully exported yet, "
"please try again later.",
error_code=RESOURCE_DOES_NOT_EXIST,
)
return self._get_trace(trace_id, allow_partial)
def _get_trace(self, trace_id: str, allow_partial: bool) -> Trace | None:
"""
Get the trace with spans for given trace id. This function should
only return None when the spans are not fully exported. If the trace
info doesn't exist, it should raise an exception.
"""
with self.ManagedSessionMaker() as session:
sql_trace_info = (
session.query(SqlTraceInfo)
.options(joinedload(SqlTraceInfo.spans))
.filter(SqlTraceInfo.request_id == trace_id)
.one_or_none()
)
if sql_trace_info:
trace_info = sql_trace_info.to_mlflow_entity()
spans = self._get_spans_with_trace_info(
trace_info, sql_trace_info.spans, allow_partial=allow_partial
)
if allow_partial or spans:
return Trace(info=trace_info, data=TraceData(spans=spans))
else:
raise MlflowException(
message=f"Trace with ID {trace_id} is not found.",
error_code=RESOURCE_DOES_NOT_EXIST,
)
def batch_get_traces(self, trace_ids: list[str], location: str | None = None) -> list[Trace]:
"""
Get complete traces with spans for given trace ids.
Args:
trace_ids: The trace IDs to get.
location: Location of the trace. Should be None for SQLAlchemy backend.
Returns:
List of Trace objects for the given trace IDs.
"""
if not trace_ids:
return []
traces = []
order_case = case(
{trace_id: idx for idx, trace_id in enumerate(trace_ids)},
value=SqlTraceInfo.request_id,
)
with self.ManagedSessionMaker() as session:
# Load traces and their spans in one go
sql_trace_infos = (
session.query(SqlTraceInfo)
.options(joinedload(SqlTraceInfo.spans))
.filter(SqlTraceInfo.request_id.in_(trace_ids))
.order_by(order_case)
.all()
)
traces = []
for sql_trace_info in sql_trace_infos:
trace_info = sql_trace_info.to_mlflow_entity()
# batch_get_traces is depended by search_traces, so we need to return
# complete traces only
if spans := self._get_spans_with_trace_info(
trace_info, sql_trace_info.spans, allow_partial=False
):
traces.append(Trace(info=trace_info, data=TraceData(spans=spans)))
return traces
    def _get_spans_with_trace_info(
        self, trace_info: TraceInfo, spans: list[SqlSpan], allow_partial: bool = True
    ) -> list[Span] | None:
        """
        Convert stored SqlSpan rows into Span entities for a trace.

        Spans are ordered with root spans first, then by start time. When
        ``allow_partial`` is False and the trace's size-stats metadata reports
        more spans than are stored, returns None to signal an incomplete trace.

        Raises:
            MlflowTracingException: If the trace's spans are not stored in the
                tracking store (caller should fall back to the artifact repo).
        """
        # if the tag doesn't exist then the trace is not stored in the tracking store,
        # we should rely on the artifact repo to get the trace data
        if trace_info.tags.get(TraceTagKey.SPANS_LOCATION) != SpansLocation.TRACKING_STORE.value:
            # This check is required so that the handler can capture the exception
            # and load data from artifact repo instead
            raise MlflowTracingException("Trace data not stored in tracking store")
        sql_spans = sorted(
            spans,
            key=lambda s: (
                # Root spans come first, then sort by start time
                0 if s.parent_span_id is None else 1,
                s.start_time_unix_nano,
            ),
        )
        # check whether all spans are logged before returning if not allow partial
        if not allow_partial and (
            trace_stats := trace_info.trace_metadata.get(TraceMetadataKey.SIZE_STATS)
        ):
            trace_stats = json.loads(trace_stats)
            num_spans = trace_stats.get(TraceSizeStatsKey.NUM_SPANS, 0)
            if len(sql_spans) < num_spans:
                _logger.debug(
                    f"Trace {trace_info.trace_id} is not fully exported yet, "
                    f"expecting {num_spans} spans but got {len(sql_spans)}"
                )
                # Bare return => None: signals "not fully exported" to callers.
                return
        return [
            Span.from_dict(translate_loaded_span(json.loads(sql_span.content)))
            for sql_span in sql_spans
        ]
#######################################################################################
# Entity Association Methods
#######################################################################################
def _search_entity_associations(
self,
entity_ids: str | list[str],
entity_type: EntityAssociationType,
target_type: EntityAssociationType,
search_direction: str, # "forward" or "reverse"
max_results: int | None = None,
page_token: str | None = None,
) -> PagedList[str]:
"""
Common implementation for searching entity associations.
Args:
entity_ids: The ID(s) of the entity to search from. Can be a single ID or a list.
entity_type: The type of the entity to search from.
target_type: The type of the target entities to find.
search_direction: "forward" to search source->destination, "reverse" for the opposite.
max_results: Maximum number of results to return. If None, return all results.
page_token: Token indicating the page of results to fetch.
Returns:
A :py:class:`mlflow.store.entities.paged_list.PagedList` of target entity IDs.
"""
if isinstance(entity_ids, str):
entity_ids = [entity_ids]
with self.ManagedSessionMaker() as session:
query = session.query(SqlEntityAssociation)
if search_direction == "forward":
query = query.filter(
SqlEntityAssociation.source_type == entity_type,
SqlEntityAssociation.source_id.in_(entity_ids),
SqlEntityAssociation.destination_type == target_type,
)
order_field = SqlEntityAssociation.destination_id
result_field = "destination_id"
else:
query = query.filter(
SqlEntityAssociation.destination_type == entity_type,
SqlEntityAssociation.destination_id.in_(entity_ids),
SqlEntityAssociation.source_type == target_type,
)
order_field = SqlEntityAssociation.source_id
result_field = "source_id"
offset = SearchUtils.parse_start_offset_from_page_token(page_token)
query = query.order_by(SqlEntityAssociation.created_time, order_field)
query = query.offset(offset)
if max_results is not None:
query = query.limit(max_results + 1)
associations = query.all()
next_token = None
if max_results is not None and len(associations) == max_results + 1:
final_offset = offset + max_results
next_token = SearchUtils.create_page_token(final_offset)
associations = associations[:max_results]
results = list(dict.fromkeys([getattr(assoc, result_field) for assoc in associations]))
return PagedList(results, next_token)
def search_entities_by_source(
self,
source_ids: str | list[str],
source_type: EntityAssociationType,
destination_type: EntityAssociationType,
max_results: int | None = None,
page_token: str | None = None,
) -> PagedList[str]:
"""
Get destination IDs associated with source entity/entities.
Args:
source_ids: The ID(s) of the source entity. Can be a single ID or a list.
source_type: The type of the source entity.
destination_type: The type of the destination entity.
max_results: Maximum number of results to return. If None, return all results.
page_token: Token indicating the page of results to fetch.
Returns:
A :py:class:`mlflow.store.entities.paged_list.PagedList` of destination IDs
associated with the source entity/entities.
"""
return self._search_entity_associations(
source_ids, source_type, destination_type, "forward", max_results, page_token
)
def search_entities_by_destination(
self,
destination_ids: str | list[str],
destination_type: EntityAssociationType,
source_type: EntityAssociationType,
max_results: int | None = None,
page_token: str | None = None,
) -> PagedList[str]:
"""
Get source IDs associated with destination entity/entities.
Args:
destination_ids: The ID(s) of the destination entity. Can be a single ID or a list.
destination_type: The type of the destination entity.
source_type: The type of the source entity.
max_results: Maximum number of results to return. If None, return all results.
page_token: Token indicating the page of results to fetch.
Returns:
A :py:class:`mlflow.store.entities.paged_list.PagedList` of source IDs
associated with the destination entity/entities.
"""
return self._search_entity_associations(
destination_ids, destination_type, source_type, "reverse", max_results, page_token
)
#######################################################################################
# Evaluation Dataset Methods
#######################################################################################
def _compute_dataset_digest(self, name: str, last_update_time: int) -> str:
"""
Compute digest for an evaluation dataset.
The digest includes the dataset name and last_update_time to ensure
that any state change results in a different digest.
Args:
name: Dataset name
last_update_time: Last update timestamp in milliseconds
Returns:
8-character digest string
"""
digest_input = f"{name}:{last_update_time}".encode()
return hashlib.sha256(digest_input).hexdigest()[:8]
    def create_dataset(
        self,
        name: str,
        tags: dict[str, str] | None = None,
        experiment_ids: list[str] | None = None,
    ) -> EvaluationDataset:
        """
        Create a new evaluation dataset in the database.

        Args:
            name: The name of the evaluation dataset.
            tags: Optional tags to associate with the dataset. If the MLFLOW_USER
                tag is present, it also populates created_by / last_updated_by.
            experiment_ids: List of experiment IDs to associate with the dataset

        Returns:
            The created EvaluationDataset object with backend-generated metadata.
        """
        with self.ManagedSessionMaker() as session:
            dataset_id = f"{self.EVALUATION_DATASET_ID_PREFIX}{uuid.uuid4().hex}"
            current_time = get_current_time_millis()
            digest = self._compute_dataset_digest(name, current_time)
            # Attribute the creation to the user recorded in the tags, if any.
            user_id = None
            if tags and MLFLOW_USER in tags:
                user_id = tags[MLFLOW_USER]
            created_dataset = EvaluationDataset(
                dataset_id=dataset_id,
                name=name,
                digest=digest,
                created_time=current_time,
                last_update_time=current_time,
                tags=tags or {},
                schema=None,  # Schema is computed when data is added
                profile=None,  # Profile is computed when data is added
                created_by=user_id,
                last_updated_by=user_id,
            )
            sql_dataset = SqlEvaluationDataset.from_mlflow_entity(created_dataset)
            session.add(sql_dataset)
            if created_dataset.tags:
                for key, value in created_dataset.tags.items():
                    tag = SqlEvaluationDatasetTag(
                        dataset_id=dataset_id,
                        key=key,
                        value=value,
                    )
                    session.add(tag)
            if experiment_ids:
                # Record one association row per linked experiment.
                for exp_id in experiment_ids:
                    association = SqlEntityAssociation(
                        source_type=EntityAssociationType.EVALUATION_DATASET,
                        source_id=dataset_id,
                        destination_type=EntityAssociationType.EXPERIMENT,
                        destination_id=str(exp_id),
                        created_time=current_time,
                    )
                    session.add(association)
            # Re-read the row so the returned entity reflects persisted state
            # (including the tag rows added above).
            sql_dataset_with_tags = (
                session.query(SqlEvaluationDataset)
                .filter(SqlEvaluationDataset.dataset_id == dataset_id)
                .one()
            )
            created_dataset = sql_dataset_with_tags.to_mlflow_entity()
            created_dataset.experiment_ids = experiment_ids or []
            # Attach the store so the entity can lazily load records/experiments later.
            created_dataset._tracking_store = self
            return created_dataset
def get_dataset(self, dataset_id: str) -> EvaluationDataset:
"""
Get an evaluation dataset by ID.
Args:
dataset_id: The ID of the dataset to retrieve.
Returns:
The EvaluationDataset object (without records or experiment_ids - lazy loading).
"""
with self.ManagedSessionMaker() as session:
sql_dataset = (
session.query(SqlEvaluationDataset)
.filter(SqlEvaluationDataset.dataset_id == dataset_id)
.one_or_none()
)
if sql_dataset is None:
raise MlflowException(
f"Evaluation dataset with id '{dataset_id}' not found",
RESOURCE_DOES_NOT_EXIST,
)
return sql_dataset.to_mlflow_entity()
def delete_dataset(self, dataset_id: str) -> None:
"""
Delete an evaluation dataset and all its records.
Args:
dataset_id: The ID of the dataset to delete.
"""
with self.ManagedSessionMaker() as session:
sql_dataset = (
session.query(SqlEvaluationDataset)
.filter(SqlEvaluationDataset.dataset_id == dataset_id)
.one_or_none()
)
if sql_dataset is None:
_logger.warning(f"Evaluation dataset with id '{dataset_id}' not found.")
return
session.query(SqlEntityAssociation).filter(
or_(
and_(
SqlEntityAssociation.destination_type
== EntityAssociationType.EVALUATION_DATASET,
SqlEntityAssociation.destination_id == dataset_id,
),
and_(
SqlEntityAssociation.source_type
== EntityAssociationType.EVALUATION_DATASET,
SqlEntityAssociation.source_id == dataset_id,
),
)
).delete()
session.delete(sql_dataset)
    def search_datasets(
        self,
        experiment_ids: list[str] | None = None,
        filter_string: str | None = None,
        max_results: int = 1000,
        order_by: list[str] | None = None,
        page_token: str | None = None,
    ) -> PagedList[EvaluationDataset]:
        """
        Search for evaluation datasets.

        Args:
            experiment_ids: Filter by associated experiment IDs.
            filter_string: SQL-like filter string.
            max_results: Maximum number of results to return.
            order_by: List of fields to order by.
            page_token: Token for pagination (offset-based).

        Returns:
            PagedList of EvaluationDataset objects (without records).
        """
        self._validate_max_results_param(max_results)
        offset = SearchUtils.parse_start_offset_from_page_token(page_token)
        with self.ManagedSessionMaker() as session:
            if filter_string:
                parsed_filters = SearchEvaluationDatasetsUtils.parse_search_filter(filter_string)
                attribute_filters, non_attribute_filters = _get_search_datasets_filter_clauses(
                    parsed_filters, self._get_dialect()
                )
            else:
                attribute_filters = []
                non_attribute_filters = []
            # Non-attribute filters (e.g. tag filters) are applied as joins.
            stmt = reduce(
                lambda s, f: s.join(f), non_attribute_filters, select(SqlEvaluationDataset)
            )
            if experiment_ids:
                # Resolve the dataset IDs linked to the requested experiments,
                # then restrict the search to those datasets.
                dataset_ids_result = self.search_entities_by_destination(
                    destination_ids=experiment_ids,
                    destination_type=EntityAssociationType.EXPERIMENT,
                    source_type=EntityAssociationType.EVALUATION_DATASET,
                )
                dataset_ids = dataset_ids_result.to_list()
                stmt = stmt.filter(SqlEvaluationDataset.dataset_id.in_(dataset_ids))
            stmt = stmt.filter(*attribute_filters)
            order_by_clauses = _get_search_datasets_order_by_clauses(order_by)
            stmt = stmt.order_by(*order_by_clauses)
            # Fetch one extra row to detect whether a next page exists.
            stmt = stmt.offset(offset).limit(max_results + 1)
            sql_datasets = session.execute(stmt).scalars(SqlEvaluationDataset).all()
            next_page_token = None
            if len(sql_datasets) > max_results:
                sql_datasets = sql_datasets[:max_results]
                next_page_token = SearchUtils.create_page_token(offset + max_results)
            datasets = [sql_dataset.to_mlflow_entity() for sql_dataset in sql_datasets]
            return PagedList(datasets, next_page_token)
def _update_dataset_schema(self, existing_schema_json, record_dicts):
"""
Update dataset schema with new fields from records.
This method combines schema computation and merging into a single operation
for efficiency, since schemas are stored as JSON strings.
Args:
existing_schema_json: JSON string of existing schema or None
record_dicts: List of record dictionaries being upserted
Returns:
Updated schema dictionary or None if no records and no existing schema
"""
if not record_dicts and not existing_schema_json:
return None
schema = (
json.loads(existing_schema_json)
if existing_schema_json
else {"inputs": {}, "outputs": {}, "expectations": {}, "version": "1.0"}
)
for record in record_dicts:
if inputs := record.get("inputs"):
for key, value in inputs.items():
if key not in schema["inputs"]:
schema["inputs"][key] = self._infer_field_type(value)
if (outputs := record.get("outputs")) is not None:
if isinstance(outputs, dict):
for key, value in outputs.items():
if key not in schema["outputs"]:
schema["outputs"][key] = self._infer_field_type(value)
else:
if not schema["outputs"]:
schema["outputs"] = self._infer_field_type(outputs)
if expectations := record.get("expectations"):
for key, value in expectations.items():
if key not in schema["expectations"]:
schema["expectations"][key] = self._infer_field_type(value)
return schema
def _compute_dataset_profile(self, session, dataset_id):
"""
Compute profile statistics for the dataset based on current state.
Args:
session: Database session
dataset_id: ID of the dataset
Returns:
Profile dictionary with current statistics
"""
total_records = (
session.query(SqlEvaluationDatasetRecord)
.filter(SqlEvaluationDatasetRecord.dataset_id == dataset_id)
.count()
)
if total_records == 0:
return None
return {"num_records": total_records}
def _infer_field_type(self, value):
"""
Infer the type of a field value.
Returns a string representation of the type.
"""
if value is None:
return "null"
elif isinstance(value, bool):
return "boolean"
elif isinstance(value, int):
return "integer"
elif isinstance(value, float):
return "float"
elif isinstance(value, str):
return "string"
elif isinstance(value, list):
return "array"
elif isinstance(value, dict):
return "object"
else:
return "unknown"
    def _load_dataset_records(
        self,
        dataset_id: str,
        max_results: int | None = None,
        page_token: str | None = None,
    ) -> tuple[list[DatasetRecord], str | None]:
        """
        Load dataset records with cursor-based pagination support.

        Records are ordered by (created_time, dataset_record_id) to ensure
        deterministic pagination across all database backends.

        Args:
            dataset_id: The ID of the dataset.
            max_results: Maximum number of records to return. None returns all
                records (internal use). NOTE(review): a falsy non-None value
                (e.g. 0) falls back to LOAD_DATASET_RECORDS_MAX_RESULTS — confirm
                that is the intended contract.
            page_token: Cursor token for pagination, base64 of
                "created_time:record_id". If None, starts from the beginning.

        Returns:
            Tuple of (list of DatasetRecord objects, next_page_token).
            next_page_token is None if there are no more records.
        """
        from mlflow.store.tracking import LOAD_DATASET_RECORDS_MAX_RESULTS
        # Use default if not explicitly set
        if max_results is None:
            effective_max_results = None  # Return all records for internal use
        else:
            effective_max_results = max_results or LOAD_DATASET_RECORDS_MAX_RESULTS
        with self.ManagedSessionMaker() as session:
            query = (
                session.query(SqlEvaluationDatasetRecord)
                .filter(SqlEvaluationDatasetRecord.dataset_id == dataset_id)
                .order_by(
                    SqlEvaluationDatasetRecord.created_time,
                    SqlEvaluationDatasetRecord.dataset_record_id,
                )
            )
            if page_token:
                try:
                    # Cursor token: resume strictly after (created_time, record_id).
                    decoded = base64.b64decode(page_token.encode()).decode()
                    last_created_time, last_record_id = decoded.split(":", 1)
                    last_created_time = int(last_created_time)
                    query = query.filter(
                        (SqlEvaluationDatasetRecord.created_time > last_created_time)
                        | (
                            (SqlEvaluationDatasetRecord.created_time == last_created_time)
                            & (SqlEvaluationDatasetRecord.dataset_record_id > last_record_id)
                        )
                    )
                except (ValueError, AttributeError):
                    # Fallback: treat the token as a plain numeric offset
                    # (legacy/offset-style tokens).
                    offset = int(page_token)
                    query = query.offset(offset)
            if effective_max_results is not None:
                # Fetch one extra row to know whether another page exists.
                sql_records = query.limit(effective_max_results + 1).all()
                if len(sql_records) > effective_max_results:
                    sql_records = sql_records[:effective_max_results]
                    last_record = sql_records[-1]
                    cursor = f"{last_record.created_time}:{last_record.dataset_record_id}"
                    next_page_token = base64.b64encode(cursor.encode()).decode()
                else:
                    next_page_token = None
            else:
                sql_records = query.all()
                next_page_token = None
            records = [record.to_mlflow_entity() for record in sql_records]
            return records, next_page_token
def delete_dataset_tag(self, dataset_id: str, key: str) -> None:
"""
Delete a tag from an evaluation dataset.
This operation is idempotent - if the tag doesn't exist, it's a no-op.
Args:
dataset_id: The ID of the dataset.
key: The tag key to delete.
"""
with self.ManagedSessionMaker() as session:
deleted_count = (
session.query(SqlEvaluationDatasetTag)
.filter_by(dataset_id=dataset_id, key=key)
.delete()
)
if deleted_count == 0:
_logger.debug(
f"Tag '{key}' not found for evaluation dataset {dataset_id}. "
"It may have already been deleted or never existed."
)
    def upsert_dataset_records(
        self, dataset_id: str, records: list[dict[str, Any]]
    ) -> dict[str, int]:
        """
        Bulk upsert records with input-based deduplication.

        Each record is identified by a SHA-256 hash of its canonicalized
        inputs: a record with matching (dataset_id, input_hash) is merged in
        place, otherwise a new row is inserted. Afterwards the dataset's
        schema, profile, digest, and audit fields are refreshed.

        Args:
            dataset_id: The ID of the dataset.
            records: List of record dictionaries.

        Returns:
            Dictionary with counts of inserted and updated records.
        """
        with self.ManagedSessionMaker() as session:
            inserted_count = 0
            updated_count = 0
            current_time = get_current_time_millis()
            updated_by = None  # Track who last updated the dataset
            for record_dict in records:
                # Canonical JSON (sorted keys) so logically-equal inputs hash equally.
                inputs_json = json.dumps(record_dict.get("inputs", {}), sort_keys=True)
                input_hash = hashlib.sha256(inputs_json.encode()).hexdigest()
                existing_record = (
                    session.query(SqlEvaluationDatasetRecord)
                    .filter(
                        SqlEvaluationDatasetRecord.dataset_id == dataset_id,
                        SqlEvaluationDatasetRecord.input_hash == input_hash,
                    )
                    .one_or_none()
                )
                tags = record_dict.get("tags")
                if tags and MLFLOW_USER in tags:
                    updated_by = tags[MLFLOW_USER]
                if existing_record:
                    existing_record.merge(record_dict)
                    updated_count += 1
                else:
                    created_by = None
                    if tags and MLFLOW_USER in tags:
                        created_by = tags[MLFLOW_USER]
                    source = None
                    if source_data := record_dict.get("source"):
                        # Source may arrive as a dict or an already-built entity.
                        if isinstance(source_data, dict):
                            source = DatasetRecordSource.from_dict(source_data)
                        else:
                            source = source_data
                    record = DatasetRecord(
                        dataset_record_id=None,
                        dataset_id=dataset_id,
                        inputs=record_dict.get("inputs", {}),
                        outputs=record_dict.get("outputs", {}),
                        created_time=current_time,
                        last_update_time=current_time,
                        expectations=record_dict.get("expectations"),
                        tags=tags,
                        source=source,
                        created_by=created_by,
                        last_updated_by=created_by,
                    )
                    sql_record = SqlEvaluationDatasetRecord.from_mlflow_entity(record, input_hash)
                    session.add(sql_record)
                    inserted_count += 1
            dataset_info = (
                session.query(SqlEvaluationDataset.schema, SqlEvaluationDataset.name)
                .filter(SqlEvaluationDataset.dataset_id == dataset_id)
                .first()
            )
            # NOTE(review): unlike set_dataset_tags, a missing dataset is not
            # rejected here — dataset_name stays None and the digest is computed
            # from "None:<ts>". Confirm callers always pass an existing dataset.
            if dataset_info:
                existing_schema = dataset_info[0]
                dataset_name = dataset_info[1]
            else:
                existing_schema = None
                dataset_name = None
            updated_schema = self._update_dataset_schema(existing_schema, records)
            updated_profile = self._compute_dataset_profile(session, dataset_id)
            new_digest = self._compute_dataset_digest(dataset_name, current_time)
            update_fields = {
                "last_update_time": current_time,
                "last_updated_by": updated_by,
                "digest": new_digest,
            }
            if updated_schema:
                update_fields["schema"] = json.dumps(updated_schema)
            if updated_profile:
                update_fields["profile"] = json.dumps(updated_profile)
            session.query(SqlEvaluationDataset).filter(
                SqlEvaluationDataset.dataset_id == dataset_id
            ).update(update_fields)
            return {"inserted": inserted_count, "updated": updated_count}
def get_dataset_experiment_ids(self, dataset_id: str) -> list[str]:
"""
Get experiment IDs associated with an evaluation dataset.
This method is used for lazy loading of experiment_ids in the EvaluationDataset entity.
Args:
dataset_id: The ID of the dataset.
Returns:
List of experiment IDs associated with the dataset.
"""
experiment_ids = self.search_entities_by_source(
source_ids=dataset_id,
source_type=EntityAssociationType.EVALUATION_DATASET,
destination_type=EntityAssociationType.EXPERIMENT,
)
return experiment_ids.to_list()
def set_dataset_tags(self, dataset_id: str, tags: dict[str, Any]) -> None:
"""
Update tags for an evaluation dataset.
This implements an upsert operation - existing tags are merged with new tags.
Args:
dataset_id: The ID of the dataset to update.
tags: Dictionary of tags to update.
Raises:
MlflowException: If the dataset doesn't exist.
"""
with self.ManagedSessionMaker() as session:
# NB: Checking that the dataset exists within this API avoids
# very confusing error messages regarding foreign key constraint
# violations that are different for various RDBMS backends and
# a generic error message regarding existence of a dependent key.
# Use .first() instead of .exists() for MSSQL compatibility
dataset = session.query(SqlEvaluationDataset).filter_by(dataset_id=dataset_id).first()
if not dataset:
raise MlflowException(
f"Could not find evaluation dataset with ID {dataset_id}",
error_code=RESOURCE_DOES_NOT_EXIST,
)
for key, value in tags.items():
if value is not None:
existing_tag = (
session.query(SqlEvaluationDatasetTag)
.filter_by(dataset_id=dataset_id, key=key)
.first()
)
if existing_tag:
existing_tag.value = str(value)
else:
new_tag = SqlEvaluationDatasetTag(
dataset_id=dataset_id,
key=key,
value=str(value),
)
session.add(new_tag)
#######################################################################################
# Below are legacy V2 Tracing APIs. DO NOT USE. Use the V3 APIs instead.
#######################################################################################
    def deprecated_start_trace_v2(
        self,
        experiment_id: str,
        timestamp_ms: int,
        request_metadata: dict[str, str],
        tags: dict[str, str],
    ) -> TraceInfoV2:
        """
        DEPRECATED. DO NOT USE.

        Create an initial TraceInfo object in the database.

        Args:
            experiment_id: String id of the experiment for this run.
            timestamp_ms: Start time of the trace, in milliseconds since the UNIX epoch.
            request_metadata: Metadata of the trace.
            tags: Tags of the trace.

        Returns:
            The created TraceInfo object.
        """
        with self.ManagedSessionMaker() as session:
            # Validate the target experiment exists and is active before writing.
            experiment = self.get_experiment(experiment_id)
            self._check_experiment_is_active(experiment)
            request_id = generate_request_id_v2()
            # Execution time and final status are unknown until end_trace is called.
            trace_info = SqlTraceInfo(
                request_id=request_id,
                experiment_id=experiment_id,
                timestamp_ms=timestamp_ms,
                execution_time_ms=None,
                status=TraceStatus.IN_PROGRESS,
            )
            trace_info.tags = [SqlTraceTag(key=k, value=v) for k, v in tags.items()]
            # Artifact location tag is required for later trace-data retrieval.
            trace_info.tags.append(self._get_trace_artifact_location_tag(experiment, request_id))
            trace_info.request_metadata = [
                SqlTraceMetadata(key=k, value=v) for k, v in request_metadata.items()
            ]
            session.add(trace_info)
            return TraceInfoV2.from_v3(trace_info.to_mlflow_entity())
def deprecated_end_trace_v2(
    self,
    request_id: str,
    timestamp_ms: int,
    status: TraceStatus,
    request_metadata: dict[str, str],
    tags: dict[str, str],
) -> TraceInfoV2:
    """
    DEPRECATED. DO NOT USE.

    Mark an existing trace as finished and merge in final metadata and tags.

    Args:
        request_id: Unique string identifier of the trace.
        timestamp_ms: End time of the trace in milliseconds; the stored execution
            time is this value minus the recorded start time.
        status: Final status of the trace.
        request_metadata: Metadata merged with what was logged at start time.
        tags: Tags merged with what was logged at start time.

    Returns:
        The updated TraceInfoV2 entity.
    """
    with self.ManagedSessionMaker() as session:
        trace = self._get_sql_trace_info(session, request_id)
        # Execution time is derived rather than supplied by the caller.
        trace.execution_time_ms = timestamp_ms - trace.timestamp_ms
        trace.status = status
        session.merge(trace)

        # merge() performs an upsert keyed on (request_id, key), so existing
        # entries from the start of the trace are overwritten in place.
        for meta_key, meta_val in request_metadata.items():
            session.merge(SqlTraceMetadata(request_id=request_id, key=meta_key, value=meta_val))
        for tag_key, tag_val in tags.items():
            session.merge(SqlTraceTag(request_id=request_id, key=tag_key, value=tag_val))

        return TraceInfoV2.from_v3(trace.to_mlflow_entity())
def add_dataset_to_experiments(
    self, dataset_id: str, experiment_ids: list[str]
) -> EvaluationDataset:
    """
    Associate an evaluation dataset with additional experiments.

    Experiments that are already linked to the dataset are skipped, making the
    operation idempotent per (dataset, experiment) pair.

    Args:
        dataset_id: ID of the evaluation dataset to link.
        experiment_ids: Experiments to associate the dataset with.

    Returns:
        The updated EvaluationDataset entity.

    Raises:
        MlflowException: If the dataset or any experiment does not exist.
    """
    from mlflow.entities.entity_type import EntityAssociationType

    with self.ManagedSessionMaker() as session:
        dataset = (
            session.query(SqlEvaluationDataset).filter_by(dataset_id=dataset_id).first()
        )
        if dataset is None:
            raise MlflowException(
                f"Dataset '{dataset_id}' not found",
                error_code=RESOURCE_DOES_NOT_EXIST,
            )

        # Validate every target experiment up front so nothing is written
        # when any of them is missing.
        for exp_id in experiment_ids:
            found = (
                session.query(SqlExperiment).filter_by(experiment_id=str(exp_id)).first()
            )
            if found is None:
                raise MlflowException(
                    f"Experiment '{exp_id}' not found",
                    error_code=RESOURCE_DOES_NOT_EXIST,
                )

        already_linked = (
            session.query(SqlEntityAssociation)
            .filter(
                SqlEntityAssociation.source_id == dataset_id,
                SqlEntityAssociation.source_type == EntityAssociationType.EVALUATION_DATASET,
                SqlEntityAssociation.destination_id.in_(
                    [str(exp_id) for exp_id in experiment_ids]
                ),
                SqlEntityAssociation.destination_type == EntityAssociationType.EXPERIMENT,
            )
            .all()
        )
        linked_exp_ids = {assoc.destination_id for assoc in already_linked}

        # Only create rows for experiments that are not linked yet.
        pending = []
        for exp_id in experiment_ids:
            if str(exp_id) in linked_exp_ids:
                continue
            pending.append(
                SqlEntityAssociation(
                    association_id=uuid.uuid4().hex,
                    source_id=dataset_id,
                    source_type=EntityAssociationType.EVALUATION_DATASET,
                    destination_id=str(exp_id),
                    destination_type=EntityAssociationType.EXPERIMENT,
                )
            )
        if pending:
            session.bulk_save_objects(pending)

        dataset.last_update_time = get_current_time_millis()
        session.commit()

        return dataset.to_mlflow_entity()
def remove_dataset_from_experiments(
    self, dataset_id: str, experiment_ids: list[str]
) -> EvaluationDataset:
    """
    Detach an evaluation dataset from the given experiments.

    The operation is idempotent: experiments that were never associated with the
    dataset only produce a warning, not an error.

    Args:
        dataset_id: ID of the evaluation dataset to unlink.
        experiment_ids: Experiments to remove the dataset association from.

    Returns:
        The updated EvaluationDataset entity.

    Raises:
        MlflowException: If the dataset does not exist.
    """
    from mlflow.entities.entity_type import EntityAssociationType

    with self.ManagedSessionMaker() as session:
        dataset = (
            session.query(SqlEvaluationDataset).filter_by(dataset_id=dataset_id).first()
        )
        if dataset is None:
            raise MlflowException(
                f"Dataset '{dataset_id}' not found",
                error_code=RESOURCE_DOES_NOT_EXIST,
            )

        linked = (
            session.query(SqlEntityAssociation)
            .filter(
                SqlEntityAssociation.source_id == dataset_id,
                SqlEntityAssociation.source_type == EntityAssociationType.EVALUATION_DATASET,
                SqlEntityAssociation.destination_id.in_(
                    [str(exp_id) for exp_id in experiment_ids]
                ),
                SqlEntityAssociation.destination_type == EntityAssociationType.EXPERIMENT,
            )
            .all()
        )
        linked_exp_ids = {assoc.destination_id for assoc in linked}

        # Warn (do not fail) for experiments that were never associated.
        for exp_id in experiment_ids:
            if str(exp_id) not in linked_exp_ids:
                _logger.warning(
                    f"Dataset '{dataset_id}' was not associated with experiment '{exp_id}'"
                )

        if linked_exp_ids:
            session.query(SqlEntityAssociation).filter(
                SqlEntityAssociation.source_id == dataset_id,
                SqlEntityAssociation.source_type == EntityAssociationType.EVALUATION_DATASET,
                SqlEntityAssociation.destination_id.in_(list(linked_exp_ids)),
                SqlEntityAssociation.destination_type == EntityAssociationType.EXPERIMENT,
            ).delete(synchronize_session=False)

        dataset.last_update_time = get_current_time_millis()
        session.commit()

        return dataset.to_mlflow_entity()
def _get_sqlalchemy_filter_clauses(parsed, session, dialect):
    """
    Creates run attribute filters and subqueries that will be inner-joined to SqlRun to act as
    multi-clause filters and return them as a tuple.

    Args:
        parsed: Parsed filter comparisons; each element is a dict with "type", "key",
            "value" and "comparator" entries.
        session: SQLAlchemy session used to build filter subqueries.
        dialect: SQL dialect name, used to select dialect-specific comparison functions.

    Returns:
        Tuple of ``(attribute_filters, non_attribute_filters, dataset_filters)``:
        boolean expressions applied directly to SqlRun columns, subqueries over
        param/tag/metric tables, and subqueries over dataset/input tables, respectively.
    """
    attribute_filters = []
    non_attribute_filters = []
    dataset_filters = []

    for sql_statement in parsed:
        key_type = sql_statement.get("type")
        key_name = sql_statement.get("key")
        value = sql_statement.get("value")
        comparator = sql_statement.get("comparator").upper()

        # Resolve aliases (e.g. shorthand attribute names) to canonical keys.
        key_name = SearchUtils.translate_key_alias(key_name)

        if SearchUtils.is_string_attribute(
            key_type, key_name, comparator
        ) or SearchUtils.is_numeric_attribute(key_type, key_name, comparator):
            if key_name == "run_name":
                # Treat "attributes.run_name == <value>" as "tags.`mlflow.runName` == <value>".
                # The name column in the runs table is empty for runs logged in MLflow <= 1.29.0.
                key_filter = SearchUtils.get_sql_comparison_func("=", dialect)(
                    SqlTag.key, MLFLOW_RUN_NAME
                )
                val_filter = SearchUtils.get_sql_comparison_func(comparator, dialect)(
                    SqlTag.value, value
                )
                non_attribute_filters.append(
                    session.query(SqlTag).filter(key_filter, val_filter).subquery()
                )
            else:
                # Plain run attribute: filter directly on the SqlRun column.
                attribute = getattr(SqlRun, SqlRun.get_attribute_name(key_name))
                attr_filter = SearchUtils.get_sql_comparison_func(comparator, dialect)(
                    attribute, value
                )
                attribute_filters.append(attr_filter)
        else:
            # Non-attribute filters target a key/value side table; pick the entity.
            if SearchUtils.is_metric(key_type, comparator):
                entity = SqlLatestMetric
                # Metric comparisons are numeric, so coerce the parsed string value.
                value = float(value)
            elif SearchUtils.is_param(key_type, comparator):
                entity = SqlParam
            elif SearchUtils.is_tag(key_type, comparator):
                entity = SqlTag
            elif SearchUtils.is_dataset(key_type, comparator):
                entity = SqlDataset
            else:
                raise MlflowException(
                    f"Invalid search expression type '{key_type}'",
                    error_code=INVALID_PARAMETER_VALUE,
                )

            if entity == SqlDataset:
                if key_name == "context":
                    # Dataset "context" lives on the input tag table, so the subquery
                    # joins datasets -> inputs -> input tags and compares the tag value.
                    dataset_filters.append(
                        session.query(entity, SqlInput, SqlInputTag)
                        .join(SqlInput, SqlInput.source_id == SqlDataset.dataset_uuid)
                        .join(
                            SqlInputTag,
                            and_(
                                SqlInputTag.input_uuid == SqlInput.input_uuid,
                                SqlInputTag.name == MLFLOW_DATASET_CONTEXT,
                                SearchUtils.get_sql_comparison_func(comparator, dialect)(
                                    getattr(SqlInputTag, "value"), value
                                ),
                            ),
                        )
                        .subquery()
                    )
                else:
                    # Other dataset fields are columns on SqlDataset itself.
                    dataset_attr_filter = SearchUtils.get_sql_comparison_func(comparator, dialect)(
                        getattr(SqlDataset, key_name), value
                    )
                    dataset_filters.append(
                        session.query(entity, SqlInput)
                        .join(SqlInput, SqlInput.source_id == SqlDataset.dataset_uuid)
                        .filter(dataset_attr_filter)
                        .subquery()
                    )
            else:
                # Key/value entity (metric, param, tag): match the key exactly and
                # compare the value with the requested comparator.
                key_filter = SearchUtils.get_sql_comparison_func("=", dialect)(entity.key, key_name)
                val_filter = SearchUtils.get_sql_comparison_func(comparator, dialect)(
                    entity.value, value
                )
                non_attribute_filters.append(
                    session.query(entity).filter(key_filter, val_filter).subquery()
                )

    return attribute_filters, non_attribute_filters, dataset_filters
def _get_orderby_clauses(order_by_list, session):
    """Sorts a set of runs based on their natural ordering and an overriding set of order_bys.

    Runs are naturally ordered first by start time descending, then by run id for tie-breaking.

    Returns:
        Tuple of ``(select_clauses, clauses, ordering_joins)``: extra columns that must be
        added to the SELECT list (labeled CASE expressions plus the sort values), the ORDER BY
        elements in priority order, and subqueries that must be joined into the main query to
        make non-attribute sort values available.
    """
    clauses = []
    ordering_joins = []
    clause_id = 0
    observed_order_by_clauses = set()
    select_clauses = []
    # contrary to filters, it is not easily feasible to separately handle sorting
    # on attributes and on joined tables as we must keep all clauses in the same order
    if order_by_list:
        for order_by_clause in order_by_list:
            clause_id += 1
            (key_type, key, ascending) = SearchUtils.parse_order_by_for_search_runs(order_by_clause)
            key = SearchUtils.translate_key_alias(key)
            if SearchUtils.is_string_attribute(
                key_type, key, "="
            ) or SearchUtils.is_numeric_attribute(key_type, key, "="):
                # Attribute sorts use the SqlRun column directly; no join needed.
                order_value = getattr(SqlRun, SqlRun.get_attribute_name(key))
            else:
                if SearchUtils.is_metric(key_type, "="):  # any valid comparator
                    entity = SqlLatestMetric
                elif SearchUtils.is_tag(key_type, "="):
                    entity = SqlTag
                elif SearchUtils.is_param(key_type, "="):
                    entity = SqlParam
                else:
                    raise MlflowException(
                        f"Invalid identifier type '{key_type}'",
                        error_code=INVALID_PARAMETER_VALUE,
                    )
                # build a subquery first because we will join it in the main request so that the
                # metric we want to sort on is available when we apply the sorting clause
                subquery = session.query(entity).filter(entity.key == key).subquery()
                ordering_joins.append(subquery)
                order_value = subquery.c.value
            # MySQL does not support NULLS LAST expression, so we sort first by
            # presence of the field (and is_nan for metrics), then by actual value
            # As the subqueries are created independently and used later in the
            # same main query, the CASE WHEN columns need to have unique names to
            # avoid ambiguity
            if SearchUtils.is_metric(key_type, "="):
                case = sql.case(
                    # Ideally the use of "IS" is preferred here but owing to sqlalchemy
                    # translation in MSSQL we are forced to use "=" instead.
                    # These 2 options are functionally identical / unchanged because
                    # the column (is_nan) is not nullable. However it could become an issue
                    # if this precondition changes in the future.
                    (subquery.c.is_nan == sqlalchemy.true(), 1),
                    (order_value.is_(None), 2),
                    else_=0,
                ).label(f"clause_{clause_id}")
            else:  # other entities do not have an 'is_nan' field
                case = sql.case((order_value.is_(None), 1), else_=0).label(f"clause_{clause_id}")
            # ORDER BY references the labeled CASE column (by its label name) before
            # the actual sort value, so NULL/NaN rows always sort last.
            clauses.append(case.name)
            select_clauses.append(case)
            select_clauses.append(order_value)

            if (key_type, key) in observed_order_by_clauses:
                raise MlflowException(f"`order_by` contains duplicate fields: {order_by_list}")
            observed_order_by_clauses.add((key_type, key))

            if ascending:
                clauses.append(order_value)
            else:
                clauses.append(order_value.desc())

    # Append the natural ordering (start time desc) unless the caller already sorted on it,
    # and always end with run id as a deterministic tie-breaker.
    if (
        SearchUtils._ATTRIBUTE_IDENTIFIER,
        SqlRun.start_time.key,
    ) not in observed_order_by_clauses:
        clauses.append(SqlRun.start_time.desc())
    clauses.append(SqlRun.run_uuid)
    return select_clauses, clauses, ordering_joins
def _get_search_experiments_filter_clauses(parsed_filters, dialect):
    """Split parsed experiment-search filters into attribute clauses and tag subqueries.

    Args:
        parsed_filters: Parsed comparisons, each a dict with "type", "key",
            "comparator" and "value" entries.
        dialect: SQL dialect name used to pick comparison functions.

    Returns:
        Tuple of (attribute_filters, non_attribute_filters): boolean expressions on
        SqlExperiment columns, and subqueries over the experiment tag table.
    """
    string_comparators = ("=", "!=", "LIKE", "ILIKE")
    numeric_comparators = ("=", "!=", "<", "<=", ">", ">=")

    attribute_filters = []
    non_attribute_filters = []

    for token in parsed_filters:
        token_type = token["type"]
        token_key = token["key"]
        token_comparator = token["comparator"]
        token_value = token["value"]

        if token_type == "attribute":
            # Reject comparators that are invalid for the attribute's kind.
            if (
                SearchExperimentsUtils.is_string_attribute(
                    token_type, token_key, token_comparator
                )
                and token_comparator not in string_comparators
            ):
                raise MlflowException.invalid_parameter_value(
                    f"Invalid comparator for string attribute: {token_comparator}"
                )
            if (
                SearchExperimentsUtils.is_numeric_attribute(
                    token_type, token_key, token_comparator
                )
                and token_comparator not in numeric_comparators
            ):
                raise MlflowException.invalid_parameter_value(
                    f"Invalid comparator for numeric attribute: {token_comparator}"
                )
            column = getattr(SqlExperiment, token_key)
            attribute_filters.append(
                SearchUtils.get_sql_comparison_func(token_comparator, dialect)(
                    column, token_value
                )
            )
        elif token_type == "tag":
            if token_comparator not in string_comparators:
                raise MlflowException.invalid_parameter_value(
                    f"Invalid comparator for tag: {token_comparator}"
                )
            value_clause = SearchUtils.get_sql_comparison_func(token_comparator, dialect)(
                SqlExperimentTag.value, token_value
            )
            key_clause = SearchUtils.get_sql_comparison_func("=", dialect)(
                SqlExperimentTag.key, token_key
            )
            non_attribute_filters.append(
                select(SqlExperimentTag).filter(key_clause, value_clause).subquery()
            )
        else:
            raise MlflowException.invalid_parameter_value(f"Invalid token type: {token_type}")

    return attribute_filters, non_attribute_filters
def _get_search_experiments_order_by_clauses(order_by):
    """Build SQLAlchemy ORDER BY clauses for an experiment search.

    Defaults to newest-first (creation_time DESC, experiment_id ASC) when the caller
    supplies no ordering, and always ends with experiment_id as a tie-breaker.
    """
    columns = []
    for entity_type, field, is_asc in map(
        SearchExperimentsUtils.parse_order_by_for_search_experiments,
        order_by or ["creation_time DESC", "experiment_id ASC"],
    ):
        if entity_type != "attribute":
            raise MlflowException.invalid_parameter_value(
                f"Invalid order_by entity: {entity_type}"
            )
        columns.append((getattr(SqlExperiment, field), is_asc))

    # Guarantee deterministic ordering via the primary key.
    if not any(column == SqlExperiment.experiment_id for column, _ in columns):
        columns.append((SqlExperiment.experiment_id, False))

    return [column.asc() if is_asc else column.desc() for column, is_asc in columns]
def _get_orderby_clauses_for_search_traces(order_by_list: list[str], session):
    """Sorts a set of traces based on their natural ordering and an overriding set of order_bys.

    Traces are ordered first by timestamp_ms descending, then by trace_id for tie-breaking.

    Returns:
        Tuple of ``(select_clauses, clauses, ordering_joins)``: extra SELECT columns
        (labeled CASE expressions and sort values), the ORDER BY elements in priority
        order, and subqueries to join into the main query for tag/metadata sorts.
    """
    clauses = []
    ordering_joins = []
    observed_order_by_clauses = set()
    select_clauses = []

    for clause_id, order_by_clause in enumerate(order_by_list):
        (key_type, key, ascending) = SearchTraceUtils.parse_order_by_for_search_traces(
            order_by_clause
        )
        if SearchTraceUtils.is_attribute(key_type, key, "="):
            # Attribute sorts use the SqlTraceInfo column directly.
            order_value = getattr(SqlTraceInfo, key)
        else:
            if SearchTraceUtils.is_tag(key_type, "="):
                entity = SqlTraceTag
            elif SearchTraceUtils.is_request_metadata(key_type, "="):
                entity = SqlTraceMetadata
            else:
                raise MlflowException(
                    f"Invalid identifier type '{key_type}'",
                    error_code=INVALID_PARAMETER_VALUE,
                )
            # Tags and request metadata requires a join to the main table (trace_info)
            subquery = session.query(entity).filter(entity.key == key).subquery()
            ordering_joins.append(subquery)
            order_value = subquery.c.value

        # Sort NULL values last by first ordering on a labeled CASE column
        # (referenced by its label name in ORDER BY), then on the actual value.
        case = sql.case((order_value.is_(None), 1), else_=0).label(f"clause_{clause_id}")
        clauses.append(case.name)
        select_clauses.append(case)
        select_clauses.append(order_value)

        if (key_type, key) in observed_order_by_clauses:
            raise MlflowException(f"`order_by` contains duplicate fields: {order_by_list}")
        observed_order_by_clauses.add((key_type, key))
        clauses.append(order_value if ascending else order_value.desc())

    # Add descending trace start time as default ordering and a tie-breaker
    for attr, ascending in [
        (SqlTraceInfo.timestamp_ms, False),
        (SqlTraceInfo.request_id, True),
    ]:
        if (
            SearchTraceUtils._ATTRIBUTE_IDENTIFIER,
            attr.key,
        ) not in observed_order_by_clauses:
            clauses.append(attr if ascending else attr.desc())
    return select_clauses, clauses, ordering_joins
def _get_filter_clauses_for_search_traces(filter_string, session, dialect):
    """
    Creates trace attribute filters and subqueries that will be inner-joined
    to SqlTraceInfo to act as multi-clause filters and return them as a tuple.
    Also extracts run_id filter if present for special handling.

    Args:
        filter_string: Raw search filter expression to parse.
        session: SQLAlchemy session used to build filter subqueries.
        dialect: SQL dialect name used to pick comparison functions.

    Returns:
        attribute_filters: Direct filters on SqlTraceInfo attributes
        non_attribute_filters: Subqueries for tags and metadata
        span_filters: Subqueries for span filters
        run_id_filter: Special run_id value for linked trace handling
    """
    attribute_filters = []
    non_attribute_filters = []
    span_filters = []
    run_id_filter = None

    parsed_filters = SearchTraceUtils.parse_search_filter_for_search_traces(filter_string)
    for sql_statement in parsed_filters:
        key_type = sql_statement.get("type")
        key_name = sql_statement.get("key")
        value = sql_statement.get("value")
        comparator = sql_statement.get("comparator").upper()

        if SearchTraceUtils.is_attribute(key_type, key_name, comparator):
            if key_name in ("end_time_ms", "end_time"):
                # end_time = timestamp_ms + execution_time_ms
                attribute = SqlTraceInfo.timestamp_ms + func.coalesce(
                    SqlTraceInfo.execution_time_ms, 0
                )
            else:
                attribute = getattr(SqlTraceInfo, key_name)
            attr_filter = SearchTraceUtils.get_sql_comparison_func(comparator, dialect)(
                attribute, value
            )
            attribute_filters.append(attr_filter)
        else:
            # Check if this is a run_id filter (stored as SOURCE_RUN in metadata)
            if (
                SearchTraceUtils.is_request_metadata(key_type, comparator)
                and key_name == TraceMetadataKey.SOURCE_RUN
                and comparator == "="
            ):
                run_id_filter = value
                # Don't add run_id filter to non_attribute_filters since we handle it specially
                continue

            if SearchTraceUtils.is_tag(key_type, comparator):
                entity = SqlTraceTag
            elif SearchTraceUtils.is_request_metadata(key_type, comparator):
                entity = SqlTraceMetadata
            elif SearchTraceUtils.is_span(key_type, key_name, comparator):
                # Spans have specialized columns (name, type, status) unlike tags/metadata
                # which have key-value structure, so we need specialized handling

                # Handle span.attributes.<attribute> format
                if key_name.startswith("attributes."):
                    attr_name = key_name[len("attributes.") :]
                    # Search within the content JSON for the specific attribute
                    # TODO: we should improve this by saving only the attributes into the table.
                    if comparator == "RLIKE":
                        # For RLIKE, transform the user pattern to match within JSON structure
                        # The JSON structure is: "<attr>": "\"<value>\""
                        # Values are JSON-encoded strings with escaped quotes
                        transformed_value = value
                        # The user's ^/$ anchors are stripped and replaced with the
                        # JSON key/quote context so the regex matches inside the blob.
                        if value.startswith("^"):
                            transformed_value = transformed_value[1:]
                            search_prefix = '"\\\\"'
                        else:
                            search_prefix = '"\\\\".*'
                        if value.endswith("$"):
                            transformed_value = transformed_value[:-1]
                            search_suffix = '\\\\"'
                        else:
                            search_suffix = ""
                        search_pattern = (
                            f'"{attr_name}": {search_prefix}{transformed_value}{search_suffix}'
                        )
                        val_filter = SearchTraceUtils.get_sql_comparison_func(comparator, dialect)(
                            SqlSpan.content, search_pattern
                        )
                    else:
                        # For LIKE/ILIKE, use wildcards for broad matching
                        val_filter = SearchTraceUtils.get_sql_comparison_func(comparator, dialect)(
                            SqlSpan.content, f'%"{attr_name}"{value}%'
                        )
                else:
                    # Direct span column (e.g. name/type/status) comparison.
                    span_column = getattr(SqlSpan, key_name)
                    val_filter = SearchTraceUtils.get_sql_comparison_func(comparator, dialect)(
                        span_column, value
                    )

                # Select the distinct trace ids that own a matching span.
                span_subquery = (
                    session.query(SqlSpan.trace_id.label("request_id"))
                    .filter(val_filter)
                    .distinct()
                    .subquery()
                )
                span_filters.append(span_subquery)
                continue
            elif SearchTraceUtils.is_assessment(key_type, key_name, comparator):
                # Create subquery to find traces with matching feedback
                # Filter by feedback name and check the value
                feedback_subquery = (
                    session.query(SqlAssessments.trace_id.label("request_id"))
                    .filter(
                        SqlAssessments.assessment_type == key_type,
                        SqlAssessments.name == key_name,
                        SearchTraceUtils.get_sql_comparison_func(comparator, dialect)(
                            SqlAssessments.value, value
                        ),
                    )
                    .distinct()
                    .subquery()
                )
                span_filters.append(feedback_subquery)
                continue
            else:
                raise MlflowException(
                    f"Invalid search expression type '{key_type}'",
                    error_code=INVALID_PARAMETER_VALUE,
                )

            # Key/value entity (tag or request metadata): exact key match plus
            # the requested value comparison.
            key_filter = SearchTraceUtils.get_sql_comparison_func("=", dialect)(
                entity.key, key_name
            )
            val_filter = SearchTraceUtils.get_sql_comparison_func(comparator, dialect)(
                entity.value, value
            )
            non_attribute_filters.append(
                session.query(entity).filter(key_filter, val_filter).subquery()
            )

    return attribute_filters, non_attribute_filters, span_filters, run_id_filter
def _get_search_datasets_filter_clauses(parsed_filters, dialect):
    """
    Creates evaluation dataset attribute filters and non-attribute filters for tags.

    Args:
        parsed_filters: Parsed comparisons, each a dict with "type", "key",
            "comparator" and "value" entries.
        dialect: SQL dialect name used to pick comparison functions.

    Returns:
        Tuple of (attribute_filters, non_attribute_filters): boolean expressions on
        SqlEvaluationDataset columns, and subqueries over the dataset tag table.
    """
    string_comparators = ("=", "!=", "LIKE", "ILIKE")
    numeric_comparators = ("=", "!=", "<", "<=", ">", ">=")

    attribute_filters = []
    non_attribute_filters = []

    for token in parsed_filters:
        token_type = token["type"]
        token_key = token["key"]
        token_comparator = token["comparator"]
        token_value = token["value"]

        if token_type == "attribute":
            # Reject comparators that are invalid for the attribute's kind.
            if (
                SearchEvaluationDatasetsUtils.is_string_attribute(
                    token_type, token_key, token_comparator
                )
                and token_comparator not in string_comparators
            ):
                raise MlflowException.invalid_parameter_value(
                    f"Invalid comparator for string attribute: {token_comparator}"
                )
            if (
                SearchEvaluationDatasetsUtils.is_numeric_attribute(
                    token_type, token_key, token_comparator
                )
                and token_comparator not in numeric_comparators
            ):
                raise MlflowException.invalid_parameter_value(
                    f"Invalid comparator for numeric attribute: {token_comparator}"
                )
            column = getattr(SqlEvaluationDataset, token_key)
            attribute_filters.append(
                SearchUtils.get_sql_comparison_func(token_comparator, dialect)(
                    column, token_value
                )
            )
        elif token_type == "tag":
            if token_comparator not in string_comparators:
                raise MlflowException.invalid_parameter_value(
                    f"Invalid comparator for tag: {token_comparator}"
                )
            value_clause = SearchUtils.get_sql_comparison_func(token_comparator, dialect)(
                SqlEvaluationDatasetTag.value, token_value
            )
            key_clause = SearchUtils.get_sql_comparison_func("=", dialect)(
                SqlEvaluationDatasetTag.key, token_key
            )
            non_attribute_filters.append(
                select(SqlEvaluationDatasetTag).filter(key_clause, value_clause).subquery()
            )
        else:
            raise MlflowException.invalid_parameter_value(f"Invalid token type: {token_type}")

    return attribute_filters, non_attribute_filters
def _get_search_datasets_order_by_clauses(order_by):
    """
    Creates order by clauses for searching evaluation datasets.

    Defaults to newest-first ("created_time DESC") when no ordering is supplied,
    and always appends dataset_id as a deterministic tie-breaker.
    """
    columns = []
    for spec in order_by or ["created_time DESC"]:
        entity_type, key, is_asc = (
            SearchEvaluationDatasetsUtils.parse_order_by_for_search_evaluation_datasets(spec)
        )
        if entity_type != "attribute":
            raise MlflowException.invalid_parameter_value(f"Invalid order_by entity: {entity_type}")
        columns.append((getattr(SqlEvaluationDataset, key), is_asc))

    # Guarantee deterministic ordering via the primary key.
    if not any(column == SqlEvaluationDataset.dataset_id for column, _ in columns):
        columns.append((SqlEvaluationDataset.dataset_id, False))

    return [column.asc() if is_asc else column.desc() for column, is_asc in columns]
| SqlAlchemyStore |
python | catalyst-team__catalyst | tests/pipelines/test_multihead_classification.py | {
"start": 645,
"end": 956
} | class ____(dl.Runner):
def handle_batch(self, batch):
x, y1, y2 = batch
y1_hat, y2_hat = self.model(x)
self.batch = {
"features": x,
"logits1": y1_hat,
"logits2": y2_hat,
"targets1": y1,
"targets2": y2,
}
| CustomRunner |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 47047,
"end": 49911
} | class ____(Operation):
def __init__(self, weights=None, minlength=0, sparse=False, *, name=None):
super().__init__(name=name)
self.weights = weights
self.minlength = minlength
self.sparse = sparse
def call(self, x):
return backend.numpy.bincount(
x,
weights=self.weights,
minlength=self.minlength,
sparse=self.sparse,
)
def compute_output_spec(self, x):
dtypes_to_resolve = [x.dtype]
if self.weights is not None:
weights = backend.convert_to_tensor(self.weights)
dtypes_to_resolve.append(weights.dtype)
dtype = dtypes.result_type(*dtypes_to_resolve)
else:
dtype = "int32"
x_sparse = getattr(x, "sparse", False)
return KerasTensor(
list(x.shape[:-1]) + [None],
dtype=dtype,
sparse=x_sparse or self.sparse,
)
@keras_export(["keras.ops.bincount", "keras.ops.numpy.bincount"])
def bincount(x, weights=None, minlength=0, sparse=False):
"""Count the number of occurrences of each value in a tensor of integers.
Args:
x: Input tensor.
It must be of dimension 1, and it must only contain non-negative
integer(s).
weights: Weight tensor.
It must have the same length as `x`. The default value is `None`.
If specified, `x` is weighted by it, i.e. if `n = x[i]`,
`out[n] += weight[i]` instead of the default behavior `out[n] += 1`.
minlength: An integer.
The default value is 0. If specified, there will be at least
this number of bins in the output tensor. If greater than
`max(x) + 1`, each value of the output at an index higher than
`max(x)` is set to 0.
sparse: Whether to return a sparse tensor; for backends that support
sparse tensors.
Returns:
1D tensor where each element gives the number of occurrence(s) of its
index value in x. Its length is the maximum between `max(x) + 1` and
minlength.
Examples:
>>> x = keras.ops.array([1, 2, 2, 3], dtype="uint8")
>>> keras.ops.bincount(x)
array([0, 1, 2, 1], dtype=int32)
>>> weights = x / 2
>>> weights
array([0.5, 1., 1., 1.5], dtype=float64)
>>> keras.ops.bincount(x, weights=weights)
array([0., 0.5, 2., 1.5], dtype=float64)
>>> minlength = (keras.ops.max(x).numpy() + 1) + 2 # 6
>>> keras.ops.bincount(x, minlength=minlength)
array([0, 1, 2, 1, 0, 0], dtype=int32)
"""
if any_symbolic_tensors((x,)):
return Bincount(
weights=weights, minlength=minlength, sparse=sparse
).symbolic_call(x)
return backend.numpy.bincount(
x, weights=weights, minlength=minlength, sparse=sparse
)
| Bincount |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_salesforce_to_gcs.py | {
"start": 1787,
"end": 3260
} | class ____:
@pytest.mark.db_test
@mock.patch.object(GCSHook, "upload")
@mock.patch.object(SalesforceHook, "write_object_to_file")
@mock.patch.object(SalesforceHook, "make_query")
def test_execute(self, mock_make_query, mock_write_object_to_file, mock_upload):
mock_make_query.return_value = SALESFORCE_RESPONSE
operator = SalesforceToGcsOperator(
query=QUERY,
bucket_name=GCS_BUCKET,
object_name=GCS_OBJECT_PATH,
salesforce_conn_id=SALESFORCE_CONNECTION_ID,
gcp_conn_id=GCP_CONNECTION_ID,
include_deleted=INCLUDE_DELETED,
query_params=QUERY_PARAMS,
export_format="json",
coerce_to_timestamp=True,
record_time_added=True,
task_id=TASK_ID,
)
result = operator.execute({})
mock_make_query.assert_called_once_with(
query=QUERY, include_deleted=INCLUDE_DELETED, query_params=QUERY_PARAMS
)
mock_write_object_to_file.assert_called_once_with(
query_results=SALESFORCE_RESPONSE["records"],
filename=mock.ANY,
fmt="json",
coerce_to_timestamp=True,
record_time_added=True,
)
mock_upload.assert_called_once_with(
bucket_name=GCS_BUCKET, object_name=GCS_OBJECT_PATH, filename=mock.ANY, gzip=False
)
assert result == EXPECTED_GCS_URI
| TestSalesforceToGcsOperator |
python | openai__openai-python | src/openai/types/responses/response_reasoning_item_param.py | {
"start": 286,
"end": 520
} | class ____(TypedDict, total=False):
text: Required[str]
"""A summary of the reasoning output from the model so far."""
type: Required[Literal["summary_text"]]
"""The type of the object. Always `summary_text`."""
| Summary |
python | pypa__setuptools | setuptools/_distutils/tests/test_cmd.py | {
"start": 351,
"end": 3254
} | class ____:
def test_ensure_string_list(self, cmd):
cmd.not_string_list = ['one', 2, 'three']
cmd.yes_string_list = ['one', 'two', 'three']
cmd.not_string_list2 = object()
cmd.yes_string_list2 = 'ok'
cmd.ensure_string_list('yes_string_list')
cmd.ensure_string_list('yes_string_list2')
with pytest.raises(DistutilsOptionError):
cmd.ensure_string_list('not_string_list')
with pytest.raises(DistutilsOptionError):
cmd.ensure_string_list('not_string_list2')
cmd.option1 = 'ok,dok'
cmd.ensure_string_list('option1')
assert cmd.option1 == ['ok', 'dok']
cmd.option2 = ['xxx', 'www']
cmd.ensure_string_list('option2')
cmd.option3 = ['ok', 2]
with pytest.raises(DistutilsOptionError):
cmd.ensure_string_list('option3')
def test_make_file(self, cmd):
# making sure it raises when infiles is not a string or a list/tuple
with pytest.raises(TypeError):
cmd.make_file(infiles=True, outfile='', func='func', args=())
# making sure execute gets called properly
def _execute(func, args, exec_msg, level):
assert exec_msg == 'generating out from in'
cmd.force = True
cmd.execute = _execute
cmd.make_file(infiles='in', outfile='out', func='func', args=())
def test_dump_options(self, cmd):
msgs = []
def _announce(msg, level):
msgs.append(msg)
cmd.announce = _announce
cmd.option1 = 1
cmd.option2 = 1
cmd.user_options = [('option1', '', ''), ('option2', '', '')]
cmd.dump_options()
wanted = ["command options for 'MyCmd':", ' option1 = 1', ' option2 = 1']
assert msgs == wanted
def test_ensure_string(self, cmd):
cmd.option1 = 'ok'
cmd.ensure_string('option1')
cmd.option2 = None
cmd.ensure_string('option2', 'xxx')
assert hasattr(cmd, 'option2')
cmd.option3 = 1
with pytest.raises(DistutilsOptionError):
cmd.ensure_string('option3')
def test_ensure_filename(self, cmd):
cmd.option1 = __file__
cmd.ensure_filename('option1')
cmd.option2 = 'xxx'
with pytest.raises(DistutilsOptionError):
cmd.ensure_filename('option2')
def test_ensure_dirname(self, cmd):
cmd.option1 = os.path.dirname(__file__) or os.curdir
cmd.ensure_dirname('option1')
cmd.option2 = 'xxx'
with pytest.raises(DistutilsOptionError):
cmd.ensure_dirname('option2')
def test_debug_print(self, cmd, capsys, monkeypatch):
cmd.debug_print('xxx')
assert capsys.readouterr().out == ''
monkeypatch.setattr(debug, 'DEBUG', True)
cmd.debug_print('xxx')
assert capsys.readouterr().out == 'xxx\n'
| TestCommand |
python | networkx__networkx | networkx/readwrite/tests/test_adjlist.py | {
"start": 149,
"end": 2711
} | class ____:
@pytest.mark.parametrize("graph_type", [nx.Graph, nx.MultiGraph])
def test_undirected(self, graph_type):
G = nx.complete_graph(5, create_using=graph_type)
lines = [
"0 1 2 3 4",
"1 2 3 4",
"2 3 4",
"3 4",
"4",
]
assert list(nx.generate_adjlist(G)) == lines
@pytest.mark.parametrize("graph_type", [nx.DiGraph, nx.MultiDiGraph])
def test_directed(self, graph_type):
G = nx.complete_graph(5, create_using=graph_type)
lines = [
"0 1 2 3 4",
"1 0 2 3 4",
"2 0 1 3 4",
"3 0 1 2 4",
"4 0 1 2 3",
]
assert list(nx.generate_adjlist(G)) == lines
G = nx.path_graph(5, create_using=graph_type)
G.add_edge(1, 0)
lines = [
"0 1",
"1 2 0",
"2 3",
"3 4",
"4",
]
assert list(nx.generate_adjlist(G)) == lines
@pytest.mark.parametrize("delimiter", [" ", ",", "\t"])
def test_delimiter(self, delimiter):
G = nx.complete_graph(3)
lines = [
f"0{delimiter}1{delimiter}2",
f"1{delimiter}2",
f"2",
]
assert list(nx.generate_adjlist(G, delimiter=delimiter)) == lines
def test_multiple_edges_undirected(self):
G = nx.complete_graph(3, create_using=nx.MultiGraph)
G.add_edge(0, 1)
lines = [
"0 1 1 2",
"1 2",
"2",
]
assert list(nx.generate_adjlist(G)) == lines
def test_multiple_edges_directed(self):
G = nx.complete_graph(3, create_using=nx.MultiDiGraph)
G.add_edge(0, 1)
lines = [
"0 1 1 2",
"1 0 2",
"2 0 1",
]
assert list(nx.generate_adjlist(G)) == lines
G.add_edge(1, 0)
lines[1] = "1 0 0 2"
assert list(nx.generate_adjlist(G)) == lines
def test_multiple_edges_with_data(self):
G = nx.complete_graph(3, create_using=nx.MultiGraph)
G.add_edge(0, 1, weight=1)
G.add_edge(0, 1, weight=2)
lines = [
"0 1 1 1 2",
"1 2",
"2",
]
assert list(nx.generate_adjlist(G)) == lines
def test_with_self_loop(self):
G = nx.complete_graph(3)
G.add_edge(0, 0)
lines = [
"0 1 2 0",
"1 2",
"2",
]
assert list(nx.generate_adjlist(G)) == lines
| TestGenerateAdjlist |
python | astropy__astropy | astropy/table/tests/test_masked.py | {
"start": 1129,
"end": 1350
} | class ____(SetupData):
def test_pformat(self):
assert self.t.pformat() == [
" a b ",
"--- ---",
" 1 --",
" 2 --",
" 3 --",
]
| TestPprint |
python | ray-project__ray | release/train_tests/xgboost_lightgbm/train_batch_inference_benchmark.py | {
"start": 1531,
"end": 7786
} | class ____(BasePredictor):
def __call__(self, data: pd.DataFrame) -> Dict[str, np.ndarray]:
return {"predictions": self.model.predict(data)}
def xgboost_train_loop_function(config: Dict):
train_ds_iter = ray.train.get_dataset_shard("train")
train_df = train_ds_iter.materialize().to_pandas()
label_column, params = config["label_column"], config["params"]
train_X, train_y = train_df.drop(label_column, axis=1), train_df[label_column]
dtrain = xgb.DMatrix(train_X, label=train_y)
report_callback = config["report_callback_cls"]
xgb.train(
params,
dtrain=dtrain,
num_boost_round=10,
callbacks=[report_callback()],
)
def lightgbm_train_loop_function(config: Dict):
train_ds_iter = ray.train.get_dataset_shard("train")
train_df = train_ds_iter.materialize().to_pandas()
label_column, params = config["label_column"], config["params"]
train_X, train_y = train_df.drop(label_column, axis=1), train_df[label_column]
train_set = lgb.Dataset(train_X, label=train_y)
report_callback = config["report_callback_cls"]
network_params = ray.train.lightgbm.get_network_params()
params.update(network_params)
lgb.train(
params,
train_set=train_set,
num_boost_round=10,
callbacks=[report_callback()],
)
_FRAMEWORK_PARAMS = {
"xgboost": {
"trainer_cls": XGBoostTrainer,
"predictor_cls": XGBoostPredictor,
"train_loop_function": xgboost_train_loop_function,
"train_loop_config": {
"params": {
"objective": "binary:logistic",
"eval_metric": ["logloss", "error"],
},
"label_column": "labels",
"report_callback_cls": XGBoostReportCallback,
},
},
"lightgbm": {
"trainer_cls": LightGBMTrainer,
"predictor_cls": LightGBMPredictor,
"train_loop_function": lightgbm_train_loop_function,
"train_loop_config": {
"params": {
"objective": "binary",
"metric": ["binary_logloss", "binary_error"],
},
"label_column": "labels",
"report_callback_cls": LightGBMReportCallback,
},
},
}
def train(
framework: str, data_path: str, num_workers: int, cpus_per_worker: int
) -> ray.train.Result:
ds = data.read_parquet(data_path)
framework_params = _FRAMEWORK_PARAMS[framework]
trainer_cls = framework_params["trainer_cls"]
framework_train_loop_fn = framework_params["train_loop_function"]
trainer = trainer_cls(
train_loop_per_worker=framework_train_loop_fn,
train_loop_config=framework_params["train_loop_config"],
scaling_config=ScalingConfig(
num_workers=num_workers,
resources_per_worker={"CPU": cpus_per_worker},
),
datasets={"train": ds},
run_config=RunConfig(
storage_path="/mnt/cluster_storage", name=f"{framework}_benchmark"
),
)
result = trainer.fit()
return result
def predict(framework: str, result: ray.train.Result, data_path: str):
framework_params = _FRAMEWORK_PARAMS[framework]
predictor_cls = framework_params["predictor_cls"]
ds = data.read_parquet(data_path)
ds = ds.drop_columns(["labels"])
concurrency = int(ray.cluster_resources()["CPU"] // 2)
ds.map_batches(
predictor_cls,
# Improve prediction throughput with larger batch size than default 4096
batch_size=8192,
concurrency=concurrency,
fn_constructor_kwargs={
"report_callback_cls": framework_params["train_loop_config"][
"report_callback_cls"
],
"result": result,
},
batch_format="pandas",
).write_parquet("/mnt/cluster_storage/predictions")
def main(args):
framework = args.framework
experiment = args.size if not args.smoke_test else "smoke_test"
experiment_params = _EXPERIMENT_PARAMS[experiment]
data_path, num_workers, cpus_per_worker = (
experiment_params["data"],
experiment_params["num_workers"],
experiment_params["cpus_per_worker"],
)
print(f"Running {framework} training benchmark...")
training_start = time.perf_counter()
result = train(framework, data_path, num_workers, cpus_per_worker)
training_time = time.perf_counter() - training_start
print(f"Running {framework} prediction benchmark...")
prediction_start = time.perf_counter()
predict(framework, result, data_path)
prediction_time = time.perf_counter() - prediction_start
times = {"training_time": training_time, "prediction_time": prediction_time}
print("Training result:\n", result)
print("Training/prediction times:", times)
test_output_json = os.environ.get("TEST_OUTPUT_JSON", "/tmp/result.json")
with open(test_output_json, "wt") as f:
json.dump(times, f)
if not args.disable_check:
if training_time > _TRAINING_TIME_THRESHOLD:
raise RuntimeError(
f"Training is taking {training_time} seconds, "
f"which is longer than expected ({_TRAINING_TIME_THRESHOLD} seconds)."
)
if prediction_time > _PREDICTION_TIME_THRESHOLD:
raise RuntimeError(
f"Batch prediction is taking {prediction_time} seconds, "
f"which is longer than expected ({_PREDICTION_TIME_THRESHOLD} seconds)."
)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"framework", type=str, choices=["xgboost", "lightgbm"], default="xgboost"
)
parser.add_argument("--size", type=str, choices=["10G", "100G"], default="100G")
# Add a flag for disabling the timeout error.
# Use case: running the benchmark as a documented example, in infra settings
# different from the formal benchmark's EC2 setup.
parser.add_argument(
"--disable-check",
action="store_true",
help="disable runtime error on benchmark timeout",
)
parser.add_argument("--smoke-test", action="store_true")
args = parser.parse_args()
main(args)
| LightGBMPredictor |
python | pypa__pip | src/pip/_vendor/urllib3/contrib/socks.py | {
"start": 5300,
"end": 7097
} | class ____(PoolManager):
"""
A version of the urllib3 ProxyManager that routes connections via the
defined SOCKS proxy.
"""
pool_classes_by_scheme = {
"http": SOCKSHTTPConnectionPool,
"https": SOCKSHTTPSConnectionPool,
}
def __init__(
self,
proxy_url,
username=None,
password=None,
num_pools=10,
headers=None,
**connection_pool_kw
):
parsed = parse_url(proxy_url)
if username is None and password is None and parsed.auth is not None:
split = parsed.auth.split(":")
if len(split) == 2:
username, password = split
if parsed.scheme == "socks5":
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = False
elif parsed.scheme == "socks5h":
socks_version = socks.PROXY_TYPE_SOCKS5
rdns = True
elif parsed.scheme == "socks4":
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = False
elif parsed.scheme == "socks4a":
socks_version = socks.PROXY_TYPE_SOCKS4
rdns = True
else:
raise ValueError("Unable to determine SOCKS version from %s" % proxy_url)
self.proxy_url = proxy_url
socks_options = {
"socks_version": socks_version,
"proxy_host": parsed.host,
"proxy_port": parsed.port,
"username": username,
"password": password,
"rdns": rdns,
}
connection_pool_kw["_socks_options"] = socks_options
super(SOCKSProxyManager, self).__init__(
num_pools, headers, **connection_pool_kw
)
self.pool_classes_by_scheme = SOCKSProxyManager.pool_classes_by_scheme
| SOCKSProxyManager |
python | hynek__structlog | tests/test_config.py | {
"start": 929,
"end": 2032
} | class ____(BoundLoggerBase):
"""
Custom wrapper class for testing.
"""
def test_lazy_logger_is_not_detected_as_abstract_method():
"""
If someone defines an attribute on an ABC with a logger, that logger is not
detected as an abstract method.
See https://github.com/hynek/structlog/issues/229
"""
class Foo(metaclass=abc.ABCMeta): # noqa: B024
log = structlog.get_logger()
Foo()
def test_lazy_logger_is_an_instance_of_bindable_logger():
"""
The BoundLoggerLazyProxy returned by get_logger fulfills the BindableLogger
protocol.
See https://github.com/hynek/structlog/issues/560
"""
assert isinstance(get_logger(), BindableLogger)
def test_lazy_logger_context_is_initial_values():
"""
If a user asks for _context (e.g., using get_context) return
initial_values.
"""
logger = get_logger(context="a")
assert {"context": "a"} == structlog.get_context(logger)
def test_default_context_class():
"""
Default context class is dict.
"""
assert dict is _BUILTIN_DEFAULT_CONTEXT_CLASS
| Wrapper |
python | mitmproxy__pdoc | test/testdata/demopackage/child_b.py | {
"start": 74,
"end": 264
} | class ____:
"""This class is defined in .child_b. It has a B.b method."""
b_type: typing.Type[B]
"""we have a self-referential attribute here"""
def b(self):
return 1
| B |
python | Pylons__pyramid | tests/test_static.py | {
"start": 24168,
"end": 24329
} | class ____:
status = ()
headers = ()
def __call__(self, status, headers):
self.status = status
self.headers = headers
| DummyStartResponse |
python | pydantic__pydantic | tests/benchmarks/shared.py | {
"start": 1695,
"end": 1744
} | class ____(NamedTuple):
x: int
y: int
| Point |
python | doocs__leetcode | solution/1700-1799/1770.Maximum Score from Performing Multiplication Operations/Solution2.py | {
"start": 0,
"end": 647
} | class ____:
def maximumScore(self, nums: List[int], multipliers: List[int]) -> int:
n, m = len(nums), len(multipliers)
f = [[-inf] * (m + 1) for _ in range(m + 1)]
f[0][0] = 0
ans = -inf
for i in range(m + 1):
for j in range(m - i + 1):
k = i + j - 1
if i > 0:
f[i][j] = max(f[i][j], f[i - 1][j] + multipliers[k] * nums[i - 1])
if j > 0:
f[i][j] = max(f[i][j], f[i][j - 1] + multipliers[k] * nums[n - j])
if i + j == m:
ans = max(ans, f[i][j])
return ans
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typeNarrowingIsinstance6.py | {
"start": 1137,
"end": 1452
} | class ____(ParentB[_T2]):
pass
def func4(var: ParentB[int]):
if isinstance(var, ChildB1):
reveal_type(var, expected_text="ChildB1[int]")
def func5(var: ParentB[Any]):
if isinstance(var, ChildB1):
reveal_type(var, expected_text="ChildB1[Any]")
_T3 = TypeVar("_T3", float, str)
| ChildB1 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self6.py | {
"start": 244,
"end": 341
} | class ____(Generic[T_contra]):
def __new__(cls: type[Self]) -> Self: ...
MyClass[int]()
| MyClass |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 21428,
"end": 21609
} | class ____(models.Model):
name = models.CharField(max_length=15)
log = HistoricalRecords()
#
# Following classes test the "custom_model_name" option
#
| CustomManagerNameModel |
python | Textualize__textual | src/textual/events.py | {
"start": 1508,
"end": 1650
} | class ____(Event, bubble=False):
"""Internal event used to retrieve the terminal's cursor position."""
x: int
y: int
| CursorPosition |
python | weaviate__weaviate-python-client | weaviate/collections/aggregations/near_object/sync.py | {
"start": 197,
"end": 262
} | class ____(_NearObjectExecutor[ConnectionSync]):
pass
| _NearObject |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 58715,
"end": 59224
} | class ____:
xlEmptyCellReferences = 7 # from enum XlErrorChecks
xlEvaluateToError = 1 # from enum XlErrorChecks
xlInconsistentFormula = 4 # from enum XlErrorChecks
xlInconsistentListFormula = 9 # from enum XlErrorChecks
xlListDataValidation = 8 # from enum XlErrorChecks
xlNumberAsText = 3 # from enum XlErrorChecks
xlOmittedCells = 5 # from enum XlErrorChecks
xlTextDate = 2 # from enum XlErrorChecks
xlUnlockedFormulaCells = 6 # from enum XlErrorChecks
| ErrorChecks |
python | weaviate__weaviate-python-client | weaviate/collections/classes/grpc.py | {
"start": 19812,
"end": 20315
} | class ____(_WeaviateInput):
"""Define the query-time return properties of a nested property."""
name: str
properties: "PROPERTIES"
def __hash__(self) -> int: # for set
return hash(str(self))
REFERENCE = Union[_QueryReference, _QueryReferenceMultiTarget]
REFERENCES = Union[Sequence[REFERENCE], REFERENCE]
PROPERTY = Union[str, QueryNested]
PROPERTIES = Union[Sequence[PROPERTY], PROPERTY]
NestedProperties = Union[List[Union[str, QueryNested]], str, QueryNested]
| QueryNested |
python | numpy__numpy | numpy/_core/tests/test_numerictypes.py | {
"start": 21929,
"end": 22170
} | class ____:
def test_platform_dependent_aliases(self):
if np.int64 is np.int_:
assert_('int64' in np.int_.__doc__)
elif np.int64 is np.longlong:
assert_('int64' in np.longlong.__doc__)
| TestDocStrings |
python | kamyu104__LeetCode-Solutions | Python/minimum-additions-to-make-valid-string.py | {
"start": 38,
"end": 257
} | class ____(object):
def addMinimum(self, word):
"""
:type word: str
:rtype: int
"""
return 3*(sum(i-1 < 0 or word[i-1] >= word[i] for i in xrange(len(word))))-len(word)
| Solution |
python | langchain-ai__langchain | libs/langchain/tests/unit_tests/retrievers/test_multi_vector.py | {
"start": 359,
"end": 4655
} | class ____(InMemoryVectorStore):
@staticmethod
def _identity_fn(score: float) -> float:
return score
def _select_relevance_score_fn(self) -> Callable[[float], float]:
return self._identity_fn
@override
def similarity_search(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> list[Document]:
res = self.store.get(query)
if res is None:
return []
return [res]
@override
def similarity_search_with_score(
self,
query: str,
k: int = 4,
**kwargs: Any,
) -> list[tuple[Document, float]]:
res = self.store.get(query)
if res is None:
return []
return [(res, 0.8)]
def test_multi_vector_retriever_initialization() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
retriever.vectorstore.add_documents(documents, ids=["1"])
retriever.docstore.mset(list(zip(["1"], documents, strict=False)))
results = retriever.invoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
async def test_multi_vector_retriever_initialization_async() -> None:
vectorstore = InMemoryVectorstoreWithSearch()
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
)
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
await retriever.vectorstore.aadd_documents(documents, ids=["1"])
await retriever.docstore.amset(list(zip(["1"], documents, strict=False)))
results = await retriever.ainvoke("1")
assert len(results) > 0
assert results[0].page_content == "test document"
def test_multi_vector_retriever_similarity_search_with_score() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
vectorstore.add_documents(documents, ids=["1"])
# test with score_threshold = 0.5
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents, strict=False)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# test with score_threshold = 0.9
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
retriever.docstore.mset(list(zip(["1"], documents, strict=False)))
results = retriever.invoke("1")
assert len(results) == 0
async def test_multi_vector_retriever_similarity_search_with_score_async() -> None:
documents = [Document(page_content="test document", metadata={"doc_id": "1"})]
vectorstore = InMemoryVectorstoreWithSearch()
await vectorstore.aadd_documents(documents, ids=["1"])
# test with score_threshold = 0.5
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.5},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents, strict=False)))
results = retriever.invoke("1")
assert len(results) == 1
assert results[0].page_content == "test document"
# test with score_threshold = 0.9
retriever = MultiVectorRetriever(
vectorstore=vectorstore,
docstore=InMemoryStore(),
doc_id="doc_id",
search_kwargs={"score_threshold": 0.9},
search_type=SearchType.similarity_score_threshold,
)
await retriever.docstore.amset(list(zip(["1"], documents, strict=False)))
results = retriever.invoke("1")
assert len(results) == 0
| InMemoryVectorstoreWithSearch |
python | scrapy__scrapy | tests/test_utils_trackref.py | {
"start": 174,
"end": 1811
} | class ____(trackref.object_ref):
pass
@pytest.fixture(autouse=True)
def clear_refs() -> None:
trackref.live_refs.clear()
def test_format_live_refs():
o1 = Foo() # noqa: F841
o2 = Bar() # noqa: F841
o3 = Foo() # noqa: F841
assert (
trackref.format_live_refs()
== """\
Live References
Bar 1 oldest: 0s ago
Foo 2 oldest: 0s ago
"""
)
assert (
trackref.format_live_refs(ignore=Foo)
== """\
Live References
Bar 1 oldest: 0s ago
"""
)
@mock.patch("sys.stdout", new_callable=StringIO)
def test_print_live_refs_empty(stdout):
trackref.print_live_refs()
assert stdout.getvalue() == "Live References\n\n\n"
@mock.patch("sys.stdout", new_callable=StringIO)
def test_print_live_refs_with_objects(stdout):
o1 = Foo() # noqa: F841
trackref.print_live_refs()
assert (
stdout.getvalue()
== """\
Live References
Foo 1 oldest: 0s ago\n\n"""
)
def test_get_oldest():
o1 = Foo()
o1_time = time()
o2 = Bar()
o3_time = time()
if o3_time <= o1_time:
sleep(0.01)
o3_time = time()
if o3_time <= o1_time:
pytest.skip("time.time is not precise enough")
o3 = Foo() # noqa: F841
assert trackref.get_oldest("Foo") is o1
assert trackref.get_oldest("Bar") is o2
assert trackref.get_oldest("XXX") is None
def test_iter_all():
o1 = Foo()
o2 = Bar() # noqa: F841
o3 = Foo()
assert set(trackref.iter_all("Foo")) == {o1, o3}
| Bar |
python | django-mptt__django-mptt | mptt/__init__.py | {
"start": 361,
"end": 486
} | class ____(Exception):
"Deprecated - don't use this anymore. It's never thrown, you don't need to catch it"
| AlreadyRegistered |
python | run-llama__llama_index | llama-index-core/llama_index/core/indices/knowledge_graph/retrievers.py | {
"start": 16614,
"end": 34798
} | class ____(BaseRetriever):
"""
Knowledge Graph RAG retriever.
Retriever that perform SubGraph RAG towards knowledge graph.
Args:
storage_context (Optional[StorageContext]): A storage context to use.
entity_extract_fn (Optional[Callable]): A function to extract entities.
entity_extract_template Optional[BasePromptTemplate]): A Query Key Entity
Extraction Prompt (see :ref:`Prompt-Templates`).
entity_extract_policy (Optional[str]): The entity extraction policy to use.
default: "union"
possible values: "union", "intersection"
synonym_expand_fn (Optional[Callable]): A function to expand synonyms.
synonym_expand_template (Optional[QueryKeywordExpandPrompt]): A Query Key Entity
Expansion Prompt (see :ref:`Prompt-Templates`).
synonym_expand_policy (Optional[str]): The synonym expansion policy to use.
default: "union"
possible values: "union", "intersection"
max_entities (int): The maximum number of entities to extract.
default: 5
max_synonyms (int): The maximum number of synonyms to expand per entity.
default: 5
retriever_mode (Optional[str]): The retriever mode to use.
default: "keyword"
possible values: "keyword", "embedding", "keyword_embedding"
with_nl2graphquery (bool): Whether to combine NL2GraphQuery in context.
default: False
graph_traversal_depth (int): The depth of graph traversal.
default: 2
max_knowledge_sequence (int): The maximum number of knowledge sequence to
include in the response. By default, it's 30.
verbose (bool): Whether to print out debug info.
"""
def __init__(
self,
storage_context: Optional[StorageContext] = None,
llm: Optional[LLM] = None,
entity_extract_fn: Optional[Callable] = None,
entity_extract_template: Optional[BasePromptTemplate] = None,
entity_extract_policy: Optional[str] = "union",
synonym_expand_fn: Optional[Callable] = None,
synonym_expand_template: Optional[BasePromptTemplate] = None,
synonym_expand_policy: Optional[str] = "union",
max_entities: int = 5,
max_synonyms: int = 5,
retriever_mode: Optional[str] = "keyword",
with_nl2graphquery: bool = False,
graph_traversal_depth: int = 2,
max_knowledge_sequence: int = REL_TEXT_LIMIT,
verbose: bool = False,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
) -> None:
"""Initialize the retriever."""
# Ensure that we have a graph store
assert storage_context is not None, "Must provide a storage context."
assert storage_context.graph_store is not None, (
"Must provide a graph store in the storage context."
)
self._storage_context = storage_context
self._graph_store = storage_context.graph_store
self._llm = llm or Settings.llm
self._entity_extract_fn = entity_extract_fn
self._entity_extract_template = (
entity_extract_template or DEFAULT_QUERY_KEYWORD_EXTRACT_TEMPLATE
)
self._entity_extract_policy = entity_extract_policy
self._synonym_expand_fn = synonym_expand_fn
self._synonym_expand_template = (
synonym_expand_template or DEFAULT_SYNONYM_EXPAND_PROMPT
)
self._synonym_expand_policy = synonym_expand_policy
self._max_entities = max_entities
self._max_synonyms = max_synonyms
self._retriever_mode = retriever_mode
self._with_nl2graphquery = with_nl2graphquery
if self._with_nl2graphquery:
from llama_index.core.query_engine.knowledge_graph_query_engine import (
KnowledgeGraphQueryEngine,
)
graph_query_synthesis_prompt = kwargs.get("graph_query_synthesis_prompt")
if graph_query_synthesis_prompt is not None:
del kwargs["graph_query_synthesis_prompt"]
graph_response_answer_prompt = kwargs.get("graph_response_answer_prompt")
if graph_response_answer_prompt is not None:
del kwargs["graph_response_answer_prompt"]
refresh_schema = kwargs.get("refresh_schema", False)
response_synthesizer = kwargs.get("response_synthesizer")
self._kg_query_engine = KnowledgeGraphQueryEngine(
llm=self._llm,
storage_context=self._storage_context,
graph_query_synthesis_prompt=graph_query_synthesis_prompt,
graph_response_answer_prompt=graph_response_answer_prompt,
refresh_schema=refresh_schema,
verbose=verbose,
response_synthesizer=response_synthesizer,
**kwargs,
)
self._graph_traversal_depth = graph_traversal_depth
self._max_knowledge_sequence = max_knowledge_sequence
self._verbose = verbose
refresh_schema = kwargs.get("refresh_schema", False)
try:
self._graph_schema = self._graph_store.get_schema(refresh=refresh_schema)
except NotImplementedError:
self._graph_schema = ""
except Exception as e:
logger.warning(f"Failed to get graph schema: {e}")
self._graph_schema = ""
super().__init__(callback_manager=callback_manager or Settings.callback_manager)
def _process_entities(
self,
query_str: str,
handle_fn: Optional[Callable],
handle_llm_prompt_template: Optional[BasePromptTemplate],
cross_handle_policy: Optional[str] = "union",
max_items: Optional[int] = 5,
result_start_token: str = "KEYWORDS:",
) -> List[str]:
"""Get entities from query string."""
assert cross_handle_policy in [
"union",
"intersection",
], "Invalid entity extraction policy."
if cross_handle_policy == "intersection":
assert all(
[
handle_fn is not None,
handle_llm_prompt_template is not None,
]
), "Must provide entity extract function and template."
assert any(
[
handle_fn is not None,
handle_llm_prompt_template is not None,
]
), "Must provide either entity extract function or template."
enitities_fn: List[str] = []
enitities_llm: Set[str] = set()
if handle_fn is not None:
enitities_fn = handle_fn(query_str)
if handle_llm_prompt_template is not None:
response = self._llm.predict(
handle_llm_prompt_template,
max_keywords=max_items,
question=query_str,
)
enitities_llm = extract_keywords_given_response(
response, start_token=result_start_token, lowercase=False
)
if cross_handle_policy == "union":
entities = list(set(enitities_fn) | enitities_llm)
elif cross_handle_policy == "intersection":
entities = list(set(enitities_fn).intersection(set(enitities_llm)))
if self._verbose:
print_text(f"Entities processed: {entities}\n", color="green")
return entities
async def _aprocess_entities(
self,
query_str: str,
handle_fn: Optional[Callable],
handle_llm_prompt_template: Optional[BasePromptTemplate],
cross_handle_policy: Optional[str] = "union",
max_items: Optional[int] = 5,
result_start_token: str = "KEYWORDS:",
) -> List[str]:
"""Get entities from query string."""
assert cross_handle_policy in [
"union",
"intersection",
], "Invalid entity extraction policy."
if cross_handle_policy == "intersection":
assert all(
[
handle_fn is not None,
handle_llm_prompt_template is not None,
]
), "Must provide entity extract function and template."
assert any(
[
handle_fn is not None,
handle_llm_prompt_template is not None,
]
), "Must provide either entity extract function or template."
enitities_fn: List[str] = []
enitities_llm: Set[str] = set()
if handle_fn is not None:
enitities_fn = handle_fn(query_str)
if handle_llm_prompt_template is not None:
response = await self._llm.apredict(
handle_llm_prompt_template,
max_keywords=max_items,
question=query_str,
)
enitities_llm = extract_keywords_given_response(
response, start_token=result_start_token, lowercase=False
)
if cross_handle_policy == "union":
entities = list(set(enitities_fn) | enitities_llm)
elif cross_handle_policy == "intersection":
entities = list(set(enitities_fn).intersection(set(enitities_llm)))
if self._verbose:
print_text(f"Entities processed: {entities}\n", color="green")
return entities
def _get_entities(self, query_str: str) -> List[str]:
"""Get entities from query string."""
entities = self._process_entities(
query_str,
self._entity_extract_fn,
self._entity_extract_template,
self._entity_extract_policy,
self._max_entities,
"KEYWORDS:",
)
expanded_entities = self._expand_synonyms(entities)
return list(set(entities) | set(expanded_entities))
async def _aget_entities(self, query_str: str) -> List[str]:
"""Get entities from query string."""
entities = await self._aprocess_entities(
query_str,
self._entity_extract_fn,
self._entity_extract_template,
self._entity_extract_policy,
self._max_entities,
"KEYWORDS:",
)
expanded_entities = await self._aexpand_synonyms(entities)
return list(set(entities) | set(expanded_entities))
def _expand_synonyms(self, keywords: List[str]) -> List[str]:
"""Expand synonyms or similar expressions for keywords."""
return self._process_entities(
str(keywords),
self._synonym_expand_fn,
self._synonym_expand_template,
self._synonym_expand_policy,
self._max_synonyms,
"SYNONYMS:",
)
async def _aexpand_synonyms(self, keywords: List[str]) -> List[str]:
"""Expand synonyms or similar expressions for keywords."""
return await self._aprocess_entities(
str(keywords),
self._synonym_expand_fn,
self._synonym_expand_template,
self._synonym_expand_policy,
self._max_synonyms,
"SYNONYMS:",
)
def _get_knowledge_sequence(
self, entities: List[str]
) -> Tuple[List[str], Optional[Dict[Any, Any]]]:
"""Get knowledge sequence from entities."""
# Get SubGraph from Graph Store as Knowledge Sequence
rel_map: Optional[Dict] = self._graph_store.get_rel_map(
entities, self._graph_traversal_depth, limit=self._max_knowledge_sequence
)
logger.debug(f"rel_map: {rel_map}")
# Build Knowledge Sequence
knowledge_sequence = []
if rel_map:
knowledge_sequence.extend(
[str(rel_obj) for rel_objs in rel_map.values() for rel_obj in rel_objs]
)
else:
logger.info("> No knowledge sequence extracted from entities.")
return [], None
return knowledge_sequence, rel_map
async def _aget_knowledge_sequence(
self, entities: List[str]
) -> Tuple[List[str], Optional[Dict[Any, Any]]]:
"""Get knowledge sequence from entities."""
# Get SubGraph from Graph Store as Knowledge Sequence
# TBD: async in graph store
rel_map: Optional[Dict] = self._graph_store.get_rel_map(
entities, self._graph_traversal_depth, limit=self._max_knowledge_sequence
)
logger.debug(f"rel_map from GraphStore:\n{rel_map}")
# Build Knowledge Sequence
knowledge_sequence = []
if rel_map:
knowledge_sequence.extend(
[str(rel_obj) for rel_objs in rel_map.values() for rel_obj in rel_objs]
)
else:
logger.info("> No knowledge sequence extracted from entities.")
return [], None
return knowledge_sequence, rel_map
def _build_nodes(
self, knowledge_sequence: List[str], rel_map: Optional[Dict[Any, Any]] = None
) -> List[NodeWithScore]:
"""Build nodes from knowledge sequence."""
if len(knowledge_sequence) == 0:
logger.info("> No knowledge sequence extracted from entities.")
return []
_new_line_char = "\n"
context_string = (
f"The following are knowledge sequence in max depth"
f" {self._graph_traversal_depth} "
f"in the form of directed graph like:\n"
f"`subject -[predicate]->, object, <-[predicate_next_hop]-,"
f" object_next_hop ...`"
f" extracted based on key entities as subject:\n"
f"{_new_line_char.join(knowledge_sequence)}"
)
if self._verbose:
print_text(f"Graph RAG context:\n{context_string}\n", color="blue")
rel_node_info = {
"kg_rel_map": rel_map,
"kg_rel_text": knowledge_sequence,
}
metadata_keys = ["kg_rel_map", "kg_rel_text"]
if self._graph_schema != "":
rel_node_info["kg_schema"] = {"schema": self._graph_schema}
metadata_keys.append("kg_schema")
node = NodeWithScore(
node=TextNode(
text=context_string,
score=1.0,
metadata=rel_node_info,
excluded_embed_metadata_keys=metadata_keys,
excluded_llm_metadata_keys=metadata_keys,
)
)
return [node]
def _retrieve_keyword(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve in keyword mode."""
if self._retriever_mode not in ["keyword", "keyword_embedding"]:
return []
# Get entities
entities = self._get_entities(query_bundle.query_str)
# Before we enable embedding/semantic search, we need to make sure
# we don't miss any entities that's synoynm of the entities we extracted
# in string matching based retrieval in following steps, thus we expand
# synonyms here.
if len(entities) == 0:
logger.info("> No entities extracted from query string.")
return []
# Get SubGraph from Graph Store as Knowledge Sequence
knowledge_sequence, rel_map = self._get_knowledge_sequence(entities)
return self._build_nodes(knowledge_sequence, rel_map)
async def _aretrieve_keyword(
self, query_bundle: QueryBundle
) -> List[NodeWithScore]:
"""Retrieve in keyword mode."""
if self._retriever_mode not in ["keyword", "keyword_embedding"]:
return []
# Get entities
entities = await self._aget_entities(query_bundle.query_str)
# Before we enable embedding/semantic search, we need to make sure
# we don't miss any entities that's synoynm of the entities we extracted
# in string matching based retrieval in following steps, thus we expand
# synonyms here.
if len(entities) == 0:
logger.info("> No entities extracted from query string.")
return []
# Get SubGraph from Graph Store as Knowledge Sequence
knowledge_sequence, rel_map = await self._aget_knowledge_sequence(entities)
return self._build_nodes(knowledge_sequence, rel_map)
def _retrieve_embedding(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve in embedding mode."""
if self._retriever_mode not in ["embedding", "keyword_embedding"]:
return []
# TBD: will implement this later with vector store.
raise NotImplementedError
async def _aretrieve_embedding(
self, query_bundle: QueryBundle
) -> List[NodeWithScore]:
"""Retrieve in embedding mode."""
if self._retriever_mode not in ["embedding", "keyword_embedding"]:
return []
# TBD: will implement this later with vector store.
raise NotImplementedError
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Build nodes for response."""
nodes: List[NodeWithScore] = []
if self._with_nl2graphquery:
try:
nodes_nl2graphquery = self._kg_query_engine._retrieve(query_bundle)
nodes.extend(nodes_nl2graphquery)
except Exception as e:
logger.warning(f"Error in retrieving from nl2graphquery: {e}")
nodes.extend(self._retrieve_keyword(query_bundle))
nodes.extend(self._retrieve_embedding(query_bundle))
return nodes
async def _aretrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Build nodes for response."""
nodes: List[NodeWithScore] = []
if self._with_nl2graphquery:
try:
nodes_nl2graphquery = await self._kg_query_engine._aretrieve(
query_bundle
)
nodes.extend(nodes_nl2graphquery)
except Exception as e:
logger.warning(f"Error in retrieving from nl2graphquery: {e}")
nodes.extend(await self._aretrieve_keyword(query_bundle))
nodes.extend(await self._aretrieve_embedding(query_bundle))
return nodes
| KnowledgeGraphRAGRetriever |
python | FactoryBoy__factory_boy | tests/test_typing.py | {
"start": 112,
"end": 167
} | class ____:
name: str
email: str
id: int
| User |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/shell-script-component/with-custom-scope.py | {
"start": 181,
"end": 1050
} | class ____(dg.Component, dg.Resolvable):
script_path: str
asset_specs: Sequence[dg.ResolvedAssetSpec]
@classmethod
def get_additional_scope(cls) -> Mapping[str, Any]:
return {
"daily_partitions": dg.DailyPartitionsDefinition(start_date="2024-01-01")
}
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions:
resolved_script_path = Path(context.path, self.script_path).absolute()
@dg.multi_asset(name=Path(self.script_path).stem, specs=self.asset_specs)
def _asset(context: dg.AssetExecutionContext):
self.execute(resolved_script_path, context)
return dg.Definitions(assets=[_asset])
def execute(self, resolved_script_path: Path, context: dg.AssetExecutionContext):
return subprocess.run(["sh", str(resolved_script_path)], check=True)
| ShellCommand |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.