language | repo | path | class_span | source | target |
|---|---|---|---|---|---|
python | spyder-ide__spyder | spyder/utils/snippets/nodes.py | {
"start": 1059,
"end": 1215
} | class ____:
TEXT = 'text'
LEAF = 'leaf'
FORMAT = 'format'
# ------------------------- Base AST Node classes -----------------------------
| NodeKind |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/operators/campaign_manager.py | {
"start": 15330,
"end": 19097
} | class ____(BaseOperator):
"""
Inserts conversions.
.. seealso::
Check official API docs:
`https://developers.google.com/doubleclick-advertisers/rest/v4/conversions/batchinsert`
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleCampaignManagerBatchInsertConversionsOperator`
:param profile_id: User profile ID associated with this request.
:param conversions: Conversions to insert, should be type of Conversion:
https://developers.google.com/doubleclick-advertisers/rest/v4/conversions
:param encryption_entity_type: The encryption entity type. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_entity_id: The encryption entity ID. This should match the encryption
configuration for ad serving or Data Transfer.
:param encryption_source: Describes whether the encrypted cookie was received from ad serving
(the %m macro) or from Data Transfer.
:param max_failed_inserts: The maximum number of conversions that failed to be inserted
:param api_version: The version of the api that will be requested, for example 'v4'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"profile_id",
"conversions",
"encryption_entity_type",
"encryption_entity_id",
"encryption_source",
"impersonation_chain",
)
def __init__(
self,
*,
profile_id: str,
conversions: list[dict[str, Any]],
encryption_entity_type: str,
encryption_entity_id: int,
encryption_source: str,
max_failed_inserts: int = 0,
api_version: str = "v4",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.profile_id = profile_id
self.conversions = conversions
self.encryption_entity_type = encryption_entity_type
self.encryption_entity_id = encryption_entity_id
self.encryption_source = encryption_source
self.max_failed_inserts = max_failed_inserts
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = GoogleCampaignManagerHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
response = hook.conversions_batch_insert(
profile_id=self.profile_id,
conversions=self.conversions,
encryption_entity_type=self.encryption_entity_type,
encryption_entity_id=self.encryption_entity_id,
encryption_source=self.encryption_source,
max_failed_inserts=self.max_failed_inserts,
)
return response
| GoogleCampaignManagerBatchInsertConversionsOperator |
python | google__pytype | pytype/rewrite/abstract/containers.py | {
"start": 811,
"end": 1899
} | class ____(base.PythonConstant[dict[_Var, _Var]]):
"""Representation of a Python dict."""
def __init__(
self,
ctx: base.ContextType,
constant: dict[_Var, _Var],
):
assert isinstance(constant, dict), constant
super().__init__(ctx, constant)
def __repr__(self):
return f'Dict({self.constant!r})'
@classmethod
def from_function_arg_dict(
cls, ctx: base.ContextType, val: internal.FunctionArgDict
) -> 'Dict':
assert not val.indefinite
new_constant = {
ctx.consts[k].to_variable(): v
for k, v in val.constant.items()
}
return cls(ctx, new_constant)
def setitem(self, key: _Var, val: _Var) -> 'Dict':
return Dict(self._ctx, {**self.constant, key: val})
def update(self, val: 'Dict') -> base.BaseValue:
return Dict(self._ctx, {**self.constant, **val.constant})
def to_function_arg_dict(self) -> internal.FunctionArgDict:
new_const = {
utils.get_atomic_constant(k, str): v
for k, v in self.constant.items()
}
return internal.FunctionArgDict(self._ctx, new_const)
| Dict |
python | celery__celery | celery/worker/state.py | {
"start": 6031,
"end": 8583
} | class ____:
"""Stores worker state between restarts.
This is the persistent data stored by the worker when
:option:`celery worker --statedb` is enabled.
Currently only stores revoked task id's.
"""
storage = shelve
protocol = pickle_protocol
compress = zlib.compress
decompress = zlib.decompress
_is_open = False
def __init__(self, state, filename, clock=None):
self.state = state
self.filename = filename
self.clock = clock
self.merge()
def open(self):
return self.storage.open(
self.filename, protocol=self.protocol, writeback=True,
)
def merge(self):
self._merge_with(self.db)
def sync(self):
self._sync_with(self.db)
self.db.sync()
def close(self):
if self._is_open:
self.db.close()
self._is_open = False
def save(self):
self.sync()
self.close()
def _merge_with(self, d):
self._merge_revoked(d)
self._merge_clock(d)
return d
def _sync_with(self, d):
self._revoked_tasks.purge()
d.update({
'__proto__': 3,
'zrevoked': self.compress(self._dumps(self._revoked_tasks)),
'clock': self.clock.forward() if self.clock else 0,
})
return d
def _merge_clock(self, d):
if self.clock:
d['clock'] = self.clock.adjust(d.get('clock') or 0)
def _merge_revoked(self, d):
try:
self._merge_revoked_v3(d['zrevoked'])
except KeyError:
try:
self._merge_revoked_v2(d.pop('revoked'))
except KeyError:
pass
# purge expired items at boot
self._revoked_tasks.purge()
def _merge_revoked_v3(self, zrevoked):
if zrevoked:
self._revoked_tasks.update(pickle.loads(self.decompress(zrevoked)))
def _merge_revoked_v2(self, saved):
if not isinstance(saved, LimitedSet):
# (pre 3.0.18) used to be stored as a dict
return self._merge_revoked_v1(saved)
self._revoked_tasks.update(saved)
def _merge_revoked_v1(self, saved):
add = self._revoked_tasks.add
for item in saved:
add(item)
def _dumps(self, obj):
return pickle.dumps(obj, protocol=self.protocol)
@property
def _revoked_tasks(self):
return self.state.revoked
@cached_property
def db(self):
self._is_open = True
return self.open()
| Persistent |
python | openai__openai-python | src/openai/types/beta/file_search_tool_param.py | {
"start": 632,
"end": 1540
} | class ____(TypedDict, total=False):
max_num_results: int
"""The maximum number of results the file search tool should output.
The default is 20 for `gpt-4*` models and 5 for `gpt-3.5-turbo`. This number
should be between 1 and 50 inclusive.
Note that the file search tool may output fewer than `max_num_results` results.
See the
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
ranking_options: FileSearchRankingOptions
"""The ranking options for the file search.
If not specified, the file search tool will use the `auto` ranker and a
score_threshold of 0.
See the
[file search tool documentation](https://platform.openai.com/docs/assistants/tools/file-search#customizing-file-search-settings)
for more information.
"""
| FileSearch |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 693438,
"end": 693952
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of MarkPullRequestReadyForReview"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "pull_request")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
pull_request = sgqlc.types.Field("PullRequest", graphql_name="pullRequest")
"""The pull request that is ready for review."""
| MarkPullRequestReadyForReviewPayload |
python | python-markdown__markdown | markdown/extensions/md_in_html.py | {
"start": 13789,
"end": 14146
} | class ____(Preprocessor):
"""Remove html blocks from the text and store them for later retrieval."""
def run(self, lines: list[str]) -> list[str]:
source = '\n'.join(lines)
parser = HTMLExtractorExtra(self.md)
parser.feed(source)
parser.close()
return ''.join(parser.cleandoc).split('\n')
| HtmlBlockPreprocessor |
python | chardet__chardet | chardet/chardistribution.py | {
"start": 9576,
"end": 10316
} | class ____(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = JIS_CHAR_TO_FREQ_ORDER
self._table_size = JIS_TABLE_SIZE
self.typical_distribution_ratio = JIS_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # type: ignore[reportIncompatibleMethodOverride]
# for euc-JP encoding, we are interested
# first byte range: 0xa0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
char = byte_str[0]
if char >= 0xA0:
return 94 * (char - 0xA1) + byte_str[1] - 0xA1
return -1
| EUCJPDistributionAnalysis |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_dot.py | {
"start": 115,
"end": 2434
} | class ____:
@pytest.fixture
def obj(self):
raise NotImplementedError
@pytest.fixture
def other(self) -> DataFrame:
"""
other is a DataFrame that is indexed so that obj.dot(other) is valid
"""
raise NotImplementedError
@pytest.fixture
def expected(self, obj, other) -> DataFrame:
"""
The expected result of obj.dot(other)
"""
raise NotImplementedError
@classmethod
def reduced_dim_assert(cls, result, expected):
"""
        Assertion about results with 1 fewer dimension than self.obj
"""
raise NotImplementedError
def test_dot_equiv_values_dot(self, obj, other, expected):
# `expected` is constructed from obj.values.dot(other.values)
result = obj.dot(other)
tm.assert_equal(result, expected)
def test_dot_2d_ndarray(self, obj, other, expected):
# Check ndarray argument; in this case we get matching values,
# but index/columns may not match
result = obj.dot(other.values)
assert np.all(result == expected.values)
def test_dot_1d_ndarray(self, obj, expected):
# can pass correct-length array
row = obj.iloc[0] if obj.ndim == 2 else obj
result = obj.dot(row.values)
expected = obj.dot(row)
self.reduced_dim_assert(result, expected)
def test_dot_series(self, obj, other, expected):
# Check series argument
result = obj.dot(other["1"])
self.reduced_dim_assert(result, expected["1"])
def test_dot_series_alignment(self, obj, other, expected):
result = obj.dot(other.iloc[::-1]["1"])
self.reduced_dim_assert(result, expected["1"])
def test_dot_aligns(self, obj, other, expected):
# Check index alignment
other2 = other.iloc[::-1]
result = obj.dot(other2)
tm.assert_equal(result, expected)
def test_dot_shape_mismatch(self, obj):
msg = "Dot product shape mismatch"
# exception raised is of type Exception
with pytest.raises(Exception, match=msg):
obj.dot(obj.values[:3])
def test_dot_misaligned(self, obj, other):
msg = "matrices are not aligned"
with pytest.raises(ValueError, match=msg):
obj.dot(other.T)
| DotSharedTests |
python | openai__openai-python | src/openai/types/shared/response_format_json_schema.py | {
"start": 290,
"end": 1311
} | class ____(BaseModel):
name: str
"""The name of the response format.
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length
of 64.
"""
description: Optional[str] = None
"""
A description of what the response format is for, used by the model to determine
how to respond in the format.
"""
schema_: Optional[Dict[str, object]] = FieldInfo(alias="schema", default=None)
"""
The schema for the response format, described as a JSON Schema object. Learn how
to build JSON schemas [here](https://json-schema.org/).
"""
strict: Optional[bool] = None
"""
Whether to enable strict schema adherence when generating the output. If set to
true, the model will always follow the exact schema defined in the `schema`
field. Only a subset of JSON Schema is supported when `strict` is `true`. To
learn more, read the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
"""
| JSONSchema |
python | walkccc__LeetCode | solutions/2800. Shortest String That Contains Three Strings/2800.py | {
"start": 0,
"end": 1005
} | class ____:
def minimumString(self, a: str, b: str, c: str) -> str:
def merge(a: str, b: str) -> str:
"""Merges a and b."""
if a in b: # a is a substring of b.
return b
for i in range(len(a)):
aSuffix = a[i:]
bPrefix = b[:len(aSuffix)]
if aSuffix == bPrefix:
return a + b[len(bPrefix):]
return a + b
abc = merge(a, merge(b, c))
acb = merge(a, merge(c, b))
bac = merge(b, merge(a, c))
bca = merge(b, merge(c, a))
cab = merge(c, merge(a, b))
cba = merge(c, merge(b, a))
return self._getMin([abc, acb, bac, bca, cab, cba])
def _getMin(self, words: list[str]) -> str:
"""Returns the lexicographically smallest string."""
def getMin(a: str, b: str) -> str:
"""Returns the lexicographically smaller string."""
return a if len(a) < len(b) or (len(a) == len(b) and a < b) else b
res = words[0]
for i in range(1, len(words)):
res = getMin(res, words[i])
return res
| Solution |
python | matplotlib__matplotlib | lib/matplotlib/colors.py | {
"start": 23624,
"end": 37967
} | class ____:
"""
Baseclass for all scalar to RGBA mappings.
Typically, Colormap instances are used to convert data values (floats)
from the interval ``[0, 1]`` to the RGBA color that the respective
Colormap represents. For scaling of data into the ``[0, 1]`` interval see
`matplotlib.colors.Normalize`. Subclasses of `matplotlib.cm.ScalarMappable`
make heavy use of this ``data -> normalize -> map-to-color`` processing
chain.
"""
def __init__(self, name, N=256, *, bad=None, under=None, over=None):
"""
Parameters
----------
name : str
The name of the colormap.
N : int
The number of RGB quantization levels.
bad : :mpltype:`color`, default: transparent
The color for invalid values (NaN or masked).
.. versionadded:: 3.11
under : :mpltype:`color`, default: color of the lowest value
The color for low out-of-range values.
.. versionadded:: 3.11
over : :mpltype:`color`, default: color of the highest value
The color for high out-of-range values.
.. versionadded:: 3.11
"""
self.name = name
self.N = int(N) # ensure that N is always int
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) if bad is None else to_rgba(bad)
self._rgba_under = None if under is None else to_rgba(under)
self._rgba_over = None if over is None else to_rgba(over)
self._i_under = self.N
self._i_over = self.N + 1
self._i_bad = self.N + 2
self._isinit = False
self.n_variates = 1
#: When this colormap exists on a scalar mappable and colorbar_extend
#: is not False, colorbar creation will pick up ``colorbar_extend`` as
#: the default value for the ``extend`` keyword in the
#: `matplotlib.colorbar.Colorbar` constructor.
self.colorbar_extend = False
def __call__(self, X, alpha=None, bytes=False):
r"""
Parameters
----------
X : float or int or array-like
The data value(s) to convert to RGBA.
For floats, *X* should be in the interval ``[0.0, 1.0]`` to
return the RGBA values ``X*100`` percent along the Colormap line.
For integers, *X* should be in the interval ``[0, Colormap.N)`` to
return RGBA values *indexed* from the Colormap with index ``X``.
alpha : float or array-like or None
Alpha must be a scalar between 0 and 1, a sequence of such
floats with shape matching X, or None.
bytes : bool, default: False
If False (default), the returned RGBA values will be floats in the
interval ``[0, 1]`` otherwise they will be `numpy.uint8`\s in the
interval ``[0, 255]``.
Returns
-------
Tuple of RGBA values if X is scalar, otherwise an array of
RGBA values with a shape of ``X.shape + (4, )``.
"""
rgba, mask = self._get_rgba_and_mask(X, alpha=alpha, bytes=bytes)
if not np.iterable(X):
rgba = tuple(rgba)
return rgba
def _get_rgba_and_mask(self, X, alpha=None, bytes=False):
r"""
Parameters
----------
X : float or int or array-like
The data value(s) to convert to RGBA.
For floats, *X* should be in the interval ``[0.0, 1.0]`` to
return the RGBA values ``X*100`` percent along the Colormap line.
For integers, *X* should be in the interval ``[0, Colormap.N)`` to
return RGBA values *indexed* from the Colormap with index ``X``.
alpha : float or array-like or None
Alpha must be a scalar between 0 and 1, a sequence of such
floats with shape matching X, or None.
bytes : bool, default: False
If False (default), the returned RGBA values will be floats in the
interval ``[0, 1]`` otherwise they will be `numpy.uint8`\s in the
interval ``[0, 255]``.
Returns
-------
colors : np.ndarray
Array of RGBA values with a shape of ``X.shape + (4, )``.
mask : np.ndarray
Boolean array with True where the input is ``np.nan`` or masked.
"""
self._ensure_inited()
xa = np.array(X, copy=True)
if not xa.dtype.isnative:
# Native byteorder is faster.
xa = xa.byteswap().view(xa.dtype.newbyteorder())
if xa.dtype.kind == "f":
xa *= self.N
# xa == 1 (== N after multiplication) is not out of range.
xa[xa == self.N] = self.N - 1
# Pre-compute the masks before casting to int (which can truncate
# negative values to zero or wrap large floats to negative ints).
mask_under = xa < 0
mask_over = xa >= self.N
# If input was masked, get the bad mask from it; else mask out nans.
mask_bad = X.mask if np.ma.is_masked(X) else np.isnan(xa)
with np.errstate(invalid="ignore"):
# We need this cast for unsigned ints as well as floats
xa = xa.astype(int)
xa[mask_under] = self._i_under
xa[mask_over] = self._i_over
xa[mask_bad] = self._i_bad
lut = self._lut
if bytes:
lut = (lut * 255).astype(np.uint8)
rgba = lut.take(xa, axis=0, mode='clip')
if alpha is not None:
alpha = np.clip(alpha, 0, 1)
if bytes:
alpha *= 255 # Will be cast to uint8 upon assignment.
if alpha.shape not in [(), xa.shape]:
raise ValueError(
f"alpha is array-like but its shape {alpha.shape} does "
f"not match that of X {xa.shape}")
rgba[..., -1] = alpha
# If the "bad" color is all zeros, then ignore alpha input.
if (lut[-1] == 0).all():
rgba[mask_bad] = (0, 0, 0, 0)
return rgba, mask_bad
def __copy__(self):
cls = self.__class__
cmapobject = cls.__new__(cls)
cmapobject.__dict__.update(self.__dict__)
if self._isinit:
cmapobject._lut = np.copy(self._lut)
return cmapobject
def __eq__(self, other):
if (not isinstance(other, Colormap) or
self.colorbar_extend != other.colorbar_extend):
return False
# To compare lookup tables the Colormaps have to be initialized
self._ensure_inited()
other._ensure_inited()
return np.array_equal(self._lut, other._lut)
def get_bad(self):
"""Get the color for masked values."""
self._ensure_inited()
return np.array(self._lut[self._i_bad])
@_api.deprecated(
"3.11",
pending=True,
alternative="cmap.with_extremes(bad=...) or Colormap(bad=...)")
def set_bad(self, color='k', alpha=None):
"""Set the color for masked values."""
self._set_extremes(bad=(color, alpha))
def get_under(self):
"""Get the color for low out-of-range values."""
self._ensure_inited()
return np.array(self._lut[self._i_under])
@_api.deprecated(
"3.11",
pending=True,
alternative="cmap.with_extremes(under=...) or Colormap(under=...)")
def set_under(self, color='k', alpha=None):
"""Set the color for low out-of-range values."""
self._set_extremes(under=(color, alpha))
def get_over(self):
"""Get the color for high out-of-range values."""
self._ensure_inited()
return np.array(self._lut[self._i_over])
@_api.deprecated(
"3.11",
pending=True,
alternative="cmap.with_extremes(over=...) or Colormap(over=...)")
def set_over(self, color='k', alpha=None):
"""Set the color for high out-of-range values."""
self._set_extremes(over=(color, alpha))
@_api.deprecated(
"3.11",
pending=True,
alternative="cmap.with_extremes(bad=..., under=..., over=...) or "
"Colormap(bad=..., under=..., over=...)")
def set_extremes(self, *, bad=None, under=None, over=None):
"""
Set the colors for masked (*bad*) values and, when ``norm.clip =
False``, low (*under*) and high (*over*) out-of-range values.
"""
self._set_extremes(bad=bad, under=under, over=over)
def with_extremes(self, *, bad=None, under=None, over=None):
"""
Return a copy of the colormap, for which the colors for masked (*bad*)
values and, when ``norm.clip = False``, low (*under*) and high (*over*)
out-of-range values, have been set accordingly.
"""
new_cm = self.copy()
new_cm._set_extremes(bad=bad, under=under, over=over)
return new_cm
def _set_extremes(self, bad=None, under=None, over=None):
"""
Set the colors for masked (*bad*) and out-of-range (*under* and *over*) values.
Parameters that are None are left unchanged.
"""
if bad is not None:
self._rgba_bad = to_rgba(bad)
if under is not None:
self._rgba_under = to_rgba(under)
if over is not None:
self._rgba_over = to_rgba(over)
if self._isinit:
self._update_lut_extremes()
def _update_lut_extremes(self):
"""Ensure than an existing lookup table has the correct extreme values."""
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N - 1]
self._lut[self._i_bad] = self._rgba_bad
def with_alpha(self, alpha):
"""
Return a copy of the colormap with a new uniform transparency.
Parameters
----------
alpha : float
The alpha blending value, between 0 (transparent) and 1 (opaque).
"""
if not isinstance(alpha, Real):
raise TypeError(f"'alpha' must be numeric or None, not {type(alpha)}")
if not 0 <= alpha <= 1:
raise ValueError("'alpha' must be between 0 and 1, inclusive")
new_cm = self.copy()
new_cm._ensure_inited()
new_cm._lut[:, 3] = alpha
return new_cm
def _init(self):
"""Generate the lookup table, ``self._lut``."""
raise NotImplementedError("Abstract class only")
def _ensure_inited(self):
if not self._isinit:
self._init()
def is_gray(self):
"""Return whether the colormap is grayscale."""
self._ensure_inited()
return (np.all(self._lut[:, 0] == self._lut[:, 1]) and
np.all(self._lut[:, 0] == self._lut[:, 2]))
def resampled(self, lutsize):
"""Return a new colormap with *lutsize* entries."""
if hasattr(self, '_resample'):
_api.warn_external(
"The ability to resample a color map is now public API "
f"However the class {type(self)} still only implements "
"the previous private _resample method. Please update "
"your class."
)
return self._resample(lutsize)
raise NotImplementedError()
def reversed(self, name=None):
"""
Return a reversed instance of the Colormap.
.. note:: This function is not implemented for the base class.
Parameters
----------
name : str, optional
The name for the reversed colormap. If None, the
name is set to ``self.name + "_r"``.
See Also
--------
LinearSegmentedColormap.reversed
ListedColormap.reversed
"""
raise NotImplementedError()
def _repr_png_(self):
"""Generate a PNG representation of the Colormap."""
X = np.tile(np.linspace(0, 1, _REPR_PNG_SIZE[0]),
(_REPR_PNG_SIZE[1], 1))
pixels = self(X, bytes=True)
png_bytes = io.BytesIO()
title = self.name + ' colormap'
author = f'Matplotlib v{mpl.__version__}, https://matplotlib.org'
pnginfo = PngInfo()
pnginfo.add_text('Title', title)
pnginfo.add_text('Description', title)
pnginfo.add_text('Author', author)
pnginfo.add_text('Software', author)
Image.fromarray(pixels).save(png_bytes, format='png', pnginfo=pnginfo)
return png_bytes.getvalue()
def _repr_html_(self):
"""Generate an HTML representation of the Colormap."""
png_bytes = self._repr_png_()
png_base64 = base64.b64encode(png_bytes).decode('ascii')
def color_block(color):
hex_color = to_hex(color, keep_alpha=True)
return (f'<div title="{hex_color}" '
'style="display: inline-block; '
'width: 1em; height: 1em; '
'margin: 0; '
'vertical-align: middle; '
'border: 1px solid #555; '
f'background-color: {hex_color};"></div>')
return ('<div style="vertical-align: middle;">'
f'<strong>{self.name}</strong> '
'</div>'
'<div class="cmap"><img '
f'alt="{self.name} colormap" '
f'title="{self.name}" '
'style="border: 1px solid #555;" '
f'src="data:image/png;base64,{png_base64}"></div>'
'<div style="vertical-align: middle; '
f'max-width: {_REPR_PNG_SIZE[0]+2}px; '
'display: flex; justify-content: space-between;">'
'<div style="float: left;">'
f'{color_block(self.get_under())} under'
'</div>'
'<div style="margin: 0 auto; display: inline-block;">'
f'bad {color_block(self.get_bad())}'
'</div>'
'<div style="float: right;">'
f'over {color_block(self.get_over())}'
'</div>'
'</div>')
def copy(self):
"""Return a copy of the colormap."""
return self.__copy__()
| Colormap |
python | pytorch__pytorch | torch/_dynamo/variables/ctx_manager.py | {
"start": 46781,
"end": 48627
} | class ____(ContextWrappingVariable):
"""
fx.traceback.annotate is a context manager that allows users to annotate the
fx graph nodes with custom metadata. In the context of Dynamo, we don't have
to trace the body of the context manager. Instead we want to directly run
the body of the context manager, so the Dynamo created Fx graphs have the
right custom metadata. This variable tracker just runs __enter__ and
__exit__ method (instead of tracing).
"""
def __init__(
self, target_values: Any, initial_values: Any = None, **kwargs: Any
) -> None:
super().__init__(
target_values=target_values, initial_values=initial_values, **kwargs
)
def enter(
self, tx: "InstructionTranslator", *args: VariableTracker
) -> VariableTracker:
# Run the annotation ctx manager in eager. Also ensure that
# preserve_node_meta context manager is setup. This is important to pass
# on the metadata to the create_proxy nodes.
stack = ExitStack()
stack.enter_context(torch.fx.traceback.annotate(self.target_values))
stack.enter_context(torch.fx.traceback.preserve_node_meta())
self.set_cleanup_hook(tx, lambda: stack.close())
return variables.ConstantVariable.create(None)
def module_name(self) -> str:
return "torch.fx.traceback"
def fn_name(self) -> str:
return "annotate"
def reconstruct_type(self, codegen: "PyCodegen") -> None:
unimplemented(
gb_type="torch.fx.traceback.annotate escaped from compiled region",
context=str(self),
explanation="Dynamo doesn't support graph break on torch.fx.traceback.annotate.",
hints=[
*graph_break_hints.SUPPORTABLE,
],
)
| FxTracebackAnnotateVariable |
python | readthedocs__readthedocs.org | readthedocs/core/static.py | {
"start": 226,
"end": 622
} | class ____(FileSystemFinder):
"""
Add user media paths in ``media/`` to ignore patterns.
This allows collectstatic inside ``media/`` without collecting all of the
paths that include user files
"""
def list(self, ignore_patterns):
ignore_patterns.extend(["epub", "pdf", "htmlzip", "json", "man"])
return super().list(ignore_patterns)
| SelectiveFileSystemFinder |
python | pypa__warehouse | warehouse/oidc/models/_core.py | {
"start": 1031,
"end": 2895
} | class ____(TypedDict, total=False):
publisher_service: OIDCPublisherService
CheckClaimCallable = Callable[[C, C, SignedClaims, Unpack[CheckNamedArguments]], bool]
def check_claim_binary(binary_func: Callable[[C, C], bool]) -> CheckClaimCallable[C]:
"""
Wraps a binary comparison function so that it takes three arguments instead,
ignoring the third.
This is used solely to make claim verification compatible with "trivial"
comparison checks like `str.__eq__`.
"""
def wrapper(
ground_truth: C,
signed_claim: C,
_all_signed_claims: SignedClaims,
**_kwargs: Unpack[CheckNamedArguments],
) -> bool:
return binary_func(ground_truth, signed_claim)
return wrapper
def check_claim_invariant(value: C) -> CheckClaimCallable[C]:
"""
Wraps a fixed value comparison into a three-argument function.
This is used solely to make claim verification compatible with "invariant"
comparison checks, like "claim x is always the literal `true` value".
"""
def wrapper(
ground_truth: C,
signed_claim: C,
_all_signed_claims: SignedClaims,
**_kwargs: Unpack[CheckNamedArguments],
):
return ground_truth == signed_claim == value
return wrapper
def check_existing_jti(
_ground_truth,
signed_claim: str,
_all_signed_claims,
**kwargs: Unpack[CheckNamedArguments],
) -> bool:
"""Returns True if the checks passes or raises an exception."""
publisher_service: OIDCPublisherService = kwargs["publisher_service"]
if publisher_service.jwt_identifier_exists(signed_claim):
publisher_service.metrics.increment(
"warehouse.oidc.reused_token",
tags=[f"publisher:{publisher_service.publisher}"],
)
raise ReusedTokenError()
return True
| CheckNamedArguments |
python | scipy__scipy | scipy/stats/_discrete_distns.py | {
"start": 51764,
"end": 53121
} | class ____(rv_discrete_frozen):
# copied from rv_frozen; we just need to bind the `_parse_args` methods
def __init__(self, dist, *args, **kwds): # verbatim
self.args = args # verbatim
self.kwds = kwds # verbatim
# create a new instance # verbatim
self.dist = dist.__class__(**dist._updated_ctor_param()) # verbatim
# Here is the only modification
self.dist._parse_args_rvs = _parse_args_rvs.__get__(_pb_obj, _pb_cls)
self.dist._parse_args_stats = _parse_args_stats.__get__(_pb_obj, _pb_cls)
self.dist._parse_args = _parse_args.__get__(_pb_obj, _pb_cls)
shapes, _, _ = self.dist._parse_args(*args, **kwds) # verbatim
self.a, self.b = self.dist._get_support(*shapes) # verbatim
def expect(self, func=None, lb=None, ub=None, conditional=False, **kwds):
a, loc, scale = self.dist._parse_args(*self.args, **self.kwds)
# Here's the modification: we pass all args (including `loc`) into the `args`
# parameter of `expect` so the shape only goes through `_parse_args` once.
return self.dist.expect(func, self.args, loc, lb, ub, conditional, **kwds)
| poisson_binomial_frozen |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 25571,
"end": 27374
} | class ____(UnicodeFonts, metaclass=abc.ABCMeta):
_fontmap: dict[str | int, str] = {}
def __init__(self, default_font_prop: FontProperties, load_glyph_flags: LoadFlags):
# This must come first so the backend's owner is set correctly
if isinstance(self, DejaVuSerifFonts):
self._fallback_font = StixFonts(default_font_prop, load_glyph_flags)
else:
self._fallback_font = StixSansFonts(default_font_prop, load_glyph_flags)
self.bakoma = BakomaFonts(default_font_prop, load_glyph_flags)
TruetypeFonts.__init__(self, default_font_prop, load_glyph_flags)
# Include Stix sized alternatives for glyphs
self._fontmap.update({
1: 'STIXSizeOneSym',
2: 'STIXSizeTwoSym',
3: 'STIXSizeThreeSym',
4: 'STIXSizeFourSym',
5: 'STIXSizeFiveSym',
})
for key, name in self._fontmap.items():
fullpath = findfont(name)
self.fontmap[key] = fullpath
self.fontmap[name] = fullpath
def _get_glyph(self, fontname: str, font_class: str,
sym: str) -> tuple[FT2Font, int, bool]:
# Override prime symbol to use Bakoma.
if sym == r'\prime':
return self.bakoma._get_glyph(fontname, font_class, sym)
else:
# check whether the glyph is available in the display font
uniindex = get_unicode_index(sym)
font = self._get_font('ex')
if font is not None:
glyphindex = font.get_char_index(uniindex)
if glyphindex != 0:
return super()._get_glyph('ex', font_class, sym)
# otherwise return regular glyph
return super()._get_glyph(fontname, font_class, sym)
| DejaVuFonts |
python | getsentry__sentry | src/sentry/organizations/services/organization/model.py | {
"start": 2300,
"end": 2674
} | class ____(RpcModel):
id: int = -1
slug: str = ""
is_active: bool = False
role_id: str = ""
project_ids: list[int] = Field(default_factory=list)
scopes: list[str] = Field(default_factory=list)
team_id: int = -1
@property
def role(self) -> TeamRole | None:
return team_roles.get(self.role_id) if self.role_id else None
| RpcTeamMember |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/metrics/data_profiler_metrics/data_profiler_table_column_infos.py | {
"start": 546,
"end": 2002
} | class ____(DataProfilerProfileMetricProvider):
metric_name = "data_profiler.table_column_infos"
value_keys = ("profile_path",)
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine,
metric_domain_kwargs,
metric_value_kwargs,
metrics,
runtime_configuration,
):
profile_report: dict = metrics["data_profiler.profile_report"]
profile_report_column_data_stats: dict = {
element["column_name"]: element for element in profile_report["data_stats"]
}
return profile_report_column_data_stats
@classmethod
def _get_evaluation_dependencies(
cls,
metric: MetricConfiguration,
configuration: Optional[ExpectationConfiguration] = None,
execution_engine: Optional[ExecutionEngine] = None,
runtime_configuration: Optional[dict] = None,
):
dependencies: dict = super()._get_evaluation_dependencies(
metric=metric,
configuration=configuration,
execution_engine=execution_engine,
runtime_configuration=runtime_configuration,
)
dependencies["data_profiler.profile_report"] = MetricConfiguration(
metric_name="data_profiler.profile_report",
metric_domain_kwargs={},
metric_value_kwargs=metric.metric_value_kwargs,
)
return dependencies
| DataProfilerTableColumnInfos |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/benchmarks/unbatch_benchmark.py | {
"start": 853,
"end": 2451
} | class ____(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for `tf.data.Dataset.unbatch()`."""
def benchmark_native_unbatch(self):
batch_sizes = [1, 2, 5, 10, 20, 50]
num_elements = 10000
for batch_size in batch_sizes:
dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
dataset = dataset.batch(batch_size)
dataset = dataset.unbatch()
self.run_and_report_benchmark(
dataset=dataset,
num_elements=num_elements,
iters=5,
extras={
"model_name": "unbatch.benchmark.1",
"parameters": "%d" % batch_size,
},
name="native_batch_size_%d" % batch_size)
# Include a benchmark of the previous `unbatch()` implementation that uses
# a composition of more primitive ops. Eventually we'd hope to generate code
# that is as good in both cases.
def benchmark_old_unbatch_implementation(self):
batch_sizes = [1, 2, 5, 10, 20, 50]
num_elements = 10000
for batch_size in batch_sizes:
dataset = dataset_ops.Dataset.from_tensors("element").repeat(None)
dataset = dataset.batch(batch_size)
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)
self.run_and_report_benchmark(
dataset=dataset,
num_elements=num_elements,
iters=5,
extras={
"model_name": "unbatch.benchmark.2",
"parameters": "%d" % batch_size,
},
name="unfused_batch_size_%d" % batch_size)
if __name__ == "__main__":
benchmark_base.test.main()
| UnbatchBenchmark |
python | openai__gym | tests/wrappers/test_pixel_observation.py | {
"start": 224,
"end": 994
} | class ____(gym.Env):
def __init__(self, render_mode="single_rgb_array"):
self.action_space = spaces.Box(shape=(1,), low=-1, high=1, dtype=np.float32)
self.render_mode = render_mode
def render(self, mode="human", width=32, height=32):
image_shape = (height, width, 3)
return np.zeros(image_shape, dtype=np.uint8)
def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
super().reset(seed=seed)
observation = self.observation_space.sample()
return observation, {}
def step(self, action):
del action
observation = self.observation_space.sample()
reward, terminal, info = 0.0, False, {}
return observation, reward, terminal, info
| FakeEnvironment |
python | getsentry__sentry | src/sentry/api/serializers/models/dashboard.py | {
"start": 1884,
"end": 1963
} | class ____(TypedDict):
max_values: dict[str, int]
unit: str
| ThresholdType |
python | pytorch__pytorch | torch/testing/_internal/distributed/rpc/jit/rpc_test.py | {
"start": 26489,
"end": 46869
} | class ____(
RRefAPITest,
RRefTypingTest,
LocalRRefTest,
JitRpcOpTest,
FutureTypingTest,
RpcAgentTestFixture,
):
@dist_init
def test_torchscript_function(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
local_ret = one_arg(torch.ones(2, 2))
ret = rpc.rpc_sync(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
self.assertEqual(ret, local_ret)
rref = rpc.remote(dst_worker_name, one_arg, args=(torch.ones(2, 2),))
self.assertEqual(rref.to_here(), local_ret)
# create rref to itself
local_rref = rpc.remote(
worker_name(self.rank), one_arg, args=(torch.ones(2, 2),)
)
self.assertEqual(local_rref.to_here(), local_ret)
@dist_init
def test_torchscript_function_exception(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
rpc.rpc_sync(dst_worker_name, one_arg, args=(10, 20))
with self.assertRaisesRegex(RuntimeError, r"one_arg\(\) expected at most"):
rpc.remote(dst_worker_name, one_arg, args=(10, 20))
@dist_init
def test_torchscript_functions_not_supported(self):
dst_worker_name = worker_name((self.rank + 1) % self.world_size)
my_local_script_module = MyScriptModule(self.rank)
# It is not thread safe to instantiate MyScriptModule in multiple threads,
# wait for local MyScriptModule instantiation to finish,
# otherwise it could instantiate MyScriptModule in parallel with
# server thread in the below
initialize_pg(self.file_init_method, self.rank, self.world_size)
dist.barrier()
# rpc_sync still accepts script class and run it in
# the same code path as python call.
rpc.rpc_sync(dst_worker_name, MyScriptClass, args=(self.rank,))
# rpc_sync does not accept script module method.
# Python 3.5 and Python 3.6 throw different error message, the only
# common word can be greped is "pickle".
with self.assertRaisesRegex(TypeError, "pickle"):
rpc.rpc_async(dst_worker_name, my_local_script_module.forward, args=())
@dist_init
def test_remote_script_module(self):
# TODO, need more investigation
# there is rref leak when shutting down, suspect it is because
# ref as arg is passed to pybind boundary, and the ref is not garbage
# collected by python when calling shutdown()
import torch.distributed.rpc.api as api
api._ignore_rref_leak = True
local_ret = torch.ones(self.rank) + torch.ones(self.rank)
n = self.rank + 1
dst_rank = n % self.world_size
remote_ref = rpc.remote(
worker_name(dst_rank), construct_my_script_module, args=(self.rank,)
)
# pass rref arg to owner
ret = rpc.rpc_sync(
worker_name(dst_rank),
run_ref_script_module,
args=(remote_ref, torch.ones(self.rank)),
)
self.assertEqual(ret, local_ret)
# pass rref arg to self/user
with self.assertRaisesRegex(
RuntimeError,
"is an RRef to a ScriptModule. It can't be sent through RPC from owner,",
):
ret = rpc.rpc_sync(
worker_name(self.rank),
run_ref_script_module,
args=(remote_ref, torch.ones(self.rank)),
)
@dist_init
def test_create_script_module_on_remote(self):
dst_name = worker_name((self.rank + 1) % self.world_size)
# Construct on remote end with rpc_sync
created_script_module = rpc.rpc_sync(
dst_name, MyScriptModule, args=(self.rank,)
)
# Forward should output a ones tensor of self.rank.
self.assertTrue(isinstance(created_script_module, torch.jit.ScriptModule))
rank_ones_tensor = created_script_module()
self.assertEqual(torch.ones(self.rank), rank_ones_tensor)
# Construct ScriptModule with rpc.remote.
remote_script_module = rpc.remote(dst_name, MyScriptModule, args=(self.rank,))
# Verify it is an instance of ScriptModule on remote end.
remote_end_is_script = rpc.rpc_sync(
remote_script_module.owner(),
rref_isinstance,
args=(remote_script_module, torch.jit.ScriptModule),
)
self.assertTrue(remote_end_is_script)
# Run forward pass remotely.
remote_forward_output = remote_script_module.rpc_sync().forward()
self.assertEqual(remote_forward_output, torch.ones(self.rank))
# Run function defined on ScriptModule remotely.
remote_func_output = remote_script_module.rpc_sync().custom_func()
self.assertEqual(remote_func_output, torch.ones(self.rank))
# Ensure we can transfer ScriptModule RRef to this rank and run
# forward pass.
local_script_module = remote_script_module.to_here()
self.assertTrue(isinstance(local_script_module, torch.jit.ScriptModule))
rank_ones_tensor = local_script_module()
self.assertEqual(rank_ones_tensor, torch.ones(self.rank))
local_script_func_output = local_script_module.custom_func()
self.assertEqual(local_script_func_output, torch.ones(self.rank))
@dist_init
def test_load_script_module_with_pickled_rref(self):
dst_name = worker_name((self.rank + 1) % self.world_size)
m1 = MyScriptModuleWithRRefs(dst_name)
m2 = MyScriptModuleWithRRefs(dst_name)
f = io.BytesIO()
rpc._enable_jit_rref_pickle()
torch.jit.save(m1, f)
rpc._disable_jit_rref_pickle()
out1 = rpc.rpc_sync(
dst_name, load_script_module_with_pickled_rref, args=(f.getvalue(),)
)
out2 = m2()
self.assertEqual(out1, out2)
@dist_init
def test_rref_jit_pickle_not_supported(self):
n = self.rank + 1
dst_rank = n % self.world_size
rref_var = rpc_return_rref(worker_name(dst_rank))
with (
TemporaryFileName() as fname,
self.assertRaisesRegex(
RuntimeError, "RRef jit pickling is only allowed inside RPC calls"
),
):
save_rref(rref_var, fname)
@dist_init
def test_remote_script_throw(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
script_raise_func,
args=(torch.ones(2),),
)
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
rref.to_here()
@dist_init
def test_remote_script_udf(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
)
self.assertEqual(rref.to_here(), torch.ones(2) * 2)
@dist_init
def test_async_script_udf(self):
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
)
self.assertEqual(future.wait(), torch.ones(2) * 2)
@dist_init
def test_callback_simple(self):
def callback(fut):
return fut.wait() + 1
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
).then(callback)
self.assertEqual(future.wait(), torch.ones(2) * 2 + 1)
@dist_init
def test_callback_chain(self):
n = self.rank + 1
def callback(fut):
return fut.wait() + 1
fut = rpc.rpc_async(
worker_name(n % self.world_size), one_arg, args=(torch.ones(n, n),)
)
num_cbs = 20
for _ in range(num_cbs):
fut = fut.then(callback)
self.assertEqual(fut.wait(), torch.ones(n, n) + 1 + num_cbs)
@dist_init
def test_add_done_callback(self):
callback_called = None
def callback(fut):
nonlocal callback_called
callback_called = fut.wait() * 2
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_udf,
args=(torch.ones(2),),
)
future.add_done_callback(callback)
future_then = future.then(lambda _: True)
self.assertEqual(future.wait(), torch.ones(2) * 2)
# We have no guarantee that the add_done_callback fn will execute before the test finishes.
# Adding a 'then' callback that runs afterwards to guarantee we wait for the first callback
future_then.wait()
self.assertEqual(callback_called, torch.ones(2) * 4)
@dist_init
def test_async_script_throw(self):
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_throw,
args=(torch.ones(2),),
)
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
future.wait()
@dist_init
def test_callback_with_exception(self):
def callback(fut):
with self.assertRaisesRegex(Exception, ".*Expected error.*"):
fut.wait()
raise RuntimeError("Another expected error")
future = rpc.rpc_async(
worker_name((self.rank + 1) % self.world_size),
script_fork_wait_throw,
args=(torch.ones(2),),
).then(callback)
with self.assertRaisesRegex(RuntimeError, "Another expected error"):
future.wait()
@dist_init
def test_call_rpc_with_profiling(self):
# Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
# future from within a script function that calls rpc_async
if self.rank == 0:
with _profile() as prof:
prof_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC,
torch._jit_internal._qualified_name(one_arg),
"worker0",
"worker1",
)
with torch.autograd.profiler.record_function(prof_key) as rf:
call_rpc_with_profiling(rf.record, "worker1")
# TODO: Can't get a reliable time for this profiling event since
# it's hard to estimate the execution time on the remote end for non-UDFs.
# This can be resolved by https://github.com/pytorch/pytorch/issues/36272.
# After that, this test should be modified to validate the function time.
events = prof.function_events
function_event = get_function_event(events, prof_key)
self.assertTrue(
torch._jit_internal._qualified_name(one_arg) in function_event.name
)
@dist_init
def test_rpc_async_jit_profiled(self):
# Tests that rpc_async calls made from within a TorchScript function are
# profiled.
if self.rank == 0:
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
args = (torch.tensor([1, 1]), torch.tensor([2, 2]))
kwargs = {}
with _profile() as prof:
script_rpc_async_call(dst_worker_name, args, kwargs)
# Ensure rpc_async call is profiled
function_events = prof.function_events
qual_name = torch._jit_internal._qualified_name(two_args_two_kwargs)
rpc_async_jit_event = [
event
for event in function_events
if qual_name in event.name and event.node_id == self.rank
]
self.assertEqual(len(rpc_async_jit_event), 1)
rpc_async_jit_event = rpc_async_jit_event[0]
profiled_name = _build_rpc_profiling_key(
RPCExecMode.ASYNC_JIT,
qual_name,
worker_name(self.rank),
dst_worker_name,
)
self.assertEqual(profiled_name, rpc_async_jit_event.name)
remote_events = [event for event in function_events if event.is_remote]
# All remote events should have taken place on dst_rank
remote_event_node_ids = {
remote_event.node_id for remote_event in remote_events
}
self.assertEqual(remote_event_node_ids, {dst_rank})
# script_rpc_async_call invokes add operator
# so we should see this as a remote event.
remote_add = next(
remote_event
for remote_event in remote_events
if "aten::add" in remote_event.name
)
remote_add_profiled_name = f"{profiled_name}#remote_op: aten::add"
self.assertEqual(remote_add.name, remote_add_profiled_name)
@dist_init
def test_record_function_on_caller_rpc_async(self):
if self.rank == 0:
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
block_scope = "foo"
with _profile() as prof:
# Runs 2 rpc_async calls within JIT under record_function.
record_function_on_caller_rpc_async(dst_worker_name, block_scope)
# Ensure record_function event is profiled.
function_events = prof.function_events
record_function_scope_event = [
event for event in function_events if event.name == block_scope
]
self.assertEqual(1, len(record_function_scope_event))
record_function_scope_event = record_function_scope_event[0]
# Ensure RPC future is profiled.
expected_key = _build_rpc_profiling_key(
RPCExecMode.ASYNC_JIT,
torch._jit_internal._qualified_name(script_add_ones),
worker_name(self.rank),
dst_worker_name,
)
jit_rpc_events = [
event for event in function_events if event.name == expected_key
]
self.assertEqual(2, len(jit_rpc_events))
# Validate that the record_function scope time is greater than both
# of the individual RPC async call times. The reason it is not necessarily
# greater than the sum is because the two can execute in parallel.
for jit_rpc_event in jit_rpc_events:
self.assertTrue(
record_function_scope_event.cpu_time_total
> jit_rpc_event.cpu_time_total
)
@dist_init
def test_rpc_torchscript_record_function(self):
# tests that torchscript functions can be profiled using with
# record_function(...) over RPC.
REMOTE_OP_STR = "#remote_op: "
if self.rank == 0:
dst_rank = (self.rank + 1) % self.world_size
dst_worker_name = worker_name(dst_rank)
block_scope = "foo"
with _profile() as prof:
call_rpc_torchscript_with_record_function(dst_worker_name, block_scope)
# Need to call below to populate CPU children.
prof.key_averages()
function_events = prof.function_events
expected_key = (
_build_rpc_profiling_key(
RPCExecMode.ASYNC_JIT,
torch._jit_internal._qualified_name(
script_add_ones_with_record_function
),
worker_name(self.rank),
dst_worker_name,
)
+ REMOTE_OP_STR
+ block_scope
)
remote_record_function_event = next(
evt for evt in function_events if evt.name == expected_key
)
self.assertTrue(block_scope in remote_record_function_event.name)
remote_children = remote_record_function_event.cpu_children
self.assertTrue("aten::add" in child.name for child in remote_children)
def test_record_function_jit_end_callbacks_with_fork(self):
# Ensures that we can call rf._call_end_callbacks_on_future on a jit
# future in python eager mode with torch.jit.fork
sleep_interval = 1
with _profile() as prof:
with torch.autograd.profiler.record_function("foo") as rf:
fut = torch.jit._fork(sleep, sleep_interval)
rf._call_end_callbacks_on_future(fut)
fut.wait()
function_events = prof.function_events
sleep_event = get_function_event(function_events, "foo")
self.assertEqual(sleep_event.name, "foo")
# Validate that callbacks were fired at the right time by checking the
# profiling event cpu time
self.assertGreaterAlmostEqual(sleep_event.cpu_time * 1e-6, sleep_interval)
def test_call_fork_in_jit_with_profiling(self):
# Ensures that we can call torch.ops.profiler._call_end_callbacks_on_jit_fut on a jit
# future from within a script function with torch.jit.fork
with _profile() as prof, torch.autograd.profiler.record_function("foo") as rf:
call_fork_with_profiling(rf.record)
events = prof.function_events
function_event = get_function_event(events, "foo")
self.assertEqual(function_event.name, "foo")
@dist_init
def test_async_function_simple(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
ret = rpc.rpc_sync(
dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(ret, torch.ones(2, 2) + 1)
@dist_init
def test_async_function_wrong_return_type(self):
with self.assertRaisesRegex(
RuntimeError,
"Async functions must return an IValue of Future type, but got Tensor",
):
rpc.rpc_sync(
worker_name((self.rank + 1) % self.world_size), async_wrong_type
)
@dist_init
def test_async_function_wrong_decorator_order(self):
# @torch.jit.script complains about undefined value rpc. Error is shown
# below. The reason for not checking error string is to avoid making
# JIT error handling code depend on RPC tests, as we don't have any
# restrictions on the error message here.
#
# RuntimeError:
# undefined value rpc:
# def async_wrong_decorator_order(to, x, y):
# # type: (str, Tensor, Tensor) -> Future[Tensor]
# return rpc.rpc_async(to, script_add, (x, y))
# ~~~ <--- HERE
with self.assertRaises(RuntimeError):
@torch.jit.script
@rpc.functions.async_execution
def async_wrong_decorator_order(
to: str, x: Tensor, y: Tensor
) -> Future[Tensor]:
return rpc.rpc_async(to, script_add, (x, y))
@dist_init
def test_async_function_remote(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
rref = rpc.remote(
dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2))
)
self.assertEqual(rref.to_here(), torch.ones(2, 2) + 1)
@dist_init
def test_async_function_remote_multi(self):
dst1 = worker_name((self.rank + 1) % self.world_size)
dst2 = worker_name((self.rank + 2) % self.world_size)
num = 20
rrefs = [
rpc.remote(
dst1, async_add, args=(dst2, torch.ones(2, 2), torch.ones(2, 2) * i)
)
for i in range(num)
]
for i in range(num):
self.assertEqual(rrefs[i].to_here(), torch.ones(2, 2) + i)
@dist_init
def test_async_function_wrong_return_type_remote(self):
rref = rpc.remote(
worker_name((self.rank + 1) % self.world_size), async_wrong_type
)
with self.assertRaisesRegex(
RuntimeError,
"Async functions must return an IValue of Future type, but got Tensor",
):
rref.to_here()
| JitRpcTest |
python | huggingface__transformers | src/transformers/models/gpt_neox/modeling_gpt_neox.py | {
"start": 25695,
"end": 29109
} | class ____(GPTNeoXPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.gpt_neox = GPTNeoXModel(config)
self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
past_key_values: Optional[Cache] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
) -> SequenceClassifierOutputWithPast:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPast = self.gpt_neox(
input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
inputs_embeds=inputs_embeds,
past_key_values=past_key_values,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
hidden_states = outputs.last_hidden_state
logits = self.score(hidden_states)
batch_size = logits.shape[0]
if self.config.pad_token_id is None and batch_size != 1:
raise ValueError("Cannot handle batch sizes > 1 if no padding token is defined.")
if self.config.pad_token_id is None:
last_non_pad_token = -1
elif input_ids is not None:
# To handle both left- and right- padding, we take the rightmost token that is not equal to pad_token_id
non_pad_mask = (input_ids != self.config.pad_token_id).to(logits.device, torch.int32)
token_indices = torch.arange(input_ids.shape[-1], device=logits.device, dtype=torch.int32)
last_non_pad_token = (token_indices * non_pad_mask).argmax(-1)
else:
last_non_pad_token = -1
logger.warning_once(
f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
"unexpected if using padding tokens in conjunction with `inputs_embeds.`"
)
pooled_logits = logits[torch.arange(batch_size, device=logits.device), last_non_pad_token]
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, pooled_logits=pooled_logits, config=self.config)
return SequenceClassifierOutputWithPast(
loss=loss,
logits=pooled_logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
| GPTNeoXForSequenceClassification |
python | jupyterlab__jupyterlab | examples/federated/main.py | {
"start": 595,
"end": 1619
} | class ____(LabServerApp):
name = "lab"
load_other_extensions = False
app_name = "JupyterLab Example App with Prebuilt Extensions"
app_settings_dir = os.path.join(HERE, "data", "application_settings")
app_version = version
schemas_dir = os.path.join(HERE, "data", "schemas")
static_dir = os.path.join(HERE, "core_package", "static")
templates_dir = os.path.join(HERE, "templates")
themes_dir = os.path.join(HERE, "data", "themes")
user_settings_dir = os.path.join(HERE, "data", "user_settings")
workspaces_dir = os.path.join(HERE, "data", "workspaces")
# Set the location for prebuilt extensions, overriding the default
# of looking in each of the Jupyter data paths.
labextensions_path = [os.path.join(HERE, "labextensions")]
def initialize_settings(self):
super().initialize_settings()
settings = self.serverapp.web_app.settings
settings["terminals_available"] = False
if __name__ == "__main__":
ExampleApp.launch_instance()
| ExampleApp |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_missing_org_members.py | {
"start": 316,
"end": 13658
} | class ____(APITestCase):
endpoint = "sentry-api-0-organization-missing-members"
method = "get"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(email="owner@example.com")
self.organization = self.create_organization(owner=self.user)
self.create_member(
email="a@example.com",
organization=self.organization,
)
member = self.create_member(user=self.create_user(), organization=self.organization)
member.user_email = "b@example.com"
member.save()
self.member_commit_author = self.create_commit_author(
project=self.project, email="b@example.com"
)
self.nonmember_commit_author1 = self.create_commit_author(
project=self.project, email="c@example.com"
)
self.nonmember_commit_author1.external_id = "github:c"
self.nonmember_commit_author1.save()
self.nonmember_commit_author2 = self.create_commit_author(
project=self.project, email="d@example.com"
)
self.nonmember_commit_author2.external_id = "github:d"
self.nonmember_commit_author2.save()
nonmember_commit_author_invalid_char = self.create_commit_author(
project=self.project, email="hi+1@example.com"
)
nonmember_commit_author_invalid_char.external_id = "github:hi+1"
nonmember_commit_author_invalid_char.save()
nonmember_commit_author_invalid_domain = self.create_commit_author(
project=self.project, email="gmail@gmail.com"
)
nonmember_commit_author_invalid_domain.external_id = "github:gmail"
nonmember_commit_author_invalid_domain.save()
self.integration = self.create_integration(
organization=self.organization, provider="github", name="Github", external_id="github:1"
)
self.integration2 = self.create_integration(
organization=self.organization,
provider="github",
name="Github2",
external_id="github:3",
)
self.repo = self.create_repo(
project=self.project, provider="integrations:github", integration_id=self.integration.id
)
self.create_commit(repo=self.repo, author=self.member_commit_author)
self.create_commit(repo=self.repo, author=self.nonmember_commit_author1)
self.create_commit(repo=self.repo, author=self.nonmember_commit_author1)
self.create_commit(repo=self.repo, author=self.nonmember_commit_author2)
self.create_commit(repo=self.repo, author=nonmember_commit_author_invalid_char)
self.create_commit(repo=self.repo, author=nonmember_commit_author_invalid_domain)
not_shared_domain_author = self.create_commit_author(
project=self.project, email="a@exampletwo.com"
)
not_shared_domain_author.external_id = "github:not"
not_shared_domain_author.save()
self.create_commit(repo=self.repo, author=not_shared_domain_author)
self.invited_member = self.create_member(
email="invited@example.com",
organization=self.organization,
)
self.invited_member.user_email = "invited@example.com"
self.invited_member.save()
self.invited_member_commit_author = self.create_commit_author(
project=self.project, email="invited@example.com"
)
self.invited_member_commit_author.external_id = "github:invited"
self.invited_member_commit_author.save()
self.create_commit(repo=self.repo, author=self.invited_member_commit_author)
self.login_as(self.user)
def test_shared_domain_filter(self) -> None:
# only returns users with example.com emails (shared domain)
response = self.get_success_response(self.organization.slug)
assert response.data[0]["integration"] == "github"
assert response.data[0]["users"] == [
{"email": "c@example.com", "externalId": "c", "commitCount": 2},
{"email": "d@example.com", "externalId": "d", "commitCount": 1},
]
def test_requires_org_write(self) -> None:
user = self.create_user()
self.create_member(organization=self.organization, user=user, role="member")
self.login_as(user)
self.get_error_response(self.organization.slug, status=403)
def test_filters_github_only(self) -> None:
repo = self.create_repo(project=self.project, provider="integrations:bitbucket")
self.create_commit(repo=repo, author=self.nonmember_commit_author1)
self.create_integration(
organization=self.organization, provider="bitbucket", external_id="bitbucket:1"
)
response = self.get_success_response(self.organization.slug)
assert response.data[0]["integration"] == "github"
assert response.data[0]["users"] == [
{"email": "c@example.com", "externalId": "c", "commitCount": 2},
{"email": "d@example.com", "externalId": "d", "commitCount": 1},
]
def test_filters_old_commits(self) -> None:
self.create_commit(
repo=self.repo,
author=self.nonmember_commit_author1,
date_added=timezone.now() - timedelta(days=31),
)
response = self.get_success_response(self.organization.slug)
assert response.data[0]["integration"] == "github"
assert response.data[0]["users"] == [
{"email": "c@example.com", "externalId": "c", "commitCount": 2},
{"email": "d@example.com", "externalId": "d", "commitCount": 1},
]
def test_filters_authors_with_no_external_id(self) -> None:
no_external_id_author = self.create_commit_author(
project=self.project, email="e@example.com"
)
self.create_commit(
repo=self.repo,
author=no_external_id_author,
)
response = self.get_success_response(self.organization.slug)
assert response.data[0]["integration"] == "github"
assert response.data[0]["users"] == [
{"email": "c@example.com", "externalId": "c", "commitCount": 2},
{"email": "d@example.com", "externalId": "d", "commitCount": 1},
]
def test_no_authors(self) -> None:
org = self.create_organization(owner=self.create_user())
self.create_member(user=self.user, organization=org, role="manager")
self.create_integration(
organization=org, provider="github", name="Github", external_id="github:2"
)
response = self.get_success_response(org.slug)
assert response.data[0]["integration"] == "github"
assert response.data[0]["users"] == []
def test_owners_filters_with_different_domains(self) -> None:
user = self.create_user(email="owner@exampletwo.com")
self.create_member(
organization=self.organization,
user=user,
role="owner",
)
# this user has an email domain that is filtered
noreply_email_author = self.create_commit_author(
project=self.project, email="hi@noreply.github.com"
)
noreply_email_author.external_id = "github:hi"
noreply_email_author.save()
self.create_commit(
repo=self.repo,
author=noreply_email_author,
)
response = self.get_success_response(self.organization.slug)
assert response.data[0]["integration"] == "github"
assert response.data[0]["users"] == [
{"email": "c@example.com", "externalId": "c", "commitCount": 2},
{"email": "d@example.com", "externalId": "d", "commitCount": 1},
{"email": "a@exampletwo.com", "externalId": "not", "commitCount": 1},
]
def test_case_insensitive(self) -> None:
# excludes author that has matching (case insensitive) email
member = self.create_member(user=self.create_user(), organization=self.organization)
member.user_email = "helloworld@example.com"
member.save()
commit_author = self.create_commit_author(
project=self.project, email="HelloWorld@example.com"
)
commit_author.external_id = "github:helloworld"
commit_author.save()
self.create_commit(repo=self.repo, author=commit_author)
response = self.get_success_response(self.organization.slug)
assert response.data[0]["integration"] == "github"
assert response.data[0]["users"] == [
{"email": "c@example.com", "externalId": "c", "commitCount": 2},
{"email": "d@example.com", "externalId": "d", "commitCount": 1},
]
def test_owners_invalid_domain_no_filter(self) -> None:
OrganizationMember.objects.filter(role="owner", organization=self.organization).update(
user_email="example"
)
response = self.get_success_response(self.organization.slug)
assert response.data[0]["users"] == [
{"email": "c@example.com", "externalId": "c", "commitCount": 2},
{"email": "d@example.com", "externalId": "d", "commitCount": 1},
{"email": "a@exampletwo.com", "externalId": "not", "commitCount": 1},
]
def test_excludes_empty_owner_emails(self) -> None:
# ignores this second owner with an empty email
user = self.create_user(email="")
self.create_member(
organization=self.organization,
user=user,
role="owner",
)
response = self.get_success_response(self.organization.slug)
assert response.data[0]["integration"] == "github"
assert response.data[0]["users"] == [
{"email": "c@example.com", "externalId": "c", "commitCount": 2},
{"email": "d@example.com", "externalId": "d", "commitCount": 1},
]
def test_no_github_integration(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.delete()
self.integration2.delete()
response = self.get_success_response(self.organization.slug)
assert len(response.data) == 0
def test_disabled_integration(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.status = ObjectStatus.DISABLED
self.integration.save()
self.integration2.status = ObjectStatus.DISABLED
self.integration2.save()
response = self.get_success_response(self.organization.slug)
assert len(response.data) == 0
def test_nongithub_integration(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
self.integration.delete()
self.integration2.delete()
integration = self.create_integration(
organization=self.organization,
provider="bitbucket",
name="Bitbucket",
external_id="bitbucket:1",
)
repo = self.create_repo(
project=self.project, provider="integrations:github", integration_id=integration.id
)
self.create_commit(repo=repo, author=self.member_commit_author)
self.create_commit(repo=repo, author=self.nonmember_commit_author1)
self.create_commit(repo=repo, author=self.nonmember_commit_author1)
self.create_commit(repo=repo, author=self.nonmember_commit_author2)
response = self.get_success_response(self.organization.slug)
assert len(response.data) == 0
def test_filters_disabled_github_integration(self) -> None:
integration = self.create_integration(
organization=self.organization,
provider="github",
name="Github",
external_id="github:2",
status=ObjectStatus.DISABLED,
)
repo = self.create_repo(
project=self.project, provider="integrations:github", integration_id=integration.id
)
self.create_commit(repo=repo, author=self.member_commit_author)
self.create_commit(repo=repo, author=self.nonmember_commit_author1)
self.create_commit(repo=repo, author=self.nonmember_commit_author1)
self.create_commit(repo=repo, author=self.nonmember_commit_author2)
response = self.get_success_response(self.organization.slug)
assert response.data[0]["integration"] == "github"
assert response.data[0]["users"] == [
{"email": "c@example.com", "externalId": "c", "commitCount": 2},
{"email": "d@example.com", "externalId": "d", "commitCount": 1},
]
def test_limit_50_missing_members(self) -> None:
repo = self.create_repo(
project=self.project, provider="integrations:github", integration_id=self.integration.id
)
for i in range(50):
nonmember_commit_author = self.create_commit_author(
project=self.project, email=str(i) + "@example.com"
)
nonmember_commit_author.external_id = "github:" + str(i)
nonmember_commit_author.save()
self.create_commit(repo=repo, author=nonmember_commit_author)
response = self.get_success_response(self.organization.slug)
assert len(response.data[0]["users"]) == 50
| OrganizationMissingMembersTestCase |
python | doocs__leetcode | solution/1100-1199/1137.N-th Tribonacci Number/Solution2.py | {
"start": 21,
"end": 448
} | class ____:
def tribonacci(self, n: int) -> int:
if n == 0:
return 0
if n < 3:
return 1
factor = np.asmatrix([(1, 1, 0), (1, 0, 1), (1, 0, 0)], np.dtype("O"))
res = np.asmatrix([(1, 1, 0)], np.dtype("O"))
n -= 3
while n:
if n & 1:
res *= factor
factor *= factor
n >>= 1
return res.sum()
| Solution |
python | mlflow__mlflow | dev/clint/src/clint/rules/incorrect_type_annotation.py | {
"start": 48,
"end": 664
} | class ____(Rule):
MAPPING = {
"callable": "Callable",
"any": "Any",
}
def __init__(self, type_hint: str) -> None:
self.type_hint = type_hint
@staticmethod
def check(node: ast.Name) -> bool:
return node.id in IncorrectTypeAnnotation.MAPPING
def _message(self) -> str:
if correct_hint := self.MAPPING.get(self.type_hint):
return f"Did you mean `{correct_hint}` instead of `{self.type_hint}`?"
raise ValueError(
f"Unexpected type: {self.type_hint}. It must be one of {list(self.MAPPING)}."
)
| IncorrectTypeAnnotation |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/loop28.py | {
"start": 285,
"end": 749
} | class ____:
def __init__(self):
self.pending: Optional[Dict[Future[Any], int]]
self.foo: bool
def poll(self):
assert self.pending is not None
while True:
if self.pending:
pass
ready, _ = futures.wait(self.pending)
for future_id in ready:
self.pending.pop(future_id)
future_id.result()
if self.foo:
pass
| A |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 33367,
"end": 33717
} | class ____(IncrementalMixin, GithubStream):
"""
API docs: https://docs.github.com/en/rest/issues/issues?apiVersion=2022-11-28#list-repository-issues
"""
use_cache = True
large_stream = True
is_sorted = "asc"
stream_base_params = {
"state": "all",
"sort": "updated",
"direction": "asc",
}
| Issues |
python | pytorch__pytorch | torch/_tensor_str.py | {
"start": 3883,
"end": 28400
} | class ____:
def __init__(self, tensor):
self.floating_dtype = tensor.dtype.is_floating_point
self.int_mode = True
self.sci_mode = False
self.max_width = 1
with torch.no_grad():
tensor_view = tensor.reshape(-1)
if not self.floating_dtype:
for value in tensor_view:
value_str = f"{value}"
self.max_width = max(self.max_width, len(value_str))
else:
if tensor.dtype == torch.float4_e2m1fn_x2: # type: ignore[attr-defined]
# torch.float4_e2m1fn_x2 is special and does not support the casts necessary
# to print it, we choose to display the uint8 representation here for
# convenience of being able to print a tensor.
# TODO(#146647): extend this to other dtypes without casts defined, such
# as the bits, uint1..7 and int1..7 dtypes.
tensor_view = tensor_view.view(torch.uint8)
nonzero_finite_vals = torch.masked_select(
tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0)
)
if nonzero_finite_vals.numel() == 0:
# no valid number, do nothing
return
if tensor.dtype == torch.float8_e8m0fnu: # type: ignore[attr-defined]
# float8_e8m0fnu is special and does not define arithmetic ops,
# and printing code further in this file assumes the existence
# of various arithmetic ops to figure out what to print. We hack
# and convert to float here to make printing work correctly.
# TODO(#113663): also add the other float8 dtypes here after arithmetic
# support for them is removed
nonzero_finite_vals = nonzero_finite_vals.float()
# Convert to double (or float) for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs())
nonzero_finite_min = tensor_totype(nonzero_finite_abs.min())
nonzero_finite_max = tensor_totype(nonzero_finite_abs.max())
for value in nonzero_finite_vals:
if value != torch.ceil(value):
self.int_mode = False
break
self.sci_mode = (
nonzero_finite_max / nonzero_finite_min > 1000.0
or nonzero_finite_max > 1.0e8
or nonzero_finite_min < 1.0e-4
if PRINT_OPTS.sci_mode is None
else PRINT_OPTS.sci_mode
)
if self.int_mode:
# in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
# to indicate that the tensor is of floating type. add 1 to the len to account for this.
if self.sci_mode:
for value in nonzero_finite_vals:
value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
self.max_width = max(self.max_width, len(value_str))
else:
for value in nonzero_finite_vals:
value_str = f"{value:.0f}"
self.max_width = max(self.max_width, len(value_str) + 1)
else:
# Check if scientific representation should be used.
if self.sci_mode:
for value in nonzero_finite_vals:
value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
self.max_width = max(self.max_width, len(value_str))
else:
for value in nonzero_finite_vals:
value_str = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
self.max_width = max(self.max_width, len(value_str))
def width(self):
return self.max_width
def format(self, value):
if self.floating_dtype:
if self.sci_mode:
ret = f"{{:{self.max_width}.{PRINT_OPTS.precision}e}}".format(value)
elif self.int_mode:
ret = f"{value:.0f}"
if not (math.isinf(value) or math.isnan(value)):
ret += "."
else:
ret = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
else:
ret = f"{value}"
return (self.max_width - len(ret)) * " " + ret
def _scalar_str(self, formatter1, formatter2=None):
if formatter2 is not None:
real_str = _scalar_str(self.real, formatter1)
imag_str = (_scalar_str(self.imag, formatter2) + "j").lstrip()
# handles negative numbers, +0.0, -0.0
if imag_str[0] == "+" or imag_str[0] == "-":
return real_str + imag_str
else:
return real_str + "+" + imag_str
else:
return formatter1.format(self.item())
def _vector_str(self, indent, summarize, formatter1, formatter2=None):
# length includes spaces and comma between elements
element_length = formatter1.width() + 2
if formatter2 is not None:
# width for imag_formatter + an extra j for complex
element_length += formatter2.width() + 1
elements_per_line = max(
1, math.floor((PRINT_OPTS.linewidth - indent) / (element_length))
)
def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
if formatter2 is not None:
real_str = formatter1.format(val.real)
imag_str = (formatter2.format(val.imag) + "j").lstrip()
# handles negative numbers, +0.0, -0.0
if imag_str[0] == "+" or imag_str[0] == "-":
return real_str + imag_str
else:
return real_str + "+" + imag_str
else:
return formatter1.format(val)
if self.dtype == torch.float4_e2m1fn_x2: # type: ignore[attr-defined]
# torch.float4_e2m1fn_x2 is special and does not support the casts necessary
# to print it, we choose to display the uint8 representation here for
# convenience of being able to print a tensor.
# TODO(#146647): extend this to other dtypes without casts defined, such
# as the bits, uint1..7 and int1..7 dtypes.
self = self.view(torch.uint8)
if summarize and not PRINT_OPTS.edgeitems:
# Deal with edge case that negative zero is zero
data = ["..."]
elif summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
data = (
[_val_formatter(val) for val in self[: PRINT_OPTS.edgeitems].tolist()]
+ [" ..."]
+ [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems :].tolist()]
)
else:
data = [_val_formatter(val) for val in self.tolist()]
data_lines = [
data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line)
]
lines = [", ".join(line) for line in data_lines]
return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]"
# formatter2 is only used for printing complex tensors.
# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real
# and tensor.imag respectively
def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None):
dim = self.dim()
if dim == 0:
return _scalar_str(self, formatter1, formatter2)
if dim == 1:
return _vector_str(self, indent, summarize, formatter1, formatter2)
if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
slices = (
[
_tensor_str_with_formatter(
self[i], indent + 1, summarize, formatter1, formatter2
)
for i in range(PRINT_OPTS.edgeitems)
]
+ ["..."]
+ [
_tensor_str_with_formatter(
self[i], indent + 1, summarize, formatter1, formatter2
)
for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))
]
)
else:
slices = [
_tensor_str_with_formatter(
self[i], indent + 1, summarize, formatter1, formatter2
)
for i in range(self.size(0))
]
tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
return "[" + tensor_str + "]"
def _tensor_str(self, indent):
if self.numel() == 0:
return "[]"
if self.has_names():
# There are two main codepaths (possibly more) that tensor printing goes through:
# - tensor data can fit comfortably on screen
# - tensor data needs to be summarized
# Some of the codepaths don't fully support named tensors, so we send in
# an unnamed tensor to the formatting code as a workaround.
self = self.rename(None)
summarize = self.numel() > PRINT_OPTS.threshold
if self._is_zerotensor():
self = self.clone()
# handle the negative bit
if self.is_neg():
self = self.resolve_neg()
# TODO: Remove me when `masked_select` is implemented for FP8
if self.dtype in [
torch.float8_e5m2,
torch.float8_e5m2fnuz,
torch.float8_e4m3fn,
torch.float8_e4m3fnuz,
]:
self = self.half()
if self.dtype.is_complex:
# handle the conjugate bit
self = self.resolve_conj()
real_formatter = _Formatter(
get_summarized_data(self.real) if summarize else self.real
)
imag_formatter = _Formatter(
get_summarized_data(self.imag) if summarize else self.imag
)
return _tensor_str_with_formatter(
self, indent, summarize, real_formatter, imag_formatter
)
else:
formatter = _Formatter(get_summarized_data(self) if summarize else self)
return _tensor_str_with_formatter(self, indent, summarize, formatter)
def _add_suffixes(tensor_str, suffixes, indent, force_newline):
tensor_strs = [tensor_str]
last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
for suffix in suffixes:
suffix_len = len(suffix)
if force_newline or last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth:
tensor_strs.append(",\n" + " " * indent + suffix)
last_line_len = indent + suffix_len
force_newline = False
else:
tensor_strs.append(", " + suffix)
last_line_len += suffix_len + 2
tensor_strs.append(")")
return "".join(tensor_strs)
def get_summarized_data(self):
dim = self.dim()
if dim == 0:
return self
if dim == 1:
if self.size(0) > 2 * PRINT_OPTS.edgeitems:
return torch.cat(
(self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :])
)
else:
return self
if not PRINT_OPTS.edgeitems:
return self.new_empty([0] * self.dim())
elif self.size(0) > 2 * PRINT_OPTS.edgeitems:
start = [self[i] for i in range(PRINT_OPTS.edgeitems)]
end = [self[i] for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))]
return torch.stack([get_summarized_data(x) for x in (start + end)])
else:
return torch.stack([get_summarized_data(x) for x in self])
def _str_intern(inp, *, tensor_contents=None):
if torch._C._functorch.is_functorch_wrapped_tensor(inp):
return _functorch_wrapper_str_intern(inp, tensor_contents=tensor_contents)
is_plain_tensor = type(inp) is torch.Tensor or type(inp) is torch.nn.Parameter
if inp.is_nested:
prefix = "nested_tensor("
elif is_plain_tensor:
prefix = "tensor("
else:
prefix = f"{type(inp).__name__}("
indent = len(prefix)
suffixes = []
custom_contents_provided = tensor_contents is not None
if custom_contents_provided:
tensor_str = tensor_contents
# This is used to extract the primal value and thus disable the forward AD
# within this function.
# TODO(albanD) This needs to be updated when more than one level is supported
self, tangent = torch.autograd.forward_ad.unpack_dual(inp)
# Note [Print tensor device]:
# A general logic here is we only print device when it doesn't match
# the device specified in default tensor type.
# Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus
# torch._C._get_default_device() only returns either cpu or cuda.
# In other cases, we don't have a way to set them as default yet,
# and we should always print out device for them.
if (
self.device.type != torch._C._get_default_device()
or (
self.device.type == "cuda"
and torch.cuda.current_device() != self.device.index
)
or (self.device.type == "mps")
):
suffixes.append("device='" + str(self.device) + "'")
# Tensor printing performs tensor operations like slice, indexing, etc to make it in a
    # representable format. These operations on ipu/xla/lazy/mtia tensors result in compilations. Hence,
# to avoid compilations, copying the tensor to cpu before printing.
if self.device.type in ["xla", "lazy", "ipu", "mtia"]:
self = self.to("cpu")
# TODO: add an API to map real -> complex dtypes
_default_complex_dtype = (
torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat
)
has_default_dtype = self.dtype in (
torch.get_default_dtype(),
_default_complex_dtype,
torch.int64,
torch.bool,
)
if self.is_sparse:
suffixes.append("size=" + str(tuple(self.shape)))
from torch._subclasses.fake_tensor import FakeTensor
is_meta = self.is_meta or isinstance(self, FakeTensor)
if not is_meta:
suffixes.append("nnz=" + str(self._nnz()))
if not has_default_dtype:
suffixes.append("dtype=" + str(self.dtype))
if not custom_contents_provided:
indices_prefix = "indices=tensor("
indices = self._indices().detach()
if is_meta:
indices_str = "..."
else:
indices_str = _tensor_str(indices, indent + len(indices_prefix))
if is_meta or indices.numel() == 0:
indices_str += ", size=" + str(tuple(indices.shape))
values_prefix = "values=tensor("
values = self._values().detach()
if is_meta:
values_str = "..."
else:
values_str = _tensor_str(values, indent + len(values_prefix))
if is_meta or values.numel() == 0:
values_str += ", size=" + str(tuple(values.shape))
tensor_str = (
indices_prefix
+ indices_str
+ "),\n"
+ " " * indent
+ values_prefix
+ values_str
+ ")"
)
elif self.layout in {
torch.sparse_csr,
torch.sparse_csc,
torch.sparse_bsr,
torch.sparse_bsc,
}:
from torch._subclasses.fake_tensor import FakeTensor
suffixes.append("size=" + str(tuple(self.shape)))
is_meta = self.is_meta or isinstance(self, FakeTensor)
if not is_meta:
suffixes.append("nnz=" + str(self._nnz()))
if not has_default_dtype:
suffixes.append("dtype=" + str(self.dtype))
if not custom_contents_provided:
compressed_indices_method, plain_indices_method = {
torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
}[self.layout]
if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
cdimname, pdimname = "row", "column"
else:
cdimname, pdimname = "column", "row"
compressed_indices_prefix = f"c{cdimname[:3]}_indices=tensor("
compressed_indices = compressed_indices_method(self).detach()
if is_meta:
compressed_indices_str = "..."
else:
compressed_indices_str = _tensor_str(
compressed_indices, indent + len(compressed_indices_prefix)
)
if compressed_indices.numel() == 0 or is_meta:
compressed_indices_str += ", size=" + str(
tuple(compressed_indices.shape)
)
plain_indices_prefix = f"{pdimname[:3]}_indices=tensor("
plain_indices = plain_indices_method(self).detach()
if is_meta:
plain_indices_str = "..."
else:
plain_indices_str = _tensor_str(
plain_indices, indent + len(plain_indices_prefix)
)
if plain_indices.numel() == 0 or is_meta:
plain_indices_str += ", size=" + str(tuple(plain_indices.shape))
values_prefix = "values=tensor("
values = self.values().detach()
if is_meta:
values_str = "..."
else:
values_str = _tensor_str(values, indent + len(values_prefix))
if values.numel() == 0 or is_meta:
values_str += ", size=" + str(tuple(values.shape))
tensor_str = (
compressed_indices_prefix
+ compressed_indices_str
+ "),\n"
+ " " * indent
+ plain_indices_prefix
+ plain_indices_str
+ "),\n"
+ " " * indent
+ values_prefix
+ values_str
+ ")"
)
elif self.is_quantized:
suffixes.append("size=" + str(tuple(self.shape)))
if not has_default_dtype:
suffixes.append("dtype=" + str(self.dtype))
suffixes.append("quantization_scheme=" + str(self.qscheme()))
if (
self.qscheme() == torch.per_tensor_affine
or self.qscheme() == torch.per_tensor_symmetric
):
suffixes.append("scale=" + str(self.q_scale()))
suffixes.append("zero_point=" + str(self.q_zero_point()))
elif (
self.qscheme() == torch.per_channel_affine
or self.qscheme() == torch.per_channel_symmetric
or self.qscheme() == torch.per_channel_affine_float_qparams
):
suffixes.append("scale=" + str(self.q_per_channel_scales()))
suffixes.append("zero_point=" + str(self.q_per_channel_zero_points()))
suffixes.append("axis=" + str(self.q_per_channel_axis()))
if not custom_contents_provided:
tensor_str = _tensor_str(self.dequantize(), indent)
elif self.is_nested:
if not custom_contents_provided:
def indented_str(s, indent):
return "\n".join(f" {line}" for line in s.split("\n"))
strs = ",\n".join(
indented_str(str(t), indent + 1)
for t in torch.ops.aten.unbind.int(self, 0)
)
tensor_str = f"[\n{strs}\n]"
elif torch._is_functional_tensor(self):
prefix = "_to_functional_tensor("
tensor_str = repr(torch._from_functional_tensor(self))
else:
# Circular import problem, so we import it here
from torch._subclasses.fake_tensor import FakeTensor
if self.is_meta or isinstance(self, FakeTensor):
suffixes.append("size=" + str(tuple(self.shape)))
if self.dtype != torch.get_default_dtype():
suffixes.append("dtype=" + str(self.dtype))
# TODO: This implies that ellipses is valid syntax for allocating
# a meta tensor or FakeTensor, which it could be, but it isn't right now
if not custom_contents_provided:
tensor_str = "..."
else:
if self.numel() == 0 and not self.is_sparse:
# Explicitly print the shape if it is not (0,), to match NumPy behavior
if self.dim() != 1:
suffixes.append("size=" + str(tuple(self.shape)))
# In an empty tensor, there are no elements to infer if the dtype
# should be int64, so it must be shown explicitly.
if self.dtype != torch.get_default_dtype():
suffixes.append("dtype=" + str(self.dtype))
if not custom_contents_provided:
tensor_str = "[]"
else:
if not PRINT_OPTS.edgeitems:
suffixes.append("size=" + str(tuple(self.shape)))
if not has_default_dtype:
suffixes.append("dtype=" + str(self.dtype))
if not custom_contents_provided:
if self.layout != torch.strided:
tensor_str = _tensor_str(self.to_dense(), indent)
else:
tensor_str = _tensor_str(self, indent)
if self.layout != torch.strided:
suffixes.append("layout=" + str(self.layout))
# Use inp here to get the original grad_fn and not the one generated by the forward grad
# unpacking.
grad_fn_name = None
try:
grad_fn = inp.grad_fn
except RuntimeError:
# Accessing the grad_fn calls rebasing logic which would cause an error
# if that tensor is a view created in no-grad mode modified in-place in
# no-grad mode. See: https://github.com/pytorch/pytorch/issues/99968
grad_fn_name = "Invalid"
if grad_fn_name is None and grad_fn is not None: # type: ignore[possibly-undefined]
# pyrefly: ignore [unbound-name]
grad_fn_name = type(grad_fn).__name__
if grad_fn_name == "CppFunction":
# pyrefly: ignore [unbound-name]
grad_fn_name = grad_fn.name().rsplit("::", 1)[-1]
if grad_fn_name is not None:
suffixes.append(f"grad_fn=<{grad_fn_name}>")
elif inp.requires_grad:
suffixes.append("requires_grad=True")
if self.has_names():
suffixes.append(f"names={self.names}")
if tangent is not None:
suffixes.append(f"tangent={tangent}")
string_repr = _add_suffixes(
prefix + tensor_str, # type: ignore[possibly-undefined]
suffixes,
indent,
force_newline=self.is_sparse,
)
# Check if this instance is flagged as a parameter and change the repr accordingly.
# Unfortunately, this function has to be aware of this detail.
# NB: This is currently skipped for plain tensor parameters to maintain BC. In the future,
# this should be done for those as well to produce a valid repr.
if isinstance(self, torch.nn.Parameter) and not is_plain_tensor:
string_repr = f"Parameter({string_repr})"
return string_repr
def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None):
level = torch._C._functorch.maybe_get_level(tensor)
assert level != -1
if torch._C._functorch.is_functionaltensor(tensor):
# Since we're unwrapping the FunctionalTensorWrapper, we need to make sure
# that it's up to date first
torch._sync(tensor)
value = torch._C._functorch.get_unwrapped(tensor)
value_repr = repr(value)
indented_value_repr = textwrap.indent(value_repr, " " * 4)
if torch._C._functorch.is_batchedtensor(tensor):
bdim = torch._C._functorch.maybe_get_bdim(tensor)
assert bdim != -1
return (
f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n{indented_value_repr}\n)"
)
if torch._C._functorch.is_gradtrackingtensor(tensor):
return f"GradTrackingTensor(lvl={level}, value=\n{indented_value_repr}\n)"
if torch._C._functorch.is_functionaltensor(tensor):
return f"FunctionalTensor(lvl={level}, value=\\\n{value_repr})"
raise ValueError("We don't know how to print this, please file us an issue")
def _str(self, *, tensor_contents=None):
with torch.no_grad(), torch.utils._python_dispatch._disable_current_modes():
guard = torch._C._DisableFuncTorch() # noqa: F841
return _str_intern(self, tensor_contents=tensor_contents)
| _Formatter |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_arkansas_zip.py | {
"start": 747,
"end": 1751
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_arkansas_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_arkansas_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidArkansasZip |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/errors.py | {
"start": 644,
"end": 767
} | class ____(HypothesisException):
"""Hypothesis can trim these tracebacks even if they're raised internally."""
| _Trimmable |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/glacier.py | {
"start": 1178,
"end": 1952
} | class ____(AwsBaseOperator[GlacierHook]):
"""
Initiate an Amazon Glacier inventory-retrieval job.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GlacierCreateJobOperator`
:param aws_conn_id: The reference to the AWS connection details
:param vault_name: the Glacier vault on which job is executed
"""
aws_hook_class = GlacierHook
template_fields: Sequence[str] = aws_template_fields("vault_name")
def __init__(self, *, vault_name: str, **kwargs):
super().__init__(**kwargs)
self.vault_name = vault_name
def execute(self, context: Context):
return self.hook.retrieve_inventory(vault_name=self.vault_name)
| GlacierCreateJobOperator |
python | python__mypy | mypyc/codegen/emit.py | {
"start": 4028,
"end": 4107
} | class ____(ErrorHandler):
"""Assign an error value on error."""
| AssignHandler |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modeling_sam3_tracker_video.py | {
"start": 18355,
"end": 20792
} | class ____(nn.Module):
"""
SAM3_TRACKER_VIDEO's attention layer that allows for downscaling the size of the embedding after projection to queries, keys, and
values.
"""
def __init__(self, config, downsample_rate=None):
super().__init__()
downsample_rate = config.attention_downsample_rate if downsample_rate is None else downsample_rate
self.config = config
self.hidden_size = config.hidden_size
self.internal_dim = config.hidden_size // downsample_rate
self.num_attention_heads = config.num_attention_heads
self.head_dim = self.internal_dim // config.num_attention_heads
self.scaling = self.head_dim**-0.5
self.is_causal = False
self.q_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.k_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.v_proj = nn.Linear(self.hidden_size, self.internal_dim)
self.o_proj = nn.Linear(self.internal_dim, self.hidden_size)
def forward(
self,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_similarity: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, torch.Tensor]:
# Input projections
batch_size, point_batch_size = query.shape[:2]
new_shape = (batch_size * point_batch_size, -1, self.num_attention_heads, self.head_dim)
query = self.q_proj(query).view(*new_shape).transpose(1, 2)
key = self.k_proj(key).view(*new_shape).transpose(1, 2)
value = self.v_proj(value).view(*new_shape).transpose(1, 2)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query,
key,
value,
attention_mask=attention_similarity,
dropout=0.0,
scaling=self.scaling,
is_causal=self.is_causal,
**kwargs,
)
attn_output = attn_output.reshape(
batch_size, point_batch_size, -1, self.num_attention_heads * self.head_dim
).contiguous()
attn_output = self.o_proj(attn_output)
return attn_output, attn_weights
| Sam3TrackerVideoAttention |
python | huggingface__transformers | src/transformers/models/groupvit/modeling_groupvit.py | {
"start": 45401,
"end": 47402
} | class ____(nn.Module):
def __init__(self, config: GroupViTVisionConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = GroupViTVisionEmbeddings(config)
self.encoder = GroupViTVisionEncoder(config)
self.layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_hidden_states: Optional[bool] = None,
output_attentions: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values)
encoder_outputs = self.encoder(
hidden_states=hidden_states,
output_hidden_states=output_hidden_states,
output_attentions=output_attentions,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
# normalize the last hidden state
last_hidden_state = self.layernorm(last_hidden_state)
pooled_output = last_hidden_state.mean(dim=1)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
| GroupViTVisionTransformer |
python | google__jax | tests/pallas/tpu_pallas_test.py | {
"start": 113191,
"end": 114610
} | class ____(PallasBaseTest):
@parameterized.parameters(
(
lambda i: (i, pl.ds(0, 8), pl.ds(0, 128)), 0, False,
'dma_start(p0) c[d,:,:] -> e[...] f',
),
(
lambda i: (0, pl.ds(i, 8), pl.ds(0, 128)), 0, False,
'dma_start(p0) c[0,d:d+8,:] -> e[...] f',
),
(
lambda i: (i, pl.ds(2, 4), pl.ds(0, 100)), 0, False,
'dma_start(p0) c[d,2:6,:100] -> e[...] f',
),
(
lambda i: (i, pl.ds(2, 6), pl.ds(4, 100)), 1, False,
'dma_start(p1) c[d,2:,4:104] -> e[...] f',
),
(
lambda i: (i, pl.ds(2, 6), pl.ds(4, 100)), 0, True,
'dma_start(p0, add) c[d,2:,4:104] -> e[...] f',
),
)
def test_dma_custom_pretty_print(self, indexer, priority, add, expected):
def body(x_hbm_ref, i):
def inner(x_ref, sem):
pltpu.async_copy(x_hbm_ref.at[indexer(i)], x_ref, sem,
priority=priority,
add=add).wait()
pl.run_scoped(
inner, pltpu.VMEM((8, 128), jnp.float32), pltpu.SemaphoreType.DMA
)
return []
jaxpr, _, _ = pe.trace_to_jaxpr_dynamic(
wrap_init(body, 2),
[
state.shaped_array_ref((2, 8, 128), jnp.int32),
jax.core.ShapedArray((), jnp.int32),
],
)
self.assertIn(expected, jaxpr.pretty_print(use_color=False))
| PrettyPrintingTest |
python | doocs__leetcode | solution/0500-0599/0541.Reverse String II/Solution.py | {
"start": 0,
"end": 208
} | class ____:
def reverseStr(self, s: str, k: int) -> str:
cs = list(s)
for i in range(0, len(cs), 2 * k):
cs[i : i + k] = reversed(cs[i : i + k])
return "".join(cs)
| Solution |
python | numpy__numpy | benchmarks/benchmarks/bench_ufunc_strides.py | {
"start": 4057,
"end": 4162
} | class ____(BinaryFP):
data_finite = False
data_denormal = True
data_zeros = True
| BinaryFPSpecial |
python | doocs__leetcode | solution/0800-0899/0815.Bus Routes/Solution.py | {
"start": 0,
"end": 905
} | class ____:
def numBusesToDestination(
self, routes: List[List[int]], source: int, target: int
) -> int:
if source == target:
return 0
g = defaultdict(list)
for i, route in enumerate(routes):
for stop in route:
g[stop].append(i)
if source not in g or target not in g:
return -1
q = [(source, 0)]
vis_bus = set()
vis_stop = {source}
for stop, bus_count in q:
if stop == target:
return bus_count
for bus in g[stop]:
if bus not in vis_bus:
vis_bus.add(bus)
for next_stop in routes[bus]:
if next_stop not in vis_stop:
vis_stop.add(next_stop)
q.append((next_stop, bus_count + 1))
return -1
| Solution |
python | neetcode-gh__leetcode | python/0125-valid-palindrome.py | {
"start": 0,
"end": 207
} | class ____:
def isPalindrome(self, s: str) -> bool:
new = ''
for a in s:
if a.isalpha() or a.isdigit():
new += a.lower()
return (new == new[::-1])
| Solution |
python | django__django | tests/admin_scripts/tests.py | {
"start": 13017,
"end": 16627
} | class ____(AdminScriptTestCase):
"""
A series of tests for django-admin when using a settings.py file that
contains the test application specified using a full path.
"""
def setUp(self):
super().setUp()
self.write_settings(
"settings.py",
[
"django.contrib.auth",
"django.contrib.contenttypes",
"admin_scripts",
"admin_scripts.complex_app",
],
)
def test_builtin_command(self):
"""
fulldefault: django-admin builtin commands fail with an error when no
settings provided.
"""
args = ["check", "admin_scripts"]
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "settings are not configured")
def test_builtin_with_settings(self):
"""
fulldefault: django-admin builtin commands succeed if a settings file
is provided.
"""
args = ["check", "--settings=test_project.settings", "admin_scripts"]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_environment(self):
"""
fulldefault: django-admin builtin commands succeed if the environment
contains settings.
"""
args = ["check", "admin_scripts"]
out, err = self.run_django_admin(args, "test_project.settings")
self.assertNoOutput(err)
self.assertOutput(out, SYSTEM_CHECK_MSG)
def test_builtin_with_bad_settings(self):
"""
fulldefault: django-admin builtin commands fail if settings file (from
argument) doesn't exist.
"""
args = ["check", "--settings=bad_settings", "admin_scripts"]
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_builtin_with_bad_environment(self):
"""
fulldefault: django-admin builtin commands fail if settings file (from
environment) doesn't exist.
"""
args = ["check", "admin_scripts"]
out, err = self.run_django_admin(args, "bad_settings")
self.assertNoOutput(out)
self.assertOutput(err, "No module named '?bad_settings'?", regex=True)
def test_custom_command(self):
"""
fulldefault: django-admin can't execute user commands unless settings
are provided.
"""
args = ["noargs_command"]
out, err = self.run_django_admin(args)
self.assertNoOutput(out)
self.assertOutput(err, "No Django settings specified")
self.assertOutput(err, "Unknown command: 'noargs_command'")
def test_custom_command_with_settings(self):
"""
fulldefault: django-admin can execute user commands if settings are
provided as argument.
"""
args = ["noargs_command", "--settings=test_project.settings"]
out, err = self.run_django_admin(args)
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
def test_custom_command_with_environment(self):
"""
fulldefault: django-admin can execute user commands if settings are
provided in environment.
"""
args = ["noargs_command"]
out, err = self.run_django_admin(args, "test_project.settings")
self.assertNoOutput(err)
self.assertOutput(out, "EXECUTE: noargs_command")
| DjangoAdminFullPathDefaultSettings |
python | langchain-ai__langchain | libs/partners/openai/tests/integration_tests/chat_models/test_base_standard.py | {
"start": 417,
"end": 4967
} | class ____(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {"model": "gpt-4o-mini"}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_json_mode(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {"invoke": ["reasoning_output", "cache_read_input"], "stream": []}
@property
def enable_vcr_tests(self) -> bool:
return True
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
with Path.open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
llm = ChatOpenAI(model="gpt-4o-mini", stream_usage=True)
_invoke(llm, input_, stream)
# invoke twice so first invocation is cached
return _invoke(llm, input_, stream)
def invoke_with_reasoning_output(self, *, stream: bool = False) -> AIMessage:
llm = ChatOpenAI(model="gpt-5-nano", reasoning_effort="medium")
input_ = (
"explain the relationship between the 2008/9 economic crisis and the "
"startup ecosystem in the early 2010s"
)
return _invoke(llm, input_, stream)
@property
def supports_pdf_inputs(self) -> bool:
# OpenAI requires a filename for PDF inputs
# For now, we test with filename in OpenAI-specific tests
return False
@pytest.mark.flaky(retries=3, delay=1)
def test_openai_pdf_inputs(self, model: BaseChatModel) -> None:
"""Test that the model can process PDF inputs."""
url = "https://www.w3.org/WAI/ER/tests/xhtml/testfiles/resources/pdf/dummy.pdf"
pdf_data = base64.b64encode(httpx.get(url).content).decode("utf-8")
message = HumanMessage(
[
{"type": "text", "text": "What is the document title, verbatim?"},
{
"type": "file",
"mime_type": "application/pdf",
"base64": pdf_data,
"filename": "my-pdf", # OpenAI requires a filename
},
]
)
_ = model.invoke([message])
# Test OpenAI Chat Completions format
message = HumanMessage(
[
{"type": "text", "text": "What is the document title, verbatim?"},
{
"type": "file",
"file": {
"filename": "test file.pdf",
"file_data": f"data:application/pdf;base64,{pdf_data}",
},
},
]
)
_ = model.invoke([message])
def _invoke(llm: ChatOpenAI, input_: str, stream: bool) -> AIMessage:
if stream:
full = None
for chunk in llm.stream(input_):
full = full + chunk if full else chunk # type: ignore[operator]
return cast(AIMessage, full)
return cast(AIMessage, llm.invoke(input_))
@pytest.mark.skip # Test either finishes in 5 seconds or 5 minutes.
def test_audio_model() -> None:
class AudioModelTests(ChatModelIntegrationTests):
@property
def chat_model_class(self) -> type[ChatOpenAI]:
return ChatOpenAI
@property
def chat_model_params(self) -> dict:
return {
"model": "gpt-4o-audio-preview",
"temperature": 0,
"model_kwargs": {
"modalities": ["text", "audio"],
"audio": {"voice": "alloy", "format": "wav"},
},
}
@property
def supports_audio_inputs(self) -> bool:
return True
test_instance = AudioModelTests()
model = test_instance.chat_model_class(**test_instance.chat_model_params)
AudioModelTests().test_audio_inputs(model)
| TestOpenAIStandard |
python | kubernetes-client__python | kubernetes/client/api/node_api.py | {
"start": 543,
"end": 5181
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_api_group(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: V1APIGroup
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
return self.get_api_group_with_http_info(**kwargs) # noqa: E501
def get_api_group_with_http_info(self, **kwargs): # noqa: E501
"""get_api_group # noqa: E501
get information of a group # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_api_group_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool: execute request asynchronously
:param _return_http_data_only: response data without head status code
and headers
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
:return: tuple(V1APIGroup, status_code(int), headers(HTTPHeaderDict))
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = [
]
all_params.extend(
[
'async_req',
'_return_http_data_only',
'_preload_content',
'_request_timeout'
]
)
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise ApiTypeError(
"Got an unexpected keyword argument '%s'"
" to method get_api_group" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/yaml', 'application/vnd.kubernetes.protobuf']) # noqa: E501
# Authentication setting
auth_settings = ['BearerToken'] # noqa: E501
return self.api_client.call_api(
'/apis/node.k8s.io/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='V1APIGroup', # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
| NodeApi |
python | spyder-ide__spyder | spyder/plugins/mainmenu/api.py | {
"start": 3262,
"end": 3348
} | class ____:
New = 'new_section'
Restart = 'restart_section'
| ConsolesMenuSections |
python | openai__openai-python | src/openai/types/beta/assistant_create_params.py | {
"start": 6740,
"end": 7607
} | class ____(TypedDict, total=False):
chunking_strategy: ToolResourcesFileSearchVectorStoreChunkingStrategy
"""The chunking strategy used to chunk the file(s).
If not set, will use the `auto` strategy.
"""
file_ids: SequenceNotStr[str]
"""
A list of [file](https://platform.openai.com/docs/api-reference/files) IDs to
add to the vector store. There can be a maximum of 10000 files in a vector
store.
"""
metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
| ToolResourcesFileSearchVectorStore |
python | tox-dev__tox | docs/tox_conf.py | {
"start": 670,
"end": 3780
} | class ____(SphinxDirective):
name = "conf"
has_content = True
option_spec: Final[ClassVar[dict[str, Any]]] = {
"keys": unchanged_required,
"version_added": unchanged,
"version_deprecated": unchanged,
"default": unchanged,
"constant": flag,
"ref_suffix": unchanged,
}
def __init__( # noqa: PLR0913
self,
name: str,
arguments: list[str],
options: dict[str, str],
content: StringList,
lineno: int,
content_offset: int,
block_text: str,
state: RSTState,
state_machine: RSTStateMachine,
) -> None:
super().__init__(
name,
arguments,
options,
content,
lineno,
content_offset,
block_text,
state,
state_machine,
)
self._std_domain: StandardDomain = cast("StandardDomain", self.env.get_domain("std"))
def run(self) -> list[Node]:
self.env.note_reread() # this document needs to be always updated
line = paragraph()
line += Text("■" if "constant" in self.options else "⚙️")
for key in (i.strip() for i in self.options["keys"].split(",")):
line += Text(" ")
self._mk_key(line, key)
if "default" in self.options:
default = self.options["default"]
line += Text(" with default value of ")
line += literal(default, default)
if "version_added" in self.options:
line += Text(" 📢 added in ")
ver = self.options["version_added"]
line += literal(ver, ver)
if "version_deprecated" in self.options:
line += Text(" ⚠️ deprecated in ")
ver = self.options["version_deprecated"]
line += literal(ver, ver)
p = container("")
self.state.nested_parse(StringList(string2lines("\n".join(f" {i}" for i in self.content))), 0, p)
line += p
return [line]
def _mk_key(self, line: paragraph, key: str) -> None:
ref_id = key if "ref_suffix" not in self.options else f"{key}-{self.options['ref_suffix']}"
ref = reference("", refid=ref_id, reftitle=key)
line.attributes["ids"].append(ref_id)
st = strong()
st += literal(text=key)
ref += st
self._register_ref(ref_id, ref_id, ref)
line += ref
def _register_ref(self, ref_name: str, ref_title: str, node: Element) -> None:
of_name, doc_name = fully_normalize_name(ref_name), self.env.docname
if of_name in self._std_domain.labels:
LOGGER.warning(
__("duplicate label %s, other instance in %s"),
of_name,
self.env.doc2path(self._std_domain.labels[of_name][0]),
location=node,
type="sphinx-argparse-cli",
subtype=self.env.docname,
)
self._std_domain.anonlabels[of_name] = doc_name, ref_name
self._std_domain.labels[of_name] = doc_name, ref_name, ref_title
| ToxConfig |
python | Textualize__textual | src/textual/css/_style_properties.py | {
"start": 41761,
"end": 42981
} | class ____:
"""Property to expose hatch style."""
def __get__(
self, obj: StylesBase, type: type[StylesBase]
) -> tuple[str, Color] | Literal["none"]:
return obj.get_rule("hatch") # type: ignore[return-value]
def __set__(
self, obj: StylesBase, value: tuple[str, Color | str] | Literal["none"] | None
) -> None:
_rich_traceback_omit = True
if value is None:
if obj.clear_rule("hatch"):
obj.refresh(children=True)
return
if value == "none":
hatch = "none"
else:
character, color = value
if len(character) != 1:
try:
character = HATCHES[character]
except KeyError:
raise ValueError(
f"Expected a character or hatch value here; found {character!r}"
) from None
if cell_len(character) != 1:
raise ValueError("Hatch character must have a cell length of 1")
if isinstance(color, str):
color = Color.parse(color)
hatch = (character, color)
obj.set_rule("hatch", hatch)
| HatchProperty |
python | PyCQA__pylint | pylint/extensions/redefined_loop_name.py | {
"start": 515,
"end": 3230
} | class ____(checkers.BaseChecker):
name = "redefined-loop-name"
msgs = {
"W2901": (
"Redefining %r from loop (line %s)",
"redefined-loop-name",
"Used when a loop variable is overwritten in the loop body.",
),
}
def __init__(self, linter: PyLinter) -> None:
super().__init__(linter)
self._loop_variables: list[
tuple[nodes.For, list[str], nodes.LocalsDictNodeNG]
] = []
@utils.only_required_for_messages("redefined-loop-name")
def visit_assignname(self, node: nodes.AssignName) -> None:
assign_type = node.assign_type()
if not isinstance(assign_type, (nodes.Assign, nodes.AugAssign)):
return
node_scope = node.scope()
for outer_for, outer_variables, outer_for_scope in self._loop_variables:
if node_scope is not outer_for_scope:
continue
if node.name in outer_variables and not utils.in_for_else_branch(
outer_for, node
):
self.add_message(
"redefined-loop-name",
args=(node.name, outer_for.fromlineno),
node=node,
confidence=HIGH,
)
break
@utils.only_required_for_messages("redefined-loop-name")
def visit_for(self, node: nodes.For) -> None:
assigned_to = [a.name for a in node.target.nodes_of_class(nodes.AssignName)]
# Only check variables that are used
assigned_to = [
var
for var in assigned_to
if not self.linter.config.dummy_variables_rgx.match(var)
]
node_scope = node.scope()
for variable in assigned_to:
for outer_for, outer_variables, outer_for_scope in self._loop_variables:
if node_scope is not outer_for_scope:
continue
if variable in outer_variables and not utils.in_for_else_branch(
outer_for, node
):
self.add_message(
"redefined-loop-name",
args=(variable, outer_for.fromlineno),
node=node,
confidence=HIGH,
)
break
self._loop_variables.append((node, assigned_to, node.scope()))
@utils.only_required_for_messages("redefined-loop-name")
def leave_for(self, node: nodes.For) -> None: # pylint: disable=unused-argument
self._loop_variables.pop()
def register(linter: PyLinter) -> None:
linter.register_checker(RedefinedLoopNameChecker(linter))
| RedefinedLoopNameChecker |
python | getsentry__sentry | tests/sentry/integrations/slack/service/test_slack_service.py | {
"start": 1446,
"end": 2850
} | class ____(TestCase):
def setUp(self) -> None:
self.service = SlackService.default()
self.message_identifier = "1a2s3d"
def test_ignores_unsupported_activity(self) -> None:
activity = Activity.objects.create(
group=self.group,
project=self.project,
type=ActivityType.FIRST_SEEN.value,
user_id=self.user.id,
data={},
)
result = self.service._get_notification_message_to_send(activity=activity)
assert result is None
def test_simple(self) -> None:
activity = Activity.objects.create(
group=self.group,
project=self.project,
type=ActivityType.SET_IGNORED.value,
user_id=self.user.id,
data={"ignoreUntilEscalating": True},
)
uuid = uuid4()
with mock.patch("uuid.uuid4", return_value=uuid):
result = self.service._get_notification_message_to_send(activity=activity)
group_link = self.group.get_absolute_url(
params={
"referrer": "activity_notification",
"notification_uuid": uuid,
}
)
assert (
result == f"admin@localhost archived <{group_link}|{self.group.qualified_short_id}>"
)
@freeze_time("2025-01-01 00:00:00")
| TestGetNotificationMessageToSend |
python | pytorch__pytorch | torch/_tensor_str.py | {
"start": 186,
"end": 3883
} | class ____:
precision: int = 4
threshold: float = 1000
edgeitems: int = 3
linewidth: int = 80
sci_mode: Optional[bool] = None
PRINT_OPTS = __PrinterOptions()
# We could use **kwargs, but this will give better docs
def set_printoptions(
precision=None,
threshold=None,
edgeitems=None,
linewidth=None,
profile=None,
sci_mode=None,
):
r"""Set options for printing. Items shamelessly taken from NumPy
Args:
precision: Number of digits of precision for floating point output
(default = 4).
threshold: Total number of array elements which trigger summarization
rather than full `repr` (default = 1000).
edgeitems: Number of array items in summary at beginning and end of
each dimension (default = 3).
linewidth: The number of characters per line for the purpose of
inserting line breaks (default = 80). Thresholded matrices will
ignore this parameter.
profile: Sane defaults for pretty printing. Can override with any of
the above options. (any one of `default`, `short`, `full`)
sci_mode: Enable (True) or disable (False) scientific notation. If
None (default) is specified, the value is defined by
`torch._tensor_str._Formatter`. This value is automatically chosen
by the framework.
Example::
>>> # Limit the precision of elements
>>> torch.set_printoptions(precision=2)
>>> torch.tensor([1.12345])
tensor([1.12])
>>> # Limit the number of elements shown
>>> torch.set_printoptions(threshold=5)
>>> torch.arange(10)
tensor([0, 1, 2, ..., 7, 8, 9])
>>> # Restore defaults
>>> torch.set_printoptions(profile='default')
>>> torch.tensor([1.12345])
tensor([1.1235])
>>> torch.arange(10)
tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
if profile is not None:
if profile == "default":
PRINT_OPTS.precision = 4
PRINT_OPTS.threshold = 1000
PRINT_OPTS.edgeitems = 3
PRINT_OPTS.linewidth = 80
elif profile == "short":
PRINT_OPTS.precision = 2
PRINT_OPTS.threshold = 1000
PRINT_OPTS.edgeitems = 2
PRINT_OPTS.linewidth = 80
elif profile == "full":
PRINT_OPTS.precision = 4
PRINT_OPTS.threshold = inf
PRINT_OPTS.edgeitems = 3
PRINT_OPTS.linewidth = 80
if precision is not None:
PRINT_OPTS.precision = precision
if threshold is not None:
PRINT_OPTS.threshold = threshold
if edgeitems is not None:
PRINT_OPTS.edgeitems = edgeitems
if linewidth is not None:
PRINT_OPTS.linewidth = linewidth
PRINT_OPTS.sci_mode = sci_mode
def get_printoptions() -> dict[str, Any]:
r"""Gets the current options for printing, as a dictionary that
can be passed as ``**kwargs`` to set_printoptions().
"""
return dataclasses.asdict(PRINT_OPTS)
@contextlib.contextmanager
def printoptions(**kwargs):
r"""Context manager that temporarily changes the print options. Accepted
arguments are same as :func:`set_printoptions`."""
old_kwargs = get_printoptions()
set_printoptions(**kwargs)
try:
yield
finally:
set_printoptions(**old_kwargs)
def tensor_totype(t):
dtype = (
torch.float
if (
t.is_mps
or (t.is_xpu and not torch.xpu.get_device_properties(t.device).has_fp64)
or t.is_maia
)
else torch.double
)
return t.to(dtype=dtype)
| __PrinterOptions |
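As a quick usage sketch of the helpers defined above: `printoptions` applies temporary settings and restores the previous ones on exit. The import goes through the private `torch._tensor_str` module named in this row's path, so treat it as internal API rather than the public surface.

```python
import torch
from torch._tensor_str import printoptions  # internal module shown above

t = torch.tensor([3.14159265])
print(t)                      # tensor([3.1416]) with the default precision of 4
with printoptions(precision=2):
    print(t)                  # tensor([3.14])
print(t)                      # previous options restored: tensor([3.1416])
```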
python | openai__openai-python | src/openai/types/beta/chatkit/chat_session_chatkit_configuration_param.py | {
"start": 1070,
"end": 1674
} | class ____(TypedDict, total=False):
automatic_thread_titling: AutomaticThreadTitling
"""Configuration for automatic thread titling.
When omitted, automatic thread titling is enabled by default.
"""
file_upload: FileUpload
"""Configuration for upload enablement and limits.
When omitted, uploads are disabled by default (max_files 10, max_file_size 512
MB).
"""
history: History
"""Configuration for chat history retention.
When omitted, history is enabled by default with no limit on recent_threads
(null).
"""
| ChatSessionChatKitConfigurationParam |
python | eriklindernoren__ML-From-Scratch | mlfromscratch/supervised_learning/gradient_boosting.py | {
"start": 3219,
"end": 3690
} | class ____(GradientBoosting):
def __init__(self, n_estimators=200, learning_rate=0.5, min_samples_split=2,
min_var_red=1e-7, max_depth=4, debug=False):
super(GradientBoostingRegressor, self).__init__(n_estimators=n_estimators,
learning_rate=learning_rate,
min_samples_split=min_samples_split,
min_impurity=min_var_red,
max_depth=max_depth,
regression=True)
| GradientBoostingRegressor |
python | crytic__slither | slither/detectors/compiler_bugs/uninitialized_function_ptr_in_constructor.py | {
"start": 2191,
"end": 5086
} | class ____(AbstractDetector):
"""
Uninitialized function pointer calls in constructors
"""
ARGUMENT = "uninitialized-fptr-cst"
HELP = "Uninitialized function pointer calls in constructors"
IMPACT = DetectorClassification.LOW
CONFIDENCE = DetectorClassification.HIGH
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#uninitialized-function-pointers-in-constructors"
WIKI_TITLE = "Uninitialized function pointers in constructors"
WIKI_DESCRIPTION = "solc versions `0.4.5`-`0.4.26` and `0.5.0`-`0.5.8` contain a compiler bug leading to unexpected behavior when calling uninitialized function pointers in constructors."
# region wiki_exploit_scenario
WIKI_EXPLOIT_SCENARIO = """
```solidity
contract bad0 {
constructor() public {
/* Uninitialized function pointer */
function(uint256) internal returns(uint256) a;
a(10);
}
}
```
The call to `a(10)` will lead to unexpected behavior because function pointer `a` is not initialized in the constructor."""
# endregion wiki_exploit_scenario
WIKI_RECOMMENDATION = (
"Initialize function pointers before calling. Avoid function pointers if possible."
)
VULNERABLE_SOLC_VERSIONS = make_solc_versions(4, 5, 25) + make_solc_versions(5, 0, 8)
@staticmethod
def _detect_uninitialized_function_ptr_in_constructor(
contract: Contract,
) -> List[Union[Any, Node]]:
"""
Detect uninitialized function pointer calls in constructors
:param contract: The contract of interest for detection
:return: A list of nodes with uninitialized function pointer calls in the constructor of given contract
"""
results = []
constructor = contract.constructors_declared
if constructor:
variables_entrance = _get_variables_entrance(constructor)
results = [
node for node in constructor.nodes if _is_vulnerable(node, variables_entrance)
]
return results
def _detect(self) -> List[Output]:
"""
Detect uninitialized function pointer calls in constructors of contracts
Returns:
list: ['uninitialized function pointer calls in constructors']
"""
results = []
for contract in self.compilation_unit.contracts:
contract_info: DETECTOR_INFO = ["Contract ", contract, " \n"]
nodes = self._detect_uninitialized_function_ptr_in_constructor(contract)
for node in nodes:
node_info: DETECTOR_INFO = [
"\t ",
node,
" is an unintialized function pointer call in a constructor\n",
]
json = self.generate_result(contract_info + node_info)
results.append(json)
return results
| UninitializedFunctionPtrsConstructor |
python | tensorflow__tensorflow | tensorflow/python/distribute/parallel_device/parallel_device.py | {
"start": 2160,
"end": 9612
} | class ____(object):
"""A device which executes operations in parallel."""
def __init__(self, components):
"""Creates a device which executes operations in parallel on `components`.
Args:
components: A list of device names. Each operation executed on the
returned device executes on these component devices.
Returns:
A string with the name of the newly created device.
"""
global _next_device_number, _next_device_number_lock
self.components = tuple(device_util.canonicalize(d) for d in components)
if not self.components:
raise ValueError("ParallelDevice requires at least one component.")
ctx = context.context()
with _next_device_number_lock:
# TODO(allenl): Better names for parallel devices (right now "CUSTOM" is
# special-cased).
self._name = "{}/device:CUSTOM:{}".format(ctx.host_address_space(),
_next_device_number)
_next_device_number += 1
device, device_info = _pywrap_parallel_device.GetParallelDeviceCapsules(
self._name, self.components)
context.register_custom_device(device, self._name, device_info)
self._device_ids = None
self._device_scope = None
_all_parallel_devices[self._name] = self
def _pack_tensor(self, *tensors):
"""Helper to pack plain-old-tensors, not structures or composites."""
for tensor in tensors:
if not isinstance(
tensor,
(
tensor_lib.Tensor,
composite_tensor.CompositeTensor,
variables.Variable,
),
):
raise ValueError(
("Every component to pack onto the ParallelDevice must already be "
"a tensor, got {}. Consider running `tf.constant` or "
"`tf.convert_to_tensor` first on literal values.")
.format(tensors))
with ops.device(self._name):
return tpu_ops.tpu_replicated_input(inputs=tensors)
def pack(self, tensors):
"""Create a tensor on the parallel device from a sequence of tensors.
Args:
tensors: A list of tensors, one per device in `self.components`. The list
can contain composite tensors and nests (lists, dicts, etc. supported by
`tf.nest`) with the same structure for each device, but every component
of nests must already be a `tf.Tensor` or composite. Passing
`tf.Variable` objects reads their value, it does not share a mutable
reference between the packed and unpacked forms.
Returns:
A tensor placed on the ParallelDevice. For nested structures, returns a
single structure containing tensors placed on the ParallelDevice (same
structure as each component of `tensors`).
Raises:
ValueError: If the length of `tensors` does not match the number of
component devices, or if there are non-tensor inputs.
"""
self._assert_eager()
if len(tensors) != len(self.components):
raise ValueError(
("Creating a parallel tensor requires one tensor per component. "
"Got {} but was expecting {}.")
.format(len(tensors), len(self.components)))
with ops.device(None):
# Explicitly read variable values. This can not be done on the parallel
# device since the tensors are to be packed.
tensors = variable_utils.convert_variables_to_tensors(tensors)
return nest.map_structure(self._pack_tensor, *tensors,
expand_composites=True)
def _unpack_tensor(self, parallel_tensor):
"""Helper to unpack a single tensor."""
if not isinstance(
parallel_tensor,
(
tensor_lib.Tensor,
composite_tensor.CompositeTensor,
variables.Variable,
),
):
raise ValueError("Expected a tensor, got {}.".format(parallel_tensor))
with ops.device(self._name):
return tpu_ops.tpu_replicated_output(
parallel_tensor, num_replicas=len(self.components))
def unpack(self, parallel_tensor):
"""Unpack a parallel tensor into its components.
Args:
parallel_tensor: A tensor, composite tensor, or `tf.nest` of such placed
on the ParallelDevice. Passing `tf.Variable` objects reads their value,
it does not share a mutable reference between the packed and unpacked
forms.
Returns:
A list with the same length as `self.components` each with the same
structure as `parallel_tensor`, containing component tensors.
"""
self._assert_eager()
unpacked_components = [[] for _ in range(len(self.components))]
with ops.device(self._name):
parallel_tensor = variable_utils.convert_variables_to_tensors(
parallel_tensor)
for tensor in nest.flatten(parallel_tensor, expand_composites=True):
for accumulator, unpacked_tensor in zip(
unpacked_components, self._unpack_tensor(tensor)):
accumulator.append(unpacked_tensor)
return [nest.pack_sequence_as(parallel_tensor, unpacked,
expand_composites=True)
for unpacked in unpacked_components]
@property
def device_ids(self):
"""A parallel tensor with scalar integers numbering component devices.
Each device ID is placed on its corresponding device, in the same order as
the `components` constructor argument.
Returns:
A parallel tensor containing 0 on the first device, 1 on the second, etc.
"""
if self._device_ids is None:
# device_ids may be called from inside a tf.function, in which case the
# function captures the eager tensor. We can't pack tensors in a function
# at the moment, and even if we could we don't want to hold on to a
# symbolic tensor, so we need to init_scope out of the function
# temporarily.
with ops.init_scope():
# TODO(allenl): Functions which capture eager device ID tensors won't be
# saveable in SavedModels. Ideally we'd run a DeviceID op every time
# device IDs are required, with functions using the op in their bodies
# but not hard-coding a fixed number of devices (so they can be re-used
# with a different replica count).
device_ids_list = []
for index, device in enumerate(self.components):
with ops.device(device):
# The identity op ensures each device ID tensor is placed on its
# device.
device_ids_list.append(
array_ops.identity(constant_op.constant(index)))
self._device_ids = self.pack(device_ids_list)
return self._device_ids
def _assert_eager(self):
"""Verifies that tracing is not active."""
if not context.executing_eagerly():
raise NotImplementedError(
"ParallelDevice is currently not supported inside `tf.function`. It "
"can however run calls to a `tf.function` in parallel:\n\n"
"with ParallelDevice() as p:\n f()")
def __enter__(self):
"""Runs ops in parallel, makes variables which save independent buffers."""
if self._device_scope is not None:
raise AssertionError(
"Re-entered a ParallelDevice scope without first exiting it.")
self._assert_eager()
self._device_scope = ops.device(self._name)
self._device_scope.__enter__()
return self
def __exit__(self, typ, exc, tb):
self._device_scope.__exit__(typ, exc, tb)
self._device_scope = None
| ParallelDevice |
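A rough sketch of the pack/run/unpack cycle described in the docstrings above. It assumes eager execution, that two logical CPU devices are configured before any other TensorFlow work, and that importing `ParallelDevice` from the internal module path shown in this row is acceptable; it is not public API.

```python
import tensorflow as tf
from tensorflow.python.distribute.parallel_device import parallel_device

# Split the single physical CPU into two logical devices so the parallel
# device has two components to run on; this must happen before other TF calls.
cpu = tf.config.list_physical_devices("CPU")[0]
tf.config.set_logical_device_configuration(
    cpu,
    [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()],
)

device = parallel_device.ParallelDevice(["/device:CPU:0", "/device:CPU:1"])
packed = device.pack([tf.constant(1.0), tf.constant(2.0)])
with device:
    doubled = packed * 2.0        # runs once per component, in parallel
print(device.unpack(doubled))     # per-component results, e.g. 2.0 and 4.0
```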
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"start": 10583,
"end": 10866
} | class ____(AtomicRule):
"""integrate(sqrt(a+b*x+c*x**2), x)"""
a: Expr
b: Expr
c: Expr
def eval(self) -> Expr:
step = sqrt_quadratic_rule(IntegralInfo(self.integrand, self.variable), degenerate=False)
return step.eval()
@dataclass
| SqrtQuadraticRule |
python | django__django | django/db/models/functions/text.py | {
"start": 6698,
"end": 6784
} | class ____(OracleHashMixin, Transform):
function = "MD5"
lookup_name = "md5"
| MD5 |
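A short sketch of how this database function is typically used. `Author` is a hypothetical model with a `name` column and the digest string is only illustrative; registering the transform is needed for the `__md5` filter syntax.

```python
from django.db.models import CharField
from django.db.models.functions import MD5

# Annotate rows with the MD5 digest of a column.
authors = Author.objects.annotate(name_md5=MD5("name"))

# Because lookup_name = "md5", the transform can also be registered on a
# field class and used directly in filters.
CharField.register_lookup(MD5)
matches = Author.objects.filter(name__md5="6cd3556deb0da54bca060b4c39479839")
```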
python | dagster-io__dagster | python_modules/dagster/dagster/_core/remote_representation/external_data.py | {
"start": 7153,
"end": 7265
} | class ____(Enum):
ANONYMOUS = "ANONYMOUS"
TOP_LEVEL = "TOP_LEVEL"
@whitelist_for_serdes
| NestedResourceType |
python | redis__redis-py | tests/test_auth/test_token_manager.py | {
"start": 405,
"end": 18660
} | class ____:
@pytest.mark.parametrize(
"exp_refresh_ratio",
[
0.9,
0.28,
],
ids=[
"Refresh ratio = 0.9",
"Refresh ratio = 0.28",
],
)
def test_success_token_renewal(self, exp_refresh_ratio):
tokens = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.side_effect = [
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 100,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 150,
(datetime.now(timezone.utc).timestamp() * 1000) + 50,
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 170,
(datetime.now(timezone.utc).timestamp() * 1000) + 70,
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 190,
(datetime.now(timezone.utc).timestamp() * 1000) + 90,
{"oid": "test"},
),
]
def on_next(token):
nonlocal tokens
tokens.append(token)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
retry_policy = RetryPolicy(1, 10)
config = TokenManagerConfig(exp_refresh_ratio, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
mgr.start(mock_listener)
sleep(0.1)
assert len(tokens) > 0
@pytest.mark.parametrize(
"exp_refresh_ratio",
[
(0.9),
(0.28),
],
ids=[
"Refresh ratio = 0.9",
"Refresh ratio = 0.28",
],
)
@pytest.mark.asyncio
async def test_async_success_token_renewal(self, exp_refresh_ratio):
tokens = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.side_effect = [
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 100,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 130,
(datetime.now(timezone.utc).timestamp() * 1000) + 30,
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 160,
(datetime.now(timezone.utc).timestamp() * 1000) + 60,
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 190,
(datetime.now(timezone.utc).timestamp() * 1000) + 90,
{"oid": "test"},
),
]
async def on_next(token):
nonlocal tokens
tokens.append(token)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
retry_policy = RetryPolicy(1, 10)
config = TokenManagerConfig(exp_refresh_ratio, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
await mgr.start_async(mock_listener, block_for_initial=True)
await asyncio.sleep(0.1)
assert len(tokens) > 0
@pytest.mark.parametrize(
"block_for_initial,tokens_acquired",
[
(True, 1),
(False, 0),
],
ids=[
"Block for initial, callback will triggered once",
"Non blocked, callback wont be triggered",
],
)
@pytest.mark.asyncio
async def test_async_request_token_blocking_behaviour(
self, block_for_initial, tokens_acquired
):
tokens = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.return_value = SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 100,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
)
async def on_next(token):
nonlocal tokens
sleep(0.1)
tokens.append(token)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
retry_policy = RetryPolicy(1, 10)
config = TokenManagerConfig(1, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
await mgr.start_async(mock_listener, block_for_initial=block_for_initial)
assert len(tokens) == tokens_acquired
def test_token_renewal_with_skip_initial(self):
tokens = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.side_effect = [
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 1000,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 1500,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
]
def on_next(token):
nonlocal tokens
tokens.append(token)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
retry_policy = RetryPolicy(3, 10)
config = TokenManagerConfig(0.5, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
mgr.start(mock_listener, skip_initial=True)
assert len(tokens) == 0
sleep(0.6)
assert len(tokens) > 0
@pytest.mark.asyncio
async def test_async_token_renewal_with_skip_initial(self):
tokens = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.side_effect = [
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 1000,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 1200,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 1400,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
]
async def on_next(token):
nonlocal tokens
tokens.append(token)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
retry_policy = RetryPolicy(3, 10)
config = TokenManagerConfig(0.5, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
await mgr.start_async(mock_listener, skip_initial=True)
assert len(tokens) == 0
await asyncio.sleep(0.6)
assert len(tokens) > 0
def test_success_token_renewal_with_retry(self):
tokens = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.side_effect = [
RequestTokenErr,
RequestTokenErr,
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 100,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 100,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
]
def on_next(token):
nonlocal tokens
tokens.append(token)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
retry_policy = RetryPolicy(3, 10)
config = TokenManagerConfig(1, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
mgr.start(mock_listener)
        # Should be less than 0.1, or it will be flaky
# due to additional token renewal.
sleep(0.08)
assert mock_provider.request_token.call_count > 0
assert len(tokens) > 0
@pytest.mark.asyncio
async def test_async_success_token_renewal_with_retry(self):
tokens = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.side_effect = [
RequestTokenErr,
RequestTokenErr,
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 100,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 100,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
),
]
async def on_next(token):
nonlocal tokens
tokens.append(token)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
mock_listener.on_error = None
retry_policy = RetryPolicy(3, 10)
config = TokenManagerConfig(1, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
await mgr.start_async(mock_listener, block_for_initial=True)
        # Should be less than 0.1, or it will be flaky
# due to additional token renewal.
await asyncio.sleep(0.08)
assert mock_provider.request_token.call_count > 0
assert len(tokens) > 0
def test_no_token_renewal_on_process_complete(self):
tokens = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.return_value = SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 1000,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
)
def on_next(token):
nonlocal tokens
tokens.append(token)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
retry_policy = RetryPolicy(1, 10)
config = TokenManagerConfig(0.9, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
mgr.start(mock_listener)
sleep(0.2)
assert len(tokens) == 1
@pytest.mark.asyncio
async def test_async_no_token_renewal_on_process_complete(self):
tokens = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.return_value = SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 1000,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
)
async def on_next(token):
nonlocal tokens
tokens.append(token)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
retry_policy = RetryPolicy(1, 10)
config = TokenManagerConfig(0.9, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
await mgr.start_async(mock_listener, block_for_initial=True)
await asyncio.sleep(0.2)
assert len(tokens) == 1
def test_failed_token_renewal_with_retry(self):
tokens = []
exceptions = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.side_effect = [
RequestTokenErr,
RequestTokenErr,
RequestTokenErr,
RequestTokenErr,
]
def on_next(token):
nonlocal tokens
tokens.append(token)
def on_error(exception):
nonlocal exceptions
exceptions.append(exception)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
mock_listener.on_error = on_error
retry_policy = RetryPolicy(3, 10)
config = TokenManagerConfig(1, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
mgr.start(mock_listener)
sleep(0.1)
assert mock_provider.request_token.call_count == 4
assert len(tokens) == 0
assert len(exceptions) == 1
@pytest.mark.asyncio
async def test_async_failed_token_renewal_with_retry(self):
tokens = []
exceptions = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.side_effect = [
RequestTokenErr,
RequestTokenErr,
RequestTokenErr,
RequestTokenErr,
]
async def on_next(token):
nonlocal tokens
tokens.append(token)
async def on_error(exception):
nonlocal exceptions
exceptions.append(exception)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
mock_listener.on_error = on_error
retry_policy = RetryPolicy(3, 10)
config = TokenManagerConfig(1, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
await mgr.start_async(mock_listener, block_for_initial=True)
sleep(0.1)
assert mock_provider.request_token.call_count == 4
assert len(tokens) == 0
assert len(exceptions) == 1
def test_failed_renewal_on_expired_token(self):
errors = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.return_value = SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) - 100,
(datetime.now(timezone.utc).timestamp() * 1000) - 1000,
{"oid": "test"},
)
def on_error(error: TokenRenewalErr):
nonlocal errors
errors.append(error)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_error = on_error
retry_policy = RetryPolicy(1, 10)
config = TokenManagerConfig(1, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
mgr.start(mock_listener)
assert len(errors) == 1
assert isinstance(errors[0], TokenRenewalErr)
assert str(errors[0]) == "Requested token is expired"
@pytest.mark.asyncio
async def test_async_failed_renewal_on_expired_token(self):
errors = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.return_value = SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) - 100,
(datetime.now(timezone.utc).timestamp() * 1000) - 1000,
{"oid": "test"},
)
async def on_error(error: TokenRenewalErr):
nonlocal errors
errors.append(error)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_error = on_error
retry_policy = RetryPolicy(1, 10)
config = TokenManagerConfig(1, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
await mgr.start_async(mock_listener, block_for_initial=True)
assert len(errors) == 1
assert isinstance(errors[0], TokenRenewalErr)
assert str(errors[0]) == "Requested token is expired"
def test_failed_renewal_on_callback_error(self):
errors = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.return_value = SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 1000,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
)
def on_next(token):
raise Exception("Some exception")
def on_error(error):
nonlocal errors
errors.append(error)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
mock_listener.on_error = on_error
retry_policy = RetryPolicy(1, 10)
config = TokenManagerConfig(1, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
mgr.start(mock_listener)
assert len(errors) == 1
assert isinstance(errors[0], TokenRenewalErr)
assert str(errors[0]) == "Some exception"
@pytest.mark.asyncio
async def test_async_failed_renewal_on_callback_error(self):
errors = []
mock_provider = Mock(spec=IdentityProviderInterface)
mock_provider.request_token.return_value = SimpleToken(
"value",
(datetime.now(timezone.utc).timestamp() * 1000) + 1000,
(datetime.now(timezone.utc).timestamp() * 1000),
{"oid": "test"},
)
async def on_next(token):
raise Exception("Some exception")
async def on_error(error):
nonlocal errors
errors.append(error)
mock_listener = Mock(spec=CredentialsListener)
mock_listener.on_next = on_next
mock_listener.on_error = on_error
retry_policy = RetryPolicy(1, 10)
config = TokenManagerConfig(1, 0, 1000, retry_policy)
mgr = TokenManager(mock_provider, config)
await mgr.start_async(mock_listener, block_for_initial=True)
assert len(errors) == 1
assert isinstance(errors[0], TokenRenewalErr)
assert str(errors[0]) == "Some exception"
| TestTokenManager |
python | ray-project__ray | python/ray/util/scheduling_strategies.py | {
"start": 428,
"end": 1462
} | class ____:
"""Placement group based scheduling strategy.
Attributes:
placement_group: the placement group this actor belongs to,
or None if it doesn't belong to any group.
placement_group_bundle_index: the index of the bundle
if the actor belongs to a placement group, which may be -1 to
specify any available bundle.
        placement_group_capture_child_tasks: Whether or not child tasks
of this actor should implicitly use the same placement group
as its parent. It is False by default.
"""
def __init__(
self,
placement_group: "PlacementGroup",
placement_group_bundle_index: int = -1,
placement_group_capture_child_tasks: Optional[bool] = None,
):
self.placement_group = placement_group
self.placement_group_bundle_index = placement_group_bundle_index
self.placement_group_capture_child_tasks = placement_group_capture_child_tasks
@PublicAPI
| PlacementGroupSchedulingStrategy |
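For reference, a minimal sketch of passing this strategy to a Ray task; it assumes a local cluster with at least two CPUs and uses the standard `ray.util.placement_group` helper.

```python
import ray
from ray.util.placement_group import placement_group
from ray.util.scheduling_strategies import PlacementGroupSchedulingStrategy

ray.init(num_cpus=2)
pg = placement_group([{"CPU": 1}, {"CPU": 1}])
ray.get(pg.ready())  # wait until both bundles are reserved

@ray.remote(num_cpus=1)
def where_am_i() -> str:
    return "scheduled into bundle 0 of the placement group"

ref = where_am_i.options(
    scheduling_strategy=PlacementGroupSchedulingStrategy(
        placement_group=pg,
        placement_group_bundle_index=0,
    )
).remote()
print(ray.get(ref))
```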
python | conda__conda | conda/gateways/connection/adapters/s3.py | {
"start": 655,
"end": 3308
} | class ____(BaseAdapter):
def send(
self,
request: PreparedRequest,
stream: bool = False,
timeout: None | float | tuple[float, float] | tuple[float, None] = None,
verify: bool | str = True,
cert: None | bytes | str | tuple[bytes | str, bytes | str] = None,
proxies: dict[str, str] | None = None,
) -> Response:
resp = Response()
resp.status_code = 200
resp.url = request.url
try:
return self._send_boto3(resp, request)
except ImportError:
stderrlog.info(
"\nError: boto3 is required for S3 channels. "
"Please install with `conda install boto3`\n"
"Make sure to run `conda deactivate` if you "
"are in a conda environment.\n"
)
resp.status_code = 404
return resp
def close(self):
pass
def _send_boto3(self, resp: Response, request: PreparedRequest) -> Response:
from boto3.session import Session
from botocore.exceptions import BotoCoreError, ClientError
bucket_name, key_string = url_to_s3_info(request.url)
# https://github.com/conda/conda/issues/8993
# creating a separate boto3 session to make this thread safe
session = Session()
# create a resource client using this thread's session object
s3 = session.resource("s3")
# finally get the S3 object
key = s3.Object(bucket_name, key_string[1:])
try:
response = key.get()
except (BotoCoreError, ClientError) as e:
resp.status_code = 404
message = {
"error": "error downloading file from s3",
"path": request.url,
"exception": repr(e),
}
resp.raw = self._write_tempfile(
lambda x: x.write(ensure_binary(json.dumps(message)))
)
resp.close = resp.raw.close
return resp
key_headers = response["ResponseMetadata"]["HTTPHeaders"]
resp.headers = CaseInsensitiveDict(
{
"Content-Type": key_headers.get("content-type", "text/plain"),
"Content-Length": key_headers["content-length"],
"Last-Modified": key_headers["last-modified"],
}
)
resp.raw = self._write_tempfile(key.download_fileobj)
resp.close = resp.raw.close
return resp
def _write_tempfile(self, writer_callable):
fh = SpooledTemporaryFile()
writer_callable(fh)
fh.seek(0)
return fh
| S3Adapter |
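A hedged sketch of how a transport adapter like this is mounted on a session so that `s3://` URLs are served through boto3. The bucket and key are hypothetical, boto3 and AWS credentials must already be configured, and it assumes conda's `BaseAdapter` follows the standard requests adapter interface.

```python
import requests
from conda.gateways.connection.adapters.s3 import S3Adapter

session = requests.Session()
session.mount("s3://", S3Adapter())  # route all s3:// URLs through the adapter

# Hypothetical channel hosted in a private S3 bucket.
resp = session.get("s3://my-conda-channel/noarch/repodata.json")
print(resp.status_code, resp.headers.get("Content-Type"))
```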
python | doocs__leetcode | solution/0400-0499/0407.Trapping Rain Water II/Solution.py | {
"start": 0,
"end": 839
} | class ____:
def trapRainWater(self, heightMap: List[List[int]]) -> int:
m, n = len(heightMap), len(heightMap[0])
vis = [[False] * n for _ in range(m)]
pq = []
for i in range(m):
for j in range(n):
if i == 0 or i == m - 1 or j == 0 or j == n - 1:
heappush(pq, (heightMap[i][j], i, j))
vis[i][j] = True
ans = 0
dirs = (-1, 0, 1, 0, -1)
while pq:
h, i, j = heappop(pq)
for a, b in pairwise(dirs):
x, y = i + a, j + b
if x >= 0 and x < m and y >= 0 and y < n and not vis[x][y]:
ans += max(0, h - heightMap[x][y])
vis[x][y] = True
heappush(pq, (max(h, heightMap[x][y]), x, y))
return ans
| Solution |
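A worked example for the min-heap flood fill above, using the classic 3x6 height map from the problem statement; `pairwise` requires Python 3.10+, and the class name `Solution` follows this row's target column.

```python
from heapq import heappush, heappop
from itertools import pairwise  # Python 3.10+
from typing import List

# ... class Solution as defined above ...

height_map = [
    [1, 4, 3, 1, 3, 2],
    [3, 2, 1, 3, 2, 4],
    [2, 3, 3, 2, 3, 1],
]
print(Solution().trapRainWater(height_map))  # 4 units of trapped water
```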
python | python__mypy | mypy/report.py | {
"start": 11201,
"end": 15074
} | class ____(TraverserVisitor):
def __init__(self, source: list[str]) -> None:
self.source = source
# For each line of source, we maintain a pair of
# * the indentation level of the surrounding function
# (-1 if not inside a function), and
# * whether the surrounding function is typed.
# Initially, everything is covered at indentation level -1.
self.lines_covered = [(-1, True) for l in source]
# The Python AST has position information for the starts of
# elements, but not for their ends. Fortunately the
# indentation-based syntax makes it pretty easy to find where a
# block ends without doing any real parsing.
# TODO: Handle line continuations (explicit and implicit) and
# multi-line string literals. (But at least line continuations
# are normally more indented than their surrounding block anyways,
# by PEP 8.)
def indentation_level(self, line_number: int) -> int | None:
"""Return the indentation of a line of the source (specified by
zero-indexed line number). Returns None for blank lines or comments."""
line = self.source[line_number]
indent = 0
for char in list(line):
if char == " ":
indent += 1
elif char == "\t":
indent = 8 * ((indent + 8) // 8)
elif char == "#":
# Line is a comment; ignore it
return None
elif char == "\n":
# Line is entirely whitespace; ignore it
return None
# TODO line continuation (\)
else:
# Found a non-whitespace character
return indent
# Line is entirely whitespace, and at end of file
# with no trailing newline; ignore it
return None
def visit_func_def(self, defn: FuncDef) -> None:
start_line = defn.line - 1
start_indent = None
# When a function is decorated, sometimes the start line will point to
# whitespace or comments between the decorator and the function, so
# we have to look for the start.
while start_line < len(self.source):
start_indent = self.indentation_level(start_line)
if start_indent is not None:
break
start_line += 1
# If we can't find the function give up and don't annotate anything.
# Our line numbers are not reliable enough to be asserting on.
if start_indent is None:
return
cur_line = start_line + 1
end_line = cur_line
# After this loop, function body will be lines [start_line, end_line)
while cur_line < len(self.source):
cur_indent = self.indentation_level(cur_line)
if cur_indent is None:
# Consume the line, but don't mark it as belonging to the function yet.
cur_line += 1
elif cur_indent > start_indent:
# A non-blank line that belongs to the function.
cur_line += 1
end_line = cur_line
else:
# We reached a line outside the function definition.
break
is_typed = defn.type is not None
for line in range(start_line, end_line):
old_indent, _ = self.lines_covered[line]
# If there was an old indent level for this line, and the new
# level isn't increasing the indentation, ignore it.
# This is to be defensive against funniness in our line numbers,
# which are not always reliable.
if old_indent <= start_indent:
self.lines_covered[line] = (start_indent, is_typed)
# Visit the body, in case there are nested functions
super().visit_func_def(defn)
| LineCoverageVisitor |
python | lepture__authlib | authlib/integrations/httpx_client/oauth2_client.py | {
"start": 2010,
"end": 6808
} | class ____(_OAuth2Client, httpx.AsyncClient):
SESSION_REQUEST_PARAMS = HTTPX_CLIENT_KWARGS
client_auth_class = OAuth2ClientAuth
token_auth_class = OAuth2Auth
oauth_error_class = OAuthError
def __init__(
self,
client_id=None,
client_secret=None,
token_endpoint_auth_method=None,
revocation_endpoint_auth_method=None,
scope=None,
redirect_uri=None,
token=None,
token_placement="header",
update_token=None,
leeway=60,
**kwargs,
):
# extract httpx.Client kwargs
client_kwargs = self._extract_session_request_params(kwargs)
httpx.AsyncClient.__init__(self, **client_kwargs)
# We use a Lock to synchronize coroutines to prevent
# multiple concurrent attempts to refresh the same token
self._token_refresh_lock = Lock()
_OAuth2Client.__init__(
self,
session=None,
client_id=client_id,
client_secret=client_secret,
token_endpoint_auth_method=token_endpoint_auth_method,
revocation_endpoint_auth_method=revocation_endpoint_auth_method,
scope=scope,
redirect_uri=redirect_uri,
token=token,
token_placement=token_placement,
update_token=update_token,
leeway=leeway,
**kwargs,
)
async def request(
self, method, url, withhold_token=False, auth=USE_CLIENT_DEFAULT, **kwargs
):
if not withhold_token and auth is USE_CLIENT_DEFAULT:
if not self.token:
raise MissingTokenError()
await self.ensure_active_token(self.token)
auth = self.token_auth
return await super().request(method, url, auth=auth, **kwargs)
@asynccontextmanager
async def stream(
self, method, url, withhold_token=False, auth=USE_CLIENT_DEFAULT, **kwargs
):
if not withhold_token and auth is USE_CLIENT_DEFAULT:
if not self.token:
raise MissingTokenError()
await self.ensure_active_token(self.token)
auth = self.token_auth
async with super().stream(method, url, auth=auth, **kwargs) as resp:
yield resp
async def ensure_active_token(self, token):
async with self._token_refresh_lock:
if self.token.is_expired(leeway=self.leeway):
refresh_token = token.get("refresh_token")
url = self.metadata.get("token_endpoint")
if refresh_token and url:
await self.refresh_token(url, refresh_token=refresh_token)
elif self.metadata.get("grant_type") == "client_credentials":
access_token = token["access_token"]
new_token = await self.fetch_token(
url, grant_type="client_credentials"
)
if self.update_token:
await self.update_token(new_token, access_token=access_token)
else:
raise InvalidTokenError()
async def _fetch_token(
self,
url,
body="",
headers=None,
auth=USE_CLIENT_DEFAULT,
method="POST",
**kwargs,
):
if method.upper() == "POST":
resp = await self.post(
url, data=dict(url_decode(body)), headers=headers, auth=auth, **kwargs
)
else:
if "?" in url:
url = "&".join([url, body])
else:
url = "?".join([url, body])
resp = await self.get(url, headers=headers, auth=auth, **kwargs)
for hook in self.compliance_hook["access_token_response"]:
resp = hook(resp)
return self.parse_response_token(resp)
async def _refresh_token(
self,
url,
refresh_token=None,
body="",
headers=None,
auth=USE_CLIENT_DEFAULT,
**kwargs,
):
resp = await self.post(
url, data=dict(url_decode(body)), headers=headers, auth=auth, **kwargs
)
for hook in self.compliance_hook["refresh_token_response"]:
resp = hook(resp)
token = self.parse_response_token(resp)
if "refresh_token" not in token:
self.token["refresh_token"] = refresh_token
if self.update_token:
await self.update_token(self.token, refresh_token=refresh_token)
return self.token
def _http_post(
self, url, body=None, auth=USE_CLIENT_DEFAULT, headers=None, **kwargs
):
return self.post(
url, data=dict(url_decode(body)), headers=headers, auth=auth, **kwargs
)
| AsyncOAuth2Client |
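A brief, hedged sketch of a client-credentials flow with this class; the endpoint URLs and credentials are placeholders, not real services.

```python
import asyncio
from authlib.integrations.httpx_client import AsyncOAuth2Client

async def main() -> None:
    async with AsyncOAuth2Client(
        client_id="client-id",
        client_secret="client-secret",
        scope="profile",
    ) as client:
        # Placeholder token endpoint; fetch_token stores the token on the client.
        token = await client.fetch_token(
            "https://example.com/oauth/token",
            grant_type="client_credentials",
        )
        resp = await client.get("https://example.com/api/me")
        print(token["access_token"], resp.status_code)

asyncio.run(main())
```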
python | pytorch__pytorch | test/distributed/checkpoint/test_hf_safetensor_e2e.py | {
"start": 16387,
"end": 21810
} | class ____(DTensorTestBase):
"""
    Test DCP resharding for DTensor with placement changes and device mesh changes.
"""
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(2)
def test_1d_to_2d_reshard_mesh_change(self) -> None:
if importlib.util.find_spec("safetensors") is None:
print("safetensors not installed")
return
CHECKPOINT_DIR = self.temp_dir
for placements_1d in ONE_D_PLACEMENTS:
global_tensor = torch.arange(16, dtype=torch.float).view(4, 4)
mesh_shape = (self.world_size,)
mesh_1d = init_device_mesh(self.device_type, mesh_shape)
dtensor = distribute_tensor(
global_tensor, mesh_1d, placements=placements_1d
)
state_dict_to_save = {"dtensor": dtensor}
dist_cp.save(
state_dict=state_dict_to_save,
storage_writer=dist_cp.HuggingFaceStorageWriter(
path=CHECKPOINT_DIR, save_distributed=True
),
)
for placements_2d in TWO_D_PLACEMENTS:
mesh_shape = (2, self.world_size // 2)
mesh_2d = init_device_mesh(self.device_type, mesh_shape)
zero_dtensor = zeros(
[4, 4], device_mesh=mesh_2d, placements=placements_2d
)
state_dict_to_load = {"dtensor": zero_dtensor}
dist_cp.load(
state_dict=state_dict_to_load,
storage_reader=dist_cp.HuggingFaceStorageReader(CHECKPOINT_DIR),
planner=dist_cp.DefaultLoadPlanner(),
)
                # materialize the whole tensor to compare with the original global_tensor
state_dict_to_load["dtensor"] = state_dict_to_load[
"dtensor"
].redistribute(
mesh_2d,
placements=[Replicate(), Replicate()],
)
self.assertEqual(
global_tensor, state_dict_to_load["dtensor"].to_local()
)
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(4)
def test_2d_to_1d_reshard_mesh_change(self) -> None:
if importlib.util.find_spec("safetensors") is None:
print("safetensors not installed")
return
CHECKPOINT_DIR = self.temp_dir
for placements_2d in TWO_D_PLACEMENTS:
global_tensor = torch.arange(16, dtype=torch.float).view(4, 4)
mesh_shape = (2, self.world_size // 2)
mesh_2d = init_device_mesh(self.device_type, mesh_shape)
dtensor = distribute_tensor(
global_tensor, mesh_2d, placements=placements_2d
)
state_dict_to_save = {"dtensor": dtensor}
dist_cp.save(
state_dict=state_dict_to_save,
storage_writer=dist_cp.HuggingFaceStorageWriter(
path=CHECKPOINT_DIR, save_distributed=True
),
planner=dist_cp.DefaultSavePlanner(),
)
for placements_1d in ONE_D_PLACEMENTS:
mesh_shape = (self.world_size,)
mesh_1d = init_device_mesh(self.device_type, mesh_shape)
zero_dtensor = zeros(
[4, 4], device_mesh=mesh_1d, placements=placements_1d
)
state_dict_to_load = {"dtensor": zero_dtensor}
dist_cp.load(
state_dict=state_dict_to_load,
storage_reader=dist_cp.HuggingFaceStorageReader(CHECKPOINT_DIR),
planner=dist_cp.DefaultLoadPlanner(),
)
                # materialize the whole tensor to compare with the original global_tensor
state_dict_to_load["dtensor"] = state_dict_to_load[
"dtensor"
].redistribute(
mesh_1d,
placements=[Replicate()],
)
self.assertEqual(
global_tensor, state_dict_to_load["dtensor"].to_local()
)
@with_comms
@with_temp_dir
@skip_if_lt_x_gpu(2)
def test_dtensor_checkpoint_resharding_with_empty_shard(self):
"""
Test dtensor checkpoint resharding with dtensor containing empty shards.
"""
if importlib.util.find_spec("safetensors") is None:
print("safetensors not installed")
return
tensor = torch.rand(1).to(self.device_type)
mesh = init_device_mesh(self.device_type, (self.world_size,))
dtensor = distribute_tensor(tensor, mesh, [Shard(0)])
ref_state_dict = {"dtensor": dtensor}
dist_cp.save(
state_dict=ref_state_dict,
storage_writer=dist_cp.HuggingFaceStorageWriter(
path=self.temp_dir, save_distributed=True
),
)
tensor = torch.rand(1).to(self.device_type)
mesh_2 = init_device_mesh(self.device_type, (2, self.world_size // 2))
dtensor = distribute_tensor(tensor, mesh_2, [Shard(0), Shard(0)])
state_dict = {"dtensor": dtensor}
dist_cp.load(
state_dict=state_dict,
storage_reader=dist_cp.HuggingFaceStorageReader(self.temp_dir),
)
if __name__ == "__main__":
run_tests()
| TestDTensorReshardMeshChange |
python | python__mypy | mypy/report.py | {
"start": 21493,
"end": 22654
} | class ____:
"""Container for XML and statistics mapping python modules to Cobertura package."""
def __init__(self, name: str) -> None:
self.name = name
self.classes: dict[str, Any] = {}
self.packages: dict[str, CoberturaPackage] = {}
self.total_lines = 0
self.covered_lines = 0
def as_xml(self) -> Any:
package_element = etree.Element("package", complexity="1.0", name=self.name)
package_element.attrib["branch-rate"] = "0"
package_element.attrib["line-rate"] = get_line_rate(self.covered_lines, self.total_lines)
classes_element = etree.SubElement(package_element, "classes")
for class_name in sorted(self.classes):
classes_element.append(self.classes[class_name])
self.add_packages(package_element)
return package_element
def add_packages(self, parent_element: Any) -> None:
if self.packages:
packages_element = etree.SubElement(parent_element, "packages")
for package in sorted(self.packages.values(), key=attrgetter("name")):
packages_element.append(package.as_xml())
| CoberturaPackage |
python | sqlalchemy__sqlalchemy | examples/association/basic_association.py | {
"start": 1764,
"end": 3716
} | class ____(Base):
__tablename__ = "orderitem"
order_id: Mapped[int] = mapped_column(
ForeignKey("order.order_id"), primary_key=True
)
item_id: Mapped[int] = mapped_column(
ForeignKey("item.item_id"), primary_key=True
)
price: Mapped[float]
def __init__(self, item: Item, price: float | None = None) -> None:
self.item = item
self.price = price or item.price
item: Mapped[Item] = relationship(lazy="joined")
if __name__ == "__main__":
engine = create_engine("sqlite://")
Base.metadata.create_all(engine)
with Session(engine) as session:
# create catalog
tshirt, mug, hat, crowbar = (
Item("SA T-Shirt", 10.99),
Item("SA Mug", 6.50),
Item("SA Hat", 8.99),
Item("MySQL Crowbar", 16.99),
)
session.add_all([tshirt, mug, hat, crowbar])
session.commit()
# create an order
order = Order("john smith")
# add three OrderItem associations to the Order and save
order.order_items.append(OrderItem(mug))
order.order_items.append(OrderItem(crowbar, 10.99))
order.order_items.append(OrderItem(hat))
session.add(order)
session.commit()
# query the order, print items
order = session.scalars(
select(Order).filter_by(customer_name="john smith")
).one()
print(
[
(order_item.item.description, order_item.price)
for order_item in order.order_items
]
)
# print customers who bought 'MySQL Crowbar' on sale
q = (
select(Order)
.join(OrderItem)
.join(Item)
.where(
Item.description == "MySQL Crowbar",
Item.price > OrderItem.price,
)
)
print([order.customer_name for order in session.scalars(q)])
| OrderItem |
python | jazzband__django-oauth-toolkit | oauth2_provider/management/commands/cleartokens.py | {
"start": 91,
"end": 281
} | class ____(BaseCommand): # pragma: no cover
help = "Can be run as a cronjob or directly to clean out expired tokens"
def handle(self, *args, **options):
clear_expired()
| Command |
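The command body is a single call to `clear_expired()`; besides the cronjob usage mentioned in the help text, it can also be invoked programmatically:

```python
from django.core.management import call_command

# Equivalent to running `python manage.py cleartokens` from a cronjob.
call_command("cleartokens")
```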
python | realpython__materials | chatgpt-mentor/recursion_error.py | {
"start": 393,
"end": 728
} | class ____(list):
def push(self, item):
self.append(item)
def pop(self):
return super().pop()
def __repr__(self) -> str:
return f"{type(self).__name__}({self})"
When I call repr() with an instance of this class,
I get <repr-error 'maximum recursion depth exceeded'>
Can you help me fix that?
"""
| Stack |
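The recursion in the quoted prompt comes from the f-string: formatting `self` falls back to `__repr__`, which formats `self` again. One possible fix (not part of the original file) is to format a plain list copy instead:

```python
class Stack(list):
    def push(self, item):
        self.append(item)

    def pop(self):
        return super().pop()

    def __repr__(self) -> str:
        # Formatting list(self) uses list.__repr__ on a plain copy,
        # so it no longer recurses back into this method.
        return f"{type(self).__name__}({list(self)})"


s = Stack()
s.push(1)
s.push(2)
print(repr(s))  # Stack([1, 2])
```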
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/scoping.py | {
"start": 4153,
"end": 80094
} | class ____(Generic[_S]):
"""Provides scoped management of :class:`.Session` objects.
See :ref:`unitofwork_contextual` for a tutorial.
.. note::
When using :ref:`asyncio_toplevel`, the async-compatible
:class:`_asyncio.async_scoped_session` class should be
used in place of :class:`.scoped_session`.
"""
_support_async: bool = False
session_factory: sessionmaker[_S]
"""The `session_factory` provided to `__init__` is stored in this
attribute and may be accessed at a later time. This can be useful when
a new non-scoped :class:`.Session` is needed."""
registry: ScopedRegistry[_S]
def __init__(
self,
session_factory: sessionmaker[_S],
scopefunc: Optional[Callable[[], Any]] = None,
):
"""Construct a new :class:`.scoped_session`.
:param session_factory: a factory to create new :class:`.Session`
instances. This is usually, but not necessarily, an instance
of :class:`.sessionmaker`.
:param scopefunc: optional function which defines
the current scope. If not passed, the :class:`.scoped_session`
object assumes "thread-local" scope, and will use
a Python ``threading.local()`` in order to maintain the current
:class:`.Session`. If passed, the function should return
a hashable token; this token will be used as the key in a
dictionary in order to store and retrieve the current
:class:`.Session`.
"""
self.session_factory = session_factory
if scopefunc:
self.registry = ScopedRegistry(session_factory, scopefunc)
else:
self.registry = ThreadLocalRegistry(session_factory)
@property
def _proxied(self) -> _S:
return self.registry()
def __call__(self, **kw: Any) -> _S:
r"""Return the current :class:`.Session`, creating it
using the :attr:`.scoped_session.session_factory` if not present.
:param \**kw: Keyword arguments will be passed to the
:attr:`.scoped_session.session_factory` callable, if an existing
:class:`.Session` is not present. If the :class:`.Session` is present
and keyword arguments have been passed,
:exc:`~sqlalchemy.exc.InvalidRequestError` is raised.
"""
if kw:
if self.registry.has():
raise sa_exc.InvalidRequestError(
"Scoped session is already present; "
"no new arguments may be specified."
)
else:
sess = self.session_factory(**kw)
self.registry.set(sess)
else:
sess = self.registry()
if not self._support_async and sess._is_asyncio:
warn_deprecated(
"Using `scoped_session` with asyncio is deprecated and "
"will raise an error in a future version. "
"Please use `async_scoped_session` instead.",
"1.4.23",
)
return sess
def configure(self, **kwargs: Any) -> None:
"""reconfigure the :class:`.sessionmaker` used by this
:class:`.scoped_session`.
See :meth:`.sessionmaker.configure`.
"""
if self.registry.has():
warn(
"At least one scoped session is already present. "
" configure() can not affect sessions that have "
"already been created."
)
self.session_factory.configure(**kwargs)
def remove(self) -> None:
"""Dispose of the current :class:`.Session`, if present.
This will first call :meth:`.Session.close` method
on the current :class:`.Session`, which releases any existing
transactional/connection resources still being held; transactions
specifically are rolled back. The :class:`.Session` is then
discarded. Upon next usage within the same scope,
the :class:`.scoped_session` will produce a new
:class:`.Session` object.
"""
if self.registry.has():
self.registry().close()
self.registry.clear()
def query_property(
self, query_cls: Optional[Type[Query[_T]]] = None
) -> QueryPropertyDescriptor:
"""return a class property which produces a legacy
:class:`_query.Query` object against the class and the current
:class:`.Session` when called.
.. legacy:: The :meth:`_orm.scoped_session.query_property` accessor
is specific to the legacy :class:`.Query` object and is not
considered to be part of :term:`2.0-style` ORM use.
e.g.::
from sqlalchemy.orm import QueryPropertyDescriptor
from sqlalchemy.orm import scoped_session
from sqlalchemy.orm import sessionmaker
Session = scoped_session(sessionmaker())
class MyClass:
query: QueryPropertyDescriptor = Session.query_property()
# after mappers are defined
result = MyClass.query.filter(MyClass.name == "foo").all()
Produces instances of the session's configured query class by
default. To override and use a custom implementation, provide
a ``query_cls`` callable. The callable will be invoked with
the class's mapper as a positional argument and a session
keyword argument.
There is no limit to the number of query properties placed on
a class.
"""
class query:
def __get__(s, instance: Any, owner: Type[_O]) -> Query[_O]:
if query_cls:
# custom query class
return query_cls(owner, session=self.registry()) # type: ignore # noqa: E501
else:
# session's configured query class
return self.registry().query(owner)
return query()
# START PROXY METHODS scoped_session
# code within this block is **programmatically,
# statically generated** by tools/generate_proxy_methods.py
def __contains__(self, instance: object) -> bool:
r"""Return True if the instance is associated with this session.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
The instance may be pending or persistent within the Session for a
result of True.
""" # noqa: E501
return self._proxied.__contains__(instance)
def __iter__(self) -> Iterator[object]:
r"""Iterate over all pending or persistent instances within this
Session.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
""" # noqa: E501
return self._proxied.__iter__()
def add(self, instance: object, *, _warn: bool = True) -> None:
r"""Place an object into this :class:`_orm.Session`.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
Objects that are in the :term:`transient` state when passed to the
:meth:`_orm.Session.add` method will move to the
:term:`pending` state, until the next flush, at which point they
will move to the :term:`persistent` state.
Objects that are in the :term:`detached` state when passed to the
:meth:`_orm.Session.add` method will move to the :term:`persistent`
state directly.
If the transaction used by the :class:`_orm.Session` is rolled back,
objects which were transient when they were passed to
:meth:`_orm.Session.add` will be moved back to the
:term:`transient` state, and will no longer be present within this
:class:`_orm.Session`.
.. seealso::
:meth:`_orm.Session.add_all`
:ref:`session_adding` - at :ref:`session_basics`
""" # noqa: E501
return self._proxied.add(instance, _warn=_warn)
def add_all(self, instances: Iterable[object]) -> None:
r"""Add the given collection of instances to this :class:`_orm.Session`.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
See the documentation for :meth:`_orm.Session.add` for a general
behavioral description.
.. seealso::
:meth:`_orm.Session.add`
:ref:`session_adding` - at :ref:`session_basics`
""" # noqa: E501
return self._proxied.add_all(instances)
def begin(self, nested: bool = False) -> SessionTransaction:
r"""Begin a transaction, or nested transaction,
on this :class:`.Session`, if one is not already begun.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
The :class:`_orm.Session` object features **autobegin** behavior,
so that normally it is not necessary to call the
:meth:`_orm.Session.begin`
method explicitly. However, it may be used in order to control
the scope of when the transactional state is begun.
When used to begin the outermost transaction, an error is raised
if this :class:`.Session` is already inside of a transaction.
:param nested: if True, begins a SAVEPOINT transaction and is
equivalent to calling :meth:`~.Session.begin_nested`. For
documentation on SAVEPOINT transactions, please see
:ref:`session_begin_nested`.
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction`
acts as a Python context manager, allowing :meth:`.Session.begin`
to be used in a "with" block. See :ref:`session_explicit_begin` for
an example.
.. seealso::
:ref:`session_autobegin`
:ref:`unitofwork_transaction`
:meth:`.Session.begin_nested`
""" # noqa: E501
return self._proxied.begin(nested=nested)
def begin_nested(self) -> SessionTransaction:
r"""Begin a "nested" transaction on this Session, e.g. SAVEPOINT.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
The target database(s) and associated drivers must support SQL
SAVEPOINT for this method to function correctly.
For documentation on SAVEPOINT
transactions, please see :ref:`session_begin_nested`.
:return: the :class:`.SessionTransaction` object. Note that
:class:`.SessionTransaction` acts as a context manager, allowing
:meth:`.Session.begin_nested` to be used in a "with" block.
See :ref:`session_begin_nested` for a usage example.
.. seealso::
:ref:`session_begin_nested`
:ref:`pysqlite_serializable` - special workarounds required
with the SQLite driver in order for SAVEPOINT to work
correctly. For asyncio use cases, see the section
:ref:`aiosqlite_serializable`.
""" # noqa: E501
return self._proxied.begin_nested()
def close(self) -> None:
r"""Close out the transactional resources and ORM objects used by this
:class:`_orm.Session`.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
This expunges all ORM objects associated with this
:class:`_orm.Session`, ends any transaction in progress and
:term:`releases` any :class:`_engine.Connection` objects which this
:class:`_orm.Session` itself has checked out from associated
:class:`_engine.Engine` objects. The operation then leaves the
:class:`_orm.Session` in a state which it may be used again.
.. tip::
In the default running mode the :meth:`_orm.Session.close`
method **does not prevent the Session from being used again**.
The :class:`_orm.Session` itself does not actually have a
distinct "closed" state; it merely means
the :class:`_orm.Session` will release all database connections
and ORM objects.
Setting the parameter :paramref:`_orm.Session.close_resets_only`
to ``False`` will instead make the ``close`` final, meaning that
any further action on the session will be forbidden.
.. versionchanged:: 1.4 The :meth:`.Session.close` method does not
immediately create a new :class:`.SessionTransaction` object;
instead, the new :class:`.SessionTransaction` is created only if
the :class:`.Session` is used again for a database operation.
.. seealso::
:ref:`session_closing` - detail on the semantics of
:meth:`_orm.Session.close` and :meth:`_orm.Session.reset`.
:meth:`_orm.Session.reset` - a similar method that behaves like
``close()`` with the parameter
:paramref:`_orm.Session.close_resets_only` set to ``True``.
""" # noqa: E501
return self._proxied.close()
def reset(self) -> None:
r"""Close out the transactional resources and ORM objects used by this
:class:`_orm.Session`, resetting the session to its initial state.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
This method provides for same "reset-only" behavior that the
:meth:`_orm.Session.close` method has provided historically, where the
state of the :class:`_orm.Session` is reset as though the object were
brand new, and ready to be used again.
This method may then be useful for :class:`_orm.Session` objects
which set :paramref:`_orm.Session.close_resets_only` to ``False``,
so that "reset only" behavior is still available.
.. versionadded:: 2.0.22
.. seealso::
:ref:`session_closing` - detail on the semantics of
:meth:`_orm.Session.close` and :meth:`_orm.Session.reset`.
:meth:`_orm.Session.close` - a similar method will additionally
prevent re-use of the Session when the parameter
:paramref:`_orm.Session.close_resets_only` is set to ``False``.
""" # noqa: E501
return self._proxied.reset()
def commit(self) -> None:
r"""Flush pending changes and commit the current transaction.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
When the COMMIT operation is complete, all objects are fully
:term:`expired`, erasing their internal contents, which will be
automatically re-loaded when the objects are next accessed. In the
interim, these objects are in an expired state and will not function if
they are :term:`detached` from the :class:`.Session`. Additionally,
this re-load operation is not supported when using asyncio-oriented
APIs. The :paramref:`.Session.expire_on_commit` parameter may be used
to disable this behavior.
When there is no transaction in place for the :class:`.Session`,
indicating that no operations were invoked on this :class:`.Session`
since the previous call to :meth:`.Session.commit`, the method will
begin and commit an internal-only "logical" transaction, that does not
normally affect the database unless pending flush changes were
detected, but will still invoke event handlers and object expiration
rules.
The outermost database transaction is committed unconditionally,
automatically releasing any SAVEPOINTs in effect.
.. seealso::
:ref:`session_committing`
:ref:`unitofwork_transaction`
:ref:`asyncio_orm_avoid_lazyloads`
""" # noqa: E501
return self._proxied.commit()
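# A minimal usage sketch; ``session`` and ``user`` are hypothetical names.  With
# the default ``expire_on_commit=True``, attribute access after the commit
# re-loads the row:
#
#     session.add(user)
#     session.commit()       # all loaded state on ``user`` is now expired
#     print(user.name)       # a SELECT is emitted to refresh the object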
def connection(
self,
bind_arguments: Optional[_BindArguments] = None,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> Connection:
r"""Return a :class:`_engine.Connection` object corresponding to this
:class:`.Session` object's transactional state.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
Either the :class:`_engine.Connection` corresponding to the current
transaction is returned, or if no transaction is in progress, a new
one is begun and the :class:`_engine.Connection`
returned (note that no
transactional state is established with the DBAPI until the first
SQL statement is emitted).
Ambiguity in multi-bind or unbound :class:`.Session` objects can be
resolved through any of the optional keyword arguments. This
ultimately makes usage of the :meth:`.get_bind` method for resolution.
:param bind_arguments: dictionary of bind arguments. May include
"mapper", "bind", "clause", other custom arguments that are passed
to :meth:`.Session.get_bind`.
:param execution_options: a dictionary of execution options that will
be passed to :meth:`_engine.Connection.execution_options`, **when the
connection is first procured only**. If the connection is already
present within the :class:`.Session`, a warning is emitted and
the arguments are ignored.
.. seealso::
:ref:`session_transaction_isolation`
""" # noqa: E501
return self._proxied.connection(
bind_arguments=bind_arguments, execution_options=execution_options
)
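# A minimal usage sketch; ``session`` is a hypothetical Session instance.  The
# execution options apply only when the Connection is first procured, as noted
# above, so this is done before any other SQL is emitted:
#
#     conn = session.connection(
#         execution_options={"isolation_level": "SERIALIZABLE"}
#     )
#     # the same Connection now serves the rest of this transaction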
def delete(self, instance: object) -> None:
r"""Mark an instance as deleted.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
The object is assumed to be either :term:`persistent` or
:term:`detached` when passed; after the method is called, the
object will remain in the :term:`persistent` state until the next
flush proceeds. During this time, the object will also be a member
of the :attr:`_orm.Session.deleted` collection.
When the next flush proceeds, the object will move to the
:term:`deleted` state, indicating a ``DELETE`` statement was emitted
for its row within the current transaction. When the transaction
is successfully committed,
the deleted object is moved to the :term:`detached` state and is
no longer present within this :class:`_orm.Session`.
.. seealso::
:ref:`session_deleting` - at :ref:`session_basics`
:meth:`.Session.delete_all` - multiple instance version
""" # noqa: E501
return self._proxied.delete(instance)
def delete_all(self, instances: Iterable[object]) -> None:
r"""Calls :meth:`.Session.delete` on multiple instances.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
.. seealso::
:meth:`.Session.delete` - main documentation on delete
.. versionadded:: 2.1
""" # noqa: E501
return self._proxied.delete_all(instances)
@overload
def execute(
self,
statement: TypedReturnsRows[Unpack[_Ts]],
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> Result[Unpack[_Ts]]: ...
@overload
def execute(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> Result[Unpack[TupleAny]]: ...
def execute(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
_parent_execute_state: Optional[Any] = None,
_add_event: Optional[Any] = None,
) -> Result[Unpack[TupleAny]]:
r"""Execute a SQL expression construct.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
Returns a :class:`_engine.Result` object representing
results of the statement execution.
E.g.::
from sqlalchemy import select
result = session.execute(select(User).where(User.id == 5))
The API contract of :meth:`_orm.Session.execute` is similar to that
of :meth:`_engine.Connection.execute`, the :term:`2.0 style` version
of :class:`_engine.Connection`.
.. versionchanged:: 1.4 the :meth:`_orm.Session.execute` method is
now the primary point of ORM statement execution when using
:term:`2.0 style` ORM usage.
:param statement:
An executable statement (i.e. an :class:`.Executable` expression
such as :func:`_expression.select`).
:param params:
Optional dictionary, or list of dictionaries, containing
bound parameter values. If a single dictionary, single-row
execution occurs; if a list of dictionaries, an
"executemany" will be invoked. The keys in each dictionary
must correspond to parameter names present in the statement.
:param execution_options: optional dictionary of execution options,
which will be associated with the statement execution. This
dictionary can provide a subset of the options that are accepted
by :meth:`_engine.Connection.execution_options`, and may also
provide additional options understood only in an ORM context.
The execution_options are passed along to methods like
:meth:`.Connection.execute` on :class:`.Connection` giving the
highest priority to execution_options that are passed to this
method explicitly, then the options that are present on the
statement object if any, and finally those options present
session-wide.
.. seealso::
:ref:`orm_queryguide_execution_options` - ORM-specific execution
options
:param bind_arguments: dictionary of additional arguments to determine
the bind. May include "mapper", "bind", or other custom arguments.
Contents of this dictionary are passed to the
:meth:`.Session.get_bind` method.
:return: a :class:`_engine.Result` object.
""" # noqa: E501
return self._proxied.execute(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
_parent_execute_state=_parent_execute_state,
_add_event=_add_event,
)
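# A minimal usage sketch; ``session`` and the mapped class ``User`` are
# hypothetical names.  The returned Result yields Row objects:
#
#     from sqlalchemy import select
#
#     result = session.execute(select(User).where(User.name == "sandy"))
#     for (user,) in result:          # each Row holds a single User entity
#         print(user.name)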
def expire(
self, instance: object, attribute_names: Optional[Iterable[str]] = None
) -> None:
r"""Expire the attributes on an instance.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
Marks the attributes of an instance as out of date. When an expired
attribute is next accessed, a query will be issued to the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire all objects in the :class:`.Session` simultaneously,
use :meth:`Session.expire_all`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire` only makes sense for the specific
case that a non-ORM SQL statement was emitted in the current
transaction.
:param instance: The instance to be refreshed.
:param attribute_names: optional list of string attribute names
indicating a subset of attributes to be expired.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
:meth:`_orm.Query.populate_existing`
""" # noqa: E501
return self._proxied.expire(instance, attribute_names=attribute_names)
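# A minimal usage sketch; ``session``, ``user`` and the ``user_account`` table
# are hypothetical names.  After a non-ORM UPDATE, expiring the attribute makes
# the next access re-load it within the current transaction:
#
#     from sqlalchemy import text
#
#     session.execute(text("UPDATE user_account SET name='patrick' WHERE id=5"))
#     session.expire(user, ["name"])
#     print(user.name)                # SELECT issued; in-transaction value seen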
def expire_all(self) -> None:
r"""Expires all persistent instances within this Session.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
When any attributes on a persistent instance are next accessed,
a query will be issued using the
:class:`.Session` object's current transactional context in order to
load all expired attributes for the given instance. Note that
a highly isolated transaction will return the same values as were
previously read in that same transaction, regardless of changes
in database state outside of that transaction.
To expire individual objects and individual attributes
on those objects, use :meth:`Session.expire`.
The :class:`.Session` object's default behavior is to
expire all state whenever the :meth:`Session.rollback`
or :meth:`Session.commit` methods are called, so that new
state can be loaded for the new transaction. For this reason,
calling :meth:`Session.expire_all` is not usually needed,
assuming the transaction is isolated.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.refresh`
:meth:`_orm.Query.populate_existing`
""" # noqa: E501
return self._proxied.expire_all()
def expunge(self, instance: object) -> None:
r"""Remove the `instance` from this ``Session``.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
This will free all internal references to the instance. Cascading
will be applied according to the *expunge* cascade rule.
""" # noqa: E501
return self._proxied.expunge(instance)
def expunge_all(self) -> None:
r"""Remove all object instances from this ``Session``.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
This is equivalent to calling ``expunge(obj)`` on all objects in this
``Session``.
""" # noqa: E501
return self._proxied.expunge_all()
def flush(self, objects: Optional[Sequence[Any]] = None) -> None:
r"""Flush all the object changes to the database.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
Writes out all pending object creations, deletions and modifications
to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are
automatically ordered by the Session's unit of work dependency
solver.
Database operations will be issued in the current transactional
context and do not affect the state of the transaction, unless an
error occurs, in which case the entire transaction is rolled back.
You may flush() as often as you like within a transaction to move
changes from Python to the database's transaction buffer.
:param objects: Optional; restricts the flush operation to operate
only on elements that are in the given collection.
This feature is for an extremely narrow set of use cases where
particular objects may need to be operated upon before the
full flush() occurs. It is not intended for general use.
.. deprecated:: 2.1
""" # noqa: E501
return self._proxied.flush(objects=objects)
def get(
self,
entity: _EntityBindKey[_O],
ident: _PKIdentityArgument,
*,
options: Optional[Sequence[ORMOption]] = None,
populate_existing: bool = False,
with_for_update: ForUpdateParameter = None,
identity_token: Optional[Any] = None,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
) -> Optional[_O]:
r"""Return an instance based on the given primary key identifier,
or ``None`` if not found.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
E.g.::
my_user = session.get(User, 5)
some_object = session.get(VersionedFoo, (5, 10))
some_object = session.get(VersionedFoo, {"id": 5, "version_id": 10})
.. versionadded:: 1.4 Added :meth:`_orm.Session.get`, which is moved
from the now legacy :meth:`_orm.Query.get` method.
:meth:`_orm.Session.get` is special in that it provides direct
access to the identity map of the :class:`.Session`.
If the given primary key identifier is present
in the local identity map, the object is returned
directly from this collection and no SQL is emitted,
unless the object has been marked fully expired.
If not present,
a SELECT is performed in order to locate the object.
:meth:`_orm.Session.get` also will perform a check if
the object is present in the identity map and
marked as expired - a SELECT
is emitted to refresh the object as well as to
ensure that the row is still present.
If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised.
:param entity: a mapped class or :class:`.Mapper` indicating the
type of entity to be loaded.
:param ident: A scalar, tuple, or dictionary representing the
primary key. For a composite (e.g. multiple column) primary key,
a tuple or dictionary should be passed.
For a single-column primary key, the scalar calling form is typically
the most expedient. If the primary key of a row is the value "5",
the call looks like::
my_object = session.get(SomeClass, 5)
The tuple form contains primary key values typically in
the order in which they correspond to the mapped
:class:`_schema.Table`
object's primary key columns, or if the
:paramref:`_orm.Mapper.primary_key` configuration parameter were
used, in
the order used for that parameter. For example, if the primary key
of a row is represented by the integer
digits "5, 10" the call would look like::
my_object = session.get(SomeClass, (5, 10))
The dictionary form should include as keys the mapped attribute names
corresponding to each element of the primary key. If the mapped class
has the attributes ``id``, ``version_id`` as the attributes which
store the object's primary key value, the call would look like::
my_object = session.get(SomeClass, {"id": 5, "version_id": 10})
:param options: optional sequence of loader options which will be
applied to the query, if one is emitted.
:param populate_existing: causes the method to unconditionally emit
a SQL query and refresh the object with the newly loaded data,
regardless of whether or not the object is already present.
:param with_for_update: optional boolean ``True`` indicating FOR UPDATE
should be used, or may be a dictionary containing flags to
indicate a more specific set of FOR UPDATE flags for the SELECT;
flags should match the parameters of
:meth:`_query.Query.with_for_update`.
Supersedes the :paramref:`.Session.refresh.lockmode` parameter.
:param execution_options: optional dictionary of execution options,
which will be associated with the query execution if one is emitted.
This dictionary can provide a subset of the options that are
accepted by :meth:`_engine.Connection.execution_options`, and may
also provide additional options understood only in an ORM context.
.. versionadded:: 1.4.29
.. seealso::
:ref:`orm_queryguide_execution_options` - ORM-specific execution
options
:param bind_arguments: dictionary of additional arguments to determine
the bind. May include "mapper", "bind", or other custom arguments.
Contents of this dictionary are passed to the
:meth:`.Session.get_bind` method.
.. versionadded:: 2.0.0rc1
:return: The object instance, or ``None``.
""" # noqa: E501
return self._proxied.get(
entity,
ident,
options=options,
populate_existing=populate_existing,
with_for_update=with_for_update,
identity_token=identity_token,
execution_options=execution_options,
bind_arguments=bind_arguments,
)
def get_one(
self,
entity: _EntityBindKey[_O],
ident: _PKIdentityArgument,
*,
options: Optional[Sequence[ORMOption]] = None,
populate_existing: bool = False,
with_for_update: ForUpdateParameter = None,
identity_token: Optional[Any] = None,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
) -> _O:
r"""Return exactly one instance based on the given primary key
identifier, or raise an exception if not found.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
Raises :class:`_exc.NoResultFound` if the query selects no rows.
For a detailed documentation of the arguments see the
method :meth:`.Session.get`.
.. versionadded:: 2.0.22
:return: The object instance.
.. seealso::
:meth:`.Session.get` - equivalent method that instead
returns ``None`` if no row was found with the provided primary
key
""" # noqa: E501
return self._proxied.get_one(
entity,
ident,
options=options,
populate_existing=populate_existing,
with_for_update=with_for_update,
identity_token=identity_token,
execution_options=execution_options,
bind_arguments=bind_arguments,
)
def get_bind(
self,
mapper: Optional[_EntityBindKey[_O]] = None,
*,
clause: Optional[ClauseElement] = None,
bind: Optional[_SessionBind] = None,
_sa_skip_events: Optional[bool] = None,
_sa_skip_for_implicit_returning: bool = False,
**kw: Any,
) -> Union[Engine, Connection]:
r"""Return a "bind" to which this :class:`.Session` is bound.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
The "bind" is usually an instance of :class:`_engine.Engine`,
except in the case where the :class:`.Session` has been
explicitly bound directly to a :class:`_engine.Connection`.
For a multiply-bound or unbound :class:`.Session`, the
``mapper`` or ``clause`` arguments are used to determine the
appropriate bind to return.
Note that the "mapper" argument is usually present
when :meth:`.Session.get_bind` is called via an ORM
operation such as a :meth:`.Session.query`, each
individual INSERT/UPDATE/DELETE operation within a
:meth:`.Session.flush` call, etc.
The order of resolution is:
1. if mapper given and :paramref:`.Session.binds` is present,
locate a bind based first on the mapper in use, then
on the mapped class in use, then on any base classes that are
present in the ``__mro__`` of the mapped class, from more specific
superclasses to more general.
2. if clause given and ``Session.binds`` is present,
locate a bind based on :class:`_schema.Table` objects
found in the given clause present in ``Session.binds``.
3. if ``Session.binds`` is present, return that.
4. if clause given, attempt to return a bind
linked to the :class:`_schema.MetaData` ultimately
associated with the clause.
5. if mapper given, attempt to return a bind
linked to the :class:`_schema.MetaData` ultimately
associated with the :class:`_schema.Table` or other
selectable to which the mapper is mapped.
6. No bind can be found, :exc:`~sqlalchemy.exc.UnboundExecutionError`
is raised.
Note that the :meth:`.Session.get_bind` method can be overridden on
a user-defined subclass of :class:`.Session` to provide any kind
of bind resolution scheme. See the example at
:ref:`session_custom_partitioning`.
:param mapper:
Optional mapped class or corresponding :class:`_orm.Mapper` instance.
The bind can be derived from a :class:`_orm.Mapper` first by
consulting the "binds" map associated with this :class:`.Session`,
and secondly by consulting the :class:`_schema.MetaData` associated
with the :class:`_schema.Table` to which the :class:`_orm.Mapper` is
mapped for a bind.
:param clause:
A :class:`_expression.ClauseElement` (i.e.
:func:`_expression.select`,
:func:`_expression.text`,
etc.). If the ``mapper`` argument is not present or could not
produce a bind, the given expression construct will be searched
for a bound element, typically a :class:`_schema.Table`
associated with
bound :class:`_schema.MetaData`.
.. seealso::
:ref:`session_partitioning`
:paramref:`.Session.binds`
:meth:`.Session.bind_mapper`
:meth:`.Session.bind_table`
""" # noqa: E501
return self._proxied.get_bind(
mapper=mapper,
clause=clause,
bind=bind,
_sa_skip_events=_sa_skip_events,
_sa_skip_for_implicit_returning=_sa_skip_for_implicit_returning,
**kw,
)
def is_modified(
self, instance: object, include_collections: bool = True
) -> bool:
r"""Return ``True`` if the given instance has locally
modified attributes.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
This method retrieves the history for each instrumented
attribute on the instance and performs a comparison of the current
value to its previously flushed or committed value, if any.
It is in effect a more expensive and accurate
version of checking for the given instance in the
:attr:`.Session.dirty` collection; a full test for
each attribute's net "dirty" status is performed.
E.g.::
return session.is_modified(someobject)
A few caveats to this method apply:
* Instances present in the :attr:`.Session.dirty` collection may
report ``False`` when tested with this method. This is because
the object may have received change events via attribute mutation,
thus placing it in :attr:`.Session.dirty`, but ultimately the state
is the same as that loaded from the database, resulting in no net
change here.
* Scalar attributes may not have recorded the previously set
value when a new value was applied, if the attribute was not loaded,
or was expired, at the time the new value was received - in these
cases, the attribute is assumed to have a change, even if there is
ultimately no net change against its database value. SQLAlchemy in
most cases does not need the "old" value when a set event occurs, so
it skips the expense of a SQL call if the old value isn't present,
based on the assumption that an UPDATE of the scalar value is
usually needed, and in those few cases where it isn't, is less
expensive on average than issuing a defensive SELECT.
The "old" value is fetched unconditionally upon set only if the
attribute container has the ``active_history`` flag set to ``True``.
This flag is set typically for primary key attributes and scalar
object references that are not a simple many-to-one. To set this
flag for any arbitrary mapped column, use the ``active_history``
argument with :func:`.column_property`.
:param instance: mapped instance to be tested for pending changes.
:param include_collections: Indicates if multivalued collections
should be included in the operation. Setting this to ``False`` is a
way to detect only local-column based properties (i.e. scalar columns
or many-to-one foreign keys) that would result in an UPDATE for this
instance upon flush.
""" # noqa: E501
return self._proxied.is_modified(
instance, include_collections=include_collections
)
def bulk_save_objects(
self,
objects: Iterable[object],
return_defaults: bool = False,
update_changed_only: bool = True,
preserve_order: bool = True,
) -> None:
r"""Perform a bulk save of the given list of objects.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
.. legacy::
This method is a legacy feature as of the 2.0 series of
SQLAlchemy. For modern bulk INSERT and UPDATE, see
the sections :ref:`orm_queryguide_bulk_insert` and
:ref:`orm_queryguide_bulk_update`.
For general INSERT and UPDATE of existing ORM mapped objects,
prefer standard :term:`unit of work` data management patterns,
introduced in the :ref:`unified_tutorial` at
:ref:`tutorial_orm_data_manipulation`. SQLAlchemy 2.0
now uses :ref:`engine_insertmanyvalues` with modern dialects
which solves previous issues of bulk INSERT slowness.
:param objects: a sequence of mapped object instances. The mapped
objects are persisted as is, and are **not** associated with the
:class:`.Session` afterwards.
For each object, whether the object is sent as an INSERT or an
UPDATE is dependent on the same rules used by the :class:`.Session`
in traditional operation; if the object has the
:attr:`.InstanceState.key`
attribute set, then the object is assumed to be "detached" and
will result in an UPDATE. Otherwise, an INSERT is used.
In the case of an UPDATE, statements are grouped based on which
attributes have changed, and are thus to be the subject of each
SET clause. If ``update_changed_only`` is False, then all
attributes present within each object are applied to the UPDATE
statement, which may help in allowing the statements to be grouped
together into a larger executemany(), and will also reduce the
overhead of checking history on attributes.
:param return_defaults: when True, rows that are missing values which
generate defaults, namely integer primary key defaults and sequences,
will be inserted **one at a time**, so that the primary key value
is available. In particular this will allow joined-inheritance
and other multi-table mappings to insert correctly without the need
to provide primary key values ahead of time; however,
:paramref:`.Session.bulk_save_objects.return_defaults` **greatly
reduces the performance gains** of the method overall. It is strongly
advised to please use the standard :meth:`_orm.Session.add_all`
approach.
:param update_changed_only: when True, UPDATE statements are rendered
based on those attributes in each state that have logged changes.
When False, all attributes present are rendered into the SET clause
with the exception of primary key attributes.
:param preserve_order: when True, the order of inserts and updates
matches exactly the order in which the objects are given. When
False, common types of objects are grouped into inserts
and updates, to allow for more batching opportunities.
.. seealso::
:doc:`queryguide/dml`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_update_mappings`
""" # noqa: E501
return self._proxied.bulk_save_objects(
objects,
return_defaults=return_defaults,
update_changed_only=update_changed_only,
preserve_order=preserve_order,
)
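# A minimal sketch of the modern 2.0-style bulk INSERT referenced above;
# ``session`` and the mapped class ``User`` are hypothetical names:
#
#     from sqlalchemy import insert
#
#     session.execute(
#         insert(User),
#         [{"name": "spongebob"}, {"name": "sandy"}, {"name": "patrick"}],
#     )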
def bulk_insert_mappings(
self,
mapper: Mapper[Any],
mappings: Iterable[Dict[str, Any]],
return_defaults: bool = False,
render_nulls: bool = False,
) -> None:
r"""Perform a bulk insert of the given list of mapping dictionaries.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
.. legacy::
This method is a legacy feature as of the 2.0 series of
SQLAlchemy. For modern bulk INSERT and UPDATE, see
the sections :ref:`orm_queryguide_bulk_insert` and
:ref:`orm_queryguide_bulk_update`. The 2.0 API shares
implementation details with this method and adds new features
as well.
:param mapper: a mapped class, or the actual :class:`_orm.Mapper`
object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be inserted, in terms of the attribute
names on the mapped class. If the mapping refers to multiple tables,
such as a joined-inheritance mapping, each dictionary must contain all
keys to be populated into all tables.
:param return_defaults: when True, the INSERT process will be altered
to ensure that newly generated primary key values will be fetched.
The rationale for this parameter is typically to enable
:ref:`Joined Table Inheritance <joined_inheritance>` mappings to
be bulk inserted.
.. note:: for backends that don't support RETURNING, the
:paramref:`_orm.Session.bulk_insert_mappings.return_defaults`
parameter can significantly decrease performance as INSERT
statements can no longer be batched. See
:ref:`engine_insertmanyvalues`
for background on which backends are affected.
:param render_nulls: When True, a value of ``None`` will result
in a NULL value being included in the INSERT statement, rather
than the column being omitted from the INSERT. This allows all
the rows being INSERTed to have the identical set of columns which
allows the full set of rows to be batched to the DBAPI. Normally,
each column-set that contains a different combination of NULL values
than the previous row must omit a different series of columns from
the rendered INSERT statement, which means it must be emitted as a
separate statement. By passing this flag, the full set of rows
are guaranteed to be batchable into one batch; the cost however is
that server-side defaults which are invoked by an omitted column will
be skipped, so care must be taken to ensure that these are not
necessary.
.. warning::
When this flag is set, **server side default SQL values will
not be invoked** for those columns that are inserted as NULL;
the NULL value will be sent explicitly. Care must be taken
to ensure that no server-side default functions need to be
invoked for the operation as a whole.
.. seealso::
:doc:`queryguide/dml`
:meth:`.Session.bulk_save_objects`
:meth:`.Session.bulk_update_mappings`
""" # noqa: E501
return self._proxied.bulk_insert_mappings(
mapper,
mappings,
return_defaults=return_defaults,
render_nulls=render_nulls,
)
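# A minimal usage sketch; ``session`` and the ``User`` mapped class are
# hypothetical names.  Each dictionary supplies the column values for one row;
# no ORM instances are created or returned:
#
#     session.bulk_insert_mappings(
#         User,
#         [{"name": "a"}, {"name": "b"}, {"name": "c"}],
#     )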
def bulk_update_mappings(
self, mapper: Mapper[Any], mappings: Iterable[Dict[str, Any]]
) -> None:
r"""Perform a bulk update of the given list of mapping dictionaries.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
.. legacy::
This method is a legacy feature as of the 2.0 series of
SQLAlchemy. For modern bulk INSERT and UPDATE, see
the sections :ref:`orm_queryguide_bulk_insert` and
:ref:`orm_queryguide_bulk_update`. The 2.0 API shares
implementation details with this method and adds new features
as well.
:param mapper: a mapped class, or the actual :class:`_orm.Mapper`
object,
representing the single kind of object represented within the mapping
list.
:param mappings: a sequence of dictionaries, each one containing the
state of the mapped row to be updated, in terms of the attribute names
on the mapped class. If the mapping refers to multiple tables, such
as a joined-inheritance mapping, each dictionary may contain keys
corresponding to all tables. All those keys which are present and
are not part of the primary key are applied to the SET clause of the
UPDATE statement; the primary key values, which are required, are
applied to the WHERE clause.
.. seealso::
:doc:`queryguide/dml`
:meth:`.Session.bulk_insert_mappings`
:meth:`.Session.bulk_save_objects`
""" # noqa: E501
return self._proxied.bulk_update_mappings(mapper, mappings)
def merge(
self,
instance: _O,
*,
load: bool = True,
options: Optional[Sequence[ORMOption]] = None,
) -> _O:
r"""Copy the state of a given instance into a corresponding instance
within this :class:`.Session`.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
:meth:`.Session.merge` examines the primary key attributes of the
source instance, and attempts to reconcile it with an instance of the
same primary key in the session. If not found locally, it attempts
to load the object from the database based on primary key, and if
none can be located, creates a new instance. The state of each
attribute on the source instance is then copied to the target
instance. The resulting target instance is then returned by the
method; the original source instance is left unmodified, and
un-associated with the :class:`.Session` if not already.
This operation cascades to associated instances if the association is
mapped with ``cascade="merge"``.
See :ref:`unitofwork_merging` for a detailed discussion of merging.
:param instance: Instance to be merged.
:param load: Boolean, when False, :meth:`.merge` switches into
a "high performance" mode which causes it to forego emitting history
events as well as all database access. This flag is used for
cases such as transferring graphs of objects into a :class:`.Session`
from a second level cache, or to transfer just-loaded objects
into the :class:`.Session` owned by a worker thread or process
without re-querying the database.
The ``load=False`` use case adds the caveat that the given
object has to be in a "clean" state, that is, has no pending changes
to be flushed - even if the incoming object is detached from any
:class:`.Session`. This is so that when
the merge operation populates local attributes and
cascades to related objects and
collections, the values can be "stamped" onto the
target object as is, without generating any history or attribute
events, and without the need to reconcile the incoming data with
any existing related objects or collections that might not
be loaded. The resulting objects from ``load=False`` are always
produced as "clean", so it is only appropriate that the given objects
should be "clean" as well, else this suggests a mis-use of the
method.
:param options: optional sequence of loader options which will be
applied to the :meth:`_orm.Session.get` method when the merge
operation loads the existing version of the object from the database.
.. versionadded:: 1.4.24
.. seealso::
:func:`.make_transient_to_detached` - provides for an alternative
means of "merging" a single object into the :class:`.Session`
:meth:`.Session.merge_all` - multiple instance version
""" # noqa: E501
return self._proxied.merge(instance, load=load, options=options)
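# A minimal usage sketch; ``session`` and ``User`` are hypothetical names.  The
# source object carries a primary key; merge() reconciles it with the matching
# row and returns the Session-local copy, leaving the source unmodified:
#
#     source = User(id=5, name="offline edit")
#     merged = session.merge(source)
#     assert merged in session
#     assert merged is not source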
def merge_all(
self,
instances: Iterable[_O],
*,
load: bool = True,
options: Optional[Sequence[ORMOption]] = None,
) -> Sequence[_O]:
r"""Calls :meth:`.Session.merge` on multiple instances.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
.. seealso::
:meth:`.Session.merge` - main documentation on merge
.. versionadded:: 2.1
""" # noqa: E501
return self._proxied.merge_all(instances, load=load, options=options)
@overload
def query(self, _entity: _EntityType[_O]) -> Query[_O]: ...
@overload
def query(
self, _colexpr: TypedColumnsClauseRole[_T]
) -> RowReturningQuery[_T]: ...
# START OVERLOADED FUNCTIONS self.query RowReturningQuery 2-8
# code within this block is **programmatically,
# statically generated** by tools/generate_tuple_map_overloads.py
@overload
def query(
self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], /
) -> RowReturningQuery[_T0, _T1]: ...
@overload
def query(
self, __ent0: _TCCA[_T0], __ent1: _TCCA[_T1], __ent2: _TCCA[_T2], /
) -> RowReturningQuery[_T0, _T1, _T2]: ...
@overload
def query(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
/,
) -> RowReturningQuery[_T0, _T1, _T2, _T3]: ...
@overload
def query(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
__ent4: _TCCA[_T4],
/,
) -> RowReturningQuery[_T0, _T1, _T2, _T3, _T4]: ...
@overload
def query(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
__ent4: _TCCA[_T4],
__ent5: _TCCA[_T5],
/,
) -> RowReturningQuery[_T0, _T1, _T2, _T3, _T4, _T5]: ...
@overload
def query(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
__ent4: _TCCA[_T4],
__ent5: _TCCA[_T5],
__ent6: _TCCA[_T6],
/,
) -> RowReturningQuery[_T0, _T1, _T2, _T3, _T4, _T5, _T6]: ...
@overload
def query(
self,
__ent0: _TCCA[_T0],
__ent1: _TCCA[_T1],
__ent2: _TCCA[_T2],
__ent3: _TCCA[_T3],
__ent4: _TCCA[_T4],
__ent5: _TCCA[_T5],
__ent6: _TCCA[_T6],
__ent7: _TCCA[_T7],
/,
*entities: _ColumnsClauseArgument[Any],
) -> RowReturningQuery[
_T0, _T1, _T2, _T3, _T4, _T5, _T6, _T7, Unpack[TupleAny]
]: ...
# END OVERLOADED FUNCTIONS self.query
@overload
def query(
self, *entities: _ColumnsClauseArgument[Any], **kwargs: Any
) -> Query[Any]: ...
def query(
self, *entities: _ColumnsClauseArgument[Any], **kwargs: Any
) -> Query[Any]:
r"""Return a new :class:`_query.Query` object corresponding to this
:class:`_orm.Session`.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
Note that the :class:`_query.Query` object is legacy as of
SQLAlchemy 2.0; the :func:`_sql.select` construct is now used
to construct ORM queries.
.. seealso::
:ref:`unified_tutorial`
:ref:`queryguide_toplevel`
:ref:`query_api_toplevel` - legacy API doc
""" # noqa: E501
return self._proxied.query(*entities, **kwargs)
def refresh(
self,
instance: object,
attribute_names: Optional[Iterable[str]] = None,
with_for_update: ForUpdateParameter = None,
) -> None:
r"""Expire and refresh attributes on the given instance.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
The selected attributes will first be expired as they would when using
:meth:`_orm.Session.expire`; then a SELECT statement will be issued to
the database to refresh column-oriented attributes with the current
value available in the current transaction.
:func:`_orm.relationship` oriented attributes will also be immediately
loaded if they were already eagerly loaded on the object, using the
same eager loading strategy that they were loaded with originally.
.. versionadded:: 1.4 - the :meth:`_orm.Session.refresh` method
can also refresh eagerly loaded attributes.
:func:`_orm.relationship` oriented attributes that would normally
load using the ``select`` (or "lazy") loader strategy will also
load **if they are named explicitly in the attribute_names
collection**, emitting a SELECT statement for the attribute using the
``immediate`` loader strategy. If lazy-loaded relationships are not
named in :paramref:`_orm.Session.refresh.attribute_names`, then
they remain as "lazy loaded" attributes and are not implicitly
refreshed.
.. versionchanged:: 2.0.4 The :meth:`_orm.Session.refresh` method
will now refresh lazy-loaded :func:`_orm.relationship` oriented
attributes for those which are named explicitly in the
:paramref:`_orm.Session.refresh.attribute_names` collection.
.. tip::
While the :meth:`_orm.Session.refresh` method is capable of
refreshing both column and relationship oriented attributes, its
primary focus is on refreshing of local column-oriented attributes
on a single instance. For more open ended "refresh" functionality,
including the ability to refresh the attributes on many objects at
once while having explicit control over relationship loader
strategies, use the
:ref:`populate existing <orm_queryguide_populate_existing>` feature
instead.
Note that a highly isolated transaction will return the same values as
were previously read in that same transaction, regardless of changes
in database state outside of that transaction. Refreshing
attributes usually only makes sense at the start of a transaction
where database rows have not yet been accessed.
:param attribute_names: optional. An iterable collection of
string attribute names indicating a subset of attributes to
be refreshed.
:param with_for_update: optional boolean ``True`` indicating FOR UPDATE
should be used, or may be a dictionary containing flags to
indicate a more specific set of FOR UPDATE flags for the SELECT;
flags should match the parameters of
:meth:`_query.Query.with_for_update`.
Supersedes the :paramref:`.Session.refresh.lockmode` parameter.
.. seealso::
:ref:`session_expire` - introductory material
:meth:`.Session.expire`
:meth:`.Session.expire_all`
:ref:`orm_queryguide_populate_existing` - allows any ORM query
to refresh objects as they would be loaded normally.
""" # noqa: E501
return self._proxied.refresh(
instance,
attribute_names=attribute_names,
with_for_update=with_for_update,
)
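# A minimal usage sketch; ``session`` and ``user`` are hypothetical names.  A
# subset of attributes can be refreshed, optionally locking the row:
#
#     session.refresh(user, attribute_names=["name"], with_for_update=True)
#     # emits SELECT ... FOR UPDATE for the row, re-loading "name"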
def rollback(self) -> None:
r"""Rollback the current transaction in progress.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
If no transaction is in progress, this method is a pass-through.
The method always rolls back
the topmost database transaction, discarding any nested
transactions that may be in progress.
.. seealso::
:ref:`session_rollback`
:ref:`unitofwork_transaction`
""" # noqa: E501
return self._proxied.rollback()
@overload
def scalar(
self,
statement: TypedReturnsRows[_T],
params: Optional[_CoreSingleExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> Optional[_T]: ...
@overload
def scalar(
self,
statement: Executable,
params: Optional[_CoreSingleExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> Any: ...
def scalar(
self,
statement: Executable,
params: Optional[_CoreSingleExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> Any:
r"""Execute a statement and return a scalar result.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
Usage and parameters are the same as that of
:meth:`_orm.Session.execute`; the return result is a scalar Python
value.
""" # noqa: E501
return self._proxied.scalar(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
**kw,
)
@overload
def scalars(
self,
statement: TypedReturnsRows[_T],
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> ScalarResult[_T]: ...
@overload
def scalars(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> ScalarResult[Any]: ...
def scalars(
self,
statement: Executable,
params: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: OrmExecuteOptionsParameter = util.EMPTY_DICT,
bind_arguments: Optional[_BindArguments] = None,
**kw: Any,
) -> ScalarResult[Any]:
r"""Execute a statement and return the results as scalars.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
Usage and parameters are the same as that of
:meth:`_orm.Session.execute`; the return result is a
:class:`_result.ScalarResult` filtering object which
will return single elements rather than :class:`_row.Row` objects.
:return: a :class:`_result.ScalarResult` object
.. versionadded:: 1.4.24 Added :meth:`_orm.Session.scalars`
.. versionadded:: 1.4.26 Added :meth:`_orm.scoped_session.scalars`
.. seealso::
:ref:`orm_queryguide_select_orm_entities` - contrasts the behavior
of :meth:`_orm.Session.execute` to :meth:`_orm.Session.scalars`
""" # noqa: E501
return self._proxied.scalars(
statement,
params=params,
execution_options=execution_options,
bind_arguments=bind_arguments,
**kw,
)
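# A minimal usage sketch; ``session`` and the mapped class ``User`` are
# hypothetical names.  scalars() returns ORM instances directly rather than
# one-element Row objects:
#
#     from sqlalchemy import select
#
#     users = session.scalars(select(User).order_by(User.id)).all()
#     # ``users`` is a list of User instances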
@property
def bind(self) -> Optional[Union[Engine, Connection]]:
r"""Proxy for the :attr:`_orm.Session.bind` attribute
on behalf of the :class:`_orm.scoping.scoped_session` class.
""" # noqa: E501
return self._proxied.bind
@bind.setter
def bind(self, attr: Optional[Union[Engine, Connection]]) -> None:
self._proxied.bind = attr
@property
def dirty(self) -> Any:
r"""The set of all persistent instances considered dirty.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_orm.scoping.scoped_session` class.
E.g.::
some_mapped_object in session.dirty
Instances are considered dirty when they were modified but not
deleted.
Note that this 'dirty' calculation is 'optimistic'; most
attribute-setting or collection modification operations will
mark an instance as 'dirty' and place it in this set, even if
there is no net change to the attribute's value. At flush
time, the value of each attribute is compared to its
previously saved value, and if there's no net change, no SQL
operation will occur (this is a more expensive operation so
it's only done at flush time).
To check if an instance has actionable net changes to its
attributes, use the :meth:`.Session.is_modified` method.
""" # noqa: E501
return self._proxied.dirty
@property
def deleted(self) -> Any:
r"""The set of all instances marked as 'deleted' within this ``Session``
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_orm.scoping.scoped_session` class.
""" # noqa: E501
return self._proxied.deleted
@property
def new(self) -> Any:
r"""The set of all instances marked as 'new' within this ``Session``.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_orm.scoping.scoped_session` class.
""" # noqa: E501
return self._proxied.new
@property
def identity_map(self) -> IdentityMap:
r"""Proxy for the :attr:`_orm.Session.identity_map` attribute
on behalf of the :class:`_orm.scoping.scoped_session` class.
""" # noqa: E501
return self._proxied.identity_map
@identity_map.setter
def identity_map(self, attr: IdentityMap) -> None:
self._proxied.identity_map = attr
@property
def is_active(self) -> Any:
r"""True if this :class:`.Session` not in "partial rollback" state.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_orm.scoping.scoped_session` class.
.. versionchanged:: 1.4 The :class:`_orm.Session` no longer begins
a new transaction immediately, so this attribute will be False
when the :class:`_orm.Session` is first instantiated.
"partial rollback" state typically indicates that the flush process
of the :class:`_orm.Session` has failed, and that the
:meth:`_orm.Session.rollback` method must be emitted in order to
fully roll back the transaction.
If this :class:`_orm.Session` is not in a transaction at all, the
:class:`_orm.Session` will autobegin when it is first used, so in this
case :attr:`_orm.Session.is_active` will return True.
Otherwise, if this :class:`_orm.Session` is within a transaction,
and that transaction has not been rolled back internally, the
:attr:`_orm.Session.is_active` will also return True.
.. seealso::
:ref:`faq_session_rollback`
:meth:`_orm.Session.in_transaction`
""" # noqa: E501
return self._proxied.is_active
@property
def autoflush(self) -> bool:
r"""Proxy for the :attr:`_orm.Session.autoflush` attribute
on behalf of the :class:`_orm.scoping.scoped_session` class.
""" # noqa: E501
return self._proxied.autoflush
@autoflush.setter
def autoflush(self, attr: bool) -> None:
self._proxied.autoflush = attr
@property
def no_autoflush(self) -> Any:
r"""Return a context manager that disables autoflush.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_orm.scoping.scoped_session` class.
e.g.::
with session.no_autoflush:
some_object = SomeClass()
session.add(some_object)
# won't autoflush
some_object.related_thing = session.query(SomeRelated).first()
Operations that proceed within the ``with:`` block
will not be subject to flushes occurring upon query
access. This is useful when initializing a series
of objects which involve existing database queries,
where the uncompleted object should not yet be flushed.
""" # noqa: E501
return self._proxied.no_autoflush
@property
def info(self) -> Any:
r"""A user-modifiable dictionary.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class
on behalf of the :class:`_orm.scoping.scoped_session` class.
The initial value of this dictionary can be populated using the
``info`` argument to the :class:`.Session` constructor or
:class:`.sessionmaker` constructor or factory methods. The dictionary
here is always local to this :class:`.Session` and can be modified
independently of all other :class:`.Session` objects.
""" # noqa: E501
return self._proxied.info
@property
def execution_options(self) -> _ExecuteOptions:
r"""Proxy for the :attr:`_orm.Session.execution_options` attribute
on behalf of the :class:`_orm.scoping.scoped_session` class.
""" # noqa: E501
return self._proxied.execution_options
@execution_options.setter
def execution_options(self, attr: _ExecuteOptions) -> None:
self._proxied.execution_options = attr
@classmethod
def object_session(cls, instance: object) -> Optional[Session]:
r"""Return the :class:`.Session` to which an object belongs.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
This is an alias of :func:`.object_session`.
""" # noqa: E501
return Session.object_session(instance)
@classmethod
def identity_key(
cls,
class_: Optional[Type[Any]] = None,
ident: Union[Any, Tuple[Any, ...]] = None,
*,
instance: Optional[Any] = None,
row: Optional[Union[Row[Unpack[TupleAny]], RowMapping]] = None,
identity_token: Optional[Any] = None,
) -> _IdentityKeyType[Any]:
r"""Return an identity key.
.. container:: class_bases
Proxied for the :class:`_orm.Session` class on
behalf of the :class:`_orm.scoping.scoped_session` class.
This is an alias of :func:`.util.identity_key`.
""" # noqa: E501
return Session.identity_key(
class_=class_,
ident=ident,
instance=instance,
row=row,
identity_token=identity_token,
)
# END PROXY METHODS scoped_session
ScopedSession = scoped_session
"""Old name for backwards compatibility."""
| scoped_session |
python | kubernetes-client__python | kubernetes/client/models/v1_condition.py | {
"start": 383,
"end": 10070
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_transition_time': 'datetime',
'message': 'str',
'observed_generation': 'int',
'reason': 'str',
'status': 'str',
'type': 'str'
}
attribute_map = {
'last_transition_time': 'lastTransitionTime',
'message': 'message',
'observed_generation': 'observedGeneration',
'reason': 'reason',
'status': 'status',
'type': 'type'
}
def __init__(self, last_transition_time=None, message=None, observed_generation=None, reason=None, status=None, type=None, local_vars_configuration=None): # noqa: E501
"""V1Condition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_transition_time = None
self._message = None
self._observed_generation = None
self._reason = None
self._status = None
self._type = None
self.discriminator = None
self.last_transition_time = last_transition_time
self.message = message
if observed_generation is not None:
self.observed_generation = observed_generation
self.reason = reason
self.status = status
self.type = type
@property
def last_transition_time(self):
"""Gets the last_transition_time of this V1Condition. # noqa: E501
lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. # noqa: E501
:return: The last_transition_time of this V1Condition. # noqa: E501
:rtype: datetime
"""
return self._last_transition_time
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
"""Sets the last_transition_time of this V1Condition.
lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. # noqa: E501
:param last_transition_time: The last_transition_time of this V1Condition. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and last_transition_time is None: # noqa: E501
raise ValueError("Invalid value for `last_transition_time`, must not be `None`") # noqa: E501
self._last_transition_time = last_transition_time
@property
def message(self):
"""Gets the message of this V1Condition. # noqa: E501
message is a human readable message indicating details about the transition. This may be an empty string. # noqa: E501
:return: The message of this V1Condition. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1Condition.
message is a human readable message indicating details about the transition. This may be an empty string. # noqa: E501
:param message: The message of this V1Condition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and message is None: # noqa: E501
raise ValueError("Invalid value for `message`, must not be `None`") # noqa: E501
self._message = message
@property
def observed_generation(self):
"""Gets the observed_generation of this V1Condition. # noqa: E501
observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. # noqa: E501
:return: The observed_generation of this V1Condition. # noqa: E501
:rtype: int
"""
return self._observed_generation
@observed_generation.setter
def observed_generation(self, observed_generation):
"""Sets the observed_generation of this V1Condition.
observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance. # noqa: E501
:param observed_generation: The observed_generation of this V1Condition. # noqa: E501
:type: int
"""
self._observed_generation = observed_generation
@property
def reason(self):
"""Gets the reason of this V1Condition. # noqa: E501
reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. # noqa: E501
:return: The reason of this V1Condition. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1Condition.
reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty. # noqa: E501
:param reason: The reason of this V1Condition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and reason is None: # noqa: E501
raise ValueError("Invalid value for `reason`, must not be `None`") # noqa: E501
self._reason = reason
@property
def status(self):
"""Gets the status of this V1Condition. # noqa: E501
status of the condition, one of True, False, Unknown. # noqa: E501
:return: The status of this V1Condition. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1Condition.
status of the condition, one of True, False, Unknown. # noqa: E501
:param status: The status of this V1Condition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and status is None: # noqa: E501
raise ValueError("Invalid value for `status`, must not be `None`") # noqa: E501
self._status = status
@property
def type(self):
"""Gets the type of this V1Condition. # noqa: E501
type of condition in CamelCase or in foo.example.com/CamelCase. # noqa: E501
:return: The type of this V1Condition. # noqa: E501
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this V1Condition.
type of condition in CamelCase or in foo.example.com/CamelCase. # noqa: E501
:param type: The type of this V1Condition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and type is None: # noqa: E501
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1Condition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1Condition):
return True
return self.to_dict() != other.to_dict()
| V1Condition |
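The V1Condition source above follows the usual OpenAPI-generated client pattern: required fields are rejected when set to None and serialization walks `openapi_types` in `to_dict()`. A minimal self-contained sketch of that pattern, with illustrative names rather than the real kubernetes client classes:

import pprint

class ConditionSketch:
    # Stand-in for a generated model with one required field.
    openapi_types = {"status": "str"}

    def __init__(self, status=None):
        self._status = None
        self.status = status  # runs the validating setter below

    @property
    def status(self):
        return self._status

    @status.setter
    def status(self, status):
        # Mirrors client_side_validation for required fields.
        if status is None:
            raise ValueError("Invalid value for `status`, must not be `None`")
        self._status = status

    def to_dict(self):
        # Nested models would be recursed via their own to_dict().
        return {attr: getattr(self, attr) for attr in self.openapi_types}

    def __repr__(self):
        return pprint.pformat(self.to_dict())

print(ConditionSketch(status="True"))  # {'status': 'True'}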
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassTransform2.py | {
"start": 1081,
"end": 1167
} | class ____(Customer1, frozen=False):
salary: float = model_field()
| Customer1Subclass |
python | modin-project__modin | modin/core/execution/dask/implementations/pandas_on_dask/partitioning/partition.py | {
"start": 1182,
"end": 13091
} | class ____(PandasDataframePartition):
"""
The class implements the interface in ``PandasDataframePartition``.
Parameters
----------
data : distributed.Future
A reference to pandas DataFrame that need to be wrapped with this class.
length : distributed.Future or int, optional
Length or reference to it of wrapped pandas DataFrame.
width : distributed.Future or int, optional
Width or reference to it of wrapped pandas DataFrame.
ip : distributed.Future or str, optional
Node IP address or reference to it that holds wrapped pandas DataFrame.
call_queue : list, optional
Call queue that needs to be executed on wrapped pandas DataFrame.
"""
execution_wrapper = DaskWrapper
def __init__(self, data, length=None, width=None, ip=None, call_queue=None):
super().__init__()
assert isinstance(data, Future)
self._data = data
if call_queue is None:
call_queue = []
self.call_queue = call_queue
self._length_cache = length
self._width_cache = width
self._ip_cache = ip
log = get_logger()
self._is_debug(log) and log.debug(
"Partition ID: {}, Height: {}, Width: {}, Node IP: {}".format(
self._identity,
str(self._length_cache),
str(self._width_cache),
str(self._ip_cache),
)
)
def apply(self, func, *args, **kwargs):
"""
Apply a function to the object wrapped by this partition.
Parameters
----------
func : callable or distributed.Future
A function to apply.
*args : iterable
Additional positional arguments to be passed in `func`.
**kwargs : dict
Additional keyword arguments to be passed in `func`.
Returns
-------
PandasOnDaskDataframePartition
A new ``PandasOnDaskDataframePartition`` object.
Notes
-----
The keyword arguments are sent as a dictionary.
"""
log = get_logger()
self._is_debug(log) and log.debug(f"ENTER::Partition.apply::{self._identity}")
call_queue = self.call_queue + [[func, args, kwargs]]
if len(call_queue) > 1:
self._is_debug(log) and log.debug(
f"SUBMIT::_apply_list_of_funcs::{self._identity}"
)
futures = self.execution_wrapper.deploy(
func=apply_list_of_funcs,
f_args=(call_queue, self._data),
num_returns=2,
pure=False,
)
else:
# We handle `len(call_queue) == 1` in a different way because
# this improves performance a bit.
func, f_args, f_kwargs = call_queue[0]
futures = self.execution_wrapper.deploy(
func=apply_func,
f_args=(self._data, func, *f_args),
f_kwargs=f_kwargs,
num_returns=2,
pure=False,
)
self._is_debug(log) and log.debug(f"SUBMIT::_apply_func::{self._identity}")
self._is_debug(log) and log.debug(f"EXIT::Partition.apply::{self._identity}")
return self.__constructor__(futures[0], ip=futures[1])
def drain_call_queue(self):
"""Execute all operations stored in the call queue on the object wrapped by this partition."""
log = get_logger()
self._is_debug(log) and log.debug(
f"ENTER::Partition.drain_call_queue::{self._identity}"
)
if len(self.call_queue) == 0:
return
call_queue = self.call_queue
if len(call_queue) > 1:
self._is_debug(log) and log.debug(
f"SUBMIT::_apply_list_of_funcs::{self._identity}"
)
futures = self.execution_wrapper.deploy(
func=apply_list_of_funcs,
f_args=(call_queue, self._data),
num_returns=2,
pure=False,
)
else:
# We handle `len(call_queue) == 1` in a different way because
# this improves performance a bit.
func, f_args, f_kwargs = call_queue[0]
self._is_debug(log) and log.debug(f"SUBMIT::_apply_func::{self._identity}")
futures = self.execution_wrapper.deploy(
func=apply_func,
f_args=(self._data, func, *f_args),
f_kwargs=f_kwargs,
num_returns=2,
pure=False,
)
self._data = futures[0]
self._ip_cache = futures[1]
self._is_debug(log) and log.debug(
f"EXIT::Partition.drain_call_queue::{self._identity}"
)
self.call_queue = []
def wait(self):
"""Wait completing computations on the object wrapped by the partition."""
self.drain_call_queue()
self.execution_wrapper.wait(self._data)
def mask(self, row_labels, col_labels):
"""
Lazily create a mask that extracts the indices provided.
Parameters
----------
row_labels : list-like, slice or label
The row labels for the rows to extract.
col_labels : list-like, slice or label
The column labels for the columns to extract.
Returns
-------
PandasOnDaskDataframePartition
A new ``PandasOnDaskDataframePartition`` object.
"""
log = get_logger()
self._is_debug(log) and log.debug(f"ENTER::Partition.mask::{self._identity}")
new_obj = super().mask(row_labels, col_labels)
if isinstance(row_labels, slice) and isinstance(self._length_cache, Future):
if row_labels == slice(None):
# fast path - full axis take
new_obj._length_cache = self._length_cache
else:
new_obj._length_cache = self.execution_wrapper.deploy(
func=compute_sliced_len, f_args=(row_labels, self._length_cache)
)
if isinstance(col_labels, slice) and isinstance(self._width_cache, Future):
if col_labels == slice(None):
# fast path - full axis take
new_obj._width_cache = self._width_cache
else:
new_obj._width_cache = self.execution_wrapper.deploy(
func=compute_sliced_len, f_args=(col_labels, self._width_cache)
)
self._is_debug(log) and log.debug(f"EXIT::Partition.mask::{self._identity}")
return new_obj
def __copy__(self):
"""
Create a copy of this partition.
Returns
-------
PandasOnDaskDataframePartition
A copy of this partition.
"""
return self.__constructor__(
self._data,
length=self._length_cache,
width=self._width_cache,
ip=self._ip_cache,
call_queue=self.call_queue,
)
@classmethod
def put(cls, obj):
"""
Put an object into distributed memory and wrap it with partition object.
Parameters
----------
obj : any
An object to be put.
Returns
-------
PandasOnDaskDataframePartition
A new ``PandasOnDaskDataframePartition`` object.
"""
return cls(
cls.execution_wrapper.put(obj, hash=False),
len(obj.index),
len(obj.columns),
)
@classmethod
def preprocess_func(cls, func):
"""
Preprocess a function before an ``apply`` call.
Parameters
----------
func : callable
The function to preprocess.
Returns
-------
callable
An object that can be accepted by ``apply``.
"""
return cls.execution_wrapper.put(func, hash=False, broadcast=True)
def length(self, materialize=True):
"""
Get the length of the object wrapped by this partition.
Parameters
----------
materialize : bool, default: True
Whether to forcibly materialize the result into an integer. If ``False``
was specified, may return a future of the result if it hasn't been
materialized yet.
Returns
-------
int or distributed.Future
The length of the object.
"""
if self._length_cache is None:
self._length_cache = self.apply(len)._data
if isinstance(self._length_cache, Future) and materialize:
self._length_cache = self.execution_wrapper.materialize(self._length_cache)
return self._length_cache
def width(self, materialize=True):
"""
Get the width of the object wrapped by the partition.
Parameters
----------
materialize : bool, default: True
Whether to forcibly materialize the result into an integer. If ``False``
was specified, may return a future of the result if it hasn't been
materialized yet.
Returns
-------
int or distributed.Future
The width of the object.
"""
if self._width_cache is None:
self._width_cache = self.apply(lambda df: len(df.columns))._data
if isinstance(self._width_cache, Future) and materialize:
self._width_cache = self.execution_wrapper.materialize(self._width_cache)
return self._width_cache
def ip(self, materialize=True):
"""
Get the node IP address of the object wrapped by this partition.
Parameters
----------
materialize : bool, default: True
Whether to forcibly materialize the result into an integer. If ``False``
was specified, may return a future of the result if it hasn't been
materialized yet.
Returns
-------
str
IP address of the node that holds the data.
"""
if self._ip_cache is None:
self._ip_cache = self.apply(lambda df: pandas.DataFrame([]))._ip_cache
if materialize and isinstance(self._ip_cache, Future):
self._ip_cache = self.execution_wrapper.materialize(self._ip_cache)
return self._ip_cache
def apply_func(partition, func, *args, **kwargs):
"""
Execute a function on the partition in a worker process.
Parameters
----------
partition : pandas.DataFrame
A pandas DataFrame the function needs to be executed on.
func : callable
The function to perform.
*args : list
Positional arguments to pass to ``func``.
**kwargs : dict
Keyword arguments to pass to ``func``.
Returns
-------
pandas.DataFrame
The resulting pandas DataFrame.
str
The node IP address of the worker process.
Notes
-----
Directly passing a call queue entry (i.e. a list of [func, args, kwargs]) instead of
destructuring it causes a performance penalty.
"""
result = func(partition, *args, **kwargs)
return result, get_ip()
def apply_list_of_funcs(call_queue, partition):
"""
Execute all operations stored in the call queue on the partition in a worker process.
Parameters
----------
call_queue : list
A call queue of ``[func, args, kwargs]`` triples that needs to be executed on the partition.
partition : pandas.DataFrame
A pandas DataFrame the call queue needs to be executed on.
Returns
-------
pandas.DataFrame
The resulting pandas DataFrame.
str
The node IP address of the worker process.
"""
for func, f_args, f_kwargs in call_queue:
partition = func(partition, *f_args, **f_kwargs)
return partition, get_ip()
| PandasOnDaskDataframePartition |
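The partition class above defers work through a call queue: `apply()` records `(func, args, kwargs)` and the queue is only replayed in a worker when drained. A rough local sketch of that deferral pattern, with no Dask involved and purely illustrative names:

import pandas

class LazyPartition:
    def __init__(self, data, call_queue=None):
        self._data = data
        self.call_queue = call_queue or []

    def apply(self, func, *args, **kwargs):
        # Queue the operation instead of running it; return a new lazy wrapper.
        return LazyPartition(self._data, self.call_queue + [(func, args, kwargs)])

    def drain_call_queue(self):
        # Replay every queued operation in order, then clear the queue.
        for func, args, kwargs in self.call_queue:
            self._data = func(self._data, *args, **kwargs)
        self.call_queue = []
        return self._data

part = LazyPartition(pandas.DataFrame({"a": [1, 2, 3]}))
lazy = part.apply(lambda df: df * 2).apply(lambda df, n: df + n, 10)
print(lazy.drain_call_queue())  # both queued operations run only here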
python | Textualize__textual | src/textual/events.py | {
"start": 21207,
"end": 21918
} | class ____(Event, bubble=False):
"""Sent when a widget is focussed.
- [ ] Bubbles
- [ ] Verbose
Args:
from_app_focus: True if this focus event has been sent because the app itself has
regained focus (via an AppFocus event). False if the focus came from within
the Textual app (e.g. via the user pressing tab or a programmatic setting
of the focused widget).
"""
def __init__(self, from_app_focus: bool = False) -> None:
self.from_app_focus = from_app_focus
super().__init__()
def __rich_repr__(self) -> rich.repr.Result:
yield from super().__rich_repr__()
yield "from_app_focus", self.from_app_focus
| Focus |
python | google__pytype | pytype/overlays/chex_overlay.py | {
"start": 619,
"end": 839
} | class ____(overlay.Overlay):
def __init__(self, ctx):
member_map = {
"dataclass": Dataclass.make,
}
ast = ctx.loader.import_name("chex")
super().__init__(ctx, "chex", member_map, ast)
| ChexOverlay |
python | ansible__ansible | lib/ansible/plugins/callback/minimal.py | {
"start": 673,
"end": 3237
} | class ____(CallbackBase):
"""
This is the default callback interface, which simply prints messages
to stdout when new callback events are received.
"""
CALLBACK_VERSION = 2.0
CALLBACK_TYPE = 'stdout'
CALLBACK_NAME = 'minimal'
def _command_generic_msg(self, host, result, caption):
""" output the result of a command run """
buf = "%s | %s | rc=%s >>\n" % (host, caption, result.get('rc', -1))
buf += result.get('stdout', '')
buf += result.get('stderr', '')
buf += result.get('msg', '')
return buf + "\n"
def v2_runner_on_failed(self, result: CallbackTaskResult, ignore_errors: bool = False) -> None:
self._handle_warnings_and_exception(result)
if result.task.action in C.MODULE_NO_JSON and 'module_stderr' not in result.result:
self._display.display(self._command_generic_msg(result.host.get_name(), result.result, "FAILED"), color=C.COLOR_ERROR)
else:
self._display.display("%s | FAILED! => %s" % (result.host.get_name(), self._dump_results(result.result, indent=4)), color=C.COLOR_ERROR)
def v2_runner_on_ok(self, result: CallbackTaskResult) -> None:
self._handle_warnings_and_exception(result)
self._clean_results(result.result, result.task.action)
if result.result.get('changed', False):
color = C.COLOR_CHANGED
state = 'CHANGED'
else:
color = C.COLOR_OK
state = 'SUCCESS'
if result.task.action in C.MODULE_NO_JSON and 'ansible_job_id' not in result.result:
self._display.display(self._command_generic_msg(result.host.get_name(), result.result, state), color=color)
else:
self._display.display("%s | %s => %s" % (result.host.get_name(), state, self._dump_results(result.result, indent=4)), color=color)
def v2_runner_on_skipped(self, result: CallbackTaskResult) -> None:
self._handle_warnings_and_exception(result)
self._display.display("%s | SKIPPED" % (result.host.get_name()), color=C.COLOR_SKIP)
def v2_runner_on_unreachable(self, result: CallbackTaskResult) -> None:
self._handle_warnings_and_exception(result)
self._display.display("%s | UNREACHABLE! => %s" % (result.host.get_name(), self._dump_results(result.result, indent=4)), color=C.COLOR_UNREACHABLE)
def v2_on_file_diff(self, result):
if 'diff' in result.result and result.result['diff']:
self._display.display(self._get_diff(result.result['diff']))
| CallbackModule |
python | python__mypy | mypyc/irbuild/for_helpers.py | {
"start": 38476,
"end": 39055
} | class ____(ForDictionaryCommon):
"""Generate optimized IR for a for loop over dictionary values."""
dict_next_op = dict_next_value_op
dict_iter_op = dict_value_iter_op
def begin_body(self) -> None:
builder = self.builder
line = self.line
# Value is stored at the third place in the tuple.
value = builder.add(TupleGet(self.next_tuple, 2, line))
builder.assign(
builder.get_assignment_target(self.index),
builder.coerce(value, self.target_type, line),
line,
)
| ForDictionaryValues |
python | django__django | tests/validation/models.py | {
"start": 2377,
"end": 2682
} | class ____(models.Model):
other = models.IntegerField(blank=True, null=True)
number = models.IntegerField(
db_column="number_val",
error_messages={"null": "NULL", "not42": "AAARGH", "not_equal": "%s != me"},
validators=[validate_answer_to_universe],
)
| CustomMessagesModel |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 397570,
"end": 398755
} | class ____(Response):
"""
Response of tasks.update_batch endpoint.
:param updated: Number of tasks updated (0 or 1)
:type updated: int
"""
_service = "tasks"
_action = "update_batch"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"updated": {
"description": "Number of tasks updated (0 or 1)",
"enum": [0, 1],
"type": ["integer", "null"],
}
},
"type": "object",
}
def __init__(self, updated: Optional[int] = None, **kwargs: Any) -> None:
super(UpdateBatchResponse, self).__init__(**kwargs)
self.updated = updated
@schema_property("updated")
def updated(self) -> Optional[int]:
return self._property_updated
@updated.setter
def updated(self, value: Optional[int]) -> None:
if value is None:
self._property_updated = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "updated", six.integer_types)
self._property_updated = value
| UpdateBatchResponse |
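The response model above coerces whole-number floats to int before type-checking the `updated` property. A tiny standalone sketch of that setter behaviour (function name is illustrative):

def coerce_updated(value):
    # Mirrors the setter: accept None, turn 2.0 into 2, reject anything non-integral.
    if value is None:
        return None
    if isinstance(value, float) and value.is_integer():
        value = int(value)
    if not isinstance(value, int):
        raise TypeError("updated must be an integer or None")
    return value

assert coerce_updated(1.0) == 1 and coerce_updated(None) is None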
python | pypa__warehouse | tests/unit/integration/secrets/test_utils.py | {
"start": 3176,
"end": 27065
} | class ____:
def test_init(self, metrics, someorigin):
session = pretend.stub()
token = "api_token"
url = "http://foo"
cache = integrations.PublicKeysCache(cache_time=12)
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=session,
metrics=metrics,
api_token=token,
public_keys_cache=cache,
)
assert generic_verifier._session is session
assert generic_verifier._metrics is metrics
assert generic_verifier._api_token == token
assert generic_verifier._api_url == url
assert generic_verifier._public_keys_cache is cache
def test_verify_cache_miss(self, metrics, someorigin):
# Example taken from
# https://gist.github.com/ewjoachim/7dde11c31d9686ed6b4431c3ca166da2
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"7b6c06b681aa86a874555f4a",
"key": "-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE9MJJHnMfn2+H4xL4YaPDA4RpJqU"
"q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
"-----END PUBLIC KEY-----",
"is_current": True,
}
]
}
response = pretend.stub(
json=lambda: meta_payload, raise_for_status=lambda: None
)
session = pretend.stub(get=lambda *a, **k: response)
cache = integrations.PublicKeysCache(cache_time=12)
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=session,
metrics=metrics,
api_token="api-token",
public_keys_cache=cache,
)
key_id = "90a421169f0a406205f1563a953312f0be898d3c7b6c06b681aa86a874555f4a"
signature = (
"MEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
payload = (
b'[{"type":"github_oauth_token","token":"cb4985f91f740272c0234202299'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
)
assert (
generic_verifier.verify(payload=payload, key_id=key_id, signature=signature)
is True
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.someorigin.auth.cache.miss"),
pretend.call("warehouse.token_leak.someorigin.auth.success"),
]
def test_verify_cache_hit(self, metrics, someorigin):
session = pretend.stub()
cache = integrations.PublicKeysCache(cache_time=12)
cache.cached_at = time.time()
cache.cache = [
{
"key_id": "90a421169f0a406205f1563a953312f0be898d3c"
"7b6c06b681aa86a874555f4a",
"key": "-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE9MJJHnMfn2+H4xL4YaPDA4RpJqU"
"q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
"-----END PUBLIC KEY-----",
}
]
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=session,
metrics=metrics,
api_token="api-token",
public_keys_cache=cache,
)
key_id = "90a421169f0a406205f1563a953312f0be898d3c7b6c06b681aa86a874555f4a"
signature = (
"MEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
payload = (
b'[{"type":"github_oauth_token","token":"cb4985f91f740272c0234202299'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]'
)
assert (
generic_verifier.verify(payload=payload, key_id=key_id, signature=signature)
is True
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.someorigin.auth.cache.hit"),
pretend.call("warehouse.token_leak.someorigin.auth.success"),
]
def test_verify_error(self, metrics, someorigin):
cache = integrations.PublicKeysCache(cache_time=12)
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=pretend.stub(),
metrics=metrics,
api_token="api-token",
public_keys_cache=cache,
)
generic_verifier.retrieve_public_key_payload = pretend.raiser(
integrations.InvalidPayloadSignatureError("Bla", "bla")
)
assert generic_verifier.verify(payload={}, key_id="a", signature="a") is False
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.someorigin.auth.cache.miss"),
pretend.call("warehouse.token_leak.someorigin.auth.error.bla"),
]
def test_headers_auth_no_token(self, someorigin):
headers = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
api_token=None,
public_keys_cache=pretend.stub(),
)._headers_auth()
assert headers == {}
def test_headers_auth_token(self, someorigin):
headers = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
api_token="api-token",
public_keys_cache=pretend.stub(),
)._headers_auth()
assert headers == {"Authorization": "token api-token"}
def test_retrieve_public_key_payload(self, metrics, someorigin):
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"7b6c06b681aa86a874555f4a",
"key": "-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE9MJJHnMfn2+H4xL4YaPDA4RpJqU"
"q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
"-----END PUBLIC KEY-----",
"is_current": True,
}
]
}
response = pretend.stub(
json=lambda: meta_payload, raise_for_status=lambda: None
)
session = pretend.stub(get=pretend.call_recorder(lambda *a, **k: response))
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=session,
metrics=metrics,
api_token="api-token",
public_keys_cache=pretend.stub(),
)
assert generic_verifier.retrieve_public_key_payload() == meta_payload
assert session.get.calls == [
pretend.call(
"http://foo",
headers={"Authorization": "token api-token"},
)
]
def test_get_cached_public_key_cache_hit(self, someorigin):
session = pretend.stub()
cache = integrations.PublicKeysCache(cache_time=12)
cache_value = pretend.stub()
cache.set(now=time.time(), value=cache_value)
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=cache,
)
assert generic_verifier._get_cached_public_keys() is cache_value
def test_get_cached_public_key_cache_miss_no_cache(self, someorigin):
session = pretend.stub()
cache = integrations.PublicKeysCache(cache_time=12)
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=cache,
)
with pytest.raises(integrations.CacheMissError):
generic_verifier._get_cached_public_keys()
def test_retrieve_public_key_payload_http_error(self, someorigin):
response = pretend.stub(
status_code=418,
text="I'm a teapot",
raise_for_status=pretend.raiser(requests.HTTPError),
)
session = pretend.stub(
get=lambda *a, **k: response,
)
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.GenericPublicKeyMetaAPIError) as exc:
generic_verifier.retrieve_public_key_payload()
assert str(exc.value) == "Invalid response code 418: I'm a teapot"
assert exc.value.reason == "public_key_api.status.418"
def test_retrieve_public_key_payload_json_error(self, someorigin):
response = pretend.stub(
text="Still a non-json teapot",
json=pretend.raiser(json.JSONDecodeError("", "", 3)),
raise_for_status=lambda: None,
)
session = pretend.stub(get=lambda *a, **k: response)
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.GenericPublicKeyMetaAPIError) as exc:
generic_verifier.retrieve_public_key_payload()
assert str(exc.value) == "Non-JSON response received: Still a non-json teapot"
assert exc.value.reason == "public_key_api.invalid_json"
def test_retrieve_public_key_payload_connection_error(self, someorigin):
session = pretend.stub(get=pretend.raiser(requests.ConnectionError))
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=session,
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(utils.GenericPublicKeyMetaAPIError) as exc:
generic_verifier.retrieve_public_key_payload()
assert str(exc.value) == "Could not connect to SomeOrigin"
assert exc.value.reason == "public_key_api.network_error"
def test_extract_public_keys(self, someorigin):
meta_payload = {
"public_keys": [
{
"key_identifier": "90a421169f0a406205f1563a953312f0be898d3c"
"7b6c06b681aa86a874555f4a",
"key": "-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE9MJJHnMfn2+H4xL4YaPDA4RpJqU"
"q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
"-----END PUBLIC KEY-----",
"is_current": True,
}
]
}
cache = integrations.PublicKeysCache(cache_time=12)
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=cache,
)
keys = generic_verifier.extract_public_keys(pubkey_api_data=meta_payload)
assert keys == [
{
"key": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcD"
"QgAE9MJJHnMfn2+H4xL4YaPDA4RpJqUq\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ"
"8qpVIW4clayyef9gWhFbNHWAA==\n-----END PUBLIC KEY-----",
"key_id": "90a421169f0a406205f1563a953312f0be"
"898d3c7b6c06b681aa86a874555f4a",
}
]
assert cache.cache == keys
@pytest.mark.parametrize(
("payload", "expected"),
[
([], "Payload is not a dict but: []"),
({}, "Payload misses 'public_keys' attribute"),
({"public_keys": None}, "Payload 'public_keys' attribute is not a list"),
({"public_keys": [None]}, "Key is not a dict but: None"),
(
{"public_keys": [{}]},
"Missing attribute in key: ['key', 'key_identifier']",
),
(
{"public_keys": [{"key": "a"}]},
"Missing attribute in key: ['key_identifier']",
),
(
{"public_keys": [{"key_identifier": "a"}]},
"Missing attribute in key: ['key']",
),
],
)
def test_extract_public_keys_error(self, payload, expected, someorigin):
cache = integrations.PublicKeysCache(cache_time=12)
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=cache,
)
with pytest.raises(utils.GenericPublicKeyMetaAPIError) as exc:
list(generic_verifier.extract_public_keys(pubkey_api_data=payload))
assert exc.value.reason == "public_key_api.format_error"
assert str(exc.value) == expected
assert cache.cache is None
def test_check_public_key(self, someorigin):
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
keys = [
{"key_id": "a", "key": "b"},
{"key_id": "c", "key": "d"},
]
assert generic_verifier._check_public_key(public_keys=keys, key_id="c") == "d"
def test_check_public_key_error(self, someorigin):
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
with pytest.raises(integrations.InvalidPayloadSignatureError) as exc:
generic_verifier._check_public_key(public_keys=[], key_id="c")
assert str(exc.value) == "Key c not found in public keys"
assert exc.value.reason == "wrong_key_id"
@pytest.mark.parametrize(
("origin", "payload"),
[
(
"GitHub",
b'[{"type":"github_oauth_token","token":"cb4985f91f740272c0234202299'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]',
)
],
)
def test_check_signature(self, origin, payload):
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=origin,
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = (
"-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE9MJJHnMfn2+H4xL4YaPDA4RpJqU"
"q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
"-----END PUBLIC KEY-----"
)
signature = (
"MEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
assert (
generic_verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
is None
)
@pytest.mark.parametrize(
("origin", "payload"),
[
(
"GitHub",
b'[{"type":"github_oauth_token","token":"cb4985f91f740272c0234202299'
b'f43808034d7f5","url":" https://github.com/github/faketestrepo/blob/'
b'b0dd59c0b500650cacd4551ca5989a6194001b10/production.env"}]',
)
],
)
def test_check_signature_invalid_signature(self, origin, payload):
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=origin,
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = (
"-----BEGIN PUBLIC KEY-----\n"
"MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE9MJJHnMfn2+H4xL4YaPDA4RpJqU"
"q\nkCmRCBnYERxZanmcpzQSXs1X/AljlKkbJ8qpVIW4clayyef9gWhFbNHWAA==\n"
"-----END PUBLIC KEY-----"
)
# Changed the initial N for an M
signature = (
"NEQCIAfgjgz6Ou/3DXMYZBervz1TKCHFsvwMcbuJhNZse622AiAG86/"
"cku2XdcmFWNHl2WSJi2fkE8t+auvB24eURaOd2A=="
)
with pytest.raises(integrations.InvalidPayloadSignatureError) as exc:
generic_verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
assert str(exc.value) == "Invalid signature"
assert exc.value.reason == "invalid_signature"
def test_check_signature_invalid_crypto(self, someorigin):
generic_verifier = utils.GenericTokenScanningPayloadVerifier(
origin=someorigin,
api_url="http://foo",
session=pretend.stub(),
metrics=pretend.stub(),
public_keys_cache=pretend.stub(),
)
public_key = ""
signature = ""
payload = "yeah, nope, that won't pass"
with pytest.raises(integrations.InvalidPayloadSignatureError) as exc:
generic_verifier._check_signature(
payload=payload, public_key=public_key, signature=signature
)
assert str(exc.value) == "Invalid cryptographic values"
assert exc.value.reason == "invalid_crypto"
def test_analyze_disclosure(monkeypatch, metrics, someorigin):
user_id = uuid.UUID(bytes=b"0" * 16)
user = pretend.stub(
id=user_id,
record_event=pretend.call_recorder(lambda *a, **kw: None),
)
database_macaroon = pretend.stub(
user=user,
id=12,
permissions_caveat={"permissions": "user", "version": 1},
caveats=[],
description="foo",
)
find = pretend.call_recorder(lambda *a, **kw: database_macaroon)
delete = pretend.call_recorder(lambda *a, **kw: None)
svc = {
utils.IMetricsService: metrics,
utils.IMacaroonService: pretend.stub(
find_from_raw=find, delete_macaroon=delete
),
}
request = pretend.stub(
find_service=lambda iface, context: svc[iface], remote_addr="0.0.0.0"
)
send_email = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(utils, "send_token_compromised_email_leak", send_email)
utils.analyze_disclosure(
request=request,
disclosure_record={
"type": "pypi_api_token",
"token": "pypi-1234",
"url": "http://example.com",
},
origin=someorigin,
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.someorigin.received"),
pretend.call("warehouse.token_leak.someorigin.valid"),
pretend.call("warehouse.token_leak.someorigin.processed"),
]
assert send_email.calls == [
pretend.call(request, user, public_url="http://example.com", origin=someorigin)
]
assert find.calls == [pretend.call(raw_macaroon="pypi-1234")]
assert delete.calls == [pretend.call(macaroon_id="12")]
assert user.record_event.calls == [
pretend.call(
tag=EventTag.Account.APITokenRemovedLeak,
request=request,
additional={
"macaroon_id": "12",
"public_url": "http://example.com",
"permissions": "user",
"caveats": [],
"description": "foo",
"origin": "SomeOrigin",
},
)
]
def test_analyze_disclosure_wrong_record(metrics, someorigin):
svc = {
utils.IMetricsService: metrics,
utils.IMacaroonService: pretend.stub(),
}
request = pretend.stub(find_service=lambda iface, context: svc[iface])
utils.analyze_disclosure(
request=request,
disclosure_record={},
origin=someorigin,
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.someorigin.received"),
pretend.call("warehouse.token_leak.someorigin.error.format"),
]
def test_analyze_disclosure_invalid_macaroon(metrics, someorigin):
find = pretend.raiser(utils.InvalidMacaroonError("Bla", "bla"))
svc = {
utils.IMetricsService: metrics,
utils.IMacaroonService: pretend.stub(find_from_raw=find),
}
request = pretend.stub(find_service=lambda iface, context: svc[iface])
utils.analyze_disclosure(
request=request,
disclosure_record={
"type": "pypi_api_token",
"token": "pypi-1234",
"url": "http://example.com",
},
origin=someorigin,
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.someorigin.received"),
pretend.call("warehouse.token_leak.someorigin.error.invalid"),
]
def test_analyze_disclosure_unknown_error(metrics, monkeypatch, someorigin):
request = pretend.stub(find_service=lambda *a, **k: metrics)
class SpecificError(Exception):
pass
monkeypatch.setattr(utils, "_analyze_disclosure", pretend.raiser(SpecificError))
with pytest.raises(SpecificError):
utils.analyze_disclosure(
request=request,
disclosure_record={},
origin=someorigin,
)
assert metrics.increment.calls == [
pretend.call("warehouse.token_leak.someorigin.error.unknown"),
]
def test_analyze_disclosures_wrong_type(metrics, someorigin):
with pytest.raises(utils.InvalidTokenLeakRequestError) as exc:
utils.analyze_disclosures(
request=pretend.stub(),
disclosure_records={},
origin=someorigin,
metrics=metrics,
)
assert str(exc.value) == "Invalid format: payload is not a list"
assert exc.value.reason == "format"
def test_analyze_disclosures_raise(metrics, monkeypatch, someorigin):
task = pretend.stub(delay=pretend.call_recorder(lambda *a, **k: None))
request = pretend.stub(task=lambda x: task)
monkeypatch.setattr(tasks, "analyze_disclosure_task", task)
utils.analyze_disclosures(
request=request,
disclosure_records=[1, 2, 3],
origin=someorigin,
metrics=metrics,
)
assert task.delay.calls == [
pretend.call(disclosure_record=1, origin=someorigin.to_dict()),
pretend.call(disclosure_record=2, origin=someorigin.to_dict()),
pretend.call(disclosure_record=3, origin=someorigin.to_dict()),
]
| TestGenericTokenScanningPayloadVerifier |
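The verifier tests above exercise an ECDSA-over-SHA256 check of a payload against a PEM public key. A minimal sketch of that verification step using the `cryptography` package, assuming the signature arrives base64-encoded as in the GitHub-style payloads shown above:

import base64
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import ec
from cryptography.hazmat.primitives.serialization import load_pem_public_key

def signature_is_valid(payload: bytes, public_key_pem: str, signature_b64: str) -> bool:
    # Load the provider's public key and check the DER-encoded ECDSA signature.
    public_key = load_pem_public_key(public_key_pem.encode("utf-8"))
    try:
        public_key.verify(
            base64.b64decode(signature_b64),
            payload,
            ec.ECDSA(hashes.SHA256()),
        )
        return True
    except InvalidSignature:
        return False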
python | ray-project__ray | python/ray/serve/_private/deployment_state.py | {
"start": 69341,
"end": 124295
} | class ____:
"""Manages the target state and replicas for a single deployment."""
FORCE_STOP_UNHEALTHY_REPLICAS = RAY_SERVE_FORCE_STOP_UNHEALTHY_REPLICAS
MAX_CONSTRUCTOR_RETRY_COUNT_WARNING_LOGGED = False
def __init__(
self,
id: DeploymentID,
long_poll_host: LongPollHost,
deployment_scheduler: DeploymentScheduler,
cluster_node_info_cache: ClusterNodeInfoCache,
autoscaling_state_manager: AutoscalingStateManager,
):
self._id = id
self._long_poll_host: LongPollHost = long_poll_host
self._deployment_scheduler = deployment_scheduler
self._cluster_node_info_cache = cluster_node_info_cache
self._autoscaling_state_manager = autoscaling_state_manager
# Each time we set a new deployment goal, we're trying to save new
# DeploymentInfo and bring current deployment to meet new status.
self._target_state: DeploymentTargetState = DeploymentTargetState.default()
self._prev_startup_warning: float = time.time()
self._replica_constructor_error_msg: Optional[str] = None
# Counter for how many times replicas failed to start. This is reset to 0 when:
# (1) The deployment is deployed / re-deployed.
# (2) The deployment reaches the HEALTHY state.
self._replica_constructor_retry_counter: int = 0
# Flag for whether any replicas of the target version has successfully started.
# This is reset to False when the deployment is re-deployed.
self._replica_has_started: bool = False
self._replicas: ReplicaStateContainer = ReplicaStateContainer()
self._curr_status_info: DeploymentStatusInfo = DeploymentStatusInfo(
self._id.name,
DeploymentStatus.UPDATING,
DeploymentStatusTrigger.CONFIG_UPDATE_STARTED,
)
self._rank_manager = DeploymentRankManager(
fail_on_rank_error=RAY_SERVE_FAIL_ON_RANK_ERROR
)
self.replica_average_ongoing_requests: Dict[str, float] = {}
self.health_check_gauge = metrics.Gauge(
"serve_deployment_replica_healthy",
description=(
"Tracks whether this deployment replica is healthy. 1 means "
"healthy, 0 means unhealthy."
),
tag_keys=("deployment", "replica", "application"),
)
# Whether the request routing info have been updated since the last
# time we checked.
self._request_routing_info_updated = False
self._last_broadcasted_running_replica_infos: List[RunningReplicaInfo] = []
self._last_broadcasted_availability: bool = True
self._last_broadcasted_deployment_config = None
self._docs_path: Optional[str] = None
self._route_patterns: Optional[List[str]] = None
def should_autoscale(self) -> bool:
"""
Check if the deployment is under autoscaling
"""
return self._autoscaling_state_manager.should_autoscale_deployment(self._id)
def get_checkpoint_data(self) -> DeploymentTargetState:
"""
Return deployment's target state submitted by user's deployment call.
Should be persisted and outlive current ray cluster.
"""
return self._target_state
def recover_target_state_from_checkpoint(
self, target_state_checkpoint: DeploymentTargetState
):
logger.info(f"Recovering target state for {self._id} from checkpoint.")
self._target_state = target_state_checkpoint
self._deployment_scheduler.on_deployment_deployed(
self._id, self._target_state.info.replica_config
)
if self._target_state.info.deployment_config.autoscaling_config:
self._autoscaling_state_manager.register_deployment(
self._id,
self._target_state.info,
self._target_state.target_num_replicas,
)
def recover_current_state_from_replica_actor_names(
self, replica_actor_names: List[str]
):
"""Recover deployment state from live replica actors found in the cluster."""
assert self._target_state is not None, (
"Target state should be recovered successfully first before "
"recovering current state from replica actor names."
)
logger.info(
f"Recovering current state for {self._id} "
f"from {len(replica_actor_names)} live actors."
)
# All current states use default value, only attach running replicas.
for replica_actor_name in replica_actor_names:
replica_id = ReplicaID.from_full_id_str(replica_actor_name)
new_deployment_replica = DeploymentReplica(
replica_id,
self._target_state.version,
)
# If replica is no longer alive, simply don't add it to the
# deployment state manager to track.
if not new_deployment_replica.recover():
logger.warning(f"{replica_id} died before controller could recover it.")
continue
self._replicas.add(ReplicaState.RECOVERING, new_deployment_replica)
self._deployment_scheduler.on_replica_recovering(replica_id)
logger.debug(f"RECOVERING {replica_id}.")
# TODO(jiaodong): this currently halts all traffic in the cluster
# briefly because we will broadcast a replica update with everything in
# RECOVERING. We should have a grace period where we recover the state
# of the replicas before doing this update.
@property
def target_info(self) -> DeploymentInfo:
return self._target_state.info
@property
def target_version(self) -> DeploymentVersion:
return self._target_state.version
@property
def target_num_replicas(self) -> int:
return self._target_state.target_num_replicas
@property
def curr_status_info(self) -> DeploymentStatusInfo:
return self._curr_status_info
@property
def deployment_name(self) -> str:
return self._id.name
@property
def app_name(self) -> str:
return self._id.app_name
@property
def docs_path(self) -> Optional[str]:
return self._docs_path
@property
def route_patterns(self) -> Optional[List[str]]:
return self._route_patterns
@property
def _failed_to_start_threshold(self) -> int:
# Use global override if set, otherwise use deployment config
value = MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT
if value is not None and not self.MAX_CONSTRUCTOR_RETRY_COUNT_WARNING_LOGGED:
logger.warning(
"MAX_DEPLOYMENT_CONSTRUCTOR_RETRY_COUNT is deprecated and will be removed in the future. "
"Please use 'max_constructor_retry_count' instead in configurations."
)
self.MAX_CONSTRUCTOR_RETRY_COUNT_WARNING_LOGGED = True
base_retry_count = (
value
if value is not None
else self._target_state.info.deployment_config.max_constructor_retry_count
)
return min(
base_retry_count,
self._target_state.target_num_replicas * MAX_PER_REPLICA_RETRY_COUNT,
)
def _replica_startup_failing(self) -> bool:
"""Check whether replicas are currently failing and the number of
failures has exceeded a threshold.
"""
return (
self._target_state.target_num_replicas > 0
and self._replica_constructor_retry_counter
>= self._failed_to_start_threshold
)
def _terminally_failed(self) -> bool:
"""Check whether the current version is terminally errored.
The version is considered terminally errored if the number of
replica failures has exceeded a threshold and no replica of the
target version has successfully started.
"""
return not self._replica_has_started and self._replica_startup_failing()
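# Worked example for the failure threshold used above (illustrative numbers, not
# Ray Serve defaults): with max_constructor_retry_count = 20, target_num_replicas = 3
# and MAX_PER_REPLICA_RETRY_COUNT = 3, _failed_to_start_threshold = min(20, 3 * 3) = 9,
# so the version is considered terminally failed once 9 constructor failures
# accumulate before any replica of the target version starts successfully.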
def get_alive_replica_actor_ids(self) -> Set[str]:
return {replica.actor_id for replica in self._replicas.get()}
def get_running_replica_ids(self) -> List[ReplicaID]:
return [
replica.replica_id
for replica in self._replicas.get(
[ReplicaState.RUNNING, ReplicaState.PENDING_MIGRATION]
)
]
def get_running_replica_infos(self) -> List[RunningReplicaInfo]:
return [
replica.get_running_replica_info(self._cluster_node_info_cache)
for replica in self._replicas.get(
[ReplicaState.RUNNING, ReplicaState.PENDING_MIGRATION]
)
]
def get_num_running_replicas(self, version: DeploymentVersion = None) -> int:
return self._replicas.count(states=[ReplicaState.RUNNING], version=version)
def get_active_node_ids(self) -> Set[str]:
"""Get the node ids of all running replicas in this deployment.
This is used to determine which nodes have replicas. Only nodes with replicas and
the head node should have active proxies.
"""
active_states = [
ReplicaState.STARTING,
ReplicaState.UPDATING,
ReplicaState.RECOVERING,
ReplicaState.RUNNING,
# NOTE(zcin): We still want a proxy to run on a draining
# node before all the replicas are migrated.
ReplicaState.PENDING_MIGRATION,
]
return {
replica.actor_node_id
for replica in self._replicas.get(active_states)
if replica.actor_node_id is not None
}
def list_replica_details(self) -> List[ReplicaDetails]:
return [replica.actor_details for replica in self._replicas.get()]
def broadcast_running_replicas_if_changed(self) -> None:
"""Broadcasts the set of running replicas over long poll if it has changed.
Keeps an in-memory record of the last set of running replicas that was broadcast
to determine if it has changed.
The set will also be broadcast if any replicas have an updated set of
multiplexed model IDs.
"""
running_replica_infos = self.get_running_replica_infos()
is_available = not self._terminally_failed()
running_replicas_changed = (
set(self._last_broadcasted_running_replica_infos)
!= set(running_replica_infos)
or self._request_routing_info_updated
)
availability_changed = is_available != self._last_broadcasted_availability
if not running_replicas_changed and not availability_changed:
return
deployment_metadata = DeploymentTargetInfo(
is_available=is_available,
running_replicas=running_replica_infos,
)
self._long_poll_host.notify_changed(
{
(
LongPollNamespace.DEPLOYMENT_TARGETS,
self._id,
): deployment_metadata,
# NOTE(zcin): notify changed for Java routers. Since Java only
# supports 1.x API, there is no concept of applications in Java,
# so the key should remain a string describing the deployment
# name. If there are no Java routers, this is a no-op.
(
LongPollNamespace.DEPLOYMENT_TARGETS,
self._id.name,
): deployment_metadata,
}
)
self._last_broadcasted_running_replica_infos = running_replica_infos
self._last_broadcasted_availability = is_available
self._request_routing_info_updated = False
def broadcast_deployment_config_if_changed(self) -> None:
"""Broadcasts the deployment config over long poll if it has changed.
Keeps an in-memory record of the last config that was broadcast to determine
if it has changed.
"""
current_deployment_config = self._target_state.info.deployment_config
if self._last_broadcasted_deployment_config == current_deployment_config:
return
self._long_poll_host.notify_changed(
{(LongPollNamespace.DEPLOYMENT_CONFIG, self._id): current_deployment_config}
)
self._last_broadcasted_deployment_config = current_deployment_config
def _set_target_state_deleting(self) -> None:
"""Set the target state for the deployment to be deleted."""
target_state = DeploymentTargetState.create(
info=self._target_state.info,
target_num_replicas=0,
deleting=True,
)
self._target_state = target_state
self._curr_status_info = self._curr_status_info.handle_transition(
trigger=DeploymentStatusInternalTrigger.DELETE
)
logger.info(
f"Deleting {self._id}",
extra={"log_to_stderr": False},
)
def _set_target_state(
self,
target_info: DeploymentInfo,
target_num_replicas: int,
updated_via_api: bool = False,
) -> None:
"""Set the target state for the deployment to the provided info.
Args:
target_info: The info with which to set the target state.
target_num_replicas: The number of replicas that this deployment
should attempt to run.
updated_via_api: Whether the target state update was triggered via API.
"""
new_target_state = DeploymentTargetState.create(
target_info, target_num_replicas, deleting=False
)
if self._target_state.version == new_target_state.version:
# Record either num replica or autoscaling config lightweight update
if (
self._target_state.version.deployment_config.autoscaling_config
!= new_target_state.version.deployment_config.autoscaling_config
):
ServeUsageTag.AUTOSCALING_CONFIG_LIGHTWEIGHT_UPDATED.record("True")
elif updated_via_api:
ServeUsageTag.NUM_REPLICAS_VIA_API_CALL_UPDATED.record("True")
elif (
self._target_state.version.deployment_config.num_replicas
!= new_target_state.version.deployment_config.num_replicas
):
ServeUsageTag.NUM_REPLICAS_LIGHTWEIGHT_UPDATED.record("True")
self._target_state = new_target_state
def deploy(self, deployment_info: DeploymentInfo) -> bool:
"""Deploy the deployment.
If the deployment already exists with the same version, config,
target_capacity, and target_capacity_direction,
this method returns False.
Returns:
bool: Whether the target state has changed.
"""
curr_deployment_info = self._target_state.info
if curr_deployment_info is not None:
# Redeploying should not reset the deployment's start time.
if not self._target_state.deleting:
deployment_info.start_time_ms = curr_deployment_info.start_time_ms
deployment_settings_changed = (
self._target_state.deleting
or curr_deployment_info.deployment_config
!= deployment_info.deployment_config
or curr_deployment_info.replica_config.ray_actor_options
!= deployment_info.replica_config.ray_actor_options
or curr_deployment_info.route_prefix != deployment_info.route_prefix
or deployment_info.version is None
or curr_deployment_info.version != deployment_info.version
)
target_capacity_changed = (
curr_deployment_info.target_capacity != deployment_info.target_capacity
or curr_deployment_info.target_capacity_direction
!= deployment_info.target_capacity_direction
)
else:
deployment_settings_changed = True
target_capacity_changed = True
# Exit early if the deployment info hasn't changed. Ensures this method
# is idempotent.
if not deployment_settings_changed and not target_capacity_changed:
return False
if deployment_info.deployment_config.autoscaling_config:
target_num_replicas = self._autoscaling_state_manager.register_deployment(
self._id, deployment_info, self._target_state.target_num_replicas
)
else:
self._autoscaling_state_manager.deregister_deployment(self._id)
target_num_replicas = get_capacity_adjusted_num_replicas(
deployment_info.deployment_config.num_replicas,
deployment_info.target_capacity,
)
old_target_state = self._target_state
self._set_target_state(deployment_info, target_num_replicas=target_num_replicas)
self._deployment_scheduler.on_deployment_deployed(
self._id, deployment_info.replica_config
)
# Determine if the updated target state simply scales the current state.
# Although the else branch handles the CONFIG_UPDATE, we also take this branch
# for a config update whose only effect is changing `num_replicas`.
# Treating it as a scaling event keeps the user-visible deployment status more
# consistent for observability.
if self._target_state.is_scaled_copy_of(old_target_state):
old_num = old_target_state.target_num_replicas
new_num = self._target_state.target_num_replicas
if new_num > old_num:
self._curr_status_info = self._curr_status_info.handle_transition(
trigger=DeploymentStatusInternalTrigger.MANUALLY_INCREASE_NUM_REPLICAS, # noqa: E501
message=f"Upscaling from {old_num} to {new_num} replicas.",
)
elif new_num < old_num:
self._curr_status_info = self._curr_status_info.handle_transition(
trigger=DeploymentStatusInternalTrigger.MANUALLY_DECREASE_NUM_REPLICAS, # noqa: E501
message=f"Downscaling from {old_num} to {new_num} replicas.",
)
else:
# Otherwise, the deployment configuration has actually been updated.
self._curr_status_info = self._curr_status_info.handle_transition(
trigger=DeploymentStatusInternalTrigger.CONFIG_UPDATE
)
logger.info(
f"Deploying new version of {self._id} "
f"(initial target replicas: {target_num_replicas})."
)
self._replica_constructor_retry_counter = 0
self._replica_has_started = False
return True
def autoscale(self, decision_num_replicas: int) -> bool:
"""
Apply the given scaling decision by updating the target replica count.
Skips if the deployment is deleting, if `decision_num_replicas` is None, or if it
matches the current target. Otherwise updates the state and logs an up/down scaling.
Args:
decision_num_replicas: target replica count to apply.
Returns:
bool: True if the target state was updated, False if no change occurred.
"""
if self._target_state.deleting:
return False
if decision_num_replicas == self._target_state.target_num_replicas:
return False
new_info = copy(self._target_state.info)
new_info.version = self._target_state.version.code_version
old_num = self._target_state.target_num_replicas
self._set_target_state(new_info, decision_num_replicas)
# The deployment should only transition to UPSCALING/DOWNSCALING
# if it's within the autoscaling bounds
if not self._autoscaling_state_manager.is_within_bounds(
self._id,
self._replicas.count(
states=[ReplicaState.RUNNING], version=self._target_state.version
),
):
return True
curr_stats_str = (
f"Current ongoing requests: "
f"{self._autoscaling_state_manager.get_total_num_requests_for_deployment(self._id):.2f}, "
f"current running replicas: "
f"{self._replicas.count(states=[ReplicaState.RUNNING])}."
)
new_num = self._target_state.target_num_replicas
if new_num > old_num:
logger.info(
f"Upscaling {self._id} from {old_num} to {new_num} replicas. "
f"{curr_stats_str}"
)
self._curr_status_info = self._curr_status_info.handle_transition(
trigger=DeploymentStatusInternalTrigger.AUTOSCALE_UP,
message=f"Upscaling from {old_num} to {new_num} replicas.",
)
elif new_num < old_num:
logger.info(
f"Downscaling {self._id} from {old_num} to {new_num} replicas. "
f"{curr_stats_str}"
)
self._curr_status_info = self._curr_status_info.handle_transition(
trigger=DeploymentStatusInternalTrigger.AUTOSCALE_DOWN,
message=f"Downscaling from {old_num} to {new_num} replicas.",
)
return True
def delete(self) -> bool:
if not self._target_state.deleting:
self._set_target_state_deleting()
return True
return False
def set_target_num_replicas(
self,
target_num_replicas: int,
) -> None:
"""Set the target state for the deployment to the provided info."""
self._set_target_state(
self._target_state.info, target_num_replicas, updated_via_api=True
)
def _stop_or_update_outdated_version_replicas(self, max_to_stop=math.inf) -> bool:
"""Stop or update replicas with outdated versions.
Stop replicas with versions that require the actor to be restarted, and
reconfigure replicas that require refreshing deployment config values.
Args:
max_to_stop: max number of replicas to stop, by default,
it stops all replicas with an outdated version.
"""
replicas_to_update = self._replicas.pop(
exclude_version=self._target_state.version,
states=[
ReplicaState.STARTING,
ReplicaState.PENDING_MIGRATION,
ReplicaState.RUNNING,
],
)
replicas_changed = False
code_version_changes = 0
reconfigure_changes = 0
for replica in replicas_to_update:
if (code_version_changes + reconfigure_changes) >= max_to_stop:
self._replicas.add(replica.actor_details.state, replica)
# If the new version requires the actors to be restarted, stop the replica.
# A new one with the correct version will be started later as part of the
# normal scale-up process.
elif replica.version.requires_actor_restart(self._target_state.version):
code_version_changes += 1
# If the replica is still `STARTING`, we don't need to go through the
# graceful stop period.
graceful_stop = replica.actor_details.state == ReplicaState.RUNNING
self._stop_replica(replica, graceful_stop=graceful_stop)
replicas_changed = True
# Otherwise, only lightweight options in deployment config is a mismatch, so
# we update it dynamically without restarting the replica.
elif replica.actor_details.state == ReplicaState.RUNNING:
reconfigure_changes += 1
if replica.version.requires_long_poll_broadcast(
self._target_state.version
):
replicas_changed = True
# Get current rank for the replica
current_rank = self._rank_manager.get_replica_rank(
replica.replica_id.unique_id
)
actor_updating = replica.reconfigure(
self._target_state.version, rank=current_rank.rank
)
if actor_updating:
self._replicas.add(ReplicaState.UPDATING, replica)
else:
self._replicas.add(ReplicaState.RUNNING, replica)
# We don't allow going from STARTING, PENDING_MIGRATION to UPDATING.
else:
self._replicas.add(replica.actor_details.state, replica)
if code_version_changes > 0:
logger.info(
f"Stopping {code_version_changes} replicas of {self._id} "
"with outdated versions."
)
if reconfigure_changes > 0:
logger.info(
f"Updating {reconfigure_changes} replicas of {self._id} "
"with outdated deployment configs."
)
# Record user config lightweight update
ServeUsageTag.USER_CONFIG_LIGHTWEIGHT_UPDATED.record("True")
return replicas_changed
def _check_and_stop_outdated_version_replicas(self) -> bool:
"""Stops replicas with outdated versions to implement rolling updates.
This includes both explicit code version updates and changes to the
user_config.
Returns whether any replicas were stopped.
"""
# Short circuit if target replicas is 0 (the deployment is being
# deleted) because this will be handled in the main loop.
if self._target_state.target_num_replicas == 0:
return False
# We include STARTING and UPDATING replicas here
# because if there are replicas still pending startup, we may as well
# terminate them and start new version replicas instead.
old_running_replicas = self._replicas.count(
exclude_version=self._target_state.version,
states=[
ReplicaState.STARTING,
ReplicaState.UPDATING,
ReplicaState.RUNNING,
],
)
old_stopping_replicas = self._replicas.count(
exclude_version=self._target_state.version, states=[ReplicaState.STOPPING]
)
new_running_replicas = self._replicas.count(
version=self._target_state.version, states=[ReplicaState.RUNNING]
)
# If the deployment is currently scaling down, let the scale down
# complete before doing a rolling update.
if (
self._target_state.target_num_replicas
< old_running_replicas + old_stopping_replicas
):
return False
# The number of replicas that are currently in transition between
# an old version and the new version. Note that we cannot directly
# count the number of stopping replicas because once replicas finish
# stopping, they are removed from the data structure.
pending_replicas = (
self._target_state.target_num_replicas
- new_running_replicas
- old_running_replicas
)
# Maximum number of replicas that can be updating at any given time.
# There should never be more than rollout_size old replicas stopping
# or rollout_size new replicas starting.
rollout_size = max(int(0.2 * self._target_state.target_num_replicas), 1)
max_to_stop = max(rollout_size - pending_replicas, 0)
return self._stop_or_update_outdated_version_replicas(max_to_stop)
def scale_deployment_replicas(
self,
) -> Tuple[List[ReplicaSchedulingRequest], DeploymentDownscaleRequest]:
"""Scale the given deployment to the number of replicas."""
assert (
self._target_state.target_num_replicas >= 0
), "Target number of replicas must be greater than or equal to 0."
upscale = []
downscale = None
self._check_and_stop_outdated_version_replicas()
current_replicas = self._replicas.count(
states=[ReplicaState.STARTING, ReplicaState.UPDATING, ReplicaState.RUNNING]
)
recovering_replicas = self._replicas.count(states=[ReplicaState.RECOVERING])
delta_replicas = (
self._target_state.target_num_replicas
- current_replicas
- recovering_replicas
)
if delta_replicas == 0:
return (upscale, downscale)
elif delta_replicas > 0:
to_add = delta_replicas
if to_add > 0 and not self._terminally_failed():
logger.info(f"Adding {to_add} replica{'s' * (to_add>1)} to {self._id}.")
for _ in range(to_add):
replica_id = ReplicaID(get_random_string(), deployment_id=self._id)
new_deployment_replica = DeploymentReplica(
replica_id,
self._target_state.version,
)
scheduling_request = new_deployment_replica.start(
self._target_state.info,
assign_rank_callback=self._rank_manager.assign_rank,
)
upscale.append(scheduling_request)
self._replicas.add(ReplicaState.STARTING, new_deployment_replica)
elif delta_replicas < 0:
to_remove = -delta_replicas
removed_replicas = f"{to_remove} replica{'s' if to_remove > 1 else ''}"
logger.info(f"Removing {removed_replicas} from {self._id}.")
downscale = DeploymentDownscaleRequest(
deployment_id=self._id, num_to_stop=to_remove
)
return upscale, downscale
def check_curr_status(self) -> Tuple[bool, bool]:
"""Check the current deployment status.
Checks the difference between the target and running replica counts
for the target version.
This will update the current deployment status depending on the state
of the replicas.
Returns (deleted, any_replicas_recovering).
"""
# TODO(edoakes): we could make this more efficient in steady-state by
# having a "healthy" flag that gets flipped if an update or replica
# failure happens.
target_version = self._target_state.version
any_replicas_recovering = (
self._replicas.count(states=[ReplicaState.RECOVERING]) > 0
)
all_running_replica_cnt = self._replicas.count(states=[ReplicaState.RUNNING])
running_at_target_version_replica_cnt = self._replicas.count(
states=[ReplicaState.RUNNING], version=target_version
)
# We have to complete the current deploy() goal once the start failure
# threshold is reached, even though we might still have pending
# replicas in the current goal.
if running_at_target_version_replica_cnt > 0:
# At least one replica is RUNNING at the target version: partial
# success. We can stop tracking constructor failures and leave it
# to the controller to fully scale to the target number of replicas,
# reporting completion only once that count is reached.
self._replica_has_started = True
elif self._replica_startup_failing():
self._curr_status_info = self._curr_status_info.handle_transition(
trigger=DeploymentStatusInternalTrigger.REPLICA_STARTUP_FAILED,
message=(
"The deployment failed to start "
f"{self._replica_constructor_retry_counter} times "
"in a row. This may be due to a problem with its "
"constructor or initial health check failing. See "
"controller logs for details. Error:\n"
f"{self._replica_constructor_error_msg}"
),
)
return False, any_replicas_recovering
# If we have pending ops, the current goal is *not* ready.
if (
self._replicas.count(
states=[
ReplicaState.STARTING,
ReplicaState.UPDATING,
ReplicaState.RECOVERING,
ReplicaState.STOPPING,
]
)
== 0
):
# Check whether the deployment is being deleted and has no running replicas left.
if self._target_state.deleting and all_running_replica_cnt == 0:
return True, any_replicas_recovering
if (
self._target_state.target_num_replicas
== running_at_target_version_replica_cnt
and running_at_target_version_replica_cnt == all_running_replica_cnt
):
self._curr_status_info = self._curr_status_info.handle_transition(
trigger=DeploymentStatusInternalTrigger.HEALTHY
)
self._replica_constructor_retry_counter = 0
return False, any_replicas_recovering
return False, any_replicas_recovering
def _check_startup_replicas(
self, original_state: ReplicaState, stop_on_slow=False
) -> List[Tuple[DeploymentReplica, ReplicaStartupStatus]]:
"""
Common helper for tracking replica startup and the corresponding status
transitions: STARTING, UPDATING, and RECOVERING.
Args:
stop_on_slow: Whether to consider a replica failed when it is
slow to reach the RUNNING state.
"""
slow_replicas = []
for replica in self._replicas.pop(states=[original_state]):
start_status, error_msg = replica.check_started()
if start_status == ReplicaStartupStatus.SUCCEEDED:
if original_state == ReplicaState.RECOVERING:
# If the previous state was RECOVERING, that means the replica
# crashed and is now starting up again. We need to recover the rank
# from the replica actor. The invariant is that the rank is assigned
# during startup and before the replica is added to the replicas
# data structure with RUNNING state.
# Recover rank from the replica actor during controller restart
replica_id = replica.replica_id.unique_id
self._rank_manager.recover_rank(replica_id, replica.rank)
# This replica should now be added to the handle's replica
# set.
self._replicas.add(ReplicaState.RUNNING, replica)
self._deployment_scheduler.on_replica_running(
replica.replica_id, replica.actor_node_id
)
# if replica version is the same as the target version,
# we update the docs path and route patterns
if replica.version == self._target_state.version:
self._docs_path = replica.docs_path
self._route_patterns = replica.route_patterns
# Log the startup latency.
e2e_replica_start_latency = time.time() - replica._start_time
replica_startup_message = (
f"{replica.replica_id} started successfully "
f"on node '{replica.actor_node_id}' after "
f"{e2e_replica_start_latency:.1f}s (PID: {replica.actor_pid})."
)
if replica.initialization_latency_s is not None:
# This condition should always be True. The initialization
# latency is only None before the replica has initialized.
replica_startup_message += (
" Replica constructor, "
"reconfigure method, and initial health check took "
f"{replica.initialization_latency_s:.1f}s."
)
logger.info(replica_startup_message, extra={"log_to_stderr": False})
elif start_status == ReplicaStartupStatus.FAILED:
# Replica reconfigure (deploy / upgrade) failed
self.record_replica_startup_failure(error_msg)
self._stop_replica(replica)
elif start_status in [
ReplicaStartupStatus.PENDING_ALLOCATION,
ReplicaStartupStatus.PENDING_INITIALIZATION,
]:
is_slow = time.time() - replica._start_time > SLOW_STARTUP_WARNING_S
if is_slow:
slow_replicas.append((replica, start_status))
# Does it make sense to stop replicas in PENDING_ALLOCATION
# state?
if is_slow and stop_on_slow:
self._stop_replica(replica, graceful_stop=False)
else:
self._replicas.add(original_state, replica)
return slow_replicas
def record_replica_startup_failure(self, error_msg: str):
"""Record that a replica failed to start."""
# There is no need to record replica failures if the target is 0.
if self._target_state.target_num_replicas == 0:
return
# Increase startup failure counter
self._replica_constructor_retry_counter += 1
self._replica_constructor_error_msg = error_msg
# Update the deployment message only if replicas are failing during
# the very first time the controller is trying to start replicas of
# this version.
retrying_msg = ""
if not self._replica_has_started:
remaining_retries = max(
self._failed_to_start_threshold
- self._replica_constructor_retry_counter,
0,
)
retrying_msg = f" {remaining_retries} more time(s)"
message = (
f"A replica failed to start with exception. Retrying{retrying_msg}. "
f"Error:\n{error_msg}"
)
self._curr_status_info = self._curr_status_info.update_message(message)
def stop_replicas(self, replicas_to_stop) -> None:
for replica in self._replicas.pop():
if replica.replica_id in replicas_to_stop:
self._stop_replica(replica)
else:
self._replicas.add(replica.actor_details.state, replica)
def _stop_replica(self, replica: DeploymentReplica, graceful_stop=True):
"""Stop replica
1. Stop the replica.
2. Change the replica into stopping state.
3. Set the health replica stats to 0.
"""
logger.debug(f"Adding STOPPING to replica: {replica.replica_id}.")
replica.stop(graceful=graceful_stop)
self._replicas.add(ReplicaState.STOPPING, replica)
self._deployment_scheduler.on_replica_stopping(replica.replica_id)
self.health_check_gauge.set(
0,
tags={
"deployment": self.deployment_name,
"replica": replica.replica_id.unique_id,
"application": self.app_name,
},
)
def check_and_update_replicas(self):
"""
Check current state of all DeploymentReplica being tracked, and compare
with state container from previous update() cycle to see if any state
transition happened.
"""
for replica in self._replicas.pop(
states=[ReplicaState.RUNNING, ReplicaState.PENDING_MIGRATION]
):
if replica.check_health():
self._replicas.add(replica.actor_details.state, replica)
self.health_check_gauge.set(
1,
tags={
"deployment": self.deployment_name,
"replica": replica.replica_id.unique_id,
"application": self.app_name,
},
)
routing_stats = replica.pull_routing_stats()
replica.record_routing_stats(routing_stats)
else:
logger.warning(
f"Replica {replica.replica_id} failed health check, stopping it."
)
self.health_check_gauge.set(
0,
tags={
"deployment": self.deployment_name,
"replica": replica.replica_id.unique_id,
"application": self.app_name,
},
)
self._stop_replica(
replica, graceful_stop=not self.FORCE_STOP_UNHEALTHY_REPLICAS
)
# If this is a replica of the target version, the deployment
# enters the "UNHEALTHY" status until the replica is
# recovered or a new deploy happens.
if replica.version == self._target_state.version:
self._curr_status_info = self._curr_status_info.handle_transition(
trigger=DeploymentStatusInternalTrigger.HEALTH_CHECK_FAILED,
message="A replica's health check failed. This "
"deployment will be UNHEALTHY until the replica "
"recovers or a new deploy happens.",
)
slow_start_replicas = []
slow_start = self._check_startup_replicas(ReplicaState.STARTING)
slow_update = self._check_startup_replicas(ReplicaState.UPDATING)
slow_recover = self._check_startup_replicas(
ReplicaState.RECOVERING, stop_on_slow=True
)
slow_start_replicas = slow_start + slow_update + slow_recover
if (
len(slow_start_replicas)
and time.time() - self._prev_startup_warning > SLOW_STARTUP_WARNING_PERIOD_S
):
pending_allocation = []
pending_initialization = []
for replica, startup_status in slow_start_replicas:
if startup_status == ReplicaStartupStatus.PENDING_ALLOCATION:
pending_allocation.append(replica)
if startup_status == ReplicaStartupStatus.PENDING_INITIALIZATION:
pending_initialization.append(replica)
if len(pending_allocation) > 0:
required, available = pending_allocation[0].resource_requirements()
message = (
f"Deployment '{self.deployment_name}' in application "
f"'{self.app_name}' has {len(pending_allocation)} replicas that "
f"have taken more than {SLOW_STARTUP_WARNING_S}s to be scheduled. "
"This may be due to waiting for the cluster to auto-scale or for a "
"runtime environment to be installed. "
f"Resources required for each replica: {required}, "
f"total resources available: {available}. "
"Use `ray status` for more details."
)
logger.warning(message)
if _SCALING_LOG_ENABLED:
print_verbose_scaling_log()
# If status is UNHEALTHY, leave the status and message as is.
# The issue that caused the deployment to be unhealthy should be
# prioritized over this resource availability issue.
if self._curr_status_info.status not in [
DeploymentStatus.UNHEALTHY,
DeploymentStatus.DEPLOY_FAILED,
]:
self._curr_status_info = self._curr_status_info.update_message(
message
)
if len(pending_initialization) > 0:
message = (
f"Deployment '{self.deployment_name}' in application "
f"'{self.app_name}' has {len(pending_initialization)} replicas "
f"that have taken more than {SLOW_STARTUP_WARNING_S}s to "
"initialize.\n"
"This may be caused by a slow __init__ or reconfigure method."
)
logger.warning(message)
# If status is UNHEALTHY, leave the status and message as is.
# The issue that caused the deployment to be unhealthy should be
# prioritized over this slow initialization issue.
if self._curr_status_info.status not in [
DeploymentStatus.UNHEALTHY,
DeploymentStatus.DEPLOY_FAILED,
]:
self._curr_status_info = self._curr_status_info.update_message(
message
)
self._prev_startup_warning = time.time()
for replica in self._replicas.pop(states=[ReplicaState.STOPPING]):
stopped = replica.check_stopped()
if not stopped:
self._replicas.add(ReplicaState.STOPPING, replica)
else:
logger.info(f"{replica.replica_id} is stopped.")
# Release rank only after replica is successfully stopped
# This ensures rank is available during draining/graceful shutdown
replica_id = replica.replica_id.unique_id
self._rank_manager.release_rank(replica_id)
logger.debug(
f"Released rank from replica {replica_id} in deployment {self._id}"
)
self._autoscaling_state_manager.on_replica_stopped(replica.replica_id)
# After replica state updates, check rank consistency and perform minimal reassignment if needed
# This ensures ranks are continuous after lifecycle events
# Only do consistency check when deployment is stable (not during active updates)
# maybe this constraint needs to be relaxed in the future. The implication is that
# if we delay the rank reassignment, the rank system will be in an invalid state
# for a longer period of time. Abrar made this decision because he is not confident
# about how rollouts work in the deployment state machine.
active_replicas = self._replicas.get()
if (
active_replicas
and self._curr_status_info.status == DeploymentStatus.HEALTHY
):
replicas_to_reconfigure = (
self._rank_manager.check_rank_consistency_and_reassign_minimally(
active_replicas,
)
)
# Reconfigure replicas that had their ranks reassigned
self._reconfigure_replicas_with_new_ranks(replicas_to_reconfigure)
def _reconfigure_replicas_with_new_ranks(
self, replicas_to_reconfigure: List["DeploymentReplica"]
):
"""Reconfigure replicas with their new ranks after reassignment.
This uses the reconfigure() mechanism to update replicas with their new ranks.
"""
if not replicas_to_reconfigure:
return
logger.debug(
f"Reconfiguring {len(replicas_to_reconfigure)} replicas with rank changes in deployment {self._id}"
)
updated_count = 0
for replica in replicas_to_reconfigure:
replica_id = replica.replica_id.unique_id
new_rank = self._rank_manager.get_replica_rank(replica_id)
# Use reconfigure() to update rank
# World size is calculated automatically from deployment config
_ = replica.reconfigure(
self._target_state.version,
rank=new_rank,
)
updated_count += 1
logger.debug(
f"Successfully reconfigured {updated_count} replicas with new ranks in deployment {self._id}"
)
def _get_replica_ranks_mapping(self) -> Dict[str, int]:
"""Get the current mapping of replica IDs to ranks.
Returns:
Dictionary mapping replica_id to rank.
"""
return self._rank_manager.get_replica_ranks_mapping()
def _choose_pending_migration_replicas_to_stop(
self,
replicas: List[DeploymentReplica],
deadlines: Dict[str, int],
min_replicas_to_stop: int,
) -> Tuple[List[DeploymentReplica], List[DeploymentReplica]]:
"""Returns a partition of replicas to stop and to keep.
Args:
replicas: The current list of replicas pending migration.
deadlines: The current draining node deadlines.
min_replicas_to_stop: The minimum number of replicas to stop.
"""
to_stop = []
remaining = []
# Stop replicas whose deadline is up
for replica in replicas:
assert replica.actor_node_id in deadlines
curr_timestamp_ms = time.time() * 1000
timeout_ms = replica._actor.graceful_shutdown_timeout_s * 1000
if curr_timestamp_ms >= deadlines[replica.actor_node_id] - timeout_ms:
to_stop.append(replica)
else:
remaining.append(replica)
# Stop excess PENDING_MIGRATION replicas when new "replacement"
# replicas have transitioned to RUNNING. The replicas with the
# earliest deadlines should be chosen greedily.
remaining.sort(key=lambda r: deadlines[r.actor_node_id])
num_excess = min_replicas_to_stop - len(to_stop)
if num_excess > 0:
to_stop.extend(remaining[:num_excess])
remaining = remaining[num_excess:]
return to_stop, remaining
def migrate_replicas_on_draining_nodes(self, draining_nodes: Dict[str, int]):
# Move replicas back to running if they are no longer on a draining node.
# If this causes the number of replicas to exceed the target state,
# they will be scaled down because `scale_deployment_replicas` is called on
# each deployment after this
for replica in self._replicas.pop(states=[ReplicaState.PENDING_MIGRATION]):
if replica.actor_node_id not in draining_nodes:
self._replicas.add(ReplicaState.RUNNING, replica)
else:
self._replicas.add(ReplicaState.PENDING_MIGRATION, replica)
# Migrate replicas on draining nodes
for replica in self._replicas.pop(
states=[ReplicaState.UPDATING, ReplicaState.RUNNING, ReplicaState.STARTING]
):
if replica.actor_node_id in draining_nodes:
# For RUNNING replicas, migrate them safely by starting
# a replacement replica first.
if replica.actor_details.state == ReplicaState.RUNNING:
logger.info(
f"Migrating {replica.replica_id} from draining node "
f"'{replica.actor_node_id}'. A new replica will be created on "
"another node."
)
self._replicas.add(ReplicaState.PENDING_MIGRATION, replica)
# For replicas that are STARTING or UPDATING, might as
# well terminate them immediately to allow replacement
# replicas to start. Otherwise we need to wait for them
# to transition to RUNNING before starting migration.
else:
self._stop_replica(replica, graceful_stop=True)
else:
self._replicas.add(replica.actor_details.state, replica)
num_running = self._replicas.count(states=[ReplicaState.RUNNING])
num_draining = self._replicas.count(states=[ReplicaState.PENDING_MIGRATION])
num_pending_migration_replicas_to_stop = (
num_running + num_draining - self._target_state.target_num_replicas
)
(
replicas_to_stop,
replicas_to_keep,
) = self._choose_pending_migration_replicas_to_stop(
self._replicas.pop(states=[ReplicaState.PENDING_MIGRATION]),
draining_nodes,
num_pending_migration_replicas_to_stop,
)
for replica in replicas_to_stop:
logger.info(
f"Stopping {replica.replica_id} "
f"on draining node {replica.actor_node_id}."
)
self._stop_replica(replica, graceful_stop=True)
for replica in replicas_to_keep:
self._replicas.add(ReplicaState.PENDING_MIGRATION, replica)
def record_request_routing_info(self, info: RequestRoutingInfo) -> None:
"""Records the multiplexed model IDs of a replica.
Args:
info: RequestRoutingInfo including deployment name, replica tag,
multiplex model ids, and routing stats.
"""
# Find the replica
for replica in self._replicas.get():
if replica.replica_id == info.replica_id:
if info.multiplexed_model_ids is not None:
replica.record_multiplexed_model_ids(info.multiplexed_model_ids)
if info.routing_stats is not None:
replica.record_routing_stats(info.routing_stats)
self._request_routing_info_updated = True
return
logger.warning(f"{info.replica_id} not found.")
def _stop_one_running_replica_for_testing(self):
running_replicas = self._replicas.pop(states=[ReplicaState.RUNNING])
replica_to_stop = running_replicas.pop()
replica_to_stop.stop(graceful=False)
self._replicas.add(ReplicaState.STOPPING, replica_to_stop)
for replica in running_replicas:
self._replicas.add(ReplicaState.RUNNING, replica)
def is_ingress(self) -> bool:
return self._target_state.info.ingress
def get_outbound_deployments(self) -> Optional[List[DeploymentID]]:
"""Get the outbound deployments.
Returns:
Sorted list of deployment IDs that this deployment calls. None if
outbound deployments are not yet polled.
"""
result: Set[DeploymentID] = set()
has_outbound_deployments = False
for replica in self._replicas.get([ReplicaState.RUNNING]):
if replica.version != self._target_state.version:
# Only consider replicas of the target version
continue
outbound_deployments = replica.get_outbound_deployments()
if outbound_deployments is not None:
result.update(outbound_deployments)
has_outbound_deployments = True
if not has_outbound_deployments:
return None
return sorted(result, key=lambda d: (d.name))
| DeploymentState |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 18844,
"end": 19251
} | class ____(Element):
proto: MarkdownProto = field(repr=False)
is_caption: bool
allow_html: bool
key: None
def __init__(self, proto: MarkdownProto, root: ElementTree) -> None:
self.proto = proto
self.key = None
self.root = root
self.type = "markdown"
@property
def value(self) -> str:
return self.proto.body
@dataclass(repr=False)
| Markdown |
python | django-guardian__django-guardian | example_project_custom_group/articles/migrations/0001_initial.py | {
"start": 126,
"end": 2855
} | class ____(migrations.Migration):
initial = True
dependencies = [
("auth", "0012_alter_user_first_name_max_length"),
("contenttypes", "0002_remove_content_type_name"),
]
operations = [
migrations.CreateModel(
name="Article",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("title", models.CharField(max_length=64, verbose_name="title")),
("slug", models.SlugField(max_length=64)),
("content", models.TextField(verbose_name="content")),
("created_at", models.DateTimeField(auto_now_add=True, db_index=True)),
],
options={
"permissions": (("view_article", "Can view article"),),
"get_latest_by": "created_at",
"default_permissions": ("add", "change", "delete"),
},
),
migrations.CreateModel(
name="ArticleGroupObjectPermission",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="ArticleUserObjectPermission",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="BigGroupObjectPermission",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("object_pk", models.CharField(max_length=255, verbose_name="object ID")),
],
options={
"abstract": False,
},
),
migrations.CreateModel(
name="BigUserObjectPermission",
fields=[
("id", models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name="ID")),
("object_pk", models.CharField(max_length=255, verbose_name="object ID")),
(
"content_type",
models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="contenttypes.contenttype"),
),
("permission", models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to="auth.permission")),
],
options={
"abstract": False,
},
),
]
| Migration |
python | doocs__leetcode | solution/0200-0299/0215.Kth Largest Element in an Array/Solution.py | {
"start": 0,
"end": 770
} | class ____:
def findKthLargest(self, nums: List[int], k: int) -> int:
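# Quickselect with a Hoare-style partition: recurse only into the side containing index k.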
def quick_sort(l: int, r: int) -> int:
if l == r:
return nums[l]
i, j = l - 1, r + 1
x = nums[(l + r) >> 1]
while i < j:
while 1:
i += 1
if nums[i] >= x:
break
while 1:
j -= 1
if nums[j] <= x:
break
if i < j:
nums[i], nums[j] = nums[j], nums[i]
if j < k:
return quick_sort(j + 1, r)
return quick_sort(l, j)
n = len(nums)
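# The k-th largest element sits at index n - k of the ascending-sorted array.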
k = n - k
return quick_sort(0, n - 1)
| Solution |
python | doocs__leetcode | solution/1000-1099/1052.Grumpy Bookstore Owner/Solution.py | {
"start": 0,
"end": 454
} | class ____:
def maxSatisfied(
self, customers: List[int], grumpy: List[int], minutes: int
) -> int:
mx = cnt = sum(c * g for c, g in zip(customers[:minutes], grumpy))
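# Sliding window of length `minutes`: cnt counts grumpy-minute customers recovered in the current window.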
for i in range(minutes, len(customers)):
cnt += customers[i] * grumpy[i]
cnt -= customers[i - minutes] * grumpy[i - minutes]
mx = max(mx, cnt)
return sum(c * (g ^ 1) for c, g in zip(customers, grumpy)) + mx
| Solution |
python | scikit-image__scikit-image | tests/skimage/morphology/test_max_tree.py | {
"start": 1685,
"end": 20613
} | class ____(TestCase):
def test_max_tree(self):
"Test for max tree"
img_type = np.uint8
img = np.array(
[[10, 8, 8, 9], [7, 7, 9, 9], [8, 7, 10, 10], [9, 9, 10, 10]],
dtype=img_type,
)
P_exp = np.array(
[[1, 4, 1, 1], [4, 4, 3, 3], [1, 4, 3, 10], [3, 3, 10, 10]], dtype=np.int64
)
S_exp = np.array(
[4, 5, 9, 1, 2, 8, 3, 6, 7, 12, 13, 0, 10, 11, 14, 15], dtype=np.int64
)
for img_type in [np.uint8, np.uint16, np.uint32, np.uint64]:
img = img.astype(img_type)
P, S = max_tree(img, connectivity=2)
assert_array_equal(P, P_exp)
assert_array_equal(S, S_exp)
for img_type in [np.int8, np.int16, np.int32, np.int64]:
img = img.astype(img_type)
img_shifted = img - 9
P, S = max_tree(img_shifted, connectivity=2)
assert_array_equal(P, P_exp)
assert_array_equal(S, S_exp)
img_float = img.astype(float)
img_float = (img_float - 8) / 2.0
for img_type in [np.float32, np.float64]:
img_float = img_float.astype(img_type)
P, S = max_tree(img_float, connectivity=2)
assert_array_equal(P, P_exp)
assert_array_equal(S, S_exp)
return
def test_area_closing(self):
"Test for Area Closing (2 thresholds, all types)"
# original image
img = np.array(
[
[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
[240, 200, 200, 240, 200, 240, 200, 200, 240, 240, 200, 240],
[240, 200, 40, 240, 240, 240, 240, 240, 240, 240, 40, 240],
[240, 240, 240, 240, 100, 240, 100, 100, 240, 240, 200, 240],
[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
[200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],
[200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 40],
[200, 200, 200, 100, 200, 200, 200, 240, 255, 255, 255, 255],
[200, 200, 200, 100, 200, 200, 200, 240, 200, 200, 255, 255],
[200, 200, 200, 200, 200, 40, 200, 240, 240, 100, 255, 255],
[200, 40, 255, 255, 255, 40, 200, 255, 200, 200, 255, 255],
[200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255],
],
dtype=np.uint8,
)
# expected area closing with area 2
expected_2 = np.array(
[
[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
[240, 200, 200, 240, 240, 240, 200, 200, 240, 240, 200, 240],
[240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 200, 240],
[240, 240, 240, 240, 240, 240, 100, 100, 240, 240, 200, 240],
[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
[200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],
[200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 255],
[200, 200, 200, 100, 200, 200, 200, 240, 255, 255, 255, 255],
[200, 200, 200, 100, 200, 200, 200, 240, 200, 200, 255, 255],
[200, 200, 200, 200, 200, 40, 200, 240, 240, 200, 255, 255],
[200, 200, 255, 255, 255, 40, 200, 255, 200, 200, 255, 255],
[200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255],
],
dtype=np.uint8,
)
# expected area closing with area 4
expected_4 = np.array(
[
[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
[240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 240, 240],
[240, 200, 200, 240, 240, 240, 240, 240, 240, 240, 240, 240],
[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
[240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240, 240],
[200, 200, 200, 200, 200, 200, 200, 240, 240, 240, 255, 255],
[200, 255, 200, 200, 200, 255, 200, 240, 255, 255, 255, 255],
[200, 200, 200, 200, 200, 200, 200, 240, 255, 255, 255, 255],
[200, 200, 200, 200, 200, 200, 200, 240, 200, 200, 255, 255],
[200, 200, 200, 200, 200, 200, 200, 240, 240, 200, 255, 255],
[200, 200, 255, 255, 255, 200, 200, 255, 200, 200, 255, 255],
[200, 200, 200, 200, 200, 200, 200, 255, 255, 255, 255, 255],
],
dtype=np.uint8,
)
# _full_type_test makes a test with many image types.
_full_type_test(img, 2, expected_2, area_closing, connectivity=2)
_full_type_test(img, 4, expected_4, area_closing, connectivity=2)
P, S = max_tree(invert(img), connectivity=2)
_full_type_test(img, 4, expected_4, area_closing, parent=P, tree_traverser=S)
def test_area_opening(self):
"Test for Area Opening (2 thresholds, all types)"
# original image
img = np.array(
[
[15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],
[15, 55, 55, 15, 55, 15, 55, 55, 15, 15, 55, 15],
[15, 55, 215, 15, 15, 15, 15, 15, 15, 15, 215, 15],
[15, 15, 15, 15, 155, 15, 155, 155, 15, 15, 55, 15],
[15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],
[55, 55, 55, 55, 55, 55, 55, 15, 55, 55, 0, 0],
[55, 0, 55, 55, 55, 0, 55, 15, 0, 0, 0, 215],
[55, 55, 55, 155, 55, 55, 55, 15, 0, 0, 0, 0],
[55, 55, 55, 155, 55, 55, 55, 15, 55, 55, 0, 0],
[55, 55, 55, 55, 55, 215, 55, 15, 15, 155, 0, 0],
[55, 215, 0, 0, 0, 215, 55, 0, 55, 55, 0, 0],
[55, 55, 55, 55, 55, 55, 55, 0, 0, 0, 0, 0],
],
dtype=np.uint8,
)
# expected area opening with area 2
expected_2 = np.array(
[
[15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],
[15, 55, 55, 15, 15, 15, 55, 55, 15, 15, 55, 15],
[15, 55, 55, 15, 15, 15, 15, 15, 15, 15, 55, 15],
[15, 15, 15, 15, 15, 15, 155, 155, 15, 15, 55, 15],
[15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],
[55, 55, 55, 55, 55, 55, 55, 15, 55, 55, 0, 0],
[55, 0, 55, 55, 55, 0, 55, 15, 0, 0, 0, 0],
[55, 55, 55, 155, 55, 55, 55, 15, 0, 0, 0, 0],
[55, 55, 55, 155, 55, 55, 55, 15, 55, 55, 0, 0],
[55, 55, 55, 55, 55, 215, 55, 15, 15, 55, 0, 0],
[55, 55, 0, 0, 0, 215, 55, 0, 55, 55, 0, 0],
[55, 55, 55, 55, 55, 55, 55, 0, 0, 0, 0, 0],
],
dtype=np.uint8,
)
# expected area opening with area 4
expected_4 = np.array(
[
[15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],
[15, 55, 55, 15, 15, 15, 15, 15, 15, 15, 15, 15],
[15, 55, 55, 15, 15, 15, 15, 15, 15, 15, 15, 15],
[15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],
[15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15],
[55, 55, 55, 55, 55, 55, 55, 15, 15, 15, 0, 0],
[55, 0, 55, 55, 55, 0, 55, 15, 0, 0, 0, 0],
[55, 55, 55, 55, 55, 55, 55, 15, 0, 0, 0, 0],
[55, 55, 55, 55, 55, 55, 55, 15, 55, 55, 0, 0],
[55, 55, 55, 55, 55, 55, 55, 15, 15, 55, 0, 0],
[55, 55, 0, 0, 0, 55, 55, 0, 55, 55, 0, 0],
[55, 55, 55, 55, 55, 55, 55, 0, 0, 0, 0, 0],
],
dtype=np.uint8,
)
# _full_type_test makes a test with many image types.
_full_type_test(img, 2, expected_2, area_opening, connectivity=2)
_full_type_test(img, 4, expected_4, area_opening, connectivity=2)
P, S = max_tree(img, connectivity=2)
_full_type_test(img, 4, expected_4, area_opening, parent=P, tree_traverser=S)
def test_diameter_closing(self):
"Test for Diameter Opening (2 thresholds, all types)"
img = np.array(
[
[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],
[95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],
[93, 63, 63, 63, 63, 86, 86, 86, 87, 43, 43, 91],
[92, 89, 88, 86, 85, 85, 84, 85, 85, 43, 43, 89],
[91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
[90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
[90, 88, 86, 84, 83, 83, 82, 83, 83, 84, 86, 88],
[90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
[91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
[92, 89, 23, 23, 85, 85, 84, 85, 85, 3, 3, 89],
[93, 91, 23, 23, 87, 86, 86, 86, 87, 88, 3, 91],
[95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],
],
dtype=np.uint8,
)
ex2 = np.array(
[
[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],
[95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],
[93, 63, 63, 63, 63, 86, 86, 86, 87, 43, 43, 91],
[92, 89, 88, 86, 85, 85, 84, 85, 85, 43, 43, 89],
[91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
[90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
[90, 88, 86, 84, 83, 83, 83, 83, 83, 84, 86, 88],
[90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
[91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
[92, 89, 23, 23, 85, 85, 84, 85, 85, 3, 3, 89],
[93, 91, 23, 23, 87, 86, 86, 86, 87, 88, 3, 91],
[95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],
],
dtype=np.uint8,
)
ex4 = np.array(
[
[97, 95, 93, 92, 91, 90, 90, 90, 91, 92, 93, 95],
[95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],
[93, 63, 63, 63, 63, 86, 86, 86, 87, 84, 84, 91],
[92, 89, 88, 86, 85, 85, 84, 85, 85, 84, 84, 89],
[91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
[90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
[90, 88, 86, 84, 83, 83, 83, 83, 83, 84, 86, 88],
[90, 88, 86, 85, 84, 83, 83, 83, 84, 85, 86, 88],
[91, 88, 87, 85, 84, 84, 83, 84, 84, 85, 87, 88],
[92, 89, 84, 84, 85, 85, 84, 85, 85, 84, 84, 89],
[93, 91, 84, 84, 87, 86, 86, 86, 87, 88, 84, 91],
[95, 93, 91, 89, 88, 88, 88, 88, 88, 89, 91, 93],
],
dtype=np.uint8,
)
# _full_type_test makes a test with many image types.
_full_type_test(img, 2, ex2, diameter_closing, connectivity=2)
_full_type_test(img, 4, ex4, diameter_closing, connectivity=2)
P, S = max_tree(invert(img), connectivity=2)
_full_type_test(img, 4, ex4, diameter_opening, parent=P, tree_traverser=S)
def test_diameter_opening(self):
"Test for Diameter Opening (2 thresholds, all types)"
img = np.array(
[
[5, 7, 9, 11, 12, 12, 12, 12, 12, 11, 9, 7],
[7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],
[9, 40, 40, 40, 40, 16, 16, 16, 16, 60, 60, 11],
[11, 13, 15, 16, 17, 18, 18, 18, 17, 60, 60, 13],
[12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],
[12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],
[12, 15, 16, 18, 19, 19, 20, 19, 19, 18, 16, 15],
[12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],
[12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],
[11, 13, 80, 80, 17, 18, 18, 18, 17, 100, 100, 13],
[9, 11, 80, 80, 16, 16, 16, 16, 16, 15, 100, 11],
[7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],
]
)
ex2 = np.array(
[
[5, 7, 9, 11, 12, 12, 12, 12, 12, 11, 9, 7],
[7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],
[9, 40, 40, 40, 40, 16, 16, 16, 16, 60, 60, 11],
[11, 13, 15, 16, 17, 18, 18, 18, 17, 60, 60, 13],
[12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],
[12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],
[12, 15, 16, 18, 19, 19, 19, 19, 19, 18, 16, 15],
[12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],
[12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],
[11, 13, 80, 80, 17, 18, 18, 18, 17, 100, 100, 13],
[9, 11, 80, 80, 16, 16, 16, 16, 16, 15, 100, 11],
[7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],
]
)
ex4 = np.array(
[
[5, 7, 9, 11, 12, 12, 12, 12, 12, 11, 9, 7],
[7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],
[9, 40, 40, 40, 40, 16, 16, 16, 16, 18, 18, 11],
[11, 13, 15, 16, 17, 18, 18, 18, 17, 18, 18, 13],
[12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],
[12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],
[12, 15, 16, 18, 19, 19, 19, 19, 19, 18, 16, 15],
[12, 14, 16, 18, 19, 19, 19, 19, 19, 18, 16, 14],
[12, 14, 16, 17, 18, 19, 19, 19, 18, 17, 16, 14],
[11, 13, 18, 18, 17, 18, 18, 18, 17, 18, 18, 13],
[9, 11, 18, 18, 16, 16, 16, 16, 16, 15, 18, 11],
[7, 10, 11, 13, 14, 14, 15, 14, 14, 13, 11, 10],
]
)
# _full_type_test makes a test with many image types.
_full_type_test(img, 2, ex2, diameter_opening, connectivity=2)
_full_type_test(img, 4, ex4, diameter_opening, connectivity=2)
P, S = max_tree(img, connectivity=2)
_full_type_test(img, 4, ex4, diameter_opening, parent=P, tree_traverser=S)
def test_local_maxima(self):
"local maxima for various data types"
data = np.array(
[
[10, 11, 13, 14, 14, 15, 14, 14, 13, 11],
[11, 13, 15, 16, 16, 16, 16, 16, 15, 13],
[13, 15, 40, 40, 18, 18, 18, 60, 60, 15],
[14, 16, 40, 40, 19, 19, 19, 60, 60, 16],
[14, 16, 18, 19, 19, 19, 19, 19, 18, 16],
[15, 16, 18, 19, 19, 20, 19, 19, 18, 16],
[14, 16, 18, 19, 19, 19, 19, 19, 18, 16],
[14, 16, 80, 80, 19, 19, 19, 100, 100, 16],
[13, 15, 80, 80, 18, 18, 18, 100, 100, 15],
[11, 13, 15, 16, 16, 16, 16, 16, 15, 13],
],
dtype=np.uint8,
)
expected_result = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=np.uint64,
)
for dtype in [np.uint8, np.uint64, np.int8, np.int64]:
test_data = data.astype(dtype)
out = max_tree_local_maxima(test_data, connectivity=1)
out_bin = out > 0
assert_array_equal(expected_result, out_bin)
assert out.dtype == expected_result.dtype
assert np.max(out) == 5
P, S = max_tree(test_data)
out = max_tree_local_maxima(test_data, parent=P, tree_traverser=S)
assert_array_equal(expected_result, out_bin)
assert out.dtype == expected_result.dtype
assert np.max(out) == 5
def test_extrema_float(self):
"specific tests for float type"
data = np.array(
[
[0.10, 0.11, 0.13, 0.14, 0.14, 0.15, 0.14, 0.14, 0.13, 0.11],
[0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16, 0.16, 0.15, 0.13],
[0.13, 0.15, 0.40, 0.40, 0.18, 0.18, 0.18, 0.60, 0.60, 0.15],
[0.14, 0.16, 0.40, 0.40, 0.19, 0.19, 0.19, 0.60, 0.60, 0.16],
[0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19, 0.19, 0.18, 0.16],
[0.15, 0.182, 0.18, 0.19, 0.204, 0.20, 0.19, 0.19, 0.18, 0.16],
[0.14, 0.16, 0.18, 0.19, 0.19, 0.19, 0.19, 0.19, 0.18, 0.16],
[0.14, 0.16, 0.80, 0.80, 0.19, 0.19, 0.19, 4.0, 1.0, 0.16],
[0.13, 0.15, 0.80, 0.80, 0.18, 0.18, 0.18, 1.0, 1.0, 0.15],
[0.11, 0.13, 0.15, 0.16, 0.16, 0.16, 0.16, 0.16, 0.15, 0.13],
],
dtype=np.float32,
)
expected_result = np.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=np.uint8,
)
# test for local maxima
out = max_tree_local_maxima(data, connectivity=1)
out_bin = out > 0
assert_array_equal(expected_result, out_bin)
assert np.max(out) == 6
def test_3d(self):
"""tests the detection of maxima in 3D."""
img = np.zeros((8, 8, 8), dtype=np.uint8)
local_maxima = np.zeros((8, 8, 8), dtype=np.uint64)
# first maximum: only one pixel
img[1, 1:3, 1:3] = 100
img[2, 2, 2] = 200
img[3, 1:3, 1:3] = 100
local_maxima[2, 2, 2] = 1
# second maximum: three pixels in z-direction
img[5:8, 1, 1] = 200
local_maxima[5:8, 1, 1] = 1
# third: two maxima in planes 0 and 2.
img[0, 5:8, 5:8] = 200
img[1, 6, 6] = 100
img[2, 5:7, 5:7] = 200
img[0:3, 5:8, 5:8] += 50
local_maxima[0, 5:8, 5:8] = 1
local_maxima[2, 5:7, 5:7] = 1
# fourth: one maximum in the corner of the cube
img[6:8, 6:8, 6:8] = 200
img[7, 7, 7] = 255
local_maxima[7, 7, 7] = 1
out = max_tree_local_maxima(img)
out_bin = out > 0
assert_array_equal(local_maxima, out_bin)
assert np.max(out) == 5
| TestMaxtree |
python | pytorch__pytorch | torch/_inductor/codegen/triton.py | {
"start": 25150,
"end": 33786
} | class ____(PythonPrinter):
def _print_TruncToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return (
f"libdevice.trunc({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
)
def _print_Float(self, expr: sympy.Expr) -> str:
if expr.is_integer:
# sympy considers 0.0 to be integer, but triton doesn't.
# this workaround prints the float as an integer
# xref: https://github.com/sympy/sympy/issues/26620
ret = str(int(expr))
elif config.is_fbcode() and torch.version.hip:
ret = f"{expr}"
else:
ret = f"tl.full([], {expr}, tl.float64)"
return ret
def _print_ToFloat(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
s = self.parenthesize(expr.args[0], PRECEDENCE["Atom"] - 0.5)
return f"{s}.to(tl.float64)"
def _print_PythonMod(self, expr: sympy.Expr) -> str:
quot, div = expr.args
if quot.is_nonnegative and div.is_nonnegative:
return self.stringify(expr.args, " % ", PRECEDENCE["Atom"] - 0.5)
quot_s = self._print(quot)
div_s = self._print(div)
return f"triton_helpers.remainder_integer({quot_s}, {div_s})"
def _print_FloorDiv(self, expr: sympy.Expr) -> str:
assert expr.is_integer
quot, div = expr.args
if quot.is_nonnegative and div.is_nonnegative:
return self.stringify(expr.args, " // ", PRECEDENCE["Atom"] - 0.5)
quot_s = self._print(quot)
div_s = self._print(div)
return f"triton_helpers.div_floor_integer({quot_s}, {div_s})"
# TODO: This is wrong, when lhs, rhs > 2**53, Python does a higher
# precision algorithm, which we would need to replicate here
def _print_IntTrueDiv(self, expr: sympy.Expr) -> str:
return self.stringify(expr.args, " / ", PRECEDENCE["Atom"] - 0.5)
# NB: sympy.floor/ceiling produce integers, so we have to do the
# conversion to index dtype
def _print_floor(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return (
f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
)
def _print_FloorToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return (
f"libdevice.floor({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
)
def _print_ceiling(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
def _print_CeilToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.ceil({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
def _helper_sqrt(self, expr: sympy.Expr) -> str:
# workaround for https://github.com/pytorch/pytorch/issues/165738
if torch.xpu.is_available():
return f"libdevice.sqrt(({self._print(expr)}).to(tl.float32))"
return f"tl.sqrt_rn(({self._print(expr)}).to(tl.float32))"
def _print_FloatPow(self, expr: sympy.Expr) -> str:
return (
f"libdevice.pow({self._print(expr.args[0])}, {self._print(expr.args[1])})"
)
def _print_PowByNatural(self, expr: sympy.Expr) -> str:
if expr.args[0].is_Integer:
return f"libdevice.pow({float(expr.args[0])}, {self._print(expr.args[1])})"
return (
f"libdevice.pow({self._print(expr.args[0])}, {self._print(expr.args[1])})"
)
def _print_Where(self, expr: sympy.Expr) -> str:
c = self.doprint(expr.args[0])
p = self.doprint(expr.args[1])
q = self.doprint(expr.args[2])
return f"tl.where({c}, {p}, {q})"
def _print_min_max_helper(self, expr: sympy.Expr, cmp: str) -> str:
"""
Helper for max/min code generation.
cmp: > or <
"""
if len(expr.args) == 1:
return self._print(expr.args[0])
mid = len(expr.args) // 2
cls = type(expr)
a = self._print(cls(*expr.args[:mid]))
b = self._print(cls(*expr.args[mid:]))
# Use a macro so we can propagate constexprs.
# https://github.com/triton-lang/triton/issues/3815
a, b = tuple(f"({x})" for x in (a, b))
assert cmp in (">", "<"), f"Unexpected comparator: '{cmp}'"
return f"({a} * ({a} {cmp}= {b}) + {b} * ({b} {cmp} {a}))"
def _print_Min(self, expr: sympy.Expr) -> str:
return self._print_min_max_helper(expr, "<")
def _print_Max(self, expr: sympy.Expr) -> str:
return self._print_min_max_helper(expr, ">")
def _print_Abs(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"tl_math.abs({self._print(expr.args[0])})"
def _print_OpaqueUnaryFn_cos(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.cos(({self._print(expr.args[0])}).to(tl.float32))"
def _print_OpaqueUnaryFn_cosh(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.cosh(({self._print(expr.args[0])}).to(tl.float32))"
def _print_OpaqueUnaryFn_acos(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.acos(({self._print(expr.args[0])}).to(tl.float32))"
def _print_OpaqueUnaryFn_sin(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.sin(({self._print(expr.args[0])}).to(tl.float32))"
def _print_OpaqueUnaryFn_sinh(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.sinh(({self._print(expr.args[0])}).to(tl.float32))"
def _print_OpaqueUnaryFn_asin(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.asin(({self._print(expr.args[0])}).to(tl.float32))"
def _print_OpaqueUnaryFn_tan(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.tan(({self._print(expr.args[0])}).to(tl.float32))"
def _print_OpaqueUnaryFn_tanh(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.tanh(({self._print(expr.args[0])}).to(tl.float32))"
def _print_OpaqueUnaryFn_atan(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.atan(({self._print(expr.args[0])}).to(tl.float32))"
def _print_OpaqueUnaryFn_log2(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return f"libdevice.log2(({self._print(expr.args[0])}).to(tl.float32))"
def _print_RoundToInt(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 1
return (
f"libdevice.llrint({self._print(expr.args[0])}).to({V.kernel.index_dtype})"
)
def _print_RoundDecimal(self, expr: sympy.Expr) -> str:
assert len(expr.args) == 2
number, ndigits = expr.args
if number.is_integer:
# ndigits < 0 should have been filtered by the sympy function
assert ndigits < 0
raise ValueError(
f"For integer inputs, only non-negative ndigits are currently supported, but got {ndigits}."
)
number_str = self.parenthesize(number, PRECEDENCE["Mul"])
return f"libdevice.nearbyint(1e{ndigits} * {number_str}) * 1e{-ndigits}"
texpr = TritonPrinter().doprint
def triton_compute_type(dtype: torch.dtype) -> str:
"""Convert torch.dtype to triton type and upcast [b]float16 to float32"""
return triton_type(upcast_compute_type(dtype))
def triton_store_type(dtype: torch.dtype) -> str:
"""Convert torch.dtype to triton type, with fix for storing tl.bool"""
if dtype == torch.bool:
dtype = torch.int8
return triton_type(dtype)
def upcast_acc_dtype(dtype: torch.dtype) -> torch.dtype:
"""Implicit upcasts used for Triton reduction types"""
if is_integer_dtype(dtype) and dtype.is_signed and dtype.itemsize <= 4:
return torch.int32
return upcast_compute_type(dtype)
def triton_acc_type(dtype: torch.dtype) -> str:
"""Convert torch.dtype to triton type, with reduction upcasts"""
return triton_compute_type(upcast_acc_dtype(dtype))
def low_precision_fp(dtype: torch.dtype) -> bool:
return dtype.itemsize <= 2 and dtype.is_floating_point
def low_precision_fp_var(var: Union[CSEVariable, Any]) -> bool:
if not isinstance(var, CSEVariable):
return False
dtype = var.dtype
return low_precision_fp(dtype) if isinstance(dtype, torch.dtype) else False
| TritonPrinter |
python | gevent__gevent | src/gevent/tests/test__example_portforwarder.py | {
"start": 363,
"end": 2025
} | class ____(util.TestServer):
example = 'portforwarder.py'
# [listen on, forward to]
example_args = ['127.0.0.1:10011', '127.0.0.1:10012']
if greentest.WIN:
from subprocess import CREATE_NEW_PROCESS_GROUP
# Must be in a new process group to use CTRL_C_EVENT, otherwise
# we get killed too
start_kwargs = {'creationflags': CREATE_NEW_PROCESS_GROUP}
def after(self):
if greentest.WIN:
self.assertIsNotNone(self.popen.poll())
else:
self.assertEqual(self.popen.poll(), 0)
def _run_all_tests(self):
log = []
def handle(sock, _address):
while True:
data = sock.recv(1024)
print('got %r' % data)
if not data:
break
log.append(data)
server = StreamServer(self.example_args[1], handle)
server.start()
try:
conn = socket.create_connection(('127.0.0.1', 10011))
conn.sendall(b'msg1')
sleep(0.1)
# On Windows, SIGTERM actually abruptly terminates the process;
# it can't be caught. However, CTRL_C_EVENT results in a KeyboardInterrupt
# being raised, so we can shut down properly.
self.popen.send_signal(getattr(signal, 'CTRL_C_EVENT', signal.SIGTERM))
sleep(0.1)
conn.sendall(b'msg2')
conn.close()
with gevent.Timeout(2.1):
self.popen.wait()
finally:
server.close()
self.assertEqual([b'msg1', b'msg2'], log)
if __name__ == '__main__':
greentest.main()
| Test |
python | allegroai__clearml | clearml/backend_api/services/v2_23/datasets.py | {
"start": 90451,
"end": 91310
} | class ____(Response):
"""
Response of datasets.create_version endpoint.
:param id: ID of the version
:type id: str
"""
_service = "datasets"
_action = "create_version"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {
"id": {"description": "ID of the version", "type": ["string", "null"]}
},
"type": "object",
}
def __init__(self, id=None, **kwargs):
super(CreateVersionResponse, self).__init__(**kwargs)
self.id = id
@schema_property("id")
def id(self):
return self._property_id
@id.setter
def id(self, value):
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
| CreateVersionResponse |
python | PrefectHQ__prefect | src/prefect/server/schemas/actions.py | {
"start": 29581,
"end": 30292
} | class ____(ActionBaseModel):
"""Data used to create block document reference."""
id: UUID = Field(
default_factory=uuid4, description="The block document reference ID"
)
parent_block_document_id: UUID = Field(
default=..., description="ID of the parent block document"
)
reference_block_document_id: UUID = Field(
default=..., description="ID of the nested block document"
)
name: str = Field(
default=..., description="The name that the reference is nested under"
)
@model_validator(mode="before")
def validate_parent_and_ref_are_different(cls, values):
return validate_parent_and_ref_diff(values)
| BlockDocumentReferenceCreate |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/agent.py | {
"start": 6871,
"end": 11176
} | class ____(BaseModel):
"""Base Multi Action Agent class."""
@property
def return_values(self) -> list[str]:
"""Return values of the agent."""
return ["output"]
def get_allowed_tools(self) -> list[str] | None:
"""Get allowed tools.
Returns:
Allowed tools.
"""
return None
@abstractmethod
def plan(
self,
intermediate_steps: list[tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> list[AgentAction] | AgentFinish:
"""Given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with the observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Actions specifying what tool to use.
"""
@abstractmethod
async def aplan(
self,
intermediate_steps: list[tuple[AgentAction, str]],
callbacks: Callbacks = None,
**kwargs: Any,
) -> list[AgentAction] | AgentFinish:
"""Async given input, decided what to do.
Args:
intermediate_steps: Steps the LLM has taken to date,
along with the observations.
callbacks: Callbacks to run.
**kwargs: User inputs.
Returns:
Actions specifying what tool to use.
"""
@property
@abstractmethod
def input_keys(self) -> list[str]:
"""Return the input keys."""
def return_stopped_response(
self,
early_stopping_method: str,
intermediate_steps: list[tuple[AgentAction, str]], # noqa: ARG002
**_: Any,
) -> AgentFinish:
"""Return response when agent has been stopped due to max iterations.
Args:
early_stopping_method: Method to use for early stopping.
intermediate_steps: Steps the LLM has taken to date,
along with observations.
Returns:
Agent finish object.
Raises:
ValueError: If `early_stopping_method` is not supported.
"""
if early_stopping_method == "force":
# `force` just returns a constant string
return AgentFinish({"output": "Agent stopped due to max iterations."}, "")
msg = f"Got unsupported early_stopping_method `{early_stopping_method}`"
raise ValueError(msg)
@property
def _agent_type(self) -> str:
"""Return Identifier of an agent type."""
raise NotImplementedError
@override
def dict(self, **kwargs: Any) -> builtins.dict:
"""Return dictionary representation of agent."""
_dict = super().model_dump()
with contextlib.suppress(NotImplementedError):
_dict["_type"] = str(self._agent_type)
return _dict
def save(self, file_path: Path | str) -> None:
"""Save the agent.
Args:
file_path: Path to file to save the agent to.
Raises:
NotImplementedError: If agent does not support saving.
ValueError: If `file_path` is not json or yaml.
Example:
```python
# If working with agent executor
agent.agent.save(file_path="path/agent.yaml")
```
"""
# Convert file to Path object.
save_path = Path(file_path) if isinstance(file_path, str) else file_path
# Fetch dictionary to save
agent_dict = self.dict()
if "_type" not in agent_dict:
msg = f"Agent {self} does not support saving."
raise NotImplementedError(msg)
directory_path = save_path.parent
directory_path.mkdir(parents=True, exist_ok=True)
if save_path.suffix == ".json":
with save_path.open("w") as f:
json.dump(agent_dict, f, indent=4)
elif save_path.suffix.endswith((".yaml", ".yml")):
with save_path.open("w") as f:
yaml.dump(agent_dict, f, default_flow_style=False)
else:
msg = f"{save_path} must be json or yaml"
raise ValueError(msg)
def tool_run_logging_kwargs(self) -> builtins.dict:
"""Return logging kwargs for tool run."""
return {}
| BaseMultiActionAgent |
python | ray-project__ray | python/ray/tune/integration/lightgbm.py | {
"start": 343,
"end": 2713
} | class ____(RayReportCallback):
"""Creates a callback that reports metrics and checkpoints model.
Args:
metrics: Metrics to report. If this is a list,
each item should be a metric key reported by LightGBM,
and it will be reported to Ray Train/Tune under the same name.
This can also be a dict of {<key-to-report>: <lightgbm-metric-key>},
which can be used to rename LightGBM default metrics.
filename: Customize the saved checkpoint file type by passing
a filename. Defaults to "model.txt".
frequency: How often to save checkpoints, in terms of iterations.
Defaults to 0 (no checkpoints are saved during training).
checkpoint_at_end: Whether or not to save a checkpoint at the end of training.
results_postprocessing_fn: An optional Callable that takes in
the metrics dict that will be reported (after it has been flattened)
and returns a modified dict.
Examples
--------
Reporting checkpoints and metrics to Ray Tune when running many
independent LightGBM trials (without data parallelism within a trial).
.. testcode::
:skipif: True
import lightgbm
from ray.tune.integration.lightgbm import TuneReportCheckpointCallback
config = {
# ...
"metric": ["binary_logloss", "binary_error"],
}
# Report only log loss to Tune after each validation epoch.
bst = lightgbm.train(
...,
callbacks=[
TuneReportCheckpointCallback(
metrics={"loss": "eval-binary_logloss"}, frequency=1
)
],
)
"""
@contextmanager
def _get_checkpoint(self, model: Booster) -> Optional[Checkpoint]:
with tempfile.TemporaryDirectory() as temp_checkpoint_dir:
model.save_model(Path(temp_checkpoint_dir, self._filename).as_posix())
yield Checkpoint.from_directory(temp_checkpoint_dir)
def _save_and_report_checkpoint(self, report_dict: Dict, model: Booster):
with self._get_checkpoint(model=model) as checkpoint:
ray.tune.report(report_dict, checkpoint=checkpoint)
def _report_metrics(self, report_dict: Dict):
ray.tune.report(report_dict)
@Deprecated
| TuneReportCheckpointCallback |
python | modin-project__modin | modin/pandas/iterator.py | {
"start": 992,
"end": 2287
} | class ____(Iterator):
"""
Iterator on partitioned data.
Parameters
----------
df : modin.pandas.DataFrame
The dataframe to iterate over.
axis : {0, 1}
Axis to iterate over.
func : callable
The function to get inner iterables from each partition.
"""
df: DataFrame
def __init__(self, df: DataFrame, axis, func):
self.df = df
self.axis = axis
self.index_iter = (
zip(
iter(slice(None) for _ in range(len(self.df.columns))),
range(len(self.df.columns)),
)
if axis
else zip(
range(len(self.df.index)),
iter(slice(None) for _ in range(len(self.df.index))),
)
)
self.func = func
def __iter__(self):
"""
Implement iterator interface.
Returns
-------
PartitionIterator
Iterator object.
"""
return self
def __next__(self):
"""
Implement iterator interface.
Returns
-------
PartitionIterator
Incremented iterator object.
"""
key = next(self.index_iter)
df = self.df.iloc[key]
return self.func(df)
| PartitionIterator |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/dag_sources.py | {
"start": 918,
"end": 1168
} | class ____(BaseModel):
"""DAG Source serializer for responses."""
content: str | None
dag_id: str
version_number: int | None
dag_display_name: str = Field(validation_alias=AliasPath("dag_model", "dag_display_name"))
| DAGSourceResponse |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 90773,
"end": 91056
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_AUDIO_FRAME_CLASSIFICATION_MAPPING
AutoModelForAudioFrameClassification = auto_class_update(
AutoModelForAudioFrameClassification, head_doc="audio frame (token) classification"
)
| AutoModelForAudioFrameClassification |
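A minimal sketch of the auto-class pattern the mapping above enables. The checkpoint name is a commonly cited speaker-diarization example and the silent input is a placeholder; neither is asserted by the record.

import torch
from transformers import AutoFeatureExtractor, AutoModelForAudioFrameClassification

checkpoint = "anton-l/wav2vec2-base-superb-sd"  # assumed example checkpoint
feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
model = AutoModelForAudioFrameClassification.from_pretrained(checkpoint)
# One second of silence at 16 kHz, just to exercise the forward pass.
inputs = feature_extractor([0.0] * 16000, sampling_rate=16000, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (batch, frames, num_labels)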
python | getsentry__sentry | tests/sentry/rules/actions/test_base.py | {
"start": 248,
"end": 1361
} | class ____(TestCase):
def setUp(self) -> None:
self.rule = self.create_project_rule(project=self.project)
self.notification_uuid = str(uuid4())
self.event_id = 456
self.rule_fire_history = RuleFireHistory.objects.create(
project=self.project,
rule=self.rule,
group=self.group,
event_id=self.event_id,
notification_uuid=self.notification_uuid,
)
def test_passes_in_rule_fire_history(self) -> None:
action = {
"id": SlackNotifyServiceAction.id,
}
instance = instantiate_action(
rule=self.rule, action=action, rule_fire_history=self.rule_fire_history
)
assert instance.rule_fire_history is not None
assert instance.rule_fire_history == self.rule_fire_history
def test_respects_empty_rule_fire_history(self) -> None:
action = {
"id": SlackNotifyServiceAction.id,
}
instance = instantiate_action(rule=self.rule, action=action)
assert instance.rule_fire_history is None
| TestInstantiateAction |
python | pytorch__pytorch | test/jit/test_peephole.py | {
"start": 311,
"end": 29697
} | class ____(JitTestCase):
def test_peephole_with_writes(self):
def test_write(x):
s = 0
s += x
s += x
return s
self.checkScript(test_write, (torch.ones(4, 4),))
def test_peephole_with_non_output_writes(self):
@torch.jit.ignore
def nomnom(x):
pass
def test_write(x):
t = torch.ones_like(x)
z = x.clone()
y = z + 0
z.add_(t)
# this makes sure z isn't blasted out of existence
# because it isn't returned or used in a side-effectful
# way
nomnom(z)
return y + y
a = torch.ones(4, 4)
self.checkScript(test_write, (a,))
def test_peephole_no_output_aliasing(self):
def test_peephole(x):
y = x + 0
return x, y
a = torch.ones(4, 4)
j = self.checkScript(test_peephole, (a,))
r1, r2 = j(a)
self.assertNotEqual(r1.data_ptr(), r2.data_ptr())
def test_peephole(self):
a = torch.tensor([0.4])
b = torch.tensor([0.7])
c = torch.tensor([0], dtype=torch.int32)
def f(x, y):
return x.type_as(y)
tf = torch.jit.trace(f, (a, b))
FileCheck().check("type_as").run(str(tf.graph))
self.run_pass("peephole", tf.graph)
FileCheck().check_not("type_as").run(str(tf.graph))
tf2 = torch.jit.trace(f, (a, c))
s = str(tf2.graph)
self.run_pass("peephole", tf2.graph)
self.assertEqual(s, str(s))
def test_peephole_dynamic(self):
def f(x, y):
return x.type_as(y)
fn = torch.jit.script(f)
s = str(fn.graph)
torch._C._jit_pass_peephole(fn.graph)
self.assertEqual(s, str(fn.graph))
def test_peephole_list_ops(self):
@torch.jit.script
def foo(x, y, z):
return len([x, y, z])
self.run_pass("peephole", foo.graph)
FileCheck().check("value=3").check_next("return").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
for _ in range(len(x)):
li.append(x)
return len([x, y, z])
self.run_pass("peephole", foo.graph)
FileCheck().check_not("aten::len").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
return li[1], li[-2]
FileCheck().check("aten::__getitem__").run(foo.graph)
self.run_pass("peephole", foo.graph)
FileCheck().check_not("aten::__getitem__").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
return li[-7]
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::__getitem__").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
for _ in range(len(x)):
li.append(x)
return li[-2]
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::__getitem__").run(foo.graph)
@unittest.skipIf(not RUN_CUDA, "cpp tests require CUDA")
def test_peephole_cuda(self):
a = torch.tensor([0.4], device="cpu")
b = torch.tensor([0.7], device="cuda")
c = torch.tensor([0.7], device="cuda")
def f(x, y):
return x.type_as(y)
trace = torch.jit.trace(f, (a, c))
s = str(trace.graph)
self.run_pass("peephole", trace.graph)
self.assertEqual(s, str(trace.graph))
trace = torch.jit.trace(f, (b, c))
self.run_pass("peephole", trace.graph)
self.run_pass("dce", trace.graph)
FileCheck().check_not("type_as").run(str(trace.graph))
@_inline_everything
def test_peephole_type_refinements(self):
def refine(x):
# type: (Optional[Tensor]) -> Tensor
return x if x is not None else torch.tensor(3)
@torch.jit.script
def test():
return refine(torch.tensor(4))
FileCheck().check("prim::unchecked_cast").run(test.graph)
self.run_pass("peephole", test.graph)
FileCheck().check_not("prim::unchecked_cast").run(test.graph)
# refinement not optimized out
def is_int_tensor(x):
scalar = x.item()
if isinstance(scalar, int):
return scalar + 3
else:
return 8
self.checkScript(is_int_tensor, (torch.tensor(2),))
self.checkScript(is_int_tensor, (torch.tensor(2.5),))
graph = torch.jit.script(is_int_tensor).graph
self.run_pass("peephole", graph)
FileCheck().check("prim::unchecked_cast").run(graph)
def test_short_circuit_optimization(self):
@torch.jit.script
def const_expressions(x):
# type: (int) -> Tuple[bool, bool]
return x == 1 and False, x == 1 or True
self.run_pass("constant_propagation", const_expressions.graph)
FileCheck().check_not("prim::If").check_not("aten::eq").run(
const_expressions.graph
)
self.assertEqual(const_expressions(1), (False, True))
@torch.jit.script
def redundant_expressions(x):
# type: (int) -> Tuple[bool, bool]
return x == 1 and True, x == 1 or False
self.run_pass("peephole", redundant_expressions.graph)
self.assertEqual(redundant_expressions(1), (True, True))
self.assertEqual(redundant_expressions(0), (False, False))
# and True / or False are removed from graph
FileCheck().check("aten::eq").check_not("prim::If").run(
redundant_expressions.graph
)
def test_conv_dim_folding(self):
modules = [nn.Conv1d, nn.Conv2d, nn.Conv3d]
for mod in modules:
class ConvDim(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = mod(3, 32, kernel_size=3, stride=2, bias=False)
def forward(self, x):
x = self.conv(x)
return x.dim()
conv_dim = torch.jit.script(ConvDim())
self.run_pass("inline", conv_dim.graph)
self.run_pass("peephole", conv_dim.graph)
FileCheck().check_not("conv").check_not("dim").run(conv_dim.graph)
class ConvDimMutate(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv = mod(3, 32, kernel_size=3, stride=2, bias=False)
def forward(self, x):
x = self.conv(x)
x.resize_([4, 4])
return x.dim()
conv_dim = torch.jit.script(ConvDimMutate())
self.run_pass("inline", conv_dim.graph)
self.run_pass("peephole", conv_dim.graph)
FileCheck().check("conv").check("dim").run(conv_dim.graph)
def test_normalized_rsub(self):
a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5, 6])
def convertible_rsub(x, y):
return (x - y), torch.rsub(y, x)
self.checkScript(convertible_rsub, (a, b))
op_graph = torch.jit.script(convertible_rsub).graph
FileCheck().check_count("aten::sub", 2, exactly=True).run(op_graph)
FileCheck().check_count("aten::rsub", 0, exactly=True).run(op_graph)
def test_normalized_is_op(self):
def convertible_is_op(x: bool, y: bool):
return x is True, False is x, x is y
self.checkScript(convertible_is_op, (True, False))
op_graph = torch.jit.script(convertible_is_op).graph
FileCheck().check_count("aten::eq", 3, exactly=True).run(op_graph)
FileCheck().check_count("aten::__is__", 0, exactly=True).run(op_graph)
def test_normalized_isnot_op(self):
def convertible_isnot_op(x: bool, y: bool):
return x is not True, False is not x, x is not y
self.checkScript(convertible_isnot_op, (True, False))
op_graph = torch.jit.script(convertible_isnot_op).graph
FileCheck().check_count("aten::ne", 3, exactly=True).run(op_graph)
FileCheck().check_count("aten::__isnot__", 0, exactly=True).run(op_graph)
def test_peephole_list_len(self):
def run_peephole_and_check_const_value(graph, const_string):
torch._C._jit_pass_peephole_list_idioms(graph, refine_list_len=True)
self.run_pass("constant_propagation", graph)
FileCheck().check(const_string).check_next("return").run(graph)
def gen_li(inp_len: int):
return [0 for i in range(inp_len)]
@torch.jit.script
def foo(x: List[int], y: List[int]):
if len(x) != 4 or len(y) != 5:
raise Exception("") # noqa: TRY002
return len(x) + len(y)
run_peephole_and_check_const_value(foo.graph, "value=9")
self.assertEqual(foo(gen_li(4), gen_li(5)), 9)
with self.assertRaises(Exception):
foo(2, 4)
@torch.jit.script
def foo(x: List[int], y: List[int]):
if len(x) == 4 and len(y) == 5:
pass
else:
raise Exception("hi") # noqa: TRY002
return len(x) + len(y)
run_peephole_and_check_const_value(foo.graph, "value=9")
self.assertEqual(foo(gen_li(4), gen_li(5)), 9)
with self.assertRaises(Exception):
foo(2, 4)
@torch.jit.script
def foo(x: List[int], y: List[int], z: List[int]):
if len(x) != 4:
raise Exception("..") # noqa: TRY002
else:
if len(y) != 8:
raise Exception("...") # noqa: TRY002
else:
if len(z) == 3:
pass
else:
raise Exception("...") # noqa: TRY002
return len(x) + len(y) * len(z)
run_peephole_and_check_const_value(foo.graph, "value=28")
self.assertEqual(foo(gen_li(4), gen_li(8), gen_li(3)), 28)
with self.assertRaises(Exception):
foo(1, 2, 3)
# refinement should persist in second len(x) call
@torch.jit.script
def foo(x: List[int], cond: bool):
if len(x) == 4:
if cond:
return len(x)
return 4
return 4
run_peephole_and_check_const_value(foo.graph, "value=4")
def test_const_tuple_output(graph, const_inputs):
tup = graph.findNode("prim::TupleConstruct")
for i, elem in enumerate(tup.inputs()):
if i in const_inputs:
self.assertIsNotNone(elem.toIValue())
else:
self.assertIsNone(elem.toIValue())
# testing combinations of x1 : {True, False} x
# {then/else branch} x assert {True/False}
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) == 5:
x1 = True
else:
x1 = len(b) != 4
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# we can only infer len(b) == 4 here
test_const_tuple_output(foo.graph, [1])
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) == 5:
x1 = False
else:
x1 = len(b) != 4
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# can't infer anything
test_const_tuple_output(foo.graph, [])
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) == 5:
x1 = True
else:
x1 = len(b) == 4
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# we can't infer anything, only len(b) != 4
test_const_tuple_output(foo.graph, [])
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) == 5:
x1 = True
else:
x1 = len(b) != 4
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# can infer len(b) == 4
test_const_tuple_output(foo.graph, [1])
# swap branches
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) != 5:
x1 = len(b) != 4
else:
x1 = True
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# can infer len(b) == 4
test_const_tuple_output(foo.graph, [1])
# use __not__
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) != 5:
x1 = len(b) != 4
else:
x1 = True
assert not x1
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# can infer len(b) == 4
test_const_tuple_output(foo.graph, [1])
# Test unsuccessful optimizations
@torch.jit.script
def foo(x: List[int]):
assert len(x) == 4
x.append(3)
return len(x)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
self.run_pass("constant_propagation", foo.graph)
FileCheck().check_count("aten::len", 2).run(foo.graph)
@torch.jit.script
def foo(x: List[int], y: List[int]):
assert len(x) == 4 or len(y) == 5
return len(x) + len(y)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
self.run_pass("constant_propagation", foo.graph)
FileCheck().check_count("aten::len", 4).run(foo.graph)
def test_integer_refinement(self):
def run_peephole_and_check_const_value(graph, const_string):
self.run_pass("refine_integer_values", graph)
self.run_pass("constant_propagation", graph)
self.run_pass("dce", graph)
FileCheck().check(const_string).check_next("return").run(graph)
@torch.jit.script
def foo(x: int, y: int):
if x != 4 or y != 5:
raise Exception("") # noqa: TRY002
return x + y
graph = foo.graph
self.run_pass("refine_integer_values", graph)
self.run_pass("constant_propagation", graph)
self.run_pass("dce", graph)
run_peephole_and_check_const_value(foo.graph, "value=9")
self.assertEqual(foo(4, 5), 9)
with self.assertRaises(Exception):
foo(2, 4)
@torch.jit.script
def foo(x: int, y: int):
if x == 4 and y == 5:
pass
else:
raise Exception("hi") # noqa: TRY002
return x + y
run_peephole_and_check_const_value(foo.graph, "value=9")
self.assertEqual(foo(4, 5), 9)
with self.assertRaises(Exception):
foo(2, 4)
@torch.jit.script
def foo(x: int, y: int, z: int):
if x != 4:
raise Exception("..") # noqa: TRY002
else:
if y != 8:
raise Exception("...") # noqa: TRY002
else:
if z == 3:
pass
else:
raise Exception("...") # noqa: TRY002
return x + y * z
run_peephole_and_check_const_value(foo.graph, "value=28")
self.assertEqual(foo(4, 8, 3), 28)
with self.assertRaises(Exception):
foo(1, 2, 3)
# refinement should persist in second len(x) call
@torch.jit.script
def foo(x: int, cond: bool):
if x == 4:
if cond:
return x
return 4
return 4
run_peephole_and_check_const_value(foo.graph, "value=4")
@torch.jit.script
def foo(x: int, y: int):
assert x == 4 or y == 5
return x + y
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
self.run_pass("constant_propagation", foo.graph)
FileCheck().check("aten::add").run(foo.graph)
def test_optimize_out_comparison_same_value(self):
def foo(x: int):
return x == x, x != x
def foo2(x: List[int]):
return x == x, x != x
for func, inp in zip([foo, foo2], [1, [2, 3]]):
func_s = torch.jit.script(func)
self.run_pass("peephole", func_s.graph)
FileCheck().check_not("aten::eq").check_not("aten::neq").run(func_s.graph)
self.assertEqual(func(inp), func_s(inp))
def test_peephole_add_zero(self):
@torch.jit.script
def foo(x: int):
return x + 0, 0 + x
self.run_pass("peephole", foo.graph)
FileCheck().check_not("aten::add")
self.assertEqual(foo(3), (3, 3))
def test_noop_peephole(self):
# test unsuccessful
def foo1(x):
return x + 0
def foo2():
x = torch.zeros([2, 2])
x.sub_(3)
return x + 0
def foo3():
x = torch.zeros([2, 2])
return x, x + 0
def foo4():
x = torch.zeros([2, 2])
return x + 0.0
funcs = foo1, foo2, foo3, foo4
inps = (torch.ones([2]),), (), (), ()
for func, inp in zip(funcs, inps):
foo_s = torch.jit.script(func)
self.run_pass("peephole", foo_s.graph)
FileCheck().check_count("aten::add", 1, exactly=True).run(foo_s.graph)
self.assertEqual(func(*inp), foo_s(*inp))
# successful
def func(x):
return (x + 0) * 1 - 5
func_s = torch.jit.script(func)
self.run_pass("peephole", func_s.graph)
# bail on modified value first
FileCheck().check_not("aten::add").check("aten::mul").run(func_s.graph)
# second run it should succeed
self.run_pass("peephole", func_s.graph)
FileCheck().check_not("aten::add").check_not("aten::mul").run(func_s.graph)
self.assertEqual(func(torch.ones([2, 2])), func_s(torch.ones([2, 2])))
def func(x):
return (x + 0.0) - 5
func_s = torch.jit.script(func)
inp = next(func_s.graph.inputs())
inp.setType(torch._C.TensorType.create_from_tensor(torch.rand([2, 2])))
torch._C._jit_pass_peephole(func_s.graph, disable_shape_peepholes=True)
FileCheck().check("aten::add").run(func_s.graph)
torch._C._jit_pass_peephole(func_s.graph, disable_shape_peepholes=False)
FileCheck().check_not("aten::add").run(func_s.graph)
def test_refine_integer_values(self):
@torch.jit.script
def foo(x: int):
y = 1
if x == 1:
return y
else:
return x
self.run_pass("refine_integer_values", foo.graph)
self.run_pass("constant_propagation", foo.graph)
self.run_pass("dce", foo.graph)
FileCheck().check("graph").check_next("return").run(foo.graph)
self.assertEqual(foo(2), 2)
self.assertEqual(foo(1), 1)
def test_peephole_len_list(self):
@torch.jit.script
def foo(x):
return len(x.size())
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::len").run(foo.graph)
inputs = list(foo.graph.inputs())
inputs[0].setType(inputs[0].type().with_sizes([None, None]))
self.run_pass("peephole", foo.graph)
FileCheck().check_not("aten::len").run(foo.graph)
self.assertEqual(2, foo(torch.rand([3, 1])))
@torch.jit.script
def foo(x):
li = x.size()
li.append(4)
return len(li)
inputs = list(foo.graph.inputs())
inputs[0].setType(inputs[0].type().with_sizes([None, None]))
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::len").run(foo.graph)
self.assertEqual(3, foo(torch.rand([3, 1])))
def test_peephole_optional_refine(self):
@torch.jit.script
def foo(z: int, z2: int, cond: bool):
if cond:
return z
else:
return z2
out = next(foo.graph.findNode("prim::If").outputs())
out.setType(torch._C.OptionalType(torch._C.IntType.get()))
self.run_pass("peephole", foo.graph)
FileCheck().check_not("int?").run(foo.graph)
def test_peephole_int(self):
@torch.jit.script
def foo(x):
# type: (number)
return int(x)
FileCheck().check("aten::Int").run(foo.graph)
next(foo.graph.inputs()).setType(torch._C.IntType.get())
self.run_pass("peephole", foo.graph)
FileCheck().check_not("aten::Int").run(foo.graph)
def test_peephole_arith(self):
@torch.jit.script
def foo(input0: int, input1: int, input2: int, input3: int):
_1 = torch.add(input1, 2)
_3 = torch.add(input3, 2)
_5 = torch.add(1, torch.sub(_1, 3) // 1)
_6 = torch.add(1 * torch.sub(_3, 3) // 1, 1) / 1
return [_5, int(_6)]
FileCheck().check("aten::add").check("aten::sub").check("aten::mul").check(
"aten::floordiv"
).check("aten::div").run(foo.graph)
self.run_pass("peephole", foo.graph)
FileCheck().check("graph").check("):").check_next("ListConstruct").check_next(
"return"
).run(foo.graph)
self.assertEqual(foo(0, 1, 2, 3), [1, 3])
def test_peephole_dict_getitem_simple(self):
@torch.jit.script
def foo(a: int, b: int):
d = {0: a, 1: b}
x = d[1]
y = d[0]
return x, y
self.run_pass("peephole", foo.graph)
FileCheck().check_not("DictConstruct").check_not("__getitem__").run(foo.graph)
self.assertEqual(foo(0, 1), (1, 0))
@torch.jit.script
def foo(a: int, b: int):
d = {"0": a, "1": b}
x = d["1"]
y = d["0"]
return x, y
self.run_pass("peephole", foo.graph)
FileCheck().check_not("DictConstruct").check_not("__getitem__").run(foo.graph)
self.assertEqual(foo(0, 1), (1, 0))
@torch.jit.script
def foo(a: int, b: int):
d = {0.0: a, 1.0: b}
x = d[1.0]
y = d[0.0]
return x, y
self.run_pass("peephole", foo.graph)
FileCheck().check_not("DictConstruct").check_not("__getitem__").run(foo.graph)
self.assertEqual(foo(0, 1), (1, 0))
def test_peephole_dict_getitem_no_optimization_missing_key(self):
@torch.jit.script
def foo():
d = {0: 1}
return d[2]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
def test_peephole_dict_getitem_no_optimization_get_input_arg(self):
# Here we don't know if the input arg is in the dict, so we can't
# make the optimization.
@torch.jit.script
def foo(a: int):
d = {0: 1}
return d[a]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
self.assertEqual(foo(0), 1)
def test_peephole_dict_getitem_no_optimization_dict_modified(self):
@torch.jit.script
def foo():
d = {0: 1}
d[0] = 2
return d[0]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
self.assertEqual(foo(), 2)
def test_peephole_dict_getitem_no_optimization_overlapping_keys(self):
@torch.jit.script
def foo():
d = {0: 1, 0: 2} # noqa: F601
return d[0]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
def test_peephole_dict_getitem_no_optimization_keys_might_overlap(self):
@torch.jit.script
def foo(x: int):
d = {0: 1, x: 2}
return d[x]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
def test_peephole_dict_getitem_no_optimization_unsupported_type(self):
@torch.jit.script
def foo():
a = torch.rand((2, 2))
d = {a: 1}
return d[a]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
self.assertEqual(foo(), 1)
def test_peephole_dict_len(self):
@torch.jit.script
def foo():
d = {0: 1, 1: 2}
return len(d)
self.run_pass("peephole", foo.graph)
FileCheck().check_not("DictConstruct").check_not("len").run(foo.graph)
self.assertEqual(foo(), 2)
def test_peephole_dict_len_no_optimization_overlapping_keys(self):
@torch.jit.script
def foo():
d = {0: 1, 0: 2} # noqa: F601
return len(d)
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("len").run(foo.graph)
self.assertEqual(foo(), 1)
def test_peephole_dict_len_no_optimization_keys_might_overlap(self):
@torch.jit.script
def foo(x: int):
d = {0: 1, x: 2}
return len(d)
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("len").run(foo.graph)
def test_peephole_dict_len_no_optimization_unsupported_type(self):
@torch.jit.script
def foo():
a = torch.rand((2, 2))
d = {a: 1}
return len(d)
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("len").run(foo.graph)
self.assertEqual(foo(), 1)
def test_peephole_slice_all_three_args(self):
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][-5:6:2]
graph = torch.jit.script(foo).graph
self.run_pass("peephole", graph)
FileCheck().check_not("aten::slice").run(graph)
self.checkScript(foo, (3,))
def test_peephole_slice_one_empty_arg(self):
def check_helper(fn: Callable[[int], None]) -> None:
graph = torch.jit.script(fn).graph
self.run_pass("peephole", graph)
FileCheck().check_not("aten::slice").run(graph)
self.checkScript(fn, (3,))
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][1::2]
check_helper(foo)
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][:5:3]
check_helper(foo)
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][0:4]
check_helper(foo)
def test_peephole_slice_two_empty_args(self):
def check_helper(fn: Callable[[int], None]) -> None:
graph = torch.jit.script(fn).graph
self.run_pass("peephole", graph)
FileCheck().check_not("aten::slice").run(graph)
self.checkScript(fn, (3,))
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][::2]
check_helper(foo)
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][:5]
check_helper(foo)
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][1:]
check_helper(foo)
def test_peephole_slice_optimization_not_applied_list_modified(self):
@torch.jit.script
def foo():
li = [1, 2, 3, 4, 5, 6, 7]
li[0] = 0
return li[2:5]
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::slice").run(foo.graph)
def test_peephole_slice_optimization_not_applied_non_const_args(self):
@torch.jit.script
def foo(x: int, y: int):
li = [1, 2, 3, 4, 5, 6, 7]
return li[x:y]
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::slice").run(foo.graph)
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestPeephole |
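A standalone sketch of the pattern these tests exercise repeatedly: script a function, invoke the peephole pass on its graph through torch._C (the same entry point used above), and confirm the redundant comparison is folded away, mirroring test_optimize_out_comparison_same_value.

import torch
from torch.testing import FileCheck

@torch.jit.script
def same(x: int):
    return x == x

FileCheck().check("aten::eq").run(same.graph)      # comparison present before the pass
torch._C._jit_pass_peephole(same.graph)
FileCheck().check_not("aten::eq").run(same.graph)  # folded to a constant by the peephole pass
print(same(7))  # still True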
python | pallets__werkzeug | src/werkzeug/_reloader.py | {
"start": 9837,
"end": 15100
} | class ____(ReloaderLoop):
def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
from watchdog.events import EVENT_TYPE_CLOSED
from watchdog.events import EVENT_TYPE_CREATED
from watchdog.events import EVENT_TYPE_DELETED
from watchdog.events import EVENT_TYPE_MODIFIED
from watchdog.events import EVENT_TYPE_MOVED
from watchdog.events import FileModifiedEvent
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer
super().__init__(*args, **kwargs)
trigger_reload = self.trigger_reload
class EventHandler(PatternMatchingEventHandler):
def on_any_event(self, event: FileModifiedEvent) -> None: # type: ignore[override]
if event.event_type not in {
EVENT_TYPE_CLOSED,
EVENT_TYPE_CREATED,
EVENT_TYPE_DELETED,
EVENT_TYPE_MODIFIED,
EVENT_TYPE_MOVED,
}:
# skip events that don't involve changes to the file
return
trigger_reload(event.src_path)
reloader_name = Observer.__name__.lower() # type: ignore[attr-defined]
if reloader_name.endswith("observer"):
reloader_name = reloader_name[:-8]
self.name = f"watchdog ({reloader_name})"
self.observer = Observer()
extra_patterns = (p for p in self.extra_files if not os.path.isdir(p))
self.event_handler = EventHandler(
patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
ignore_patterns=[
*[f"*/{d}/*" for d in _ignore_common_dirs],
*self.exclude_patterns,
],
)
self.should_reload = threading.Event()
def trigger_reload(self, filename: str | bytes) -> None:
# This is called inside an event handler, which means throwing
# SystemExit has no effect.
# https://github.com/gorakhargosh/watchdog/issues/294
self.should_reload.set()
self.log_reload(filename)
def __enter__(self) -> ReloaderLoop:
self.watches: dict[str, t.Any] = {}
self.observer.start()
return super().__enter__()
def __exit__(self, exc_type, exc_val, exc_tb): # type: ignore
self.observer.stop()
self.observer.join()
def run(self) -> None:
while not self.should_reload.wait(timeout=self.interval):
self.run_step()
sys.exit(3)
def run_step(self) -> None:
to_delete = set(self.watches)
for path in _find_watchdog_paths(self.extra_files, self.exclude_patterns):
if path not in self.watches:
try:
self.watches[path] = self.observer.schedule(
self.event_handler, path, recursive=True
)
except OSError:
# Clear this path from list of watches. We don't want
# the same error message showing again in the next
# iteration.
self.watches[path] = None
to_delete.discard(path)
for path in to_delete:
watch = self.watches.pop(path, None)
if watch is not None:
self.observer.unschedule(watch)
reloader_loops: dict[str, type[ReloaderLoop]] = {
"stat": StatReloaderLoop,
"watchdog": WatchdogReloaderLoop,
}
try:
__import__("watchdog.observers")
except ImportError:
reloader_loops["auto"] = reloader_loops["stat"]
else:
reloader_loops["auto"] = reloader_loops["watchdog"]
def ensure_echo_on() -> None:
"""Ensure that echo mode is enabled. Some tools such as PDB disable
it which causes usability issues after a reload."""
# tcgetattr will fail if stdin isn't a tty
if sys.stdin is None or not sys.stdin.isatty():
return
try:
import termios
except ImportError:
return
attributes = termios.tcgetattr(sys.stdin)
if not attributes[3] & termios.ECHO:
attributes[3] |= termios.ECHO
termios.tcsetattr(sys.stdin, termios.TCSANOW, attributes)
def run_with_reloader(
main_func: t.Callable[[], None],
extra_files: t.Iterable[str] | None = None,
exclude_patterns: t.Iterable[str] | None = None,
interval: int | float = 1,
reloader_type: str = "auto",
) -> None:
"""Run the given function in an independent Python interpreter."""
import signal
signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
reloader = reloader_loops[reloader_type](
extra_files=extra_files, exclude_patterns=exclude_patterns, interval=interval
)
try:
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
ensure_echo_on()
t = threading.Thread(target=main_func, args=())
t.daemon = True
# Enter the reloader to set up initial state, then start
# the app thread and reloader update loop.
with reloader:
t.start()
reloader.run()
else:
sys.exit(reloader.restart_with_reloader())
except KeyboardInterrupt:
pass
| WatchdogReloaderLoop |
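A sketch of driving run_with_reloader from the module above; the served callable and the watched file are placeholders, and the import targets a private module, so treat the exact path as an assumption.

import time
from werkzeug._reloader import run_with_reloader

def serve_forever() -> None:
    # Stand-in for an application main loop; replace with the real server entry point.
    while True:
        time.sleep(1)

if __name__ == "__main__":
    run_with_reloader(
        serve_forever,
        extra_files=["config.toml"],  # example extra file to watch for changes
        reloader_type="auto",         # resolves to watchdog when installed, else stat
    )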
python | getsentry__sentry | tests/sentry/sentry_apps/token_exchange/test_refresher.py | {
"start": 1059,
"end": 9548
} | class ____(TestCase):
def setUp(self) -> None:
self.install = self.create_sentry_app_installation()
self.client_id = self.install.sentry_app.application.client_id
self.user = self.install.sentry_app.proxy_user
self.token = self.install.api_token
self.refresher = Refresher(
install=self.install,
client_id=self.client_id,
refresh_token=self.token.refresh_token,
user=self.user,
)
def test_happy_path(self) -> None:
assert self.refresher.run()
def test_adds_token_to_installation(self) -> None:
token = self.refresher.run()
assert SentryAppInstallation.objects.get(id=self.install.id).api_token == token
def test_deletes_refreshed_token(self) -> None:
self.refresher.run()
assert not ApiToken.objects.filter(id=self.token.id).exists()
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_validates_token_belongs_to_sentry_app(self, mock_record: MagicMock) -> None:
new_install = self.create_sentry_app_installation()
refresh_token = new_install.api_token.refresh_token
assert refresh_token is not None
self.refresher.refresh_token = refresh_token
with pytest.raises(SentryAppIntegratorError) as e:
self.refresher.run()
assert e.value.message == "Token does not belong to the application"
assert e.value.webhook_context == {
"client_id_installation_uuid": self.install.uuid,
"client_id": self.client_id,
"token_installation": new_install.uuid,
}
assert e.value.public_context == {}
# SLO assertions
assert_halt_metric(
mock_record=mock_record,
error_msg=SentryAppIntegratorError(message="Token does not belong to the application"),
)
# APP_CREATE (success) -> WEBHOOK_UPDATE (success) -> TOKEN_EXCHANGE (success) -> REFRESHER (halt)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=4
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=1
)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_validates_token_belongs_to_sentry_app_random_token(
self, mock_record: MagicMock
) -> None:
new_application = ApiApplication.objects.create(owner_id=self.create_user().id)
refresh_token = ApiToken.objects.create(
user=self.user,
application=new_application,
).refresh_token
assert refresh_token is not None
self.refresher.refresh_token = refresh_token
with pytest.raises(SentryAppIntegratorError) as e:
self.refresher.run()
assert e.value.message == "Token does not belong to the application"
assert e.value.webhook_context == {
"client_id_installation_uuid": self.install.uuid,
"client_id": self.client_id,
}
assert e.value.public_context == {}
# SLO assertions
assert_halt_metric(
mock_record=mock_record,
error_msg=SentryAppIntegratorError(message="Token does not belong to the application"),
)
# REFRESHER (halt)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=1
)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@patch("sentry.models.ApiToken.objects.get", side_effect=ApiToken.DoesNotExist)
def test_token_must_exist(self, _: MagicMock, mock_record: MagicMock) -> None:
with pytest.raises(SentryAppIntegratorError) as e:
self.refresher.run()
assert e.value.message == "Given refresh token does not exist"
assert e.value.webhook_context == {
"installation_uuid": self.install.uuid,
}
assert e.value.public_context == {}
# SLO assertions
assert_halt_metric(
mock_record=mock_record,
error_msg=SentryAppIntegratorError(message="Given refresh token does not exist"),
)
# REFRESHER (halt)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.HALTED, outcome_count=1
)
@patch("sentry.sentry_apps.token_exchange.refresher.Refresher._validate")
@patch("sentry.models.ApiApplication.objects.get", side_effect=ApiApplication.DoesNotExist)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_api_application_must_exist(
self, mock_record: MagicMock, _: MagicMock, mock_validate: MagicMock
) -> None:
with pytest.raises(SentryAppSentryError) as e:
self.refresher.run()
assert e.value.message == "Could not find matching Application for given client_id"
assert e.value.webhook_context == {
"client_id": self.client_id,
"installation_uuid": self.install.uuid,
}
assert e.value.public_context == {}
# SLO assertions
assert_failure_metric(
mock_record=mock_record,
error_msg=SentryAppSentryError(
message="Could not find matching Application for given client_id"
),
)
# REFRESHER (failure)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.FAILURE, outcome_count=1
)
@patch("sentry.sentry_apps.token_exchange.refresher.Refresher._validate")
@patch("sentry.models.ApiApplication.sentry_app", new_callable=PropertyMock)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_sentry_app_must_exist(
self, mock_record: MagicMock, sentry_app: MagicMock, validate: MagicMock
) -> None:
sentry_app.side_effect = SentryApp.DoesNotExist()
with pytest.raises(SentryAppSentryError) as e:
self.refresher.run()
assert e.value.message == "Sentry App does not exist on attached Application"
assert e.value.webhook_context == {
"application_id": self.install.sentry_app.application.id,
"installation_uuid": self.install.uuid,
"client_id": self.client_id[:SENSITIVE_CHARACTER_LIMIT],
}
assert e.value.public_context == {}
# SLO assertions
assert_failure_metric(
mock_record=mock_record,
error_msg=SentryAppSentryError(
message="Sentry App does not exist on attached Application"
),
)
# REFRESHER (failure)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.FAILURE, outcome_count=1
)
@patch("sentry.analytics.record")
def test_records_analytics(self, record: MagicMock) -> None:
Refresher(
install=self.install,
client_id=self.client_id,
refresh_token=self.token.refresh_token,
user=self.user,
).run()
assert_last_analytics_event(
record,
SentryAppTokenExchangedEvent(
sentry_app_installation_id=self.install.id,
exchange_type="refresh",
),
)
def test_returns_token_on_outbox_error(self) -> None:
# Mock the transaction to raise OperationalError after token creation
with patch("sentry.hybridcloud.models.outbox.OutboxBase.process_coalesced") as mock_process:
mock_process.side_effect = OperationalError("Outbox issue")
# The refresher should return the token even though there was an error
token = self.refresher.run()
assert SentryAppInstallation.objects.get(id=self.install.id).api_token == token
| TestRefresher |