language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | PyCQA__pylint | tests/functional/i/invalid/invalid_class_object.py | {
"start": 688,
"end": 1726
} | class ____:
"""See https://github.com/pylint-dev/pylint/issues/7467"""
def class_defining_function_good(self):
self.__class__, myvar = AnotherClass, "myvalue"
print(myvar)
def class_defining_function_bad(self):
self.__class__, myvar = 1, "myvalue" # [invalid-class-object]
print(myvar)
def class_defining_function_good_inverted(self):
myvar, self.__class__ = "myvalue", AnotherClass
print(myvar)
def class_defining_function_bad_inverted(self):
myvar, self.__class__ = "myvalue", 1 # [invalid-class-object]
print(myvar)
def class_defining_function_complex_bad(self):
myvar, self.__class__, other = ( # [invalid-class-object]
"myvalue",
1,
"othervalue",
)
print(myvar, other)
def class_defining_function_complex_good(self):
myvar, self.__class__, other = (
"myvalue",
str,
"othervalue",
)
print(myvar, other)
| Pylint7429Good |
python | PrefectHQ__prefect | src/prefect/server/schemas/core.py | {
"start": 40541,
"end": 41182
} | class ____(ORMBaseModel):
"""An ORM representation of a worker"""
name: str = Field(description="The name of the worker.")
work_pool_id: UUID = Field(
description="The work pool with which the queue is associated."
)
last_heartbeat_time: Optional[datetime.datetime] = Field(
None, description="The last time the worker process sent a heartbeat."
)
heartbeat_interval_seconds: Optional[int] = Field(
default=None,
description=(
"The number of seconds to expect between heartbeats sent by the worker."
),
)
Flow.model_rebuild()
FlowRun.model_rebuild()
| Worker |
python | Pylons__pyramid | tests/test_testing.py | {
"start": 14880,
"end": 15030
} | class ____(Test_setUp):
def _callFUT(self, *arg, **kw):
from pyramid.testing import cleanUp
return cleanUp(*arg, **kw)
| Test_cleanUp |
python | walkccc__LeetCode | solutions/547. Friend Circles/547.py | {
"start": 557,
"end": 823
} | class ____:
def findCircleNum(self, isConnected: list[list[int]]) -> int:
n = len(isConnected)
uf = UnionFind(n)
for i in range(n):
for j in range(i, n):
if isConnected[i][j] == 1:
uf.unionByRank(i, j)
return uf.count
| Solution |
python | django__django | tests/indexes/tests.py | {
"start": 12691,
"end": 14724
} | class ____(TransactionTestCase):
available_apps = ["indexes"]
def test_no_index_for_foreignkey(self):
"""
MySQL on InnoDB already creates indexes automatically for foreign keys.
(#14180). An index should be created if db_constraint=False (#26171).
"""
with connection.cursor() as cursor:
storage = connection.introspection.get_storage_engine(
cursor,
ArticleTranslation._meta.db_table,
)
if storage != "InnoDB":
self.skipTest("This test only applies to the InnoDB storage engine")
index_sql = [
str(statement)
for statement in connection.schema_editor()._model_indexes_sql(
ArticleTranslation
)
]
self.assertEqual(
index_sql,
[
"CREATE INDEX "
"`indexes_articletranslation_article_no_constraint_id_d6c0806b` "
"ON `indexes_articletranslation` (`article_no_constraint_id`)"
],
)
# The index also shouldn't be created if the ForeignKey is added after
# the model was created.
field_created = False
try:
with connection.schema_editor() as editor:
new_field = ForeignKey(Article, CASCADE)
new_field.set_attributes_from_name("new_foreign_key")
editor.add_field(ArticleTranslation, new_field)
field_created = True
# No deferred SQL. The FK constraint is included in the
# statement to add the field.
self.assertFalse(editor.deferred_sql)
finally:
if field_created:
with connection.schema_editor() as editor:
editor.remove_field(ArticleTranslation, new_field)
@skipUnlessDBFeature("supports_partial_indexes")
# SQLite doesn't support timezone-aware datetimes when USE_TZ is False.
@override_settings(USE_TZ=True)
| SchemaIndexesMySQLTests |
python | scipy__scipy | scipy/sparse/tests/test_base.py | {
"start": 6231,
"end": 6849
} | class ____:
# Custom type to test binary operations on sparse matrices.
def __add__(self, mat):
return "matrix on the right"
def __mul__(self, mat):
return "matrix on the right"
def __sub__(self, mat):
return "matrix on the right"
def __radd__(self, mat):
return "matrix on the left"
def __rmul__(self, mat):
return "matrix on the left"
def __rsub__(self, mat):
return "matrix on the left"
def __matmul__(self, mat):
return "matrix on the right"
def __rmatmul__(self, mat):
return "matrix on the left"
| BinopTester |
python | pallets__werkzeug | src/werkzeug/routing/map.py | {
"start": 1207,
"end": 14732
} | class ____:
"""The map class stores all the URL rules and some configuration
parameters. Some of the configuration values are only stored on the
`Map` instance since those affect all rules, others are just defaults
and can be overridden for each rule. Note that you have to specify all
arguments besides the `rules` as keyword arguments!
:param rules: sequence of url rules for this map.
:param default_subdomain: The default subdomain for rules without a
subdomain defined.
:param strict_slashes: If a rule ends with a slash but the matched
URL does not, redirect to the URL with a trailing slash.
:param merge_slashes: Merge consecutive slashes when matching or
building URLs. Matches will redirect to the normalized URL.
Slashes in variable parts are not merged.
:param redirect_defaults: This will redirect to the default rule if it
wasn't visited that way. This helps creating
unique URLs.
:param converters: A dict of converters that adds additional converters
to the list of converters. If you redefine one
converter this will override the original one.
:param sort_parameters: If set to `True` the url parameters are sorted.
See `url_encode` for more details.
:param sort_key: The sort key function for `url_encode`.
:param host_matching: if set to `True` it enables the host matching
feature and disables the subdomain one. If
enabled the `host` parameter to rules is used
instead of the `subdomain` one.
.. versionchanged:: 3.0
The ``charset`` and ``encoding_errors`` parameters were removed.
.. versionchanged:: 1.0
If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules will match.
.. versionchanged:: 1.0
The ``merge_slashes`` parameter was added.
.. versionchanged:: 0.7
The ``encoding_errors`` and ``host_matching`` parameters were added.
.. versionchanged:: 0.5
The ``sort_parameters`` and ``sort_key`` parameters were added.
"""
#: A dict of default converters to be used.
default_converters = ImmutableDict(DEFAULT_CONVERTERS)
#: The type of lock to use when updating.
#:
#: .. versionadded:: 1.0
lock_class = Lock
def __init__(
self,
rules: t.Iterable[RuleFactory] | None = None,
default_subdomain: str = "",
strict_slashes: bool = True,
merge_slashes: bool = True,
redirect_defaults: bool = True,
converters: t.Mapping[str, type[BaseConverter]] | None = None,
sort_parameters: bool = False,
sort_key: t.Callable[[t.Any], t.Any] | None = None,
host_matching: bool = False,
) -> None:
self._matcher = StateMachineMatcher(merge_slashes)
self._rules_by_endpoint: dict[t.Any, list[Rule]] = {}
self._remap = True
self._remap_lock = self.lock_class()
self.default_subdomain = default_subdomain
self.strict_slashes = strict_slashes
self.redirect_defaults = redirect_defaults
self.host_matching = host_matching
self.converters = self.default_converters.copy()
if converters:
self.converters.update(converters)
self.sort_parameters = sort_parameters
self.sort_key = sort_key
for rulefactory in rules or ():
self.add(rulefactory)
@property
def merge_slashes(self) -> bool:
return self._matcher.merge_slashes
@merge_slashes.setter
def merge_slashes(self, value: bool) -> None:
self._matcher.merge_slashes = value
def is_endpoint_expecting(self, endpoint: t.Any, *arguments: str) -> bool:
"""Iterate over all rules and check if the endpoint expects
the arguments provided. This is for example useful if you have
some URLs that expect a language code and others that do not and
you want to wrap the builder a bit so that the current language
code is automatically added if not provided but endpoints expect
it.
:param endpoint: the endpoint to check.
:param arguments: this function accepts one or more arguments
as positional arguments. Each one of them is
checked.
"""
self.update()
arguments_set = set(arguments)
for rule in self._rules_by_endpoint[endpoint]:
if arguments_set.issubset(rule.arguments):
return True
return False
@property
def _rules(self) -> list[Rule]:
return [rule for rules in self._rules_by_endpoint.values() for rule in rules]
def iter_rules(self, endpoint: t.Any | None = None) -> t.Iterator[Rule]:
"""Iterate over all rules or the rules of an endpoint.
:param endpoint: if provided only the rules for that endpoint
are returned.
:return: an iterator
"""
self.update()
if endpoint is not None:
return iter(self._rules_by_endpoint[endpoint])
return iter(self._rules)
def add(self, rulefactory: RuleFactory) -> None:
"""Add a new rule or factory to the map and bind it. Requires that the
rule is not bound to another map.
:param rulefactory: a :class:`Rule` or :class:`RuleFactory`
"""
for rule in rulefactory.get_rules(self):
rule.bind(self)
if not rule.build_only:
self._matcher.add(rule)
self._rules_by_endpoint.setdefault(rule.endpoint, []).append(rule)
self._remap = True
def bind(
self,
server_name: str,
script_name: str | None = None,
subdomain: str | None = None,
url_scheme: str = "http",
default_method: str = "GET",
path_info: str | None = None,
query_args: t.Mapping[str, t.Any] | str | None = None,
) -> MapAdapter:
"""Return a new :class:`MapAdapter` with the details specified to the
call. Note that `script_name` will default to ``'/'`` if not further
specified or `None`. The `server_name` at least is a requirement
because the HTTP RFC requires absolute URLs for redirects and so all
redirect exceptions raised by Werkzeug will contain the full canonical
URL.
If no path_info is passed to :meth:`match` it will use the default path
info passed to bind. While this doesn't really make sense for
manual bind calls, it's useful if you bind a map to a WSGI
environment which already contains the path info.
`subdomain` will default to the `default_subdomain` for this map if
no defined. If there is no `default_subdomain` you cannot use the
subdomain feature.
.. versionchanged:: 1.0
If ``url_scheme`` is ``ws`` or ``wss``, only WebSocket rules
will match.
.. versionchanged:: 0.15
``path_info`` defaults to ``'/'`` if ``None``.
.. versionchanged:: 0.8
``query_args`` can be a string.
.. versionchanged:: 0.7
Added ``query_args``.
"""
server_name = server_name.lower()
if self.host_matching:
if subdomain is not None:
raise RuntimeError("host matching enabled and a subdomain was provided")
elif subdomain is None:
subdomain = self.default_subdomain
if script_name is None:
script_name = "/"
if path_info is None:
path_info = "/"
# Port isn't part of IDNA, and might push a name over the 63 octet limit.
server_name, port_sep, port = server_name.partition(":")
try:
server_name = server_name.encode("idna").decode("ascii")
except UnicodeError as e:
raise BadHost() from e
return MapAdapter(
self,
f"{server_name}{port_sep}{port}",
script_name,
subdomain,
url_scheme,
path_info,
default_method,
query_args,
)
def bind_to_environ(
self,
environ: WSGIEnvironment | Request,
server_name: str | None = None,
subdomain: str | None = None,
) -> MapAdapter:
"""Like :meth:`bind` but you can pass it an WSGI environment and it
will fetch the information from that dictionary. Note that because of
limitations in the protocol there is no way to get the current
subdomain and real `server_name` from the environment. If you don't
provide it, Werkzeug will use `SERVER_NAME` and `SERVER_PORT` (or
`HTTP_HOST` if provided) as used `server_name` with disabled subdomain
feature.
If `subdomain` is `None` but an environment and a server name is
provided it will calculate the current subdomain automatically.
Example: `server_name` is ``'example.com'`` and the `SERVER_NAME`
in the wsgi `environ` is ``'staging.dev.example.com'`` the calculated
subdomain will be ``'staging.dev'``.
If the object passed as environ has an environ attribute, the value of
this attribute is used instead. This allows you to pass request
objects. Additionally `PATH_INFO` added as a default of the
:class:`MapAdapter` so that you don't have to pass the path info to
the match method.
.. versionchanged:: 1.0.0
If the passed server name specifies port 443, it will match
if the incoming scheme is ``https`` without a port.
.. versionchanged:: 1.0.0
A warning is shown when the passed server name does not
match the incoming WSGI server name.
.. versionchanged:: 0.8
This will no longer raise a ValueError when an unexpected server
name was passed.
.. versionchanged:: 0.5
previously this method accepted a bogus `calculate_subdomain`
parameter that did not have any effect. It was removed because
of that.
:param environ: a WSGI environment.
:param server_name: an optional server name hint (see above).
:param subdomain: optionally the current subdomain (see above).
"""
env = _get_environ(environ)
wsgi_server_name = get_host(env).lower()
scheme = env["wsgi.url_scheme"]
upgrade = any(
v.strip() == "upgrade"
for v in env.get("HTTP_CONNECTION", "").lower().split(",")
)
if upgrade and env.get("HTTP_UPGRADE", "").lower() == "websocket":
scheme = "wss" if scheme == "https" else "ws"
if server_name is None:
server_name = wsgi_server_name
else:
server_name = server_name.lower()
# strip standard port to match get_host()
if scheme in {"http", "ws"} and server_name.endswith(":80"):
server_name = server_name[:-3]
elif scheme in {"https", "wss"} and server_name.endswith(":443"):
server_name = server_name[:-4]
if subdomain is None and not self.host_matching:
cur_server_name = wsgi_server_name.split(".")
real_server_name = server_name.split(".")
offset = -len(real_server_name)
if cur_server_name[offset:] != real_server_name:
# This can happen even with valid configs if the server was
# accessed directly by IP address under some situations.
# Instead of raising an exception like in Werkzeug 0.7 or
# earlier we go by an invalid subdomain which will result
# in a 404 error on matching.
warnings.warn(
f"Current server name {wsgi_server_name!r} doesn't match configured"
f" server name {server_name!r}",
stacklevel=2,
)
subdomain = "<invalid>"
else:
subdomain = ".".join(filter(None, cur_server_name[:offset]))
def _get_wsgi_string(name: str) -> str | None:
val = env.get(name)
if val is not None:
return _wsgi_decoding_dance(val)
return None
script_name = _get_wsgi_string("SCRIPT_NAME")
path_info = _get_wsgi_string("PATH_INFO")
query_args = _get_wsgi_string("QUERY_STRING")
return Map.bind(
self,
server_name,
script_name,
subdomain,
scheme,
env["REQUEST_METHOD"],
path_info,
query_args=query_args,
)
def update(self) -> None:
"""Called before matching and building to keep the compiled rules
in the correct order after things changed.
"""
if not self._remap:
return
with self._remap_lock:
if not self._remap:
return
self._matcher.update()
for rules in self._rules_by_endpoint.values():
rules.sort(key=lambda x: x.build_compare_key())
self._remap = False
def __repr__(self) -> str:
rules = self.iter_rules()
return f"{type(self).__name__}({pformat(list(rules))})"
| Map |
python | astropy__astropy | astropy/coordinates/representation/spherical.py | {
"start": 15743,
"end": 24454
} | class ____(BaseRepresentation):
"""
Representation of points in 3D spherical coordinates.
Parameters
----------
lon, lat : `~astropy.units.Quantity` ['angle']
The longitude and latitude of the point(s), in angular units. The
latitude should be between -90 and 90 degrees, and the longitude will
be wrapped to an angle between 0 and 360 degrees. These can also be
instances of `~astropy.coordinates.Angle`,
`~astropy.coordinates.Longitude`, or `~astropy.coordinates.Latitude`.
distance : `~astropy.units.Quantity` ['length']
The distance to the point(s). If the distance is a length, it is
passed to the :class:`~astropy.coordinates.Distance` class, otherwise
it is passed to the :class:`~astropy.units.Quantity` class.
differentials : dict, `~astropy.coordinates.BaseDifferential`, optional
Any differential classes that should be associated with this
representation. The input must either be a single `~astropy.coordinates.BaseDifferential`
instance (see `._compatible_differentials` for valid types), or a
dictionary of of differential instances with keys set to a string
representation of the SI unit with which the differential (derivative)
is taken. For example, for a velocity differential on a positional
representation, the key would be ``'s'`` for seconds, indicating that
the derivative is a time derivative.
copy : bool, optional
If `True` (default), arrays will be copied. If `False`, arrays will
be references, though possibly broadcast to ensure matching shapes.
"""
attr_classes = {"lon": Longitude, "lat": Latitude, "distance": u.Quantity}
_unit_representation = UnitSphericalRepresentation
def __init__(self, lon, lat=None, distance=None, differentials=None, copy=True):
super().__init__(lon, lat, distance, copy=copy, differentials=differentials)
if (
not isinstance(self._distance, Distance)
and self._distance.unit.physical_type == "length"
):
try:
self._distance = Distance(self._distance, copy=False)
except ValueError as e:
if e.args[0].startswith("distance must be >= 0"):
raise ValueError(
"Distance must be >= 0. To allow negative distance values, you"
" must explicitly pass in a `Distance` object with the "
"argument 'allow_negative=True'."
) from e
raise
@classproperty
def _compatible_differentials(cls):
return [
UnitSphericalDifferential,
UnitSphericalCosLatDifferential,
SphericalDifferential,
SphericalCosLatDifferential,
RadialDifferential,
]
@property
def lon(self):
"""
The longitude of the point(s).
"""
return self._lon
@property
def lat(self):
"""
The latitude of the point(s).
"""
return self._lat
@property
def distance(self):
"""
The distance from the origin to the point(s).
"""
return self._distance
def unit_vectors(self):
sinlon, coslon = np.sin(self.lon), np.cos(self.lon)
sinlat, coslat = np.sin(self.lat), np.cos(self.lat)
return {
"lon": CartesianRepresentation(-sinlon, coslon, 0.0, copy=COPY_IF_NEEDED),
"lat": CartesianRepresentation(
-sinlat * coslon, -sinlat * sinlon, coslat, copy=COPY_IF_NEEDED
),
"distance": CartesianRepresentation(
coslat * coslon, coslat * sinlon, sinlat, copy=COPY_IF_NEEDED
),
}
def scale_factors(self, omit_coslat=False):
sf_lat = self.distance / u.radian
sf_lon = sf_lat if omit_coslat else sf_lat * np.cos(self.lat)
sf_distance = np.broadcast_to(1.0 * u.one, self.shape, subok=True)
return {"lon": sf_lon, "lat": sf_lat, "distance": sf_distance}
def represent_as(self, other_class, differential_class=None):
# Take a short cut if the other class is a spherical representation
if isinstance(other_class, type):
if issubclass(other_class, PhysicsSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
phi=self.lon,
theta=90 * u.deg - self.lat,
r=self.distance,
differentials=diffs,
copy=False,
)
elif issubclass(other_class, UnitSphericalRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
lon=self.lon, lat=self.lat, differentials=diffs, copy=False
)
elif issubclass(other_class, RadialRepresentation):
diffs = self._re_represent_differentials(
other_class, differential_class
)
return other_class(
distance=self.distance,
differentials=diffs,
copy=False,
)
return super().represent_as(other_class, differential_class)
def to_cartesian(self):
"""
Converts spherical polar coordinates to 3D rectangular cartesian
coordinates.
"""
# We need to convert Distance to Quantity to allow negative values.
if isinstance(self.distance, Distance):
d = self.distance.view(u.Quantity)
else:
d = self.distance
# erfa s2p: Convert spherical polar coordinates to p-vector.
p = erfa_ufunc.s2p(self.lon, self.lat, d)
return CartesianRepresentation(p, xyz_axis=-1, copy=False)
@classmethod
def from_cartesian(cls, cart):
"""
Converts 3D rectangular cartesian coordinates to spherical polar
coordinates.
"""
p = cart.get_xyz(xyz_axis=-1)
# erfa p2s: P-vector to spherical polar coordinates.
return cls(*erfa_ufunc.p2s(p), copy=False)
def transform(self, matrix):
"""Transform the spherical coordinates using a 3x3 matrix.
This returns a new representation and does not modify the original one.
Any differentials attached to this representation will also be
transformed.
Parameters
----------
matrix : (3,3) array-like
A 3x3 matrix, such as a rotation matrix (or a stack of matrices).
"""
xyz = erfa_ufunc.s2c(self.lon, self.lat)
p = erfa_ufunc.rxp(matrix, xyz)
lon, lat, ur = erfa_ufunc.p2s(p)
rep = self.__class__(lon=lon, lat=lat, distance=self.distance * ur)
# handle differentials
new_diffs = {
k: d.transform(matrix, self, rep) for k, d in self.differentials.items()
}
return rep.with_differentials(new_diffs)
def norm(self):
"""Vector norm.
The norm is the standard Frobenius norm, i.e., the square root of the
sum of the squares of all components with non-angular units. For
spherical coordinates, this is just the absolute value of the distance.
Returns
-------
norm : `astropy.units.Quantity`
Vector norm, with the same shape as the representation.
"""
return np.abs(self.distance)
def _scale_operation(self, op, *args):
# TODO: expand special-casing to UnitSpherical and RadialDifferential.
if any(
differential.base_representation is not self.__class__
for differential in self.differentials.values()
):
return super()._scale_operation(op, *args)
lon_op, lat_op, distance_op = _spherical_op_funcs(op, *args)
result = self.__class__(
lon_op(self.lon),
lat_op(self.lat),
distance_op(self.distance),
copy=COPY_IF_NEEDED,
)
for key, differential in self.differentials.items():
new_comps = (
op(getattr(differential, comp))
for op, comp in zip(
(operator.pos, lat_op, distance_op), differential.components
)
)
result.differentials[key] = differential.__class__(*new_comps, copy=False)
return result
| SphericalRepresentation |
python | fastai__fastai | fastai/vision/core.py | {
"start": 5106,
"end": 5312
} | class ____(PILImage):
"A BW Pillow `Image` that can show itself and converts to `TensorImageBW`"
_show_args,_open_args = {'cmap':'Greys'},{'mode': 'L'}
# %% ../../nbs/07_vision.core.ipynb 48
| PILImageBW |
python | streamlit__streamlit | lib/streamlit/external/langchain/streamlit_callback_handler.py | {
"start": 10021,
"end": 15637
} | class ____(BaseCallbackHandler):
@gather_metrics("external.langchain.StreamlitCallbackHandler")
def __init__(
self,
parent_container: DeltaGenerator,
*,
max_thought_containers: int = 4,
expand_new_thoughts: bool = False,
collapse_completed_thoughts: bool = False,
thought_labeler: LLMThoughtLabeler | None = None,
) -> None:
"""Construct a new StreamlitCallbackHandler. This CallbackHandler is geared
towards use with a LangChain Agent; it displays the Agent's LLM and tool-usage
"thoughts" inside a series of Streamlit expanders.
Parameters
----------
parent_container
The `st.container` that will contain all the Streamlit elements that the
Handler creates.
max_thought_containers
.. note::
This parameter is deprecated and is ignored in the latest version of
the callback handler.
The max number of completed LLM thought containers to show at once. When
this threshold is reached, a new thought will cause the oldest thoughts to
be collapsed into a "History" expander. Defaults to 4.
expand_new_thoughts
Each LLM "thought" gets its own `st.expander`. This param controls whether
that expander is expanded by default. Defaults to False.
collapse_completed_thoughts
If True, LLM thought expanders will be collapsed when completed.
Defaults to False.
thought_labeler
An optional custom LLMThoughtLabeler instance. If unspecified, the handler
will use the default thought labeling logic. Defaults to None.
"""
self._parent_container = parent_container
self._history_parent = parent_container.container()
self._current_thought: LLMThought | None = None
self._completed_thoughts: list[LLMThought] = []
self._max_thought_containers = max(max_thought_containers, 1)
self._expand_new_thoughts = expand_new_thoughts
self._collapse_completed_thoughts = collapse_completed_thoughts
self._thought_labeler = thought_labeler or LLMThoughtLabeler()
def _require_current_thought(self) -> LLMThought:
"""Return our current LLMThought. Raise an error if we have no current
thought.
"""
if self._current_thought is None:
raise RuntimeError("Current LLMThought is unexpectedly None!")
return self._current_thought
def _get_last_completed_thought(self) -> LLMThought | None:
"""Return our most recent completed LLMThought, or None if we don't have one."""
if len(self._completed_thoughts) > 0:
return self._completed_thoughts[len(self._completed_thoughts) - 1]
return None
def _complete_current_thought(self, final_label: str | None = None) -> None:
"""Complete the current thought, optionally assigning it a new label.
Add it to our _completed_thoughts list.
"""
thought = self._require_current_thought()
thought.complete(final_label)
self._completed_thoughts.append(thought)
self._current_thought = None
def on_llm_start(
self, serialized: dict[str, Any], prompts: list[str], **kwargs: Any
) -> None:
if self._current_thought is None:
self._current_thought = LLMThought(
parent_container=self._parent_container,
expanded=self._expand_new_thoughts,
collapse_on_complete=self._collapse_completed_thoughts,
labeler=self._thought_labeler,
)
self._current_thought.on_llm_start(serialized, prompts)
# We don't prune_old_thought_containers here, because our container won't
# be visible until it has a child.
def on_llm_new_token(self, token: str, **kwargs: Any) -> None:
self._require_current_thought().on_llm_new_token(token, **kwargs)
def on_llm_end(self, response: LLMResult, **kwargs: Any) -> None:
self._require_current_thought().on_llm_end(response, **kwargs)
def on_llm_error(self, error: BaseException, *args: Any, **kwargs: Any) -> None:
self._require_current_thought().on_llm_error(error, **kwargs)
def on_tool_start(
self, serialized: dict[str, Any], input_str: str, **kwargs: Any
) -> None:
self._require_current_thought().on_tool_start(serialized, input_str, **kwargs)
def on_tool_end(
self,
output: str,
color: str | None = None,
observation_prefix: str | None = None,
llm_prefix: str | None = None,
**kwargs: Any,
) -> None:
self._require_current_thought().on_tool_end(
output, color, observation_prefix, llm_prefix, **kwargs
)
self._complete_current_thought()
def on_tool_error(self, error: BaseException, *args: Any, **kwargs: Any) -> None:
self._require_current_thought().on_tool_error(error, **kwargs)
def on_agent_action(
self, action: AgentAction, color: str | None = None, **kwargs: Any
) -> Any:
self._require_current_thought().on_agent_action(action, color, **kwargs)
def on_agent_finish(
self, finish: AgentFinish, color: str | None = None, **kwargs: Any
) -> None:
if self._current_thought is not None:
self._current_thought.complete(
self._thought_labeler.get_final_agent_thought_label()
)
self._current_thought = None
| StreamlitCallbackHandler |
python | astropy__astropy | astropy/units/decorators.py | {
"start": 3962,
"end": 12517
} | class ____:
@classmethod
def as_decorator(cls, func=None, **kwargs):
r"""
A decorator for validating the units of arguments to functions.
Unit specifications can be provided as keyword arguments to the
decorator, or by using function annotation syntax. Arguments to the
decorator take precedence over any function annotations present.
A `~astropy.units.UnitsError` will be raised if the unit attribute of
the argument is not equivalent to the unit specified to the decorator or
in the annotation. If the argument has no unit attribute, i.e. it is not
a Quantity object, a `ValueError` will be raised unless the argument is
an annotation. This is to allow non Quantity annotations to pass
through.
Where an equivalency is specified in the decorator, the function will be
executed with that equivalency in force.
Notes
-----
The checking of arguments inside variable arguments to a function is not
supported (i.e. \*arg or \**kwargs).
The original function is accessible by the attributed ``__wrapped__``.
See :func:`functools.wraps` for details.
Examples
--------
.. code-block:: python
import astropy.units as u
@u.quantity_input(myangle=u.arcsec)
def myfunction(myangle):
return myangle**2
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec):
return myangle**2
Or using a unit-aware Quantity annotation.
.. code-block:: python
@u.quantity_input
def myfunction(myangle: u.Quantity[u.arcsec]):
return myangle**2
Also you can specify a return value annotation, which will
cause the function to always return a `~astropy.units.Quantity` in that
unit.
.. code-block:: python
import astropy.units as u
@u.quantity_input
def myfunction(myangle: u.arcsec) -> u.deg**2:
return myangle**2
Using equivalencies::
import astropy.units as u
@u.quantity_input(myenergy=u.eV, equivalencies=u.mass_energy())
def myfunction(myenergy):
return myenergy**2
"""
self = cls(**kwargs)
if func is not None and not kwargs:
return self(func)
else:
return self
def __init__(self, func=None, strict_dimensionless=False, **kwargs):
self.equivalencies = kwargs.pop("equivalencies", [])
self.decorator_kwargs = kwargs
self.strict_dimensionless = strict_dimensionless
def __call__(self, wrapped_function):
# Extract the function signature for the function we are wrapping.
wrapped_signature = inspect.signature(wrapped_function)
# Define a new function to return in place of the wrapped one
@wraps(wrapped_function)
def wrapper(*func_args, **func_kwargs):
# Bind the arguments to our new function to the signature of the original.
bound_args = wrapped_signature.bind(*func_args, **func_kwargs)
# Iterate through the parameters of the original signature
for param in wrapped_signature.parameters.values():
# We do not support variable arguments (*args, **kwargs)
if param.kind in (
inspect.Parameter.VAR_KEYWORD,
inspect.Parameter.VAR_POSITIONAL,
):
continue
# Catch the (never triggered) case where bind relied on a default value.
if (
param.name not in bound_args.arguments
and param.default is not param.empty
):
bound_args.arguments[param.name] = param.default
# Get the value of this parameter (argument to new function)
arg = bound_args.arguments[param.name]
# Get target unit or physical type, either from decorator kwargs
# or annotations
if param.name in self.decorator_kwargs:
targets = self.decorator_kwargs[param.name]
is_annotation = False
else:
targets = param.annotation
is_annotation = True
# parses to unit if it's an annotation (or list thereof)
targets = _parse_annotation(targets)
# If the targets is empty, then no target units or physical
# types were specified so we can continue to the next arg
if targets is inspect.Parameter.empty:
continue
# If the argument value is None, and the default value is None,
# pass through the None even if there is a target unit
if arg is None and param.default is None:
continue
# Here, we check whether multiple target unit/physical type's
# were specified in the decorator/annotation, or whether a
# single string (unit or physical type) or a Unit object was
# specified
if isinstance(targets, str) or not isinstance(targets, Sequence):
valid_targets = [targets]
# Check for None in the supplied list of allowed units and, if
# present and the passed value is also None, ignore.
elif None in targets or NoneType in targets:
if arg is None:
continue
else:
valid_targets = [t for t in targets if t is not None]
else:
valid_targets = targets
# If we're dealing with an annotation, skip all the targets that
# are not strings or subclasses of Unit. This is to allow
# non unit related annotations to pass through
if is_annotation:
valid_targets = [
t
for t in valid_targets
if isinstance(t, (str, UnitBase, PhysicalType))
]
# Now we loop over the allowed units/physical types and validate
# the value of the argument:
_validate_arg_value(
param.name,
wrapped_function.__name__,
arg,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
if self.equivalencies:
equiv_context = add_enabled_equivalencies(self.equivalencies)
else:
# Avoid creating a duplicate registry if we don't have
# equivalencies to add. (If we're wrapping a short function,
# the time spent duplicating the registry is quite noticeable.)
equiv_context = contextlib.nullcontext()
# Call the original function with any equivalencies in force.
with equiv_context:
return_ = wrapped_function(*func_args, **func_kwargs)
# Return
ra = wrapped_signature.return_annotation
valid_empty = (inspect.Signature.empty, None, NoneType, T.NoReturn)
if ra not in valid_empty:
target = (
ra
if T.get_origin(ra) not in (T.Annotated, T.Union)
else _parse_annotation(ra)
)
if isinstance(target, str) or not isinstance(target, Sequence):
target = [target]
valid_targets = [
t for t in target if isinstance(t, (str, UnitBase, PhysicalType))
]
_validate_arg_value(
"return",
wrapped_function.__name__,
return_,
valid_targets,
self.equivalencies,
self.strict_dimensionless,
)
if len(valid_targets) > 0:
return_ <<= valid_targets[0]
return return_
return wrapper
quantity_input = QuantityInput.as_decorator
| QuantityInput |
python | numba__numba | numba/core/codegen.py | {
"start": 25856,
"end": 38039
} | class ____(CodeLibrary):
def __init__(self, codegen, name):
super().__init__(codegen, name)
self._linking_libraries = [] # maintain insertion order
self._final_module = ll.parse_assembly(
str(self._codegen._create_empty_module(self.name)))
self._final_module.name = cgutils.normalize_ir_text(self.name)
self._shared_module = None
self._reload_init = set()
def _optimize_functions(self, ll_module):
"""
Internal: run function-level optimizations inside *ll_module*.
"""
# Enforce data layout to enable layout-specific optimizations
ll_module.data_layout = self._codegen._data_layout
for func in ll_module.functions:
# Run function-level optimizations to reduce memory usage and improve
# module-level optimization.
fpm, pb = self._codegen._function_pass_manager()
k = f"Function passes on {func.name!r}"
with self._recorded_timings.record(k, pb):
fpm.run(func, pb)
def _optimize_final_module(self):
"""
Internal: optimize this library's final module.
"""
mpm_cheap, mpb_cheap = self._codegen._module_pass_manager(
loop_vectorize=self._codegen._loopvect,
slp_vectorize=False,
opt=self._codegen._opt_level,
cost="cheap")
mpm_full, mpb_full = self._codegen._module_pass_manager()
cheap_name = "Module passes (cheap optimization for refprune)"
with self._recorded_timings.record(cheap_name, mpb_cheap):
# A cheaper optimisation pass is run first to try and get as many
# refops into the same function as possible via inlining
mpm_cheap.run(self._final_module, mpb_cheap)
# Refop pruning is then run on the heavily inlined function
if not config.LLVM_REFPRUNE_PASS:
self._final_module = remove_redundant_nrt_refct(self._final_module)
full_name = "Module passes (full optimization)"
with self._recorded_timings.record(full_name, mpb_full):
# The full optimisation suite is then run on the refop pruned IR
mpm_full.run(self._final_module, mpb_full)
def _get_module_for_linking(self):
"""
Internal: get a LLVM module suitable for linking multiple times
into another library. Exported functions are made "linkonce_odr"
to allow for multiple definitions, inlining, and removal of
unused exports.
See discussion in https://github.com/numba/numba/pull/890
"""
self._ensure_finalized()
if self._shared_module is not None:
return self._shared_module
mod = self._final_module
to_fix = []
nfuncs = 0
for fn in mod.functions:
nfuncs += 1
if not fn.is_declaration and fn.linkage == ll.Linkage.external:
to_fix.append(fn.name)
if nfuncs == 0:
# This is an issue which can occur if loading a module
# from an object file and trying to link with it, so detect it
# here to make debugging easier.
raise RuntimeError("library unfit for linking: "
"no available functions in %s"
% (self,))
if to_fix:
mod = mod.clone()
for name in to_fix:
# NOTE: this will mark the symbol WEAK if serialized
# to an ELF file
mod.get_function(name).linkage = 'linkonce_odr'
self._shared_module = mod
return mod
def add_linking_library(self, library):
library._ensure_finalized()
self._linking_libraries.append(library)
def add_ir_module(self, ir_module):
self._raise_if_finalized()
assert isinstance(ir_module, llvmir.Module)
ir = cgutils.normalize_ir_text(str(ir_module))
ll_module = ll.parse_assembly(ir)
ll_module.name = ir_module.name
ll_module.verify()
self.add_llvm_module(ll_module)
def add_llvm_module(self, ll_module):
self._optimize_functions(ll_module)
# TODO: we shouldn't need to recreate the LLVM module object
if not config.LLVM_REFPRUNE_PASS:
ll_module = remove_redundant_nrt_refct(ll_module)
self._final_module.link_in(ll_module)
def finalize(self):
require_global_compiler_lock()
# Report any LLVM-related problems to the user
self._codegen._check_llvm_bugs()
self._raise_if_finalized()
if config.DUMP_FUNC_OPT:
dump("FUNCTION OPTIMIZED DUMP %s" % self.name,
self.get_llvm_str(), 'llvm')
# Link libraries for shared code
seen = set()
for library in self._linking_libraries:
if library not in seen:
# Parent inherits reload_init
self._reload_init.update(library._reload_init)
seen.add(library)
self._final_module.link_in(
library._get_module_for_linking(), preserve=True,
)
# Optimize the module after all dependences are linked in above,
# to allow for inlining.
self._optimize_final_module()
self._final_module.verify()
self._finalize_final_module()
def _finalize_dynamic_globals(self):
# Scan for dynamic globals
for gv in self._final_module.global_variables:
if gv.name.startswith('numba.dynamic.globals'):
self._dynamic_globals.append(gv.name)
def _verify_declare_only_symbols(self):
# Verify that no declare-only function compiled by numba.
for fn in self._final_module.functions:
# We will only check for symbol name starting with '_ZN5numba'
if fn.is_declaration and fn.name.startswith('_ZN5numba'):
msg = 'Symbol {} not linked properly'
raise AssertionError(msg.format(fn.name))
def _finalize_final_module(self):
"""
Make the underlying LLVM module ready to use.
"""
self._finalize_dynamic_globals()
self._verify_declare_only_symbols()
# Remember this on the module, for the object cache hooks
self._final_module.__library = weakref.proxy(self)
# It seems add_module() must be done only here and not before
# linking in other modules, otherwise get_pointer_to_function()
# could fail.
cleanup = self._codegen._add_module(self._final_module)
if cleanup:
weakref.finalize(self, cleanup)
self._finalize_specific()
self._finalized = True
if config.DUMP_OPTIMIZED:
dump("OPTIMIZED DUMP %s" % self.name, self.get_llvm_str(), 'llvm')
if config.DUMP_ASSEMBLY:
dump("ASSEMBLY %s" % self.name, self.get_asm_str(), 'asm')
def get_defined_functions(self):
"""
Get all functions defined in the library. The library must have
been finalized.
"""
mod = self._final_module
for fn in mod.functions:
if not fn.is_declaration:
yield fn
def get_function(self, name):
return self._final_module.get_function(name)
def _sentry_cache_disable_inspection(self):
if self._disable_inspection:
warnings.warn('Inspection disabled for cached code. '
'Invalid result is returned.')
def get_llvm_str(self):
self._sentry_cache_disable_inspection()
return str(self._final_module)
def get_asm_str(self):
self._sentry_cache_disable_inspection()
return str(self._codegen._tm.emit_assembly(self._final_module))
def get_function_cfg(self, name, py_func=None, **kwargs):
"""
Get control-flow graph of the LLVM function
"""
self._sentry_cache_disable_inspection()
return _CFG(self, name, py_func, **kwargs)
def get_disasm_cfg(self, mangled_name):
"""
Get the CFG of the disassembly of the ELF object at symbol mangled_name.
Requires python package: r2pipe
Requires radare2 binary on $PATH.
Notebook rendering requires python package: graphviz
Optionally requires a compiler toolchain (via pycc) to link the ELF to
get better disassembly results.
"""
elf = self._get_compiled_object()
return disassemble_elf_to_cfg(elf, mangled_name)
@classmethod
def _dump_elf(cls, buf):
"""
Dump the symbol table of an ELF file.
Needs pyelftools (https://github.com/eliben/pyelftools)
"""
from elftools.elf.elffile import ELFFile
from elftools.elf import descriptions
from io import BytesIO
f = ELFFile(BytesIO(buf))
print("ELF file:")
for sec in f.iter_sections():
if sec['sh_type'] == 'SHT_SYMTAB':
symbols = sorted(sec.iter_symbols(), key=lambda sym: sym.name)
print(" symbols:")
for sym in symbols:
if not sym.name:
continue
print(" - %r: size=%d, value=0x%x, type=%s, bind=%s"
% (sym.name.decode(),
sym['st_size'],
sym['st_value'],
descriptions.describe_symbol_type(sym['st_info']['type']),
descriptions.describe_symbol_bind(sym['st_info']['bind']),
))
print()
@classmethod
def _object_compiled_hook(cls, ll_module, buf):
"""
`ll_module` was compiled into object code `buf`.
"""
try:
self = ll_module.__library
except AttributeError:
return
if self._object_caching_enabled:
self._compiled = True
self._compiled_object = buf
@classmethod
def _object_getbuffer_hook(cls, ll_module):
"""
Return a cached object code for `ll_module`.
"""
try:
self = ll_module.__library
except AttributeError:
return
if self._object_caching_enabled and self._compiled_object:
buf = self._compiled_object
self._compiled_object = None
return buf
def serialize_using_bitcode(self):
"""
Serialize this library using its bitcode as the cached representation.
"""
self._ensure_finalized()
return (self.name, 'bitcode', self._final_module.as_bitcode())
def serialize_using_object_code(self):
"""
Serialize this library using its object code as the cached
representation. We also include its bitcode for further inlining
with other libraries.
"""
self._ensure_finalized()
data = (self._get_compiled_object(),
self._get_module_for_linking().as_bitcode())
return (self.name, 'object', data)
@classmethod
def _unserialize(cls, codegen, state):
name, kind, data = state
self = codegen.create_library(name)
assert isinstance(self, cls)
if kind == 'bitcode':
# No need to re-run optimizations, just make the module ready
self._final_module = ll.parse_bitcode(data)
self._finalize_final_module()
return self
elif kind == 'object':
object_code, shared_bitcode = data
self.enable_object_caching()
self._set_compiled_object(object_code)
self._shared_module = ll.parse_bitcode(shared_bitcode)
self._finalize_final_module()
# Load symbols from cache
self._codegen._engine._load_defined_symbols(self._shared_module)
return self
else:
raise ValueError("unsupported serialization kind %r" % (kind,))
| CPUCodeLibrary |
python | MongoEngine__mongoengine | mongoengine/queryset/visitor.py | {
"start": 1999,
"end": 2505
} | class ____(QNodeVisitor):
"""Compiles the nodes in a query tree to a PyMongo-compatible query
dictionary.
"""
def __init__(self, document):
self.document = document
def visit_combination(self, combination):
operator = "$and"
if combination.operation == combination.OR:
operator = "$or"
return {operator: combination.children}
def visit_query(self, query):
return transform.query(self.document, **query.query)
| QueryCompilerVisitor |
python | PyCQA__pylint | doc/data/messages/a/attribute-defined-outside-init/bad.py | {
"start": 0,
"end": 109
} | class ____:
def register(self):
self.is_registered = True # [attribute-defined-outside-init]
| Student |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-outbrain-amplify/source_outbrain_amplify/source.py | {
"start": 12432,
"end": 14674
} | class ____(OutbrainAmplifyStream, HttpSubStream):
primary_key = None
def __init__(self, authenticator, config, parent: Marketers, **kwargs):
super().__init__(parent=parent, **kwargs)
self.config = config
self._authenticator = authenticator
self._session = requests.sessions.Session()
@property
def name(self) -> str:
return "budgets"
def request_params(
self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None
) -> MutableMapping[str, Any]:
return {}
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
return None
def stream_slices(
self, sync_mode: SyncMode.full_refresh, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
)
for record in parent_records:
yield {"marketer_id": record.get("id")}
def parse_response(
self,
response: requests.Response,
stream_state: Mapping[str, Any],
stream_slice: Mapping[str, Any] = None,
next_page_token: Mapping[str, Any] = None,
) -> Iterable[Mapping]:
if response.json():
for x in response.json().get("budgets"):
x["marketer_id"] = stream_slice["marketer_id"]
yield x
def path(
self, stream_state: Mapping[str, Any] = None, stream_slice: Mapping[str, Any] = None, next_page_token: Mapping[str, Any] = None
) -> str:
return f"marketers/{stream_slice['marketer_id']}/budgets"
# Retrieve campaigns with performance statistics for a Marketer.
# The API in this sub-section allows retrieving marketer campaigns data with performance statistics.
| BudgetsForMarketers |
python | django__django | tests/gis_tests/gis_migrations/test_operations.py | {
"start": 4865,
"end": 15587
} | class ____(OperationTestCase):
def setUp(self):
super().setUp()
self.set_up_test_model()
def test_add_geom_field(self):
"""
Test the AddField operation with a geometry-enabled column.
"""
self.alter_gis_model(
migrations.AddField, "Neighborhood", "path", fields.LineStringField
)
self.assertColumnExists("gis_neighborhood", "path")
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists("gis_neighborhood", "path")
@skipUnless(connection.vendor == "mysql", "MySQL specific test")
def test_remove_geom_field_nullable_with_index(self):
# MySQL doesn't support spatial indexes on NULL columns.
with self.assertNumQueries(1) as ctx:
self.alter_gis_model(
migrations.AddField,
"Neighborhood",
"path",
fields.LineStringField,
field_class_kwargs={"null": True},
)
self.assertColumnExists("gis_neighborhood", "path")
self.assertNotIn("CREATE SPATIAL INDEX", ctx.captured_queries[0]["sql"])
with self.assertNumQueries(1), self.assertNoLogs("django.contrib.gis", "ERROR"):
self.alter_gis_model(migrations.RemoveField, "Neighborhood", "path")
self.assertColumnNotExists("gis_neighborhood", "path")
@skipUnless(HAS_GEOMETRY_COLUMNS, "Backend doesn't support GeometryColumns.")
def test_geom_col_name(self):
self.assertEqual(
GeometryColumns.geom_col_name(),
"column_name" if connection.ops.oracle else "f_geometry_column",
)
@skipUnlessDBFeature("supports_raster")
def test_add_raster_field(self):
"""
Test the AddField operation with a raster-enabled column.
"""
self.alter_gis_model(
migrations.AddField, "Neighborhood", "heatmap", fields.RasterField
)
self.assertColumnExists("gis_neighborhood", "heatmap")
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists("gis_neighborhood", "heatmap", raster=True)
def test_add_blank_geom_field(self):
"""
Should be able to add a GeometryField with blank=True.
"""
self.alter_gis_model(
migrations.AddField,
"Neighborhood",
"path",
fields.LineStringField,
field_class_kwargs={"blank": True},
)
self.assertColumnExists("gis_neighborhood", "path")
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(2)
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists("gis_neighborhood", "path")
@skipUnlessDBFeature("supports_raster")
def test_add_blank_raster_field(self):
"""
Should be able to add a RasterField with blank=True.
"""
self.alter_gis_model(
migrations.AddField,
"Neighborhood",
"heatmap",
fields.RasterField,
field_class_kwargs={"blank": True},
)
self.assertColumnExists("gis_neighborhood", "heatmap")
# Test spatial indices when available
if self.has_spatial_indexes:
self.assertSpatialIndexExists("gis_neighborhood", "heatmap", raster=True)
def test_remove_geom_field(self):
"""
Test the RemoveField operation with a geometry-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, "Neighborhood", "geom")
self.assertColumnNotExists("gis_neighborhood", "geom")
# Test GeometryColumns when available
if HAS_GEOMETRY_COLUMNS:
self.assertGeometryColumnsCount(0)
@skipUnlessDBFeature("supports_raster")
def test_remove_raster_field(self):
"""
Test the RemoveField operation with a raster-enabled column.
"""
self.alter_gis_model(migrations.RemoveField, "Neighborhood", "rast")
self.assertColumnNotExists("gis_neighborhood", "rast")
def test_create_model_spatial_index(self):
if not self.has_spatial_indexes:
self.skipTest("No support for Spatial indexes")
self.assertSpatialIndexExists("gis_neighborhood", "geom")
if connection.features.supports_raster:
self.assertSpatialIndexExists("gis_neighborhood", "rast", raster=True)
@skipUnlessDBFeature("can_alter_geometry_field")
def test_alter_field_add_spatial_index(self):
if not self.has_spatial_indexes:
self.skipTest("No support for Spatial indexes")
self.alter_gis_model(
migrations.AddField,
"Neighborhood",
"point",
fields.PointField,
field_class_kwargs={"spatial_index": False},
)
self.assertSpatialIndexNotExists("gis_neighborhood", "point")
self.alter_gis_model(
migrations.AlterField,
"Neighborhood",
"point",
fields.PointField,
field_class_kwargs={"spatial_index": True},
)
self.assertSpatialIndexExists("gis_neighborhood", "point")
@skipUnlessDBFeature("can_alter_geometry_field")
def test_alter_field_remove_spatial_index(self):
if not self.has_spatial_indexes:
self.skipTest("No support for Spatial indexes")
self.assertSpatialIndexExists("gis_neighborhood", "geom")
self.alter_gis_model(
migrations.AlterField,
"Neighborhood",
"geom",
fields.MultiPolygonField,
field_class_kwargs={"spatial_index": False},
)
self.assertSpatialIndexNotExists("gis_neighborhood", "geom")
@skipUnlessDBFeature("can_alter_geometry_field")
@skipUnless(connection.vendor == "mysql", "MySQL specific test")
def test_alter_field_nullable_with_spatial_index(self):
if not self.has_spatial_indexes:
self.skipTest("No support for Spatial indexes")
self.alter_gis_model(
migrations.AddField,
"Neighborhood",
"point",
fields.PointField,
field_class_kwargs={"spatial_index": False, "null": True},
)
# MySQL doesn't support spatial indexes on NULL columns.
self.assertSpatialIndexNotExists("gis_neighborhood", "point")
self.alter_gis_model(
migrations.AlterField,
"Neighborhood",
"point",
fields.PointField,
field_class_kwargs={"spatial_index": True, "null": True},
)
self.assertSpatialIndexNotExists("gis_neighborhood", "point")
self.alter_gis_model(
migrations.AlterField,
"Neighborhood",
"point",
fields.PointField,
field_class_kwargs={"spatial_index": False, "null": True},
)
self.assertSpatialIndexNotExists("gis_neighborhood", "point")
@skipUnlessDBFeature("can_alter_geometry_field")
def test_alter_field_with_spatial_index(self):
if not self.has_spatial_indexes:
self.skipTest("No support for Spatial indexes")
self.alter_gis_model(
migrations.AddField,
"Neighborhood",
"point",
fields.PointField,
field_class_kwargs={"spatial_index": True},
)
self.assertSpatialIndexExists("gis_neighborhood", "point")
self.alter_gis_model(
migrations.AlterField,
"Neighborhood",
"point",
fields.PointField,
field_class_kwargs={"spatial_index": True, "srid": 3086},
)
self.assertSpatialIndexExists("gis_neighborhood", "point")
@skipUnlessDBFeature("supports_3d_storage")
def test_add_3d_field_opclass(self):
if not connection.ops.postgis:
self.skipTest("PostGIS-specific test.")
self.alter_gis_model(
migrations.AddField,
"Neighborhood",
"point3d",
field_class=fields.PointField,
field_class_kwargs={"dim": 3},
)
self.assertColumnExists("gis_neighborhood", "point3d")
self.assertSpatialIndexExists("gis_neighborhood", "point3d")
with connection.cursor() as cursor:
index_name = "gis_neighborhood_point3d_113bc868_id"
cursor.execute(self.get_opclass_query, [index_name])
self.assertEqual(
cursor.fetchall(),
[("gist_geometry_ops_nd", index_name)],
)
@skipUnlessDBFeature("can_alter_geometry_field", "supports_3d_storage")
def test_alter_geom_field_dim(self):
Neighborhood = self.current_state.apps.get_model("gis", "Neighborhood")
p1 = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
Neighborhood.objects.create(name="TestDim", geom=MultiPolygon(p1, p1))
# Add 3rd dimension.
self.alter_gis_model(
migrations.AlterField,
"Neighborhood",
"geom",
fields.MultiPolygonField,
field_class_kwargs={"dim": 3},
)
self.assertTrue(Neighborhood.objects.first().geom.hasz)
# Rewind to 2 dimensions.
self.alter_gis_model(
migrations.AlterField,
"Neighborhood",
"geom",
fields.MultiPolygonField,
field_class_kwargs={"dim": 2},
)
self.assertFalse(Neighborhood.objects.first().geom.hasz)
@skipUnlessDBFeature(
"supports_column_check_constraints", "can_introspect_check_constraints"
)
def test_add_check_constraint(self):
Neighborhood = self.current_state.apps.get_model("gis", "Neighborhood")
poly = Polygon(((0, 0), (0, 1), (1, 1), (1, 0), (0, 0)))
constraint = models.CheckConstraint(
condition=models.Q(geom=poly),
name="geom_within_constraint",
)
Neighborhood._meta.constraints = [constraint]
with connection.schema_editor() as editor:
editor.add_constraint(Neighborhood, constraint)
with connection.cursor() as cursor:
constraints = connection.introspection.get_constraints(
cursor,
Neighborhood._meta.db_table,
)
self.assertIn("geom_within_constraint", constraints)
@skipIfDBFeature("supports_raster")
| OperationTests |
python | numba__numba | numba/tests/test_misc_coverage_support.py | {
"start": 211,
"end": 2134
} | class ____(TestCase):
@TestCase.run_test_in_subprocess(envvars={"NUMBA_JIT_COVERAGE": "1"})
def test_custom_loc_notifier(self):
class MyNotify(NotifyLocBase):
records = []
def notify(self, loc):
self.records.append(("NOTIFY", loc))
def close(self):
self.records.append(("CLOSE", None))
# Patch to install registry for testing
new_the_registry = _the_registry + [MyNotify]
gv = "numba.misc.coverage_support._the_registry"
with patch(gv, new_the_registry):
@njit
def foo():
return 123
res = foo()
self.assertEqual(res, 123)
# offset by +2 because:
# +1 for the decorator
# +1 for the `def` line
first_offset = 2
offset = foo.__code__.co_firstlineno + first_offset
loc = ir.Loc(__file__, 1)
self.assertIn(("NOTIFY", loc.with_lineno(offset)), MyNotify.records)
self.assertIn(("CLOSE", None), MyNotify.records)
# Test dead branch pruned
with patch(gv, new_the_registry):
cond = False
@njit
def foo():
if cond:
return 321
return 123
res = foo()
self.assertEqual(res, 123)
# `if cond` line is compiled
offset = foo.__code__.co_firstlineno + first_offset
self.assertIn(("NOTIFY", loc.with_lineno(offset)), MyNotify.records)
# ` return 321` line is not compiled
self.assertNotIn(
("NOTIFY", loc.with_lineno(offset + 1)), MyNotify.records
)
# ` return 123` line is compiled
self.assertIn(("NOTIFY", loc.with_lineno(offset + 2)), MyNotify.records)
self.assertIn(("CLOSE", None), MyNotify.records)
if __name__ == "__main__":
unittest.main()
| TestMiscCoverageSupport |
python | apache__airflow | providers/apache/tinkerpop/tests/unit/apache/tinkerpop/hooks/test_gremlin.py | {
"start": 1233,
"end": 5541
} | class ____:
@pytest.mark.parametrize(
("host", "port", "expected_uri"),
[
("host", None, "ws://host:443/gremlin"),
("myhost", 1234, "ws://myhost:1234/gremlin"),
("localhost", 8888, "ws://localhost:8888/gremlin"),
],
)
def test_get_uri(self, host, port, expected_uri, gremlin_hook):
"""
Test that get_uri builds the expected URI from the connection.
"""
conn = Connection(conn_id="gremlin_default", host=host, port=port)
with mock.patch.dict("os.environ", AIRFLOW_CONN_GREMLIN_DEFAULT=conn.get_uri()):
uri = gremlin_hook.get_uri(conn)
assert uri == expected_uri
def test_get_conn(self, gremlin_hook):
"""
Test that get_conn() retrieves the connection and creates a client correctly.
"""
conn = Connection(
conn_type="gremlin",
conn_id="gremlin_default",
host="host",
port=1234,
schema="mydb",
login="login",
password="mypassword",
)
gremlin_hook.get_connection = lambda conn_id: conn
with mock.patch("airflow.providers.apache.tinkerpop.hooks.gremlin.Client") as mock_client:
gremlin_hook.get_conn()
expected_uri = "wss://host:1234/"
expected_username = "/dbs/login/colls/mydb"
mock_client.assert_called_once_with(
url=expected_uri,
traversal_source=gremlin_hook.traversal_source,
username=expected_username,
password="mypassword",
)
@pytest.mark.parametrize(
("serializer", "should_include"),
[
(None, False),
("dummy_serializer", True),
],
)
def test_get_client_message_serializer(self, serializer, should_include, gremlin_hook):
"""
Test that get_client() includes message_serializer only when provided.
"""
conn = Connection(
conn_id="gremlin_default",
host="host",
port=1234,
schema="mydb",
login="login",
password="mypassword",
)
uri = "wss://test.uri"
traversal_source = "g"
with mock.patch("airflow.providers.apache.tinkerpop.hooks.gremlin.Client") as mock_client:
gremlin_hook.get_client(conn, traversal_source, uri, message_serializer=serializer)
call_args = mock_client.call_args.kwargs
if should_include:
assert "message_serializer" in call_args
assert call_args["message_serializer"] == serializer
else:
assert "message_serializer" not in call_args
@pytest.mark.parametrize(
("side_effect", "expected_exception", "expected_result"),
[
(None, None, ["dummy_result"]),
(Exception("Test error"), Exception, None),
],
)
def test_run(self, side_effect, expected_exception, expected_result, gremlin_hook):
"""
Test that run() returns the expected result or propagates an exception, with proper cleanup.
"""
query = "g.V().limit(1)"
# Mock the client instance
with mock.patch("airflow.providers.apache.tinkerpop.hooks.gremlin.Client") as mock_client:
instance = mock_client.return_value
if side_effect is None:
instance.submit.return_value.all.return_value.result.return_value = expected_result
else:
instance.submit.return_value.all.return_value.result.side_effect = side_effect
# Mock get_connection to simplify setup
conn = Connection(
conn_id="gremlin_default",
host="host",
port=1234,
schema="mydb",
login="login",
password="mypassword",
)
gremlin_hook.get_connection = lambda conn_id: conn
if expected_exception:
with pytest.raises(expected_exception, match="Test error"):
gremlin_hook.run(query)
else:
result = gremlin_hook.run(query)
assert result == expected_result
| TestGremlinHook |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/ir.py | {
"start": 11796,
"end": 32488
} | class ____(IR):
"""Input from files."""
__slots__ = (
"cloud_options",
"include_file_paths",
"n_rows",
"parquet_options",
"paths",
"predicate",
"reader_options",
"row_index",
"skip_rows",
"typ",
"with_columns",
)
_non_child = (
"schema",
"typ",
"reader_options",
"cloud_options",
"paths",
"with_columns",
"skip_rows",
"n_rows",
"row_index",
"include_file_paths",
"predicate",
"parquet_options",
)
typ: str
"""What type of file are we reading? Parquet, CSV, etc..."""
reader_options: dict[str, Any]
"""Reader-specific options, as dictionary."""
cloud_options: dict[str, Any] | None
"""Cloud-related authentication options, currently ignored."""
paths: list[str]
"""List of paths to read from."""
with_columns: list[str] | None
"""Projected columns to return."""
skip_rows: int
"""Rows to skip at the start when reading."""
n_rows: int
"""Number of rows to read after skipping."""
row_index: tuple[str, int] | None
"""If not None add an integer index column of the given name."""
include_file_paths: str | None
"""Include the path of the source file(s) as a column with this name."""
predicate: expr.NamedExpr | None
"""Mask to apply to the read dataframe."""
parquet_options: ParquetOptions
"""Parquet-specific options."""
PARQUET_DEFAULT_CHUNK_SIZE: int = 0 # unlimited
PARQUET_DEFAULT_PASS_LIMIT: int = 16 * 1024**3 # 16GiB
def __init__(
self,
schema: Schema,
typ: str,
reader_options: dict[str, Any],
cloud_options: dict[str, Any] | None,
paths: list[str],
with_columns: list[str] | None,
skip_rows: int,
n_rows: int,
row_index: tuple[str, int] | None,
include_file_paths: str | None,
predicate: expr.NamedExpr | None,
parquet_options: ParquetOptions,
):
self.schema = schema
self.typ = typ
self.reader_options = reader_options
self.cloud_options = cloud_options
self.paths = paths
self.with_columns = with_columns
self.skip_rows = skip_rows
self.n_rows = n_rows
self.row_index = row_index
self.include_file_paths = include_file_paths
self.predicate = predicate
self._non_child_args = (
schema,
typ,
reader_options,
paths,
with_columns,
skip_rows,
n_rows,
row_index,
include_file_paths,
predicate,
parquet_options,
)
self.children = ()
self.parquet_options = parquet_options
if self.typ not in ("csv", "parquet", "ndjson"): # pragma: no cover
# This line is unhittable ATM since IPC/Anonymous scan raise
# on the polars side
raise NotImplementedError(f"Unhandled scan type: {self.typ}")
if self.typ == "ndjson" and (self.n_rows != -1 or self.skip_rows != 0):
raise NotImplementedError("row limit in scan for json reader")
if self.skip_rows < 0:
# TODO: polars has this implemented for parquet,
# maybe we can do this too?
raise NotImplementedError("slice pushdown for negative slices")
if self.cloud_options is not None and any(
self.cloud_options.get(k) is not None for k in ("aws", "azure", "gcp")
):
raise NotImplementedError(
"Read from cloud storage"
) # pragma: no cover; no test yet
if (
any(str(p).startswith("https:/") for p in self.paths)
and POLARS_VERSION_LT_131
): # pragma: no cover; polars passed us the wrong URI
# https://github.com/pola-rs/polars/issues/22766
raise NotImplementedError("Read from https")
if any(
str(p).startswith("file:/" if POLARS_VERSION_LT_131 else "file://")
for p in self.paths
):
raise NotImplementedError("Read from file URI")
if self.typ == "csv":
if any(
plc.io.SourceInfo._is_remote_uri(p) for p in self.paths
): # pragma: no cover; no test yet
# This works fine when the file has no leading blank lines,
# but currently we do some file introspection
# to skip blanks before parsing the header.
# For remote files we cannot determine if leading blank lines
# exist, so we're punting on CSV support.
# TODO: Once the CSV reader supports skipping leading
# blank lines natively, we can remove this guard.
raise NotImplementedError(
"Reading CSV from remote is not yet supported"
)
if self.reader_options["skip_rows_after_header"] != 0:
raise NotImplementedError("Skipping rows after header in CSV reader")
parse_options = self.reader_options["parse_options"]
if (
null_values := parse_options["null_values"]
) is not None and "Named" in null_values:
raise NotImplementedError(
"Per column null value specification not supported for CSV reader"
)
if (
comment := parse_options["comment_prefix"]
) is not None and "Multi" in comment:
raise NotImplementedError(
"Multi-character comment prefix not supported for CSV reader"
)
if not self.reader_options["has_header"]:
# TODO: To support reading headerless CSV files without requiring new
# column names, we would need to do file introspection to infer the number
# of columns so column projection works right.
reader_schema = self.reader_options.get("schema")
if not (
reader_schema
and isinstance(schema, dict)
and "fields" in reader_schema
):
raise NotImplementedError(
"Reading CSV without header requires user-provided column names via new_columns"
)
elif self.typ == "ndjson":
# TODO: consider handling the low memory option here
# (maybe use chunked JSON reader)
if self.reader_options["ignore_errors"]:
raise NotImplementedError(
"ignore_errors is not supported in the JSON reader"
)
if include_file_paths is not None:
# TODO: Need to populate num_rows_per_source in read_json in libcudf
raise NotImplementedError("Including file paths in a json scan.")
elif (
self.typ == "parquet"
and self.row_index is not None
and self.with_columns is not None
and len(self.with_columns) == 0
):
raise NotImplementedError(
"Reading only parquet metadata to produce row index."
)
def get_hashable(self) -> Hashable:
"""
Hashable representation of the node.
The options dictionaries are serialised for hashing purposes
as json strings.
"""
schema_hash = tuple(self.schema.items())
return (
type(self),
schema_hash,
self.typ,
json.dumps(self.reader_options),
json.dumps(self.cloud_options),
tuple(self.paths),
tuple(self.with_columns) if self.with_columns is not None else None,
self.skip_rows,
self.n_rows,
self.row_index,
self.include_file_paths,
self.predicate,
self.parquet_options,
)
@staticmethod
def add_file_paths(
name: str, paths: list[str], rows_per_path: list[int], df: DataFrame
) -> DataFrame:
"""
Add a Column of file paths to the DataFrame.
Each path is repeated according to the number of rows read from it.
"""
(filepaths,) = plc.filling.repeat(
plc.Table(
[
plc.Column.from_arrow(
pl.Series(values=map(str, paths)),
stream=df.stream,
)
]
),
plc.Column.from_arrow(
pl.Series(values=rows_per_path, dtype=pl.datatypes.Int32()),
stream=df.stream,
),
stream=df.stream,
).columns()
dtype = DataType(pl.String())
return df.with_columns(
[Column(filepaths, name=name, dtype=dtype)], stream=df.stream
)
def fast_count(self) -> int: # pragma: no cover
"""Get the number of rows in a Parquet Scan."""
meta = plc.io.parquet_metadata.read_parquet_metadata(
plc.io.SourceInfo(self.paths)
)
total_rows = meta.num_rows() - self.skip_rows
if self.n_rows != -1:
total_rows = min(total_rows, self.n_rows)
return max(total_rows, 0)
@classmethod
@log_do_evaluate
@nvtx_annotate_cudf_polars(message="Scan")
def do_evaluate(
cls,
schema: Schema,
typ: str,
reader_options: dict[str, Any],
paths: list[str],
with_columns: list[str] | None,
skip_rows: int,
n_rows: int,
row_index: tuple[str, int] | None,
include_file_paths: str | None,
predicate: expr.NamedExpr | None,
parquet_options: ParquetOptions,
*,
context: IRExecutionContext,
) -> DataFrame:
"""Evaluate and return a dataframe."""
stream = context.get_cuda_stream()
if typ == "csv":
def read_csv_header(
path: Path | str, sep: str
) -> list[str]: # pragma: no cover
with Path(path).open() as f:
for line in f:
stripped = line.strip()
if stripped:
return stripped.split(sep)
return []
parse_options = reader_options["parse_options"]
sep = chr(parse_options["separator"])
quote = chr(parse_options["quote_char"])
eol = chr(parse_options["eol_char"])
if reader_options["schema"] is not None:
# Reader schema provides names
column_names = list(reader_options["schema"]["fields"].keys())
else:
# file provides column names
column_names = None
usecols = with_columns
has_header = reader_options["has_header"]
header = 0 if has_header else -1
# polars defaults to no null recognition
null_values = [""]
if parse_options["null_values"] is not None:
((typ, nulls),) = parse_options["null_values"].items()
if typ == "AllColumnsSingle":
# Single value
null_values.append(nulls)
else:
# List of values
null_values.extend(nulls)
if parse_options["comment_prefix"] is not None:
comment = chr(parse_options["comment_prefix"]["Single"])
else:
comment = None
decimal = "," if parse_options["decimal_comma"] else "."
# polars skips blank lines at the beginning of the file
pieces = []
seen_paths = []
read_partial = n_rows != -1
for p in paths:
skiprows = reader_options["skip_rows"]
path = Path(p)
with path.open() as f:
while f.readline() == "\n":
skiprows += 1
options = (
plc.io.csv.CsvReaderOptions.builder(plc.io.SourceInfo([path]))
.nrows(n_rows)
.skiprows(skiprows + skip_rows)
.skip_blank_lines(skip_blank_lines=False)
.lineterminator(str(eol))
.quotechar(str(quote))
.decimal(decimal)
.keep_default_na(keep_default_na=False)
.na_filter(na_filter=True)
.delimiter(str(sep))
.build()
)
if column_names is not None:
options.set_names([str(name) for name in column_names])
else:
if header > -1 and skip_rows > header: # pragma: no cover
# We need to read the header otherwise we would skip it
column_names = read_csv_header(path, str(sep))
options.set_names(column_names)
options.set_header(header)
options.set_dtypes(
{name: dtype.plc_type for name, dtype in schema.items()}
)
if usecols is not None:
options.set_use_cols_names([str(name) for name in usecols])
options.set_na_values(null_values)
if comment is not None:
options.set_comment(comment)
tbl_w_meta = plc.io.csv.read_csv(options, stream=stream)
pieces.append(tbl_w_meta)
if include_file_paths is not None:
seen_paths.append(p)
if read_partial:
n_rows -= tbl_w_meta.tbl.num_rows()
if n_rows <= 0:
break
tables, (colnames, *_) = zip(
*(
(piece.tbl, piece.column_names(include_children=False))
for piece in pieces
),
strict=True,
)
df = DataFrame.from_table(
plc.concatenate.concatenate(list(tables), stream=stream),
colnames,
[schema[colname] for colname in colnames],
stream=stream,
)
if include_file_paths is not None:
df = Scan.add_file_paths(
include_file_paths,
seen_paths,
[t.num_rows() for t in tables],
df,
)
elif typ == "parquet":
filters = None
if predicate is not None and row_index is None:
# Can't apply filters during read if we have a row index.
filters = to_parquet_filter(
_cast_literals_to_physical_types(
predicate.value,
_parquet_physical_types(
schema, paths, with_columns or list(schema.keys()), stream
),
),
stream=stream,
)
parquet_reader_options = plc.io.parquet.ParquetReaderOptions.builder(
plc.io.SourceInfo(paths)
).build()
if with_columns is not None:
parquet_reader_options.set_columns(with_columns)
if filters is not None:
parquet_reader_options.set_filter(filters)
if n_rows != -1:
parquet_reader_options.set_num_rows(n_rows)
if skip_rows != 0:
parquet_reader_options.set_skip_rows(skip_rows)
if parquet_options.chunked:
reader = plc.io.parquet.ChunkedParquetReader(
parquet_reader_options,
chunk_read_limit=parquet_options.chunk_read_limit,
pass_read_limit=parquet_options.pass_read_limit,
stream=stream,
)
chunk = reader.read_chunk()
# TODO: Nested column names
names = chunk.column_names(include_children=False)
concatenated_columns = chunk.tbl.columns()
while reader.has_next():
columns = reader.read_chunk().tbl.columns()
# Discard columns while concatenating to reduce memory footprint.
# Reverse order to avoid O(n^2) list popping cost.
for i in range(len(concatenated_columns) - 1, -1, -1):
concatenated_columns[i] = plc.concatenate.concatenate(
[concatenated_columns[i], columns.pop()], stream=stream
)
df = DataFrame.from_table(
plc.Table(concatenated_columns),
names=names,
dtypes=[schema[name] for name in names],
stream=stream,
)
df = _align_parquet_schema(df, schema)
if include_file_paths is not None:
df = Scan.add_file_paths(
include_file_paths, paths, chunk.num_rows_per_source, df
)
else:
tbl_w_meta = plc.io.parquet.read_parquet(
parquet_reader_options, stream=stream
)
# TODO: consider nested column names?
col_names = tbl_w_meta.column_names(include_children=False)
df = DataFrame.from_table(
tbl_w_meta.tbl,
col_names,
[schema[name] for name in col_names],
stream=stream,
)
df = _align_parquet_schema(df, schema)
if include_file_paths is not None:
df = Scan.add_file_paths(
include_file_paths, paths, tbl_w_meta.num_rows_per_source, df
)
if filters is not None:
# Mask must have been applied.
return df
elif typ == "ndjson":
json_schema: list[plc.io.json.NameAndType] = [
(name, typ.plc_type, []) for name, typ in schema.items()
]
json_reader_options = (
plc.io.json.JsonReaderOptions.builder(plc.io.SourceInfo(paths))
.lines(val=True)
.dtypes(json_schema)
.prune_columns(val=True)
.build()
)
plc_tbl_w_meta = plc.io.json.read_json(json_reader_options, stream=stream)
# TODO: I don't think cudf-polars supports nested types in general right now
# (but when it does, we should pass child column names from nested columns in)
col_names = plc_tbl_w_meta.column_names(include_children=False)
df = DataFrame.from_table(
plc_tbl_w_meta.tbl,
col_names,
[schema[name] for name in col_names],
stream=stream,
)
col_order = list(schema.keys())
if row_index is not None:
col_order.remove(row_index[0])
df = df.select(col_order)
else:
raise NotImplementedError(
f"Unhandled scan type: {typ}"
) # pragma: no cover; post init trips first
if row_index is not None:
name, offset = row_index
offset += skip_rows
dtype = schema[name]
step = plc.Scalar.from_py(1, dtype.plc_type, stream=stream)
init = plc.Scalar.from_py(offset, dtype.plc_type, stream=stream)
index_col = Column(
plc.filling.sequence(df.num_rows, init, step, stream=stream),
is_sorted=plc.types.Sorted.YES,
order=plc.types.Order.ASCENDING,
null_order=plc.types.NullOrder.AFTER,
name=name,
dtype=dtype,
)
df = DataFrame([index_col, *df.columns], stream=df.stream)
if next(iter(schema)) != name:
df = df.select(schema)
assert all(
c.obj.type() == schema[name].plc_type for name, c in df.column_map.items()
)
if predicate is None:
return df
else:
(mask,) = broadcast(
predicate.evaluate(df), target_length=df.num_rows, stream=df.stream
)
return df.filter(mask)
| Scan |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/orchestrator/orchestrator/assets/registry_entry.py | {
"start": 1677,
"end": 1796
} | class ____(str, Enum, metaclass=CaseInsensitveKeys):
SOURCE = "source"
DESTINATION = "destination"
| ConnectorTypes |
python | pytest-dev__pytest | testing/example_scripts/dataclasses/test_compare_recursive_dataclasses.py | {
"start": 235,
"end": 614
} | class ____:
g: S
h: C2
i: str
j: str
def test_recursive_dataclasses():
left = C3(
S(10, "ten"),
C2(C(S(1, "one"), S(2, "two")), S(2, "three")),
"equal",
"left",
)
right = C3(
S(20, "xxx"),
C2(C(S(1, "one"), S(2, "yyy")), S(3, "three")),
"equal",
"right",
)
assert left == right
| C3 |
python | Textualize__rich | rich/markdown.py | {
"start": 2959,
"end": 3480
} | class ____(TextElement):
"""A Paragraph."""
style_name = "markdown.paragraph"
justify: JustifyMethod
@classmethod
def create(cls, markdown: Markdown, token: Token) -> Paragraph:
return cls(justify=markdown.justify or "left")
def __init__(self, justify: JustifyMethod) -> None:
self.justify = justify
def __rich_console__(
self, console: Console, options: ConsoleOptions
) -> RenderResult:
self.text.justify = self.justify
yield self.text
| Paragraph |
python | scikit-learn__scikit-learn | sklearn/utils/tests/test_tags.py | {
"start": 390,
"end": 456
} | class ____(ClassifierMixin, BaseEstimator):
pass
| EmptyClassifier |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_operator.py | {
"start": 1718,
"end": 25804
} | class ____:
def test___all__(self):
operator = self.module
actual_all = set(operator.__all__)
computed_all = set()
for name in vars(operator):
if name.startswith('__'):
continue
value = getattr(operator, name)
if value.__module__ in ('operator', '_operator'):
computed_all.add(name)
self.assertSetEqual(computed_all, actual_all)
def test_lt(self):
operator = self.module
self.assertRaises(TypeError, operator.lt)
self.assertRaises(TypeError, operator.lt, 1j, 2j)
self.assertFalse(operator.lt(1, 0))
self.assertFalse(operator.lt(1, 0.0))
self.assertFalse(operator.lt(1, 1))
self.assertFalse(operator.lt(1, 1.0))
self.assertTrue(operator.lt(1, 2))
self.assertTrue(operator.lt(1, 2.0))
def test_le(self):
operator = self.module
self.assertRaises(TypeError, operator.le)
self.assertRaises(TypeError, operator.le, 1j, 2j)
self.assertFalse(operator.le(1, 0))
self.assertFalse(operator.le(1, 0.0))
self.assertTrue(operator.le(1, 1))
self.assertTrue(operator.le(1, 1.0))
self.assertTrue(operator.le(1, 2))
self.assertTrue(operator.le(1, 2.0))
def test_eq(self):
operator = self.module
with torch._dynamo.error_on_graph_break(False):
class C(object):
def __eq__(self, other):
raise SyntaxError
self.assertRaises(TypeError, operator.eq)
self.assertRaises(SyntaxError, operator.eq, C(), C())
self.assertFalse(operator.eq(1, 0))
self.assertFalse(operator.eq(1, 0.0))
self.assertTrue(operator.eq(1, 1))
self.assertTrue(operator.eq(1, 1.0))
self.assertFalse(operator.eq(1, 2))
self.assertFalse(operator.eq(1, 2.0))
def test_ne(self):
operator = self.module
with torch._dynamo.error_on_graph_break(False):
class C(object):
def __ne__(self, other):
raise SyntaxError
self.assertRaises(TypeError, operator.ne)
self.assertRaises(SyntaxError, operator.ne, C(), C())
self.assertTrue(operator.ne(1, 0))
self.assertTrue(operator.ne(1, 0.0))
self.assertFalse(operator.ne(1, 1))
self.assertFalse(operator.ne(1, 1.0))
self.assertTrue(operator.ne(1, 2))
self.assertTrue(operator.ne(1, 2.0))
def test_ge(self):
operator = self.module
self.assertRaises(TypeError, operator.ge)
self.assertRaises(TypeError, operator.ge, 1j, 2j)
self.assertTrue(operator.ge(1, 0))
self.assertTrue(operator.ge(1, 0.0))
self.assertTrue(operator.ge(1, 1))
self.assertTrue(operator.ge(1, 1.0))
self.assertFalse(operator.ge(1, 2))
self.assertFalse(operator.ge(1, 2.0))
def test_gt(self):
operator = self.module
self.assertRaises(TypeError, operator.gt)
self.assertRaises(TypeError, operator.gt, 1j, 2j)
self.assertTrue(operator.gt(1, 0))
self.assertTrue(operator.gt(1, 0.0))
self.assertFalse(operator.gt(1, 1))
self.assertFalse(operator.gt(1, 1.0))
self.assertFalse(operator.gt(1, 2))
self.assertFalse(operator.gt(1, 2.0))
def test_abs(self):
operator = self.module
self.assertRaises(TypeError, operator.abs)
self.assertRaises(TypeError, operator.abs, None)
self.assertEqual(operator.abs(-1), 1)
self.assertEqual(operator.abs(1), 1)
def test_add(self):
operator = self.module
self.assertRaises(TypeError, operator.add)
self.assertRaises(TypeError, operator.add, None, None)
self.assertEqual(operator.add(3, 4), 7)
def test_bitwise_and(self):
operator = self.module
self.assertRaises(TypeError, operator.and_)
self.assertRaises(TypeError, operator.and_, None, None)
self.assertEqual(operator.and_(0xf, 0xa), 0xa)
def test_concat(self):
operator = self.module
self.assertRaises(TypeError, operator.concat)
self.assertRaises(TypeError, operator.concat, None, None)
self.assertEqual(operator.concat('py', 'thon'), 'python')
self.assertEqual(operator.concat([1, 2], [3, 4]), [1, 2, 3, 4])
self.assertEqual(operator.concat(Seq1([5, 6]), Seq1([7])), [5, 6, 7])
self.assertEqual(operator.concat(Seq2([5, 6]), Seq2([7])), [5, 6, 7])
self.assertRaises(TypeError, operator.concat, 13, 29)
def test_countOf(self):
operator = self.module
self.assertRaises(TypeError, operator.countOf)
self.assertRaises(TypeError, operator.countOf, None, None)
self.assertRaises(ZeroDivisionError, operator.countOf, BadIterable(), 1)
self.assertEqual(operator.countOf([1, 2, 1, 3, 1, 4], 3), 1)
self.assertEqual(operator.countOf([1, 2, 1, 3, 1, 4], 5), 0)
# is but not ==
nan = float("nan")
self.assertEqual(operator.countOf([nan, nan, 21], nan), 2)
# == but not is
self.assertEqual(operator.countOf([{}, 1, {}, 2], {}), 2)
def test_delitem(self):
operator = self.module
a = [4, 3, 2, 1]
self.assertRaises(TypeError, operator.delitem, a)
self.assertRaises(TypeError, operator.delitem, a, None)
self.assertIsNone(operator.delitem(a, 1))
self.assertEqual(a, [4, 2, 1])
def test_floordiv(self):
operator = self.module
self.assertRaises(TypeError, operator.floordiv, 5)
self.assertRaises(TypeError, operator.floordiv, None, None)
self.assertEqual(operator.floordiv(5, 2), 2)
def test_truediv(self):
operator = self.module
self.assertRaises(TypeError, operator.truediv, 5)
self.assertRaises(TypeError, operator.truediv, None, None)
self.assertEqual(operator.truediv(5, 2), 2.5)
def test_getitem(self):
operator = self.module
a = range(10)
self.assertRaises(TypeError, operator.getitem)
self.assertRaises(TypeError, operator.getitem, a, None)
self.assertEqual(operator.getitem(a, 2), 2)
def test_indexOf(self):
operator = self.module
self.assertRaises(TypeError, operator.indexOf)
self.assertRaises(TypeError, operator.indexOf, None, None)
self.assertRaises(ZeroDivisionError, operator.indexOf, BadIterable(), 1)
self.assertEqual(operator.indexOf([4, 3, 2, 1], 3), 1)
self.assertRaises(ValueError, operator.indexOf, [4, 3, 2, 1], 0)
nan = float("nan")
self.assertEqual(operator.indexOf([nan, nan, 21], nan), 0)
self.assertEqual(operator.indexOf([{}, 1, {}, 2], {}), 0)
it = iter('leave the iterator at exactly the position after the match')
self.assertEqual(operator.indexOf(it, 'a'), 2)
self.assertEqual(next(it), 'v')
def test_invert(self):
operator = self.module
self.assertRaises(TypeError, operator.invert)
self.assertRaises(TypeError, operator.invert, None)
self.assertEqual(operator.inv(4), -5)
def test_lshift(self):
operator = self.module
self.assertRaises(TypeError, operator.lshift)
self.assertRaises(TypeError, operator.lshift, None, 42)
self.assertEqual(operator.lshift(5, 1), 10)
self.assertEqual(operator.lshift(5, 0), 5)
self.assertRaises(ValueError, operator.lshift, 2, -1)
def test_mod(self):
operator = self.module
self.assertRaises(TypeError, operator.mod)
self.assertRaises(TypeError, operator.mod, None, 42)
self.assertEqual(operator.mod(5, 2), 1)
def test_mul(self):
operator = self.module
self.assertRaises(TypeError, operator.mul)
self.assertRaises(TypeError, operator.mul, None, None)
self.assertEqual(operator.mul(5, 2), 10)
def test_matmul(self):
operator = self.module
self.assertRaises(TypeError, operator.matmul)
self.assertRaises(TypeError, operator.matmul, 42, 42)
with torch._dynamo.error_on_graph_break(False):
class M:
def __matmul__(self, other):
return other - 1
self.assertEqual(M() @ 42, 41)
def test_neg(self):
operator = self.module
self.assertRaises(TypeError, operator.neg)
self.assertRaises(TypeError, operator.neg, None)
self.assertEqual(operator.neg(5), -5)
self.assertEqual(operator.neg(-5), 5)
self.assertEqual(operator.neg(0), 0)
self.assertEqual(operator.neg(-0), 0)
def test_bitwise_or(self):
operator = self.module
self.assertRaises(TypeError, operator.or_)
self.assertRaises(TypeError, operator.or_, None, None)
self.assertEqual(operator.or_(0xa, 0x5), 0xf)
def test_pos(self):
operator = self.module
self.assertRaises(TypeError, operator.pos)
self.assertRaises(TypeError, operator.pos, None)
self.assertEqual(operator.pos(5), 5)
self.assertEqual(operator.pos(-5), -5)
self.assertEqual(operator.pos(0), 0)
self.assertEqual(operator.pos(-0), 0)
def test_pow(self):
operator = self.module
self.assertRaises(TypeError, operator.pow)
self.assertRaises(TypeError, operator.pow, None, None)
self.assertEqual(operator.pow(3,5), 3**5)
self.assertRaises(TypeError, operator.pow, 1)
self.assertRaises(TypeError, operator.pow, 1, 2, 3)
def test_rshift(self):
operator = self.module
self.assertRaises(TypeError, operator.rshift)
self.assertRaises(TypeError, operator.rshift, None, 42)
self.assertEqual(operator.rshift(5, 1), 2)
self.assertEqual(operator.rshift(5, 0), 5)
self.assertRaises(ValueError, operator.rshift, 2, -1)
def test_contains(self):
operator = self.module
self.assertRaises(TypeError, operator.contains)
self.assertRaises(TypeError, operator.contains, None, None)
self.assertRaises(ZeroDivisionError, operator.contains, BadIterable(), 1)
self.assertTrue(operator.contains(range(4), 2))
self.assertFalse(operator.contains(range(4), 5))
def test_setitem(self):
operator = self.module
a = list(range(3))
self.assertRaises(TypeError, operator.setitem, a)
self.assertRaises(TypeError, operator.setitem, a, None, None)
self.assertIsNone(operator.setitem(a, 0, 2))
self.assertEqual(a, [2, 1, 2])
self.assertRaises(IndexError, operator.setitem, a, 4, 2)
def test_sub(self):
operator = self.module
self.assertRaises(TypeError, operator.sub)
self.assertRaises(TypeError, operator.sub, None, None)
self.assertEqual(operator.sub(5, 2), 3)
def test_truth(self):
operator = self.module
with torch._dynamo.error_on_graph_break(False):
class C(object):
def __bool__(self):
raise SyntaxError
self.assertRaises(TypeError, operator.truth)
self.assertRaises(SyntaxError, operator.truth, C())
self.assertTrue(operator.truth(5))
self.assertTrue(operator.truth([0]))
self.assertFalse(operator.truth(0))
self.assertFalse(operator.truth([]))
def test_bitwise_xor(self):
operator = self.module
self.assertRaises(TypeError, operator.xor)
self.assertRaises(TypeError, operator.xor, None, None)
self.assertEqual(operator.xor(0xb, 0xc), 0x7)
def test_is(self):
operator = self.module
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.assertRaises(TypeError, operator.is_)
self.assertTrue(operator.is_(a, b))
self.assertFalse(operator.is_(a,c))
def test_is_not(self):
operator = self.module
a = b = 'xyzpdq'
c = a[:3] + b[3:]
self.assertRaises(TypeError, operator.is_not)
self.assertFalse(operator.is_not(a, b))
self.assertTrue(operator.is_not(a,c))
def test_attrgetter(self):
operator = self.module
with torch._dynamo.error_on_graph_break(False):
class A:
pass
a = A()
a.name = 'arthur'
f = operator.attrgetter('name')
self.assertEqual(f(a), 'arthur')
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, f, a, 'dent')
self.assertRaises(TypeError, f, a, surname='dent')
f = operator.attrgetter('rank')
self.assertRaises(AttributeError, f, a)
self.assertRaises(TypeError, operator.attrgetter, 2)
self.assertRaises(TypeError, operator.attrgetter)
# multiple gets
record = A()
record.x = 'X'
record.y = 'Y'
record.z = 'Z'
self.assertEqual(operator.attrgetter('x','z','y')(record), ('X', 'Z', 'Y'))
self.assertRaises(TypeError, operator.attrgetter, ('x', (), 'y'))
with torch._dynamo.error_on_graph_break(False):
class C(object):
def __getattr__(self, name):
raise SyntaxError
self.assertRaises(SyntaxError, operator.attrgetter('foo'), C())
# recursive gets
a = A()
a.name = 'arthur'
a.child = A()
a.child.name = 'thomas'
f = operator.attrgetter('child.name')
self.assertEqual(f(a), 'thomas')
self.assertRaises(AttributeError, f, a.child)
f = operator.attrgetter('name', 'child.name')
self.assertEqual(f(a), ('arthur', 'thomas'))
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter('child.')
self.assertRaises(AttributeError, f, a)
f = operator.attrgetter('.child')
self.assertRaises(AttributeError, f, a)
a.child.child = A()
a.child.child.name = 'johnson'
f = operator.attrgetter('child.child.name')
self.assertEqual(f(a), 'johnson')
f = operator.attrgetter('name', 'child.name', 'child.child.name')
self.assertEqual(f(a), ('arthur', 'thomas', 'johnson'))
def test_itemgetter(self):
operator = self.module
a = 'ABCDE'
f = operator.itemgetter(2)
self.assertEqual(f(a), 'C')
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, f, a, 3)
self.assertRaises(TypeError, f, a, size=3)
f = operator.itemgetter(10)
self.assertRaises(IndexError, f, a)
with torch._dynamo.error_on_graph_break(False):
class C(object):
def __getitem__(self, name):
raise SyntaxError
self.assertRaises(SyntaxError, operator.itemgetter(42), C())
f = operator.itemgetter('name')
self.assertRaises(TypeError, f, a)
self.assertRaises(TypeError, operator.itemgetter)
d = dict(key='val')
f = operator.itemgetter('key')
self.assertEqual(f(d), 'val')
f = operator.itemgetter('nonkey')
self.assertRaises(KeyError, f, d)
# example used in the docs
inventory = [('apple', 3), ('banana', 2), ('pear', 5), ('orange', 1)]
getcount = operator.itemgetter(1)
self.assertEqual(list(map(getcount, inventory)), [3, 2, 5, 1])
self.assertEqual(sorted(inventory, key=getcount),
[('orange', 1), ('banana', 2), ('apple', 3), ('pear', 5)])
# multiple gets
data = list(map(str, range(20)))
self.assertEqual(operator.itemgetter(2,10,5)(data), ('2', '10', '5'))
self.assertRaises(TypeError, operator.itemgetter(2, 'x', 5), data)
# interesting indices
t = tuple('abcde')
self.assertEqual(operator.itemgetter(-1)(t), 'e')
self.assertEqual(operator.itemgetter(slice(2, 4))(t), ('c', 'd'))
# interesting sequences
with torch._dynamo.error_on_graph_break(False):
class T(tuple):
'Tuple subclass'
pass
self.assertEqual(operator.itemgetter(0)(T('abc')), 'a')
self.assertEqual(operator.itemgetter(0)(['a', 'b', 'c']), 'a')
self.assertEqual(operator.itemgetter(0)(range(100, 200)), 100)
def test_methodcaller(self):
operator = self.module
self.assertRaises(TypeError, operator.methodcaller)
self.assertRaises(TypeError, operator.methodcaller, 12)
with torch._dynamo.error_on_graph_break(False):
class A:
def foo(self, *args, **kwds):
return args[0] + args[1]
def bar(self, f=42):
return f
def baz(*args, **kwds):
return kwds['name'], kwds['self']
a = A()
f = operator.methodcaller('foo')
self.assertRaises(IndexError, f, a)
f = operator.methodcaller('foo', 1, 2)
self.assertEqual(f(a), 3)
self.assertRaises(TypeError, f)
self.assertRaises(TypeError, f, a, 3)
self.assertRaises(TypeError, f, a, spam=3)
f = operator.methodcaller('bar')
self.assertEqual(f(a), 42)
self.assertRaises(TypeError, f, a, a)
f = operator.methodcaller('bar', f=5)
self.assertEqual(f(a), 5)
f = operator.methodcaller('baz', name='spam', self='eggs')
self.assertEqual(f(a), ('spam', 'eggs'))
def test_inplace(self):
operator = self.module
with torch._dynamo.error_on_graph_break(False):
class C(object):
def __iadd__ (self, other): return "iadd"
def __iand__ (self, other): return "iand"
def __ifloordiv__(self, other): return "ifloordiv"
def __ilshift__ (self, other): return "ilshift"
def __imod__ (self, other): return "imod"
def __imul__ (self, other): return "imul"
def __imatmul__ (self, other): return "imatmul"
def __ior__ (self, other): return "ior"
def __ipow__ (self, other): return "ipow"
def __irshift__ (self, other): return "irshift"
def __isub__ (self, other): return "isub"
def __itruediv__ (self, other): return "itruediv"
def __ixor__ (self, other): return "ixor"
def __getitem__(self, other): return 5 # so that C is a sequence
c = C()
self.assertEqual(operator.iadd (c, 5), "iadd")
self.assertEqual(operator.iand (c, 5), "iand")
self.assertEqual(operator.ifloordiv(c, 5), "ifloordiv")
self.assertEqual(operator.ilshift (c, 5), "ilshift")
self.assertEqual(operator.imod (c, 5), "imod")
self.assertEqual(operator.imul (c, 5), "imul")
self.assertEqual(operator.imatmul (c, 5), "imatmul")
self.assertEqual(operator.ior (c, 5), "ior")
self.assertEqual(operator.ipow (c, 5), "ipow")
self.assertEqual(operator.irshift (c, 5), "irshift")
self.assertEqual(operator.isub (c, 5), "isub")
self.assertEqual(operator.itruediv (c, 5), "itruediv")
self.assertEqual(operator.ixor (c, 5), "ixor")
self.assertEqual(operator.iconcat (c, c), "iadd")
def test_iconcat_without_getitem(self):
operator = self.module
msg = "'int' object can't be concatenated"
with self.assertRaisesRegex(TypeError, msg):
operator.iconcat(1, 0.5)
def test_index(self):
operator = self.module
with torch._dynamo.error_on_graph_break(False):
class X:
def __index__(self):
return 1
self.assertEqual(operator.index(X()), 1)
self.assertEqual(operator.index(0), 0)
self.assertEqual(operator.index(1), 1)
self.assertEqual(operator.index(2), 2)
with self.assertRaises((AttributeError, TypeError)):
operator.index(1.5)
with self.assertRaises((AttributeError, TypeError)):
operator.index(Fraction(3, 7))
with self.assertRaises((AttributeError, TypeError)):
operator.index(Decimal(1))
with self.assertRaises((AttributeError, TypeError)):
operator.index(None)
def test_not_(self):
operator = self.module
with torch._dynamo.error_on_graph_break(False):
class C:
def __bool__(self):
raise SyntaxError
self.assertRaises(TypeError, operator.not_)
self.assertRaises(SyntaxError, operator.not_, C())
self.assertFalse(operator.not_(5))
self.assertFalse(operator.not_([0]))
self.assertTrue(operator.not_(0))
self.assertTrue(operator.not_([]))
def test_length_hint(self):
operator = self.module
with torch._dynamo.error_on_graph_break(False):
class X(object):
def __init__(self, value):
self.value = value
def __length_hint__(self):
if type(self.value) is type:
raise self.value
else:
return self.value
self.assertEqual(operator.length_hint([], 2), 0)
self.assertEqual(operator.length_hint(iter([1, 2, 3])), 3)
self.assertEqual(operator.length_hint(X(2)), 2)
self.assertEqual(operator.length_hint(X(NotImplemented), 4), 4)
self.assertEqual(operator.length_hint(X(TypeError), 12), 12)
with self.assertRaises(TypeError):
operator.length_hint(X("abc"))
with self.assertRaises(ValueError):
operator.length_hint(X(-2))
with self.assertRaises(LookupError):
operator.length_hint(X(LookupError))
with torch._dynamo.error_on_graph_break(False):
class Y: pass
msg = "'str' object cannot be interpreted as an integer"
with self.assertRaisesRegex(TypeError, msg):
operator.length_hint(X(2), "abc")
self.assertEqual(operator.length_hint(Y(), 10), 10)
def test_call(self):
operator = self.module
def func(*args, **kwargs): return args, kwargs
self.assertEqual(operator.call(func), ((), {}))
self.assertEqual(operator.call(func, 0, 1), ((0, 1), {}))
self.assertEqual(operator.call(func, a=2, obj=3),
((), {"a": 2, "obj": 3}))
self.assertEqual(operator.call(func, 0, 1, a=2, obj=3),
((0, 1), {"a": 2, "obj": 3}))
def test_dunder_is_original(self):
operator = self.module
names = [name for name in dir(operator) if not name.startswith('_')]
for name in names:
orig = getattr(operator, name)
dunder = getattr(operator, '__' + name.strip('_') + '__', None)
if dunder:
self.assertIs(dunder, orig)
@support.requires_docstrings
def test_attrgetter_signature(self):
operator = self.module
sig = inspect.signature(operator.attrgetter)
self.assertEqual(str(sig), '(attr, /, *attrs)')
sig = inspect.signature(operator.attrgetter('x', 'z', 'y'))
self.assertEqual(str(sig), '(obj, /)')
@support.requires_docstrings
def test_itemgetter_signature(self):
operator = self.module
sig = inspect.signature(operator.itemgetter)
self.assertEqual(str(sig), '(item, /, *items)')
sig = inspect.signature(operator.itemgetter(2, 3, 5))
self.assertEqual(str(sig), '(obj, /)')
@support.requires_docstrings
def test_methodcaller_signature(self):
operator = self.module
sig = inspect.signature(operator.methodcaller)
self.assertEqual(str(sig), '(name, /, *args, **kwargs)')
sig = inspect.signature(operator.methodcaller('foo', 2, y=3))
self.assertEqual(str(sig), '(obj, /)')
| OperatorTestCase |
python | huggingface__transformers | src/transformers/pipelines/deprecated/text2text_generation.py | {
"start": 556,
"end": 10295
} | class ____(Pipeline):
"""
Pipeline for text to text generation using seq2seq models.
Unless the model you're using explicitly sets these generation parameters in its configuration files
(`generation_config.json`), the following default values will be used:
- max_new_tokens: 256
- num_beams: 4
Example:
```python
>>> from transformers import pipeline
>>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap")
>>> generator(
... "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google"
... )
[{'generated_text': 'question: Who created the RuPERTa-base?'}]
```
Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
text generation parameters in [Text generation strategies](../generation_strategies) and [Text
generation](text_generation).
This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task
identifier: `"text2text-generation"`.
The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
up-to-date list of available models on
[huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available
parameters, see the [following
documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate)
Usage:
```python
text2text_generator = pipeline("text2text-generation")
text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
```"""
_pipeline_calls_generate = True
_load_processor = False
_load_image_processor = False
_load_feature_extractor = False
_load_tokenizer = True
# Make sure the docstring is updated when the default generation config is changed (in all pipelines in this file)
_default_generation_config = GenerationConfig(
max_new_tokens=256,
num_beams=4,
)
# Used in the return key of the pipeline.
return_name = "generated"
def __init__(self, *args, **kwargs):
if self.return_name == "generated": # Check this isn't summarization/translation instead
logger.warning_once(
"The `Text2TextGenerationPipeline` is deprecated and no longer maintained. For most "
"purposes, we recommend using newer models with causal pipelines like "
"`TextGenerationPipeline` or `ImageTextToTextPipeline`."
)
super().__init__(*args, **kwargs)
self.check_model_type(MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES)
def _sanitize_parameters(
self,
return_tensors=None,
return_text=None,
return_type=None,
clean_up_tokenization_spaces=None,
truncation=None,
stop_sequence=None,
**generate_kwargs,
):
preprocess_params = {}
if truncation is not None:
preprocess_params["truncation"] = truncation
forward_params = generate_kwargs
postprocess_params = {}
if return_tensors is not None and return_type is None:
return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
postprocess_params["return_type"] = return_type
if clean_up_tokenization_spaces is not None:
postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
if stop_sequence is not None:
stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
if len(stop_sequence_ids) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim."
)
generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
if self.assistant_model is not None:
forward_params["assistant_model"] = self.assistant_model
if self.assistant_tokenizer is not None:
forward_params["tokenizer"] = self.tokenizer
forward_params["assistant_tokenizer"] = self.assistant_tokenizer
return preprocess_params, forward_params, postprocess_params
def check_inputs(self, input_length: int, min_length: int, max_new_tokens: int):
"""
Checks whether there might be something wrong with given input with regard to the model.
"""
return True
def _parse_and_tokenize(self, *args, truncation):
prefix = self.prefix if self.prefix is not None else ""
if isinstance(args[0], list):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
args = ([prefix + arg for arg in args[0]],)
padding = True
elif isinstance(args[0], str):
args = (prefix + args[0],)
padding = False
else:
raise TypeError(
f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
)
inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors="pt")
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__(self, *args: str | list[str], **kwargs: Any) -> list[dict[str, str]]:
r"""
Generate the output text(s) using text(s) given as inputs.
Args:
args (`str` or `list[str]`):
Input text for the encoder.
return_tensors (`bool`, *optional*, defaults to `False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (`bool`, *optional*, defaults to `True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the potential extra spaces in the text output.
truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
(default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
max_length instead of throwing an error down the line.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
[here](./text_generation)).
Return:
A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
"""
result = super().__call__(*args, **kwargs)
if (
isinstance(args[0], list)
and all(isinstance(el, str) for el in args[0])
and all(len(res) == 1 for res in result)
):
return [res[0] for res in result]
return result
def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
return inputs
def _forward(self, model_inputs, **generate_kwargs):
in_b, input_length = model_inputs["input_ids"].shape
# User-defined `generation_config` passed to the pipeline call take precedence
if "generation_config" not in generate_kwargs:
generate_kwargs["generation_config"] = self.generation_config
min_length = generate_kwargs.get("min_length", generate_kwargs["generation_config"].min_length)
max_new_tokens = generate_kwargs.get("max_new_tokens", generate_kwargs["generation_config"].max_new_tokens)
self.check_inputs(input_length, min_length, max_new_tokens)
output_ids = self.model.generate(**model_inputs, **generate_kwargs)
out_b = output_ids.shape[0]
output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
return {"output_ids": output_ids}
def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
records = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
record = {f"{self.return_name}_token_ids": output_ids}
elif return_type == ReturnType.TEXT:
record = {
f"{self.return_name}_text": self.tokenizer.decode(
output_ids,
skip_special_tokens=True,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
)
}
records.append(record)
return records
@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
| Text2TextGenerationPipeline |
python | apache__airflow | providers/keycloak/src/airflow/providers/keycloak/auth_manager/keycloak_auth_manager.py | {
"start": 3140,
"end": 14767
} | class ____(BaseAuthManager[KeycloakAuthManagerUser]):
"""
Keycloak auth manager.
Leverages Keycloak to perform authentication and authorization in Airflow.
"""
def deserialize_user(self, token: dict[str, Any]) -> KeycloakAuthManagerUser:
return KeycloakAuthManagerUser(
user_id=token.pop("user_id"),
name=token.pop("name"),
access_token=token.pop("access_token"),
refresh_token=token.pop("refresh_token"),
)
def serialize_user(self, user: KeycloakAuthManagerUser) -> dict[str, Any]:
return {
"user_id": user.get_id(),
"name": user.get_name(),
"access_token": user.access_token,
"refresh_token": user.refresh_token,
}
def get_url_login(self, **kwargs) -> str:
base_url = conf.get("api", "base_url", fallback="/")
return urljoin(base_url, f"{AUTH_MANAGER_FASTAPI_APP_PREFIX}/login")
def get_url_logout(self) -> str | None:
base_url = conf.get("api", "base_url", fallback="/")
return urljoin(base_url, f"{AUTH_MANAGER_FASTAPI_APP_PREFIX}/logout")
def refresh_user(self, *, user: KeycloakAuthManagerUser) -> KeycloakAuthManagerUser | None:
if self._token_expired(user.access_token):
log.debug("Refreshing the token")
client = self.get_keycloak_client()
tokens = client.refresh_token(user.refresh_token)
user.refresh_token = tokens["refresh_token"]
user.access_token = tokens["access_token"]
return user
return None
def is_authorized_configuration(
self,
*,
method: ResourceMethod,
user: KeycloakAuthManagerUser,
details: ConfigurationDetails | None = None,
) -> bool:
config_section = details.section if details else None
return self._is_authorized(
method=method,
resource_type=KeycloakResource.CONFIGURATION,
user=user,
resource_id=config_section,
)
def is_authorized_connection(
self,
*,
method: ResourceMethod,
user: KeycloakAuthManagerUser,
details: ConnectionDetails | None = None,
) -> bool:
connection_id = details.conn_id if details else None
return self._is_authorized(
method=method, resource_type=KeycloakResource.CONNECTION, user=user, resource_id=connection_id
)
def is_authorized_dag(
self,
*,
method: ResourceMethod,
user: KeycloakAuthManagerUser,
access_entity: DagAccessEntity | None = None,
details: DagDetails | None = None,
) -> bool:
dag_id = details.id if details else None
access_entity_str = access_entity.value if access_entity else None
return self._is_authorized(
method=method,
resource_type=KeycloakResource.DAG,
user=user,
resource_id=dag_id,
attributes={"dag_entity": access_entity_str},
)
def is_authorized_backfill(
self, *, method: ResourceMethod, user: KeycloakAuthManagerUser, details: BackfillDetails | None = None
) -> bool:
backfill_id = str(details.id) if details else None
return self._is_authorized(
method=method, resource_type=KeycloakResource.BACKFILL, user=user, resource_id=backfill_id
)
def is_authorized_asset(
self, *, method: ResourceMethod, user: KeycloakAuthManagerUser, details: AssetDetails | None = None
) -> bool:
asset_id = details.id if details else None
return self._is_authorized(
method=method, resource_type=KeycloakResource.ASSET, user=user, resource_id=asset_id
)
def is_authorized_asset_alias(
self,
*,
method: ResourceMethod,
user: KeycloakAuthManagerUser,
details: AssetAliasDetails | None = None,
) -> bool:
asset_alias_id = details.id if details else None
return self._is_authorized(
method=method,
resource_type=KeycloakResource.ASSET_ALIAS,
user=user,
resource_id=asset_alias_id,
)
def is_authorized_variable(
self, *, method: ResourceMethod, user: KeycloakAuthManagerUser, details: VariableDetails | None = None
) -> bool:
variable_key = details.key if details else None
return self._is_authorized(
method=method, resource_type=KeycloakResource.VARIABLE, user=user, resource_id=variable_key
)
def is_authorized_pool(
self, *, method: ResourceMethod, user: KeycloakAuthManagerUser, details: PoolDetails | None = None
) -> bool:
pool_name = details.name if details else None
return self._is_authorized(
method=method, resource_type=KeycloakResource.POOL, user=user, resource_id=pool_name
)
def is_authorized_view(self, *, access_view: AccessView, user: KeycloakAuthManagerUser) -> bool:
return self._is_authorized(
method="GET",
resource_type=KeycloakResource.VIEW,
user=user,
resource_id=access_view.value,
)
def is_authorized_custom_view(
self, *, method: ResourceMethod | str, resource_name: str, user: KeycloakAuthManagerUser
) -> bool:
return self._is_authorized(
method=method, resource_type=KeycloakResource.CUSTOM, user=user, resource_id=resource_name
)
def filter_authorized_menu_items(
self, menu_items: list[MenuItem], *, user: KeycloakAuthManagerUser
) -> list[MenuItem]:
authorized_menus = self._is_batch_authorized(
permissions=[("MENU", menu_item.value) for menu_item in menu_items],
user=user,
)
return [MenuItem(menu[1]) for menu in authorized_menus]
def get_fastapi_app(self) -> FastAPI | None:
from airflow.providers.keycloak.auth_manager.routes.login import login_router
from airflow.providers.keycloak.auth_manager.routes.token import token_router
app = FastAPI(
title="Keycloak auth manager sub application",
description=(
"This is the Keycloak auth manager fastapi sub application. This API is only available if the "
"auth manager used in the Airflow environment is Keycloak auth manager. "
"This sub application provides login routes."
),
)
app.include_router(login_router)
app.include_router(token_router)
return app
@staticmethod
def get_cli_commands() -> list[CLICommand]:
"""Vends CLI commands to be included in Airflow CLI."""
return [
GroupCommand(
name="keycloak-auth-manager",
help="Manage resources used by Keycloak auth manager",
subcommands=KEYCLOAK_AUTH_MANAGER_COMMANDS,
),
]
@staticmethod
def get_keycloak_client() -> KeycloakOpenID:
client_id = conf.get(CONF_SECTION_NAME, CONF_CLIENT_ID_KEY)
client_secret = conf.get(CONF_SECTION_NAME, CONF_CLIENT_SECRET_KEY)
realm = conf.get(CONF_SECTION_NAME, CONF_REALM_KEY)
server_url = conf.get(CONF_SECTION_NAME, CONF_SERVER_URL_KEY)
return KeycloakOpenID(
server_url=server_url,
client_id=client_id,
client_secret_key=client_secret,
realm_name=realm,
)
def _is_authorized(
self,
*,
method: ResourceMethod | str,
resource_type: KeycloakResource,
user: KeycloakAuthManagerUser,
resource_id: str | None = None,
attributes: dict[str, str | None] | None = None,
) -> bool:
client_id = conf.get(CONF_SECTION_NAME, CONF_CLIENT_ID_KEY)
realm = conf.get(CONF_SECTION_NAME, CONF_REALM_KEY)
server_url = conf.get(CONF_SECTION_NAME, CONF_SERVER_URL_KEY)
context_attributes = prune_dict(attributes or {})
if resource_id:
context_attributes[RESOURCE_ID_ATTRIBUTE_NAME] = resource_id
elif method == "GET":
method = "LIST"
resp = requests.post(
self._get_token_url(server_url, realm),
data=self._get_payload(client_id, f"{resource_type.value}#{method}", context_attributes),
headers=self._get_headers(user.access_token),
)
if resp.status_code == 200:
return True
if resp.status_code == 403:
return False
if resp.status_code == 400:
error = json.loads(resp.text)
raise AirflowException(
f"Request not recognized by Keycloak. {error.get('error')}. {error.get('error_description')}"
)
raise AirflowException(f"Unexpected error: {resp.status_code} - {resp.text}")
def _is_batch_authorized(
self,
*,
permissions: list[tuple[ExtendedResourceMethod, str]],
user: KeycloakAuthManagerUser,
) -> set[tuple[ExtendedResourceMethod, str]]:
client_id = conf.get(CONF_SECTION_NAME, CONF_CLIENT_ID_KEY)
realm = conf.get(CONF_SECTION_NAME, CONF_REALM_KEY)
server_url = conf.get(CONF_SECTION_NAME, CONF_SERVER_URL_KEY)
resp = requests.post(
self._get_token_url(server_url, realm),
data=self._get_batch_payload(client_id, permissions),
headers=self._get_headers(user.access_token),
)
if resp.status_code == 200:
return {(perm["scopes"][0], perm["rsname"]) for perm in resp.json()}
if resp.status_code == 403:
return set()
if resp.status_code == 400:
error = json.loads(resp.text)
raise AirflowException(
f"Request not recognized by Keycloak. {error.get('error')}. {error.get('error_description')}"
)
raise AirflowException(f"Unexpected error: {resp.status_code} - {resp.text}")
@staticmethod
def _get_token_url(server_url, realm):
return f"{server_url}/realms/{realm}/protocol/openid-connect/token"
@staticmethod
def _get_payload(client_id: str, permission: str, attributes: dict[str, str] | None = None):
payload: dict[str, Any] = {
"grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket",
"audience": client_id,
"permission": permission,
}
if attributes:
payload["context"] = {"attributes": attributes}
return payload
@staticmethod
def _get_batch_payload(client_id: str, permissions: list[tuple[ExtendedResourceMethod, str]]):
payload: dict[str, Any] = {
"grant_type": "urn:ietf:params:oauth:grant-type:uma-ticket",
"audience": client_id,
"permission": [f"{permission[1]}#{permission[0]}" for permission in permissions],
"response_mode": "permissions",
}
return payload
@staticmethod
def _get_headers(access_token):
return {
"Authorization": f"Bearer {access_token}",
"Content-Type": "application/x-www-form-urlencoded",
}
@staticmethod
def _token_expired(token: str) -> bool:
"""
Check whether a JWT token is expired.
:meta private:
:param token: the token
"""
payload_b64 = token.split(".")[1] + "=="
payload_bytes = urlsafe_b64decode(payload_b64)
payload = json.loads(payload_bytes)
return payload["exp"] < int(time.time())
| KeycloakAuthManager |
python | PrefectHQ__prefect | tests/server/models/test_workers.py | {
"start": 7444,
"end": 8056
} | class ____:
async def test_count_work_pool(
self, session: AsyncSession, work_pool: schemas.core.WorkPool
):
result = await models.workers.count_work_pools(
session=session,
)
assert result == 1
random_name = "not-my-work-pool"
assert random_name != work_pool.name
filtered_result = await models.workers.count_work_pools(
session=session,
work_pool_filter=schemas.filters.WorkPoolFilter(
name={"any_": [random_name]}
),
)
assert filtered_result == 0
| TestCountWorkPools |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/digitalocean/tests.py | {
"start": 252,
"end": 1344
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = DigitalOceanProvider.id
def get_mocked_response(self):
return MockedResponse(
HTTPStatus.OK,
"""
{
"account": {
"droplet_limit": 25,
"floating_ip_limit": 5,
"email": "sammy@example.com",
"uuid": "b6fr89dbf6d9156cace5f3c78dc9851d957381ef",
"email_verified": true,
"status": "active",
"status_message": ""
}
}
""",
)
def get_login_response_json(self, with_refresh_token=True):
return """
{
"access_token": "testac",
"token_type": "bearer",
"expires_in": 2592000,
"refresh_token": "00a3aae641658d",
"scope": "read write",
"info": {
"name": "Sammy the Shark",
"email":"sammy@example.com",
"uuid":"b6fr89dbf6d9156cace5f3c78dc9851d957381ef"
}
}"""
def get_expected_to_str(self):
return "sammy@example.com"
| DigitalOceanTests |
python | huggingface__transformers | tests/models/llava_onevision/test_modeling_llava_onevision.py | {
"start": 6039,
"end": 10530
} | class ____(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase):
"""
Model tester for `LlavaOnevisionForConditionalGeneration`.
"""
all_model_classes = (
(
LlavaOnevisionModel,
LlavaOnevisionForConditionalGeneration,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{
"image-text-to-text": LlavaOnevisionForConditionalGeneration,
"any-to-any": LlavaOnevisionForConditionalGeneration,
}
if is_torch_available()
else {}
)
# MP works but offload doesn't work when the MultiheadAttention is offloaded
# TODO: One potential solution would be to add to set preload_module_classes = ["Siglip2MultiheadAttentionPoolingHead"]
# in the dispatch_model function
test_cpu_offload = False
test_disk_offload_safetensors = False
test_disk_offload_bin = False
_is_composite = True
def setUp(self):
self.model_tester = LlavaOnevisionVisionText2TextModelTester(self)
common_properties = ["image_token_index", "video_token_index", "vision_feature_layer"]
self.config_tester = ConfigTester(
self, config_class=LlavaOnevisionConfig, has_text_modality=False, common_properties=common_properties
)
def test_config(self):
self.config_tester.run_common_tests()
def test_odd_sized_image(self):
# prepare model configuration
config = self.model_tester.get_config()
# prepare input
num_image_tokens = 10
pixel_values = floats_tensor([1, 2, 3, config.vision_config.image_size, config.vision_config.image_size])
input_ids = ids_tensor([1, 64], config.text_config.vocab_size - 2) + 2
input_ids[:, :num_image_tokens] = config.image_token_index
attention_mask = torch.ones(input_ids.shape, dtype=torch.long).to(torch_device)
inputs_dict = {
"pixel_values": pixel_values,
"image_sizes": torch.tensor([[13, 16]]), # odd-sized image
"input_ids": input_ids,
"attention_mask": attention_mask,
}
# forward with odd-sized image input
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
model(**inputs_dict)
@parameterized.expand(
[
(-1,),
([-1],),
([-1, -2],),
],
)
def test_vision_feature_layers(self, vision_feature_layer):
"""
Test that we can use either one vision feature layer, or a list of
vision feature layers.
"""
config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.vision_feature_layer = vision_feature_layer
num_feature_layers = 1 if isinstance(vision_feature_layer, int) else len(vision_feature_layer)
hidden_size = config.vision_config.hidden_size
expected_features = hidden_size * num_feature_layers
for model_class in self.all_model_classes:
model = model_class(config).to(torch_device)
# We should have the right number of input features,
# and should be able to run a forward pass without exploding
base_model = getattr(model, "model", model)
assert base_model.multi_modal_projector.linear_1.in_features == expected_features
model(**input_dict)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, SiglipVisionModel does not support standalone training"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, SiglipVisionModel does not support standalone training"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, SiglipVisionModel does not support standalone training"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
@unittest.skip(
"VLMs need lots of steps to prepare images/mask correctly to get pad-free inputs. Can be tested as part of LLM test"
)
def test_flash_attention_2_padding_matches_padding_free_with_position_ids(self):
pass
@require_torch
| LlavaOnevisionForConditionalGenerationModelTest |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/plugins/facets.py | {
"start": 3825,
"end": 4116
} | class ____(RedactMixin):
"""
Describes an unknown operator.
This specifies the (class) name of the operator and its properties.
"""
name: str
properties: dict[str, object]
type: str = "operator"
_skip_redact = ["name", "type"]
@define
| UnknownOperatorInstance |
python | Pylons__pyramid | docs/tutorials/wiki/src/tests/tests/test_views.py | {
"start": 1263,
"end": 2219
} | class ____:
def _callFUT(self, context, request):
from tutorial.views.default import add_page
return add_page(context, request)
def test_it_notsubmitted(self):
context = testing.DummyResource()
request = testing.DummyRequest()
request.subpath = ['AnotherPage']
info = self._callFUT(context, request)
assert info['page'].data == ''
assert info['save_url'] == request.resource_url(
context, 'add_page', 'AnotherPage')
def test_it_submitted(self):
context = testing.DummyResource()
request = testing.DummyRequest({
'form.submitted': True,
'body': 'Hello yo!',
})
request.subpath = ['AnotherPage']
self._callFUT(context, request)
page = context['AnotherPage']
assert page.data == 'Hello yo!'
assert page.__name__ == 'AnotherPage'
assert page.__parent__ == context
| Test_add_page |
python | pytest-dev__pytest | src/_pytest/reports.py | {
"start": 9330,
"end": 14951
} | class ____(BaseReport):
"""Basic test report object (also used for setup and teardown calls if
they fail).
Reports can contain arbitrary extra attributes.
"""
__test__ = False
# Defined by skipping plugin.
# xfail reason if xfailed, otherwise not defined. Use hasattr to distinguish.
wasxfail: str
def __init__(
self,
nodeid: str,
location: tuple[str, int | None, str],
keywords: Mapping[str, Any],
outcome: Literal["passed", "failed", "skipped"],
longrepr: None
| ExceptionInfo[BaseException]
| tuple[str, int, str]
| str
| TerminalRepr,
when: Literal["setup", "call", "teardown"],
sections: Iterable[tuple[str, str]] = (),
duration: float = 0,
start: float = 0,
stop: float = 0,
user_properties: Iterable[tuple[str, object]] | None = None,
**extra,
) -> None:
#: Normalized collection nodeid.
self.nodeid = nodeid
#: A (filesystempath, lineno, domaininfo) tuple indicating the
#: actual location of a test item - it might be different from the
#: collected one e.g. if a method is inherited from a different module.
#: The filesystempath may be relative to ``config.rootdir``.
#: The line number is 0-based.
self.location: tuple[str, int | None, str] = location
#: A name -> value dictionary containing all keywords and
#: markers associated with a test invocation.
self.keywords: Mapping[str, Any] = keywords
#: Test outcome, always one of "passed", "failed", "skipped".
self.outcome = outcome
#: None or a failure representation.
self.longrepr = longrepr
#: One of 'setup', 'call', 'teardown' to indicate runtest phase.
self.when: Literal["setup", "call", "teardown"] = when
#: User properties is a list of tuples (name, value) that holds user
#: defined properties of the test.
self.user_properties = list(user_properties or [])
#: Tuples of str ``(heading, content)`` with extra information
#: for the test report. Used by pytest to add text captured
#: from ``stdout``, ``stderr``, and intercepted logging events. May
#: be used by other plugins to add arbitrary information to reports.
self.sections = list(sections)
#: Time it took to run just the test.
self.duration: float = duration
#: The system time when the call started, in seconds since the epoch.
self.start: float = start
#: The system time when the call ended, in seconds since the epoch.
self.stop: float = stop
self.__dict__.update(extra)
def __repr__(self) -> str:
return f"<{self.__class__.__name__} {self.nodeid!r} when={self.when!r} outcome={self.outcome!r}>"
@classmethod
def from_item_and_call(cls, item: Item, call: CallInfo[None]) -> TestReport:
"""Create and fill a TestReport with standard item and call info.
:param item: The item.
:param call: The call info.
"""
when = call.when
# Remove "collect" from the Literal type -- only for collection calls.
assert when != "collect"
duration = call.duration
start = call.start
stop = call.stop
keywords = {x: 1 for x in item.keywords}
excinfo = call.excinfo
sections = []
if not call.excinfo:
outcome: Literal["passed", "failed", "skipped"] = "passed"
longrepr: (
None
| ExceptionInfo[BaseException]
| tuple[str, int, str]
| str
| TerminalRepr
) = None
else:
if not isinstance(excinfo, ExceptionInfo):
outcome = "failed"
longrepr = excinfo
elif isinstance(excinfo.value, skip.Exception):
outcome = "skipped"
r = excinfo._getreprcrash()
assert r is not None, (
"There should always be a traceback entry for skipping a test."
)
if excinfo.value._use_item_location:
path, line = item.reportinfo()[:2]
assert line is not None
longrepr = (os.fspath(path), line + 1, r.message)
else:
longrepr = (str(r.path), r.lineno, r.message)
elif isinstance(excinfo.value, BaseExceptionGroup) and (
excinfo.value.split(skip.Exception)[1] is None
):
# All exceptions in the group are skip exceptions.
outcome = "skipped"
excinfo = cast(
ExceptionInfo[
BaseExceptionGroup[BaseException | BaseExceptionGroup]
],
excinfo,
)
longrepr = _format_exception_group_all_skipped_longrepr(item, excinfo)
else:
outcome = "failed"
longrepr = _format_failed_longrepr(item, call, excinfo)
for rwhen, key, content in item._report_sections:
sections.append((f"Captured {key} {rwhen}", content))
return cls(
item.nodeid,
item.location,
keywords,
outcome,
longrepr,
when,
sections,
duration,
start,
stop,
user_properties=item.user_properties,
)
@final
| TestReport |
python | doocs__leetcode | solution/3200-3299/3264.Final Array State After K Multiplication Operations I/Solution.py | {
"start": 0,
"end": 321
} | class ____:
def getFinalState(self, nums: List[int], k: int, multiplier: int) -> List[int]:
pq = [(x, i) for i, x in enumerate(nums)]
heapify(pq)
for _ in range(k):
_, i = heappop(pq)
nums[i] *= multiplier
heappush(pq, (nums[i], i))
return nums
| Solution |
python | jazzband__django-oauth-toolkit | oauth2_provider/migrations/0009_add_hash_client_secret.py | {
"start": 92,
"end": 416
} | class ____(migrations.Migration):
dependencies = [
('oauth2_provider', '0008_alter_accesstoken_token'),
]
operations = [
migrations.AddField(
model_name='application',
name='hash_client_secret',
field=models.BooleanField(default=True),
),
]
| Migration |
python | Textualize__rich | examples/rainbow.py | {
"start": 166,
"end": 441
} | class ____(Highlighter):
def highlight(self, text):
for index in range(len(text)):
text.stylize(f"color({randint(16, 255)})", index, index + 1)
rainbow = RainbowHighlighter()
print(rainbow("I must not fear. Fear is the mind-killer."))
| RainbowHighlighter |
python | celery__celery | t/unit/backends/test_database.py | {
"start": 744,
"end": 1233
} | class ____:
def test_context(self):
session = Mock(name='session')
with session_cleanup(session):
pass
session.close.assert_called_with()
def test_context_raises(self):
session = Mock(name='session')
with pytest.raises(KeyError):
with session_cleanup(session):
raise KeyError()
session.rollback.assert_called_with()
session.close.assert_called_with()
@skip.if_pypy
| test_session_cleanup |
python | dagster-io__dagster | scripts/run-pyright.py | {
"start": 3726,
"end": 3866
} | class ____(TypedDict):
filesAnalyzed: int
errorCount: int
warningCount: int
informationCount: int
timeInSec: float
| Summary |
python | pytorch__pytorch | test/distributed/_composable/test_composability/test_2d_composability.py | {
"start": 2523,
"end": 3067
} | class ____(nn.Module):
def __init__(self):
super().__init__()
torch.manual_seed(0)
self.net1 = nn.Linear(5, 10)
self.relu = nn.ReLU()
self.net2 = nn.Linear(10, 15)
self.net3 = nn.Linear(15, 30)
self.net4 = nn.Linear(30, 5)
def forward(self, x):
x = F.relu(self.net1(x))
x = F.relu(self.net2(x))
x = F.relu(self.net3(x))
x = self.net4(x)
return x
def get_input(self):
return torch.rand(4, 5, device=device_type)
| SimpleModelUneven |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/extra/codemods.py | {
"start": 3144,
"end": 4196
} | class ____(VisitorBasedCodemodCommand):
"""Fix a deprecated min_magnitude=None argument for complex numbers::
st.complex_numbers(min_magnitude=None) -> st.complex_numbers(min_magnitude=0)
Note that this should be run *after* ``HypothesisFixPositionalKeywonlyArgs``,
in order to handle ``st.complex_numbers(None)``.
"""
DESCRIPTION = "Fix a deprecated min_magnitude=None argument for complex numbers."
METADATA_DEPENDENCIES = (cst.metadata.QualifiedNameProvider,)
@m.call_if_inside(
m.Call(metadata=match_qualname("hypothesis.strategies.complex_numbers"))
)
def leave_Arg(self, original_node, updated_node):
if m.matches(
updated_node, m.Arg(keyword=m.Name("min_magnitude"), value=m.Name("None"))
):
return updated_node.with_changes(value=cst.Integer("0"))
return updated_node
@functools.lru_cache
def get_fn(import_path):
mod, fn = import_path.rsplit(".", 1)
return getattr(importlib.import_module(mod), fn)
| HypothesisFixComplexMinMagnitude |
python | Pylons__pyramid | tests/test_url.py | {
"start": 42651,
"end": 43489
} | class ____(unittest.TestCase):
def _callFUT(self, route_name, request, *elements, **kw):
from pyramid.url import route_path
return route_path(route_name, request, *elements, **kw)
def _makeRequest(self):
class Request:
def route_path(self, route_name, *elements, **kw):
self.route_name = route_name
self.elements = elements
self.kw = kw
return 'route path'
return Request()
def test_it(self):
request = self._makeRequest()
result = self._callFUT('abc', request, 'a', _app_url='')
self.assertEqual(result, 'route path')
self.assertEqual(request.route_name, 'abc')
self.assertEqual(request.elements, ('a',))
self.assertEqual(request.kw, {'_app_url': ''})
| Test_route_path |
python | PyCQA__pylint | pylint/checkers/classes/class_checker.py | {
"start": 2071,
"end": 2227
} | class ____(NamedTuple):
args: list[str | None]
kws: dict[str | None, str | None]
starred_args: list[str]
starred_kws: list[str]
| _CallSignature |
python | pennersr__django-allauth | allauth/socialaccount/providers/openid/utils.py | {
"start": 1230,
"end": 1444
} | class ____:
PERSON_NAME = "http://openid.net/schema/namePerson"
PERSON_FIRST_NAME = "http://openid.net/schema/namePerson/first"
PERSON_LAST_NAME = "http://openid.net/schema/namePerson/last"
| OldAXAttribute |
python | anthropics__anthropic-sdk-python | src/anthropic/lib/streaming/_beta_messages.py | {
"start": 5169,
"end": 6223
} | class ____(Generic[ResponseFormatT]):
"""Wrapper over MessageStream that is returned by `.stream()`.
```py
with client.beta.messages.stream(...) as stream:
for chunk in stream:
...
```
"""
def __init__(
self,
api_request: Callable[[], Stream[BetaRawMessageStreamEvent]],
*,
output_format: ResponseFormatT | NotGiven,
) -> None:
self.__stream: BetaMessageStream[ResponseFormatT] | None = None
self.__api_request = api_request
self.__output_format = output_format
def __enter__(self) -> BetaMessageStream[ResponseFormatT]:
raw_stream = self.__api_request()
self.__stream = BetaMessageStream(raw_stream, output_format=self.__output_format)
return self.__stream
def __exit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
if self.__stream is not None:
self.__stream.close()
| BetaMessageStreamManager |
python | django__django | tests/db_functions/text/test_sha1.py | {
"start": 226,
"end": 1640
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
Author.objects.bulk_create(
[
Author(alias="John Smith"),
Author(alias="Jordan Élena"),
Author(alias="皇帝"),
Author(alias=""),
Author(alias=None),
]
)
def test_basic(self):
authors = (
Author.objects.annotate(
sha1_alias=SHA1("alias"),
)
.values_list("sha1_alias", flat=True)
.order_by("pk")
)
self.assertSequenceEqual(
authors,
[
"e61a3587b3f7a142b8c7b9263c82f8119398ecb7",
"0781e0745a2503e6ded05ed5bc554c421d781b0c",
"198d15ea139de04060caf95bc3e0ec5883cba881",
"da39a3ee5e6b4b0d3255bfef95601890afd80709",
(
"da39a3ee5e6b4b0d3255bfef95601890afd80709"
if connection.features.interprets_empty_strings_as_nulls
else None
),
],
)
def test_transform(self):
with register_lookup(CharField, SHA1):
authors = Author.objects.filter(
alias__sha1="e61a3587b3f7a142b8c7b9263c82f8119398ecb7",
).values_list("alias", flat=True)
self.assertSequenceEqual(authors, ["John Smith"])
| SHA1Tests |
python | fastapi__sqlmodel | docs_src/tutorial/relationship_attributes/cascade_delete_relationships/tutorial001.py | {
"start": 359,
"end": 3401
} | class ____(SQLModel, table=True):
id: Optional[int] = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: Optional[int] = Field(default=None, index=True)
team_id: Optional[int] = Field(
default=None, foreign_key="team.id", ondelete="CASCADE"
)
team: Optional[Team] = Relationship(back_populates="heroes")
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
with Session(engine) as session:
team_preventers = Team(name="Preventers", headquarters="Sharp Tower")
team_z_force = Team(name="Z-Force", headquarters="Sister Margaret's Bar")
hero_deadpond = Hero(
name="Deadpond", secret_name="Dive Wilson", team=team_z_force
)
hero_rusty_man = Hero(
name="Rusty-Man", secret_name="Tommy Sharp", age=48, team=team_preventers
)
hero_spider_boy = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
session.add(hero_deadpond)
session.add(hero_rusty_man)
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_deadpond)
session.refresh(hero_rusty_man)
session.refresh(hero_spider_boy)
print("Created hero:", hero_deadpond)
print("Created hero:", hero_rusty_man)
print("Created hero:", hero_spider_boy)
hero_spider_boy.team = team_preventers
session.add(hero_spider_boy)
session.commit()
session.refresh(hero_spider_boy)
print("Updated hero:", hero_spider_boy)
hero_black_lion = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_sure_e = Hero(name="Princess Sure-E", secret_name="Sure-E")
team_wakaland = Team(
name="Wakaland",
headquarters="Wakaland Capital City",
heroes=[hero_black_lion, hero_sure_e],
)
session.add(team_wakaland)
session.commit()
session.refresh(team_wakaland)
print("Team Wakaland:", team_wakaland)
def delete_team():
with Session(engine) as session:
statement = select(Team).where(Team.name == "Wakaland")
team = session.exec(statement).one()
session.delete(team)
session.commit()
print("Deleted team:", team)
def select_deleted_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.name == "Black Lion")
result = session.exec(statement)
hero = result.first()
print("Black Lion not found:", hero)
statement = select(Hero).where(Hero.name == "Princess Sure-E")
result = session.exec(statement)
hero = result.first()
print("Princess Sure-E not found:", hero)
def main():
create_db_and_tables()
create_heroes()
delete_team()
select_deleted_heroes()
if __name__ == "__main__":
main()
| Hero |
python | airbytehq__airbyte | airbyte-ci/connectors/metadata_service/lib/tests/test_registry.py | {
"start": 7285,
"end": 12173
} | class ____:
"""Tests for _build_connector_registry function."""
@pytest.mark.parametrize(
"entry_dicts,expected_sources_count,expected_destinations_count,description",
[
(
[
{
"sourceDefinitionId": "550e8400-e29b-41d4-a716-446655440001",
"name": "Source 1",
"dockerRepository": "test/source",
"dockerImageTag": "1.0.0",
"documentationUrl": "https://docs.test.com",
"spec": {},
},
{
"destinationDefinitionId": "550e8400-e29b-41d4-a716-446655440002",
"name": "Destination 1",
"dockerRepository": "test/dest",
"dockerImageTag": "1.0.0",
"documentationUrl": "https://docs.test.com",
"spec": {},
},
],
1,
1,
"mixed sources and destinations",
),
(
[
{
"sourceDefinitionId": "550e8400-e29b-41d4-a716-446655440001",
"name": "Source 1",
"dockerRepository": "test/source1",
"dockerImageTag": "1.0.0",
"documentationUrl": "https://docs.test.com",
"spec": {},
},
{
"sourceDefinitionId": "550e8400-e29b-41d4-a716-446655440002",
"name": "Source 2",
"dockerRepository": "test/source2",
"dockerImageTag": "1.0.0",
"documentationUrl": "https://docs.test.com",
"spec": {},
},
],
2,
0,
"sources only",
),
([], 0, 0, "empty entries"),
],
)
def test_build_connector_registry_scenarios(self, entry_dicts, expected_sources_count, expected_destinations_count, description):
"""Test registry building with different entry combinations."""
entries = []
for entry_dict in entry_dicts:
entry = Mock(spec=PolymorphicRegistryEntry)
if "sourceDefinitionId" in entry_dict:
setattr(entry, ConnectorTypePrimaryKey.SOURCE.value, entry_dict["sourceDefinitionId"])
if "destinationDefinitionId" in entry_dict:
setattr(entry, ConnectorTypePrimaryKey.DESTINATION.value, entry_dict["destinationDefinitionId"])
entries.append(entry)
with (
patch("metadata_service.registry.to_json_sanitized_dict") as mock_sanitize,
patch("metadata_service.registry._apply_metrics_to_registry_entry") as mock_apply_metrics,
patch("metadata_service.registry._apply_release_candidate_entries") as mock_apply_rc,
):
mock_sanitize.side_effect = entry_dicts
mock_apply_metrics.side_effect = lambda x, *args: x
mock_apply_rc.side_effect = lambda x, *args: x
result = _build_connector_registry(entries, {}, {})
assert isinstance(result, ConnectorRegistryV0)
assert len(result.sources) == expected_sources_count
assert len(result.destinations) == expected_destinations_count
def test_build_connector_registry_applies_metrics_and_rc(self):
"""Test that metrics and release candidates are properly applied."""
source_entry = Mock(spec=PolymorphicRegistryEntry)
setattr(source_entry, ConnectorTypePrimaryKey.SOURCE.value, "550e8400-e29b-41d4-a716-446655440001")
entry_dict = {
"sourceDefinitionId": "550e8400-e29b-41d4-a716-446655440001",
"name": "Test Source",
"dockerRepository": "test/source",
"dockerImageTag": "1.0.0",
"documentationUrl": "https://docs.test.com",
"spec": {},
}
with (
patch("metadata_service.registry.to_json_sanitized_dict") as mock_sanitize,
patch("metadata_service.registry._apply_metrics_to_registry_entry") as mock_apply_metrics,
patch("metadata_service.registry._apply_release_candidate_entries") as mock_apply_rc,
):
mock_sanitize.return_value = entry_dict
mock_apply_metrics.return_value = entry_dict
mock_apply_rc.return_value = entry_dict
result = _build_connector_registry([source_entry], {}, {})
mock_apply_metrics.assert_called_once()
mock_apply_rc.assert_called_once()
assert len(result.sources) == 1
| TestBuildConnectorRegistry |
python | apache__airflow | providers/ydb/tests/unit/ydb/hooks/test_ydb.py | {
"start": 1332,
"end": 1419
} | class ____:
def __init__(self, driver):
self._driver = driver
| FakeSessionPool |
python | pytorch__pytorch | torch/testing/_internal/autograd_function_db.py | {
"start": 14528,
"end": 19613
} | class ____(torch.autograd.Function):
@staticmethod
def forward(x, idx=(2,)):
return x[idx]
@staticmethod
def setup_context(ctx, inputs, output):
x, idx = inputs
ctx.x_shape = x.shape
ctx.idx = idx
@staticmethod
def backward(ctx, grad_output):
result = grad_output.new_zeros(ctx.x_shape)
result[ctx.idx] = grad_output
return result, None
@staticmethod
def vmap(info, in_dims, x, idx):
x_bdim, _ = in_dims
x = x.movedim(x_bdim, 1)
return ForwardHasDefaultArgs.apply(x, idx), 0
@staticmethod
def jvp(ctx, x_tangent, _):
return ForwardHasDefaultArgs.apply(x_tangent, ctx.idx)
autograd_function_db = [
OpInfo(
'NumpyCubeAutogradFunction',
op=NumpyCube.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_numpy_cube,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'NumpyExpMarkDirtyAutogradFunction',
op=lambda x: NumpyExp_.apply(x.clone()),
inplace_variant=NumpyExp_.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_numpy_cube,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'NumpyMulAutogradFunction',
op=NumpyMul.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_numpy_mul,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'NumpyCubeNotComposableAutogradFunction',
op=lambda x: NumpyCubeNotComposable.apply(x)[0],
supports_forward_ad=False,
supports_fwgrad_bwgrad=False,
sample_inputs_func=sample_inputs_numpy_cube,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'NumpySortAutogradFunction',
op=NumpySort.apply,
supports_forward_ad=False,
supports_fwgrad_bwgrad=False,
sample_inputs_func=sample_inputs_numpy_sort,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
gradcheck_wrapper=lambda y, ind: y,
),
OpInfo(
'NumpyTakeAutogradFunction',
op=NumpyTake.apply,
supports_forward_ad=False,
supports_fwgrad_bwgrad=False,
sample_inputs_func=sample_inputs_numpy_take,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'SelectAutogradFunction',
op=Select.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_select,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'CubeGenVmapAutogradFunction',
op=CubeGenVmap.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_numpy_cube,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'MulGenVmapAutogradFunction',
op=MulGenVmap.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_numpy_mul,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'SortGenVmapAutogradFunction',
op=SortGenVmap.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_numpy_sort,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
gradcheck_wrapper=lambda y, ind: y,
),
OpInfo(
'SelectGenVmapAutogradFunction',
op=SelectGenVmap.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_select,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'ScaleGradGenVmapAutogradFunction',
op=ScaleGradGenVmap.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_numpy_cube,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'ZeroGradientsGenVmapAutogradFunction',
op=ZeroGradientsGenVmap.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_numpy_mul,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
OpInfo(
'ForwardHasDefaultArgsAutogradFunction',
op=ForwardHasDefaultArgs.apply,
supports_forward_ad=True,
supports_fwgrad_bwgrad=True,
sample_inputs_func=sample_inputs_forward_default_args,
dtypes=all_types_and(torch.bool, torch.half),
supports_out=False,
),
]
| ForwardHasDefaultArgs |
python | kamyu104__LeetCode-Solutions | Python/find-the-integer-added-to-array-ii.py | {
"start": 1582,
"end": 2099
} | class ____(object):
def minimumAddedInteger(self, nums1, nums2):
"""
:type nums1: List[int]
:type nums2: List[int]
:rtype: int
"""
nums1.sort()
nums2.sort()
for i in xrange(3):
d = nums2[-1]-nums1[~i]
cnt = 0
for j in xrange(len(nums2)):
while j+cnt < len(nums1) and nums1[j+cnt]+d != nums2[j]:
cnt += 1
if cnt <= 2:
return d
return -1
| Solution3 |
python | django__django | tests/template_tests/filter_tests/test_urlize.py | {
"start": 4021,
"end": 19894
} | class ____(SimpleTestCase):
def test_urls(self):
self.assertEqual(
urlize("http://google.com"),
'<a href="http://google.com" rel="nofollow">http://google.com</a>',
)
self.assertEqual(
urlize("http://google.com/"),
'<a href="http://google.com/" rel="nofollow">http://google.com/</a>',
)
self.assertEqual(
urlize("www.google.com"),
'<a href="https://www.google.com" rel="nofollow">www.google.com</a>',
)
self.assertEqual(
urlize("djangoproject.org"),
'<a href="https://djangoproject.org" rel="nofollow">djangoproject.org</a>',
)
self.assertEqual(
urlize("djangoproject.org/"),
'<a href="https://djangoproject.org/" rel="nofollow">'
"djangoproject.org/</a>",
)
def test_url_split_chars(self):
# Quotes (single and double) and angle brackets shouldn't be considered
# part of URLs.
self.assertEqual(
urlize('www.server.com"abc'),
'<a href="https://www.server.com" rel="nofollow">www.server.com</a>"'
"abc",
)
self.assertEqual(
urlize("www.server.com'abc"),
'<a href="https://www.server.com" rel="nofollow">www.server.com</a>''
"abc",
)
self.assertEqual(
urlize("www.server.com<abc"),
'<a href="https://www.server.com" rel="nofollow">www.server.com</a><abc',
)
self.assertEqual(
urlize("www.server.com>abc"),
'<a href="https://www.server.com" rel="nofollow">www.server.com</a>>abc',
)
def test_email(self):
self.assertEqual(
urlize("info@djangoproject.org"),
'<a href="mailto:info@djangoproject.org">info@djangoproject.org</a>',
)
def test_word_with_dot(self):
self.assertEqual(urlize("some.organization"), "some.organization")
def test_https(self):
self.assertEqual(
urlize("https://google.com"),
'<a href="https://google.com" rel="nofollow">https://google.com</a>',
)
def test_quoting(self):
"""
#9655 - Check urlize doesn't overquote already quoted urls. The
teststring is the urlquoted version of 'http://hi.baidu.com/重新开始'
"""
self.assertEqual(
urlize("http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B"),
'<a href="http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B" '
'rel="nofollow">http://hi.baidu.com/%E9%87%8D%E6%96%B0%E5%BC%80%E5%A7%8B'
"</a>",
)
def test_urlencoded(self):
self.assertEqual(
urlize("www.mystore.com/30%OffCoupons!"),
'<a href="https://www.mystore.com/30%25OffCoupons" rel="nofollow">'
"www.mystore.com/30%OffCoupons</a>!",
)
self.assertEqual(
urlize("https://en.wikipedia.org/wiki/Caf%C3%A9"),
'<a href="https://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
"https://en.wikipedia.org/wiki/Caf%C3%A9</a>",
)
def test_unicode(self):
self.assertEqual(
urlize("https://en.wikipedia.org/wiki/Café"),
'<a href="https://en.wikipedia.org/wiki/Caf%C3%A9" rel="nofollow">'
"https://en.wikipedia.org/wiki/Café</a>",
)
def test_parenthesis(self):
"""
#11911 - Check urlize keeps balanced parentheses
"""
self.assertEqual(
urlize("https://en.wikipedia.org/wiki/Django_(web_framework)"),
'<a href="https://en.wikipedia.org/wiki/Django_(web_framework)" '
'rel="nofollow">https://en.wikipedia.org/wiki/Django_(web_framework)</a>',
)
self.assertEqual(
urlize("(see https://en.wikipedia.org/wiki/Django_(web_framework))"),
'(see <a href="https://en.wikipedia.org/wiki/Django_(web_framework)" '
'rel="nofollow">https://en.wikipedia.org/wiki/Django_(web_framework)</a>)',
)
def test_parenthesis_and_bracket(self):
self.assertEqual(
urlize("[(https://en.wikipedia.org/)]"),
'[(<a href="https://en.wikipedia.org/" '
'rel="nofollow">https://en.wikipedia.org/</a>)]',
)
def test_nofollow(self):
"""
#12183 - Check urlize adds nofollow properly - see #12183
"""
self.assertEqual(
urlize("foo@bar.com or www.bar.com"),
'<a href="mailto:foo@bar.com">foo@bar.com</a> or '
'<a href="https://www.bar.com" rel="nofollow">www.bar.com</a>',
)
def test_idn(self):
"""
#13704 - Check urlize handles IDN correctly
"""
# The "✶" below is \N{SIX POINTED BLACK STAR}, not "*" \N{ASTERISK}.
self.assertEqual(
urlize("http://c✶.ws"),
'<a href="http://c%E2%9C%B6.ws" rel="nofollow">http://c✶.ws</a>',
)
self.assertEqual(
urlize("www.c✶.ws"),
'<a href="https://www.c%E2%9C%B6.ws" rel="nofollow">www.c✶.ws</a>',
)
self.assertEqual(
urlize("c✶.org"),
'<a href="https://c%E2%9C%B6.org" rel="nofollow">c✶.org</a>',
)
self.assertEqual(
urlize("info@c✶.org"),
'<a href="mailto:info@c%E2%9C%B6.org">info@c✶.org</a>',
)
# Pre-encoded IDNA is urlized but not re-encoded.
self.assertEqual(
urlize("www.xn--iny-zx5a.com/idna2003"),
'<a href="https://www.xn--iny-zx5a.com/idna2003"'
' rel="nofollow">www.xn--iny-zx5a.com/idna2003</a>',
)
self.assertEqual(
urlize("www.xn--fa-hia.com/idna2008"),
'<a href="https://www.xn--fa-hia.com/idna2008"'
' rel="nofollow">www.xn--fa-hia.com/idna2008</a>',
)
def test_malformed(self):
"""
#16395 - Check urlize doesn't highlight malformed URIs
"""
self.assertEqual(urlize("http:///www.google.com"), "http:///www.google.com")
self.assertEqual(urlize("http://.google.com"), "http://.google.com")
self.assertEqual(urlize("http://@foo.com"), "http://@foo.com")
def test_tlds(self):
"""
#16656 - Check urlize accepts more TLDs
"""
self.assertEqual(
urlize("usa.gov"), '<a href="https://usa.gov" rel="nofollow">usa.gov</a>'
)
def test_invalid_email(self):
"""
#17592 - Check urlize don't crash on invalid email with dot-starting
domain
"""
self.assertEqual(urlize("email@.stream.ru"), "email@.stream.ru")
def test_uppercase(self):
"""
#18071 - Check urlize accepts uppercased URL schemes
"""
self.assertEqual(
urlize("HTTPS://github.com/"),
'<a href="https://github.com/" rel="nofollow">HTTPS://github.com/</a>',
)
def test_trailing_period(self):
"""
#18644 - Check urlize trims trailing period when followed by
parenthesis
"""
self.assertEqual(
urlize("(Go to http://www.example.com/foo.)"),
'(Go to <a href="http://www.example.com/foo" rel="nofollow">'
"http://www.example.com/foo</a>.)",
)
def test_trailing_multiple_punctuation(self):
self.assertEqual(
urlize("A test http://testing.com/example.."),
'A test <a href="http://testing.com/example" rel="nofollow">'
"http://testing.com/example</a>..",
)
self.assertEqual(
urlize("A test http://testing.com/example!!"),
'A test <a href="http://testing.com/example" rel="nofollow">'
"http://testing.com/example</a>!!",
)
self.assertEqual(
urlize("A test http://testing.com/example!!!"),
'A test <a href="http://testing.com/example" rel="nofollow">'
"http://testing.com/example</a>!!!",
)
self.assertEqual(
urlize('A test http://testing.com/example.,:;)"!'),
'A test <a href="http://testing.com/example" rel="nofollow">'
"http://testing.com/example</a>.,:;)"!",
)
def test_trailing_semicolon(self):
self.assertEqual(
urlize("http://example.com?x=&", autoescape=False),
'<a href="http://example.com?x=" rel="nofollow">'
"http://example.com?x=&</a>",
)
self.assertEqual(
urlize("http://example.com?x=&;", autoescape=False),
'<a href="http://example.com?x=" rel="nofollow">'
"http://example.com?x=&</a>;",
)
self.assertEqual(
urlize("http://example.com?x=&;;", autoescape=False),
'<a href="http://example.com?x=" rel="nofollow">'
"http://example.com?x=&</a>;;",
)
self.assertEqual(
urlize("http://example.com?x=&.;...;", autoescape=False),
'<a href="http://example.com?x=" rel="nofollow">'
"http://example.com?x=&</a>.;...;",
)
def test_brackets(self):
"""
#19070 - Check urlize handles brackets properly
"""
self.assertEqual(
urlize("[see www.example.com]"),
'[see <a href="https://www.example.com" rel="nofollow">'
"www.example.com</a>]",
)
self.assertEqual(
urlize("see test[at[example.com"), # Invalid hostname.
"see test[at[example.com",
)
self.assertEqual(
urlize("[http://168.192.0.1](http://168.192.0.1)"),
'[<a href="http://168.192.0.1](http://168.192.0.1)" rel="nofollow">'
"http://168.192.0.1](http://168.192.0.1)</a>",
)
def test_wrapping_characters(self):
wrapping_chars = (
("()", ("(", ")")),
("<>", ("<", ">")),
("[]", ("[", "]")),
('""', (""", """)),
("''", ("'", "'")),
)
for wrapping_in, (start_out, end_out) in wrapping_chars:
with self.subTest(wrapping_in=wrapping_in):
start_in, end_in = wrapping_in
self.assertEqual(
urlize(start_in + "https://www.example.org/" + end_in),
f'{start_out}<a href="https://www.example.org/" rel="nofollow">'
f"https://www.example.org/</a>{end_out}",
)
def test_ipv4(self):
self.assertEqual(
urlize("http://192.168.0.15/api/9"),
'<a href="http://192.168.0.15/api/9" rel="nofollow">'
"http://192.168.0.15/api/9</a>",
)
def test_ipv6(self):
self.assertEqual(
urlize("http://[2001:db8:cafe::2]/api/9"),
'<a href="http://[2001:db8:cafe::2]/api/9" rel="nofollow">'
"http://[2001:db8:cafe::2]/api/9</a>",
)
def test_quotation_marks(self):
"""
#20364 - Check urlize correctly include quotation marks in links
"""
self.assertEqual(
urlize('before "hi@example.com" afterward', autoescape=False),
'before "<a href="mailto:hi@example.com">hi@example.com</a>" afterward',
)
self.assertEqual(
urlize('before hi@example.com" afterward', autoescape=False),
'before <a href="mailto:hi@example.com">hi@example.com</a>" afterward',
)
self.assertEqual(
urlize('before "hi@example.com afterward', autoescape=False),
'before "<a href="mailto:hi@example.com">hi@example.com</a> afterward',
)
self.assertEqual(
urlize("before 'hi@example.com' afterward", autoescape=False),
"before '<a href=\"mailto:hi@example.com\">hi@example.com</a>' afterward",
)
self.assertEqual(
urlize("before hi@example.com' afterward", autoescape=False),
'before <a href="mailto:hi@example.com">hi@example.com</a>\' afterward',
)
self.assertEqual(
urlize("before 'hi@example.com afterward", autoescape=False),
'before \'<a href="mailto:hi@example.com">hi@example.com</a> afterward',
)
def test_quote_commas(self):
"""
#20364 - Check urlize copes with commas following URLs in quotes
"""
self.assertEqual(
urlize(
'Email us at "hi@example.com", or phone us at +xx.yy', autoescape=False
),
'Email us at "<a href="mailto:hi@example.com">hi@example.com</a>", or '
"phone us at +xx.yy",
)
def test_exclamation_marks(self):
"""
#23715 - Check urlize correctly handles exclamation marks after TLDs
or query string
"""
self.assertEqual(
urlize("Go to djangoproject.com! and enjoy."),
'Go to <a href="https://djangoproject.com" rel="nofollow">djangoproject.com'
"</a>! and enjoy.",
)
self.assertEqual(
urlize("Search for google.com/?q=! and see."),
'Search for <a href="https://google.com/?q=" rel="nofollow">google.com/?q='
"</a>! and see.",
)
self.assertEqual(
urlize("Search for google.com/?q=dj!`? and see."),
'Search for <a href="https://google.com/?q=dj%21%60%3F" rel="nofollow">'
"google.com/?q=dj!`?</a> and see.",
)
self.assertEqual(
urlize("Search for google.com/?q=dj!`?! and see."),
'Search for <a href="https://google.com/?q=dj%21%60%3F" rel="nofollow">'
"google.com/?q=dj!`?</a>! and see.",
)
def test_non_string_input(self):
self.assertEqual(urlize(123), "123")
def test_autoescape(self):
self.assertEqual(
urlize('foo<a href=" google.com ">bar</a>buz'),
'foo<a href=" <a href="https://google.com" rel="nofollow">'
"google.com</a> ">bar</a>buz",
)
def test_autoescape_off(self):
self.assertEqual(
urlize('foo<a href=" google.com ">bar</a>buz', autoescape=False),
'foo<a href=" <a href="https://google.com" rel="nofollow">google.com</a> ">'
"bar</a>buz",
)
def test_lazystring(self):
prepend_www = lazy(lambda url: "www." + url, str)
self.assertEqual(
urlize(prepend_www("google.com")),
'<a href="https://www.google.com" rel="nofollow">www.google.com</a>',
)
@mock.patch.object(Urlizer, "handle_word", return_value="test")
def test_caching_repeated_words(self, mock_handle_word):
urlize("test test test test")
common_handle_word_args = {
"safe_input": False,
"trim_url_limit": None,
"nofollow": True,
"autoescape": True,
}
self.assertEqual(
mock_handle_word.mock_calls,
[
mock.call("test", **common_handle_word_args),
mock.call(" ", **common_handle_word_args),
],
)
@mock.patch.object(Urlizer, "handle_word", return_value="test")
def test_caching_repeated_calls(self, mock_handle_word):
urlize("test")
handle_word_test = mock.call(
"test",
safe_input=False,
trim_url_limit=None,
nofollow=True,
autoescape=True,
)
self.assertEqual(mock_handle_word.mock_calls, [handle_word_test])
urlize("test")
self.assertEqual(
mock_handle_word.mock_calls, [handle_word_test, handle_word_test]
)
| FunctionTests |
python | spack__spack | var/spack/test_repos/spack_repo/requirements_test/packages/u/package.py | {
"start": 216,
"end": 307
} | class ____(Package):
version("1.1")
version("1.0")
depends_on("c", type="build")
| U |
python | tensorflow__tensorflow | third_party/xla/xla/python/xla_client.py | {
"start": 14507,
"end": 14920
} | class ____:
"""Python representation of a xla.ScatterDimensionNumbers protobuf."""
__slots__ = (
'update_window_dims',
'inserted_window_dims',
'scatter_dims_to_operand_dims',
'index_vector_dim',
)
def __init__(self):
self.update_window_dims = []
self.inserted_window_dims = []
self.scatter_dims_to_operand_dims = []
self.index_vector_dim = 0
| ScatterDimensionNumbers |
python | mlflow__mlflow | mlflow/store/tracking/databricks_rest_store.py | {
"start": 2433,
"end": 30451
} | class ____(RestStore):
"""
Client for a databricks tracking server accessed via REST API calls.
This is only used for Databricks-specific tracing APIs, all other APIs including
runs, experiments, models etc. should be implemented in the RestStore.
Args
get_host_creds: Method to be invoked prior to every REST request to get the
:py:class:`mlflow.rest_utils.MlflowHostCreds` for the request. Note that this
is a function so that we can obtain fresh credentials in the case of expiry.
"""
_METHOD_TO_INFO = extract_api_info_for_service(
MlflowService, _REST_API_PATH_PREFIX
) | extract_api_info_for_service(DatabricksTrackingService, _V4_REST_API_PATH_PREFIX)
def __init__(self, get_host_creds):
super().__init__(get_host_creds)
def _call_endpoint(
self,
api,
json_body=None,
endpoint=None,
retry_timeout_seconds=None,
response_proto=None,
):
try:
return super()._call_endpoint(
api,
json_body=json_body,
endpoint=endpoint,
retry_timeout_seconds=retry_timeout_seconds,
response_proto=response_proto,
)
except RestException as e:
if (
e.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
and "Could not resolve a SQL warehouse ID" in e.message
):
raise MlflowException(
message=(
"SQL warehouse ID is required for accessing traces in UC tables.\n"
f"Please set the {MLFLOW_TRACING_SQL_WAREHOUSE_ID.name} environment "
"variable to your SQL warehouse ID.\n"
"```\nexport MLFLOW_TRACING_SQL_WAREHOUSE_ID=<your_sql_warehouse_id>\n```\n"
"See https://docs.databricks.com/compute/sql-warehouse for how to "
"set up a SQL warehouse and get its ID."
),
error_code=BAD_REQUEST,
) from e
raise
def start_trace(self, trace_info: TraceInfo) -> TraceInfo:
"""
Create a new trace using the V4 API format.
Args:
trace_info: The TraceInfo object to create in the backend. Currently, this
only supports trace_location with uc_schema, or mlflow_experiment that's
linked to a UC table.
Returns:
The returned TraceInfo object from the backend.
"""
try:
if trace_info._is_v4():
return self._start_trace_v4(trace_info)
# Temporarily we capture all exceptions and fallback to v3 if the trace location is not uc
# TODO: remove this once the endpoint is fully rolled out
except Exception as e:
if trace_info.trace_location.mlflow_experiment is None:
_logger.debug("MLflow experiment is not set for trace, cannot fallback to V3 API.")
raise
_logger.debug(f"Falling back to V3 API due to {e!s}")
return super().start_trace(trace_info)
def _start_trace_v4(self, trace_info: TraceInfo) -> TraceInfo:
location, otel_trace_id = parse_trace_id_v4(trace_info.trace_id)
if location is None:
raise MlflowException("Invalid trace ID format for v4 API.")
req_body = message_to_json(trace_info.to_proto())
response_proto = self._call_endpoint(
CreateTraceInfo,
req_body,
endpoint=f"{_V4_REST_API_PATH_PREFIX}/mlflow/traces/{location}/{otel_trace_id}/info",
retry_timeout_seconds=MLFLOW_ASYNC_TRACE_LOGGING_RETRY_TIMEOUT.get(),
response_proto=ProtoTraceInfo(),
)
return TraceInfo.from_proto(response_proto)
def batch_get_traces(self, trace_ids: list[str], location: str | None = None) -> list[Trace]:
"""
Get a batch of complete traces with spans for given trace ids.
Args:
trace_ids: List of trace IDs to fetch.
location: Location of the trace. For example, "catalog.schema" for UC schema.
Returns:
List of Trace objects.
"""
trace_ids = [parse_trace_id_v4(trace_id)[1] for trace_id in trace_ids]
req_body = message_to_json(
BatchGetTraces(
location_id=location,
trace_ids=trace_ids,
sql_warehouse_id=MLFLOW_TRACING_SQL_WAREHOUSE_ID.get(),
)
)
response_proto = self._call_endpoint(
BatchGetTraces,
req_body,
endpoint=f"{_V4_TRACE_REST_API_PATH_PREFIX}/{location}/batchGet",
)
return [trace_from_proto(proto, location) for proto in response_proto.traces]
def get_trace_info(self, trace_id: str) -> TraceInfo:
"""
Get the trace info matching the `trace_id`.
Args:
trace_id: String id of the trace to fetch.
Returns:
The fetched ``mlflow.entities.TraceInfo`` object.
"""
location, trace_id = parse_trace_id_v4(trace_id)
if location is not None:
sql_warehouse_id = MLFLOW_TRACING_SQL_WAREHOUSE_ID.get()
trace_v4_req_body = message_to_json(
GetTraceInfo(
trace_id=trace_id, location=location, sql_warehouse_id=sql_warehouse_id
)
)
endpoint = f"{get_single_trace_endpoint_v4(location, trace_id)}/info"
response_proto = self._call_endpoint(GetTraceInfo, trace_v4_req_body, endpoint=endpoint)
return TraceInfo.from_proto(response_proto.trace.trace_info)
return super().get_trace_info(trace_id)
def get_trace(self, trace_id: str, *, allow_partial: bool = False) -> Trace:
"""
Get a trace with spans for given trace id.
Args:
trace_id: String id of the trace to fetch.
allow_partial: Whether to allow partial traces. If True, the trace will be returned
even if it is not fully exported yet. If False, MLflow retries and returns
the trace until all spans are exported or the retry timeout is reached. Default
to False.
Returns:
The fetched Trace object, of type ``mlflow.entities.Trace``.
"""
raise MlflowNotImplementedException()
def set_trace_tag(self, trace_id: str, key: str, value: str):
"""
Set a tag on the trace with the given trace_id.
Args:
trace_id: The ID of the trace.
key: The string key of the tag.
value: The string value of the tag.
"""
location, trace_id = parse_trace_id_v4(trace_id)
if location is not None:
endpoint = f"{get_single_trace_endpoint_v4(location, trace_id)}/tags"
req_body = message_to_json(
SetTraceTag(
key=key,
value=value,
)
)
self._call_endpoint(SetTraceTag, req_body, endpoint=endpoint)
return
return super().set_trace_tag(trace_id, key, value)
def delete_trace_tag(self, trace_id: str, key: str):
"""
Delete a tag on the trace with the given trace_id.
Args:
trace_id: The ID of the trace.
key: The string key of the tag.
"""
location, trace_id = parse_trace_id_v4(trace_id)
if location is not None:
sql_warehouse_id = MLFLOW_TRACING_SQL_WAREHOUSE_ID.get()
encoded_key = quote(key, safe="")
endpoint = f"{get_single_trace_endpoint_v4(location, trace_id)}/tags/{encoded_key}"
req_body = message_to_json(DeleteTraceTag(sql_warehouse_id=sql_warehouse_id))
self._call_endpoint(DeleteTraceTag, req_body, endpoint=endpoint)
return
return super().delete_trace_tag(trace_id, key)
def search_traces(
self,
experiment_ids: list[str] | None = None,
filter_string: str | None = None,
max_results: int = SEARCH_TRACES_DEFAULT_MAX_RESULTS,
order_by: list[str] | None = None,
page_token: str | None = None,
model_id: str | None = None,
locations: list[str] | None = None,
) -> tuple[list[TraceInfo], str | None]:
# This API is not client-facing, so we should always use `locations`.
if experiment_ids is not None:
raise MlflowException("`experiment_ids` is deprecated, use `locations` instead.")
if not locations:
raise MlflowException.invalid_parameter_value(
"At least one location must be specified for searching traces."
)
# model_id is only supported by V3 API
if model_id is not None:
return self._search_unified_traces(
model_id=model_id,
locations=locations,
filter_string=filter_string,
max_results=max_results,
order_by=order_by,
page_token=page_token,
)
contain_uc_schemas = False
trace_locations = []
for location in locations:
match location.split("."):
case [experiment_id]:
trace_locations.append(
trace_location_to_proto(TraceLocation.from_experiment_id(experiment_id))
)
case [catalog, schema]:
trace_locations.append(
trace_location_to_proto(
TraceLocation.from_databricks_uc_schema(catalog, schema)
)
)
contain_uc_schemas = True
case _:
raise MlflowException.invalid_parameter_value(
f"Invalid location type: {location}. Expected type: "
"`<catalog_name>.<schema_name>` or `<experiment_id>`."
)
request = SearchTraces(
locations=trace_locations,
filter=filter_string,
max_results=max_results,
order_by=order_by,
page_token=page_token,
sql_warehouse_id=MLFLOW_TRACING_SQL_WAREHOUSE_ID.get(),
)
req_body = message_to_json(request)
try:
response_proto = self._call_endpoint(
SearchTraces,
req_body,
endpoint=f"{_V4_TRACE_REST_API_PATH_PREFIX}/search",
)
except MlflowException as e:
# There are 2 expected failure cases:
# 1. Server does not support SearchTracesV4 API yet.
# 2. Server supports V4 API but the experiment location is not supported yet.
# For these known cases, MLflow fallback to V3 API.
if e.error_code == ErrorCode.Name(ENDPOINT_NOT_FOUND):
if contain_uc_schemas:
raise MlflowException.invalid_parameter_value(
"Searching traces in UC tables is not supported yet. Only experiment IDs "
"are supported for searching traces."
)
_logger.debug("SearchTracesV4 API is not available yet. Falling back to V3 API.")
elif (
e.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE)
and "locations not yet supported" in e.message
):
if contain_uc_schemas:
raise MlflowException.invalid_parameter_value(
"The `locations` parameter cannot contain both MLflow experiment and UC "
"schema in the same request. Please specify only one type of location "
"at a time."
)
_logger.debug("Experiment locations are not supported yet. Falling back to V3 API.")
else:
raise
return self._search_traces(
locations=locations,
filter_string=filter_string,
max_results=max_results,
order_by=order_by,
page_token=page_token,
)
trace_infos = [TraceInfo.from_proto(t) for t in response_proto.trace_infos]
return trace_infos, response_proto.next_page_token or None
def _search_unified_traces(
self,
model_id: str,
locations: list[str],
filter_string: str | None = None,
max_results: int = SEARCH_TRACES_DEFAULT_MAX_RESULTS,
order_by: list[str] | None = None,
page_token: str | None = None,
) -> tuple[list[TraceInfo], str | None]:
sql_warehouse_id = MLFLOW_TRACING_SQL_WAREHOUSE_ID.get()
if sql_warehouse_id is None:
raise MlflowException.invalid_parameter_value(
"SQL warehouse ID is required for searching traces by model ID in UC tables, "
f"set it with the `{MLFLOW_TRACING_SQL_WAREHOUSE_ID.name}` environment variable."
)
request = SearchUnifiedTraces(
model_id=model_id,
sql_warehouse_id=sql_warehouse_id,
experiment_ids=locations,
filter=filter_string,
max_results=max_results,
order_by=order_by,
page_token=page_token,
)
req_body = message_to_json(request)
response_proto = self._call_endpoint(SearchUnifiedTraces, req_body)
# Convert TraceInfo (v2) objects to TraceInfoV3 objects for consistency
trace_infos = [TraceInfo.from_proto(t) for t in response_proto.traces]
return trace_infos, response_proto.next_page_token or None
def get_online_trace_details(
self,
trace_id: str,
source_inference_table: str,
source_databricks_request_id: str,
):
req = GetOnlineTraceDetails(
trace_id=trace_id,
sql_warehouse_id=MLFLOW_TRACING_SQL_WAREHOUSE_ID.get(),
source_inference_table=source_inference_table,
source_databricks_request_id=source_databricks_request_id,
)
req_body = message_to_json(req)
response_proto = self._call_endpoint(GetOnlineTraceDetails, req_body)
return response_proto.trace_data
def set_experiment_trace_location(
self,
location: UCSchemaLocationEntity,
experiment_id: str,
sql_warehouse_id: str | None = None,
) -> UCSchemaLocationEntity:
req_body = message_to_json(
CreateTraceUCStorageLocation(
uc_schema=uc_schema_location_to_proto(location),
sql_warehouse_id=sql_warehouse_id or MLFLOW_TRACING_SQL_WAREHOUSE_ID.get(),
)
)
try:
response = self._call_endpoint(
CreateTraceUCStorageLocation,
req_body,
endpoint=f"{_V4_TRACE_REST_API_PATH_PREFIX}/location",
)
location = uc_schema_location_from_proto(response.uc_schema)
except MlflowException as e:
if e.error_code == ErrorCode.Name(ALREADY_EXISTS):
_logger.debug(f"Trace UC storage location already exists: {location}")
else:
raise
_logger.debug(f"Created trace UC storage location: {location}")
# link experiment to uc trace location
req_body = message_to_json(
LinkExperimentToUCTraceLocation(
experiment_id=experiment_id,
uc_schema=uc_schema_location_to_proto(location),
)
)
self._call_endpoint(
LinkExperimentToUCTraceLocation,
req_body,
endpoint=f"{_V4_TRACE_REST_API_PATH_PREFIX}/{experiment_id}/link-location",
)
_logger.debug(f"Linked experiment {experiment_id} to UC trace location: {location}")
return location
def unset_experiment_trace_location(
self, experiment_id: str, location: UCSchemaLocationEntity
) -> None:
request = UnLinkExperimentToUCTraceLocation(
experiment_id=experiment_id,
uc_schema=uc_schema_location_to_proto(location),
)
endpoint = f"{_V4_TRACE_REST_API_PATH_PREFIX}/{experiment_id}/unlink-location"
req_body = message_to_json(request)
self._call_endpoint(
UnLinkExperimentToUCTraceLocation,
req_body,
endpoint=endpoint,
)
_logger.debug(f"Unlinked experiment {experiment_id} from trace location: {location}")
def log_spans(self, location: str, spans: list[Span], tracking_uri=None) -> list[Span]:
if not spans:
return []
if tracking_uri is None:
raise MlflowException(
"`tracking_uri` must be provided to log spans to with Databricks tracking server."
)
endpoint = f"/api/2.0/otel{OTLP_TRACES_PATH}"
try:
config = get_databricks_workspace_client_config(tracking_uri)
except Exception as e:
raise MlflowException(
"Failed to log spans to UC table: could not identify Databricks workspace "
"configuration"
) from e
request = ExportTraceServiceRequest()
resource_spans = request.resource_spans.add()
scope_spans = resource_spans.scope_spans.add()
scope_spans.spans.extend(span.to_otel_proto() for span in spans)
response = http_request(
host_creds=self.get_host_creds(),
endpoint=endpoint,
method="POST",
data=request.SerializeToString(),
extra_headers={
"Content-Type": "application/x-protobuf",
DATABRICKS_UC_TABLE_HEADER: location,
**config.authenticate(),
},
)
verify_rest_response(response, endpoint)
return spans
def create_assessment(self, assessment: Assessment) -> Assessment:
"""
Create an assessment entity in the backend store.
Args:
assessment: The assessment to log (without an assessment_id).
Returns:
The created Assessment object.
"""
location, trace_id = parse_trace_id_v4(assessment.trace_id)
if location is not None:
req_body = message_to_json(assessment_to_proto(assessment))
endpoint = self._append_sql_warehouse_id_param(
f"{get_single_trace_endpoint_v4(location, trace_id)}/assessments",
)
response_proto = self._call_endpoint(
CreateAssessment,
req_body,
endpoint=endpoint,
response_proto=ProtoAssessment(),
)
return Assessment.from_proto(response_proto)
return super().create_assessment(assessment)
def update_assessment(
self,
trace_id: str,
assessment_id: str,
name: str | None = None,
expectation: ExpectationValue | None = None,
feedback: FeedbackValue | None = None,
rationale: str | None = None,
metadata: dict[str, str] | None = None,
) -> Assessment:
"""
Update an existing assessment entity in the backend store.
Args:
trace_id: The ID of the trace.
assessment_id: The ID of the assessment to update.
name: The updated name of the assessment.
expectation: The updated expectation value of the assessment.
feedback: The updated feedback value of the assessment.
rationale: The updated rationale of the feedback. Not applicable for expectations.
metadata: Additional metadata for the assessment.
"""
if expectation is not None and feedback is not None:
raise MlflowException.invalid_parameter_value(
"Exactly one of `expectation` or `feedback` should be specified."
)
location, parsed_trace_id = parse_trace_id_v4(trace_id)
if location is not None:
assessment = UpdateAssessment().assessment
assessment.assessment_id = assessment_id
catalog, schema = location.split(".")
assessment.trace_location.CopyFrom(
trace_location_to_proto(TraceLocation.from_databricks_uc_schema(catalog, schema)),
)
assessment.trace_id = parsed_trace_id
# Field mask specifies which fields to update.
mask = UpdateAssessment().update_mask
if name is not None:
assessment.assessment_name = name
mask.paths.append("assessment_name")
if expectation is not None:
assessment.expectation.CopyFrom(expectation.to_proto())
mask.paths.append("expectation")
if feedback is not None:
assessment.feedback.CopyFrom(feedback.to_proto())
mask.paths.append("feedback")
if rationale is not None:
assessment.rationale = rationale
mask.paths.append("rationale")
if metadata is not None:
assessment.metadata.update(metadata)
mask.paths.append("metadata")
endpoint = get_single_assessment_endpoint_v4(location, parsed_trace_id, assessment_id)
endpoint = self._append_sql_warehouse_id_param(endpoint)
if mask.paths:
mask_param = ",".join(mask.paths)
endpoint = f"{endpoint}&update_mask={mask_param}"
req_body = message_to_json(assessment)
response_proto = self._call_endpoint(
UpdateAssessment,
req_body,
endpoint=endpoint,
response_proto=ProtoAssessment(),
)
return Assessment.from_proto(response_proto)
else:
return super().update_assessment(
trace_id, assessment_id, name, expectation, feedback, rationale, metadata
)
def get_assessment(self, trace_id: str, assessment_id: str) -> Assessment:
"""
Get an assessment entity from the backend store.
"""
location, trace_id = parse_trace_id_v4(trace_id)
if location is not None:
endpoint = self._append_sql_warehouse_id_param(
get_single_assessment_endpoint_v4(location, trace_id, assessment_id)
)
response_proto = self._call_endpoint(
GetAssessment, endpoint=endpoint, response_proto=ProtoAssessment()
)
return Assessment.from_proto(response_proto)
return super().get_assessment(trace_id, assessment_id)
def delete_assessment(self, trace_id: str, assessment_id: str):
"""
Delete an assessment associated with a trace.
Args:
trace_id: String ID of the trace.
assessment_id: String ID of the assessment to delete.
"""
location, trace_id = parse_trace_id_v4(trace_id)
if location is not None:
endpoint = self._append_sql_warehouse_id_param(
get_single_assessment_endpoint_v4(location, trace_id, assessment_id)
)
self._call_endpoint(DeleteAssessment, endpoint=endpoint)
else:
return super().delete_assessment(trace_id, assessment_id)
def _group_traces_by_location(self, trace_ids: list[str]) -> dict[str | None, list[str]]:
"""
Group trace IDs by location to separate V3 and V4 traces.
Args:
trace_ids: List of trace IDs (can be V3 or V4 format).
Returns:
Dict mapping location to list of trace IDs where:
- None key: List of V3 trace IDs (without location prefix)
- str keys: Location IDs (e.g., "catalog.schema") mapping to OTEL trace IDs
"""
traces_by_location: dict[str | None, list[str]] = defaultdict(list)
for trace_id in trace_ids:
location_id, trace_id = parse_trace_id_v4(trace_id)
traces_by_location[location_id].append(trace_id)
return traces_by_location
def _batch_link_traces_to_run(
self, location_id: str, otel_trace_ids: list[str], run_id: str
) -> None:
"""
Link multiple traces to a run by creating internal trace-to-run relationships.
Args:
location_id: The location ID (e.g., "catalog.schema") for the traces.
otel_trace_ids: List of OTEL trace IDs to link to the run.
run_id: ID of the run to link traces to.
"""
if not otel_trace_ids:
return
req_body = message_to_json(
BatchLinkTraceToRun(
location_id=location_id,
trace_ids=otel_trace_ids,
run_id=run_id,
)
)
endpoint = f"{_V4_TRACE_REST_API_PATH_PREFIX}/{location_id}/link-to-run/batchCreate"
self._call_endpoint(BatchLinkTraceToRun, req_body, endpoint=endpoint)
def _batch_unlink_traces_from_run(
self, location_id: str, otel_trace_ids: list[str], run_id: str
) -> None:
"""
Unlink multiple traces from a run by removing the internal trace-to-run relationships.
Args:
location_id: The location ID (e.g., "catalog.schema") for the traces.
otel_trace_ids: List of OTEL trace IDs to unlink from the run.
run_id: ID of the run to unlink traces from.
"""
if not otel_trace_ids:
return
req_body = message_to_json(
BatchUnlinkTraceFromRun(
location_id=location_id,
trace_ids=otel_trace_ids,
run_id=run_id,
)
)
endpoint = f"{_V4_TRACE_REST_API_PATH_PREFIX}/{location_id}/unlink-from-run/batchDelete"
self._call_endpoint(BatchUnlinkTraceFromRun, req_body, endpoint=endpoint)
def link_traces_to_run(self, trace_ids: list[str], run_id: str) -> None:
"""
Link multiple traces to a run by creating trace-to-run relationships.
Args:
trace_ids: List of trace IDs to link to the run.
run_id: ID of the run to link traces to.
"""
if not trace_ids:
return
traces_by_location = self._group_traces_by_location(trace_ids)
for location_id, batch_trace_ids in traces_by_location.items():
if location_id is None:
super().link_traces_to_run(batch_trace_ids, run_id)
else:
self._batch_link_traces_to_run(location_id, batch_trace_ids, run_id)
def unlink_traces_from_run(self, trace_ids: list[str], run_id: str) -> None:
"""
Unlink multiple traces from a run by removing trace-to-run relationships.
Args:
trace_ids: List of trace IDs to unlink from the run.
run_id: ID of the run to unlink traces from.
"""
if not trace_ids:
return
traces_by_location = self._group_traces_by_location(trace_ids)
if v3_trace_ids := traces_by_location.pop(None, []):
raise MlflowException(
"Unlinking traces from runs is only supported for traces with UC schema "
f"locations. Unsupported trace IDs: {v3_trace_ids}"
)
for location_id, batch_trace_ids in traces_by_location.items():
self._batch_unlink_traces_from_run(location_id, batch_trace_ids, run_id)
def _append_sql_warehouse_id_param(self, endpoint: str) -> str:
if sql_warehouse_id := MLFLOW_TRACING_SQL_WAREHOUSE_ID.get():
return f"{endpoint}?sql_warehouse_id={sql_warehouse_id}"
return endpoint
| DatabricksTracingRestStore |
python | PrefectHQ__prefect | src/prefect/utilities/annotations.py | {
"start": 3189,
"end": 4396
} | class ____(BaseAnnotation[T]):
"""
Wrapper for parameters in deployments.
Indicates that this parameter should be frozen in the UI and not editable
when creating flow runs from this deployment.
Example:
```python
@flow
def my_flow(customer_id: str):
# flow logic
deployment = my_flow.deploy(parameters={"customer_id": freeze("customer123")})
```
"""
def __new__(cls, value: T) -> Self:
try:
to_json(value)
except Exception:
raise ValueError("Value must be JSON serializable")
return super().__new__(cls, value)
def unfreeze(self) -> T:
"""Return the unwrapped value."""
return self.unwrap()
@classmethod
def __get_pydantic_core_schema__(
cls, source: type[Any], handler: GetCoreSchemaHandler
) -> core_schema.CoreSchema:
return core_schema.no_info_after_validator_function(
cls, # Use the class itself as the validator
core_schema.any_schema(),
serialization=core_schema.plain_serializer_function_ser_schema(
lambda x: x.unfreeze() # Serialize by unwrapping the value
),
)
| freeze |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 16881,
"end": 17358
} | class ____(_Multi2VecBase):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.MULTI2VEC_JINAAI, frozen=True, exclude=True
)
baseURL: Optional[AnyHttpUrl]
model: Optional[str]
dimensions: Optional[int]
def _to_dict(self) -> Dict[str, Any]:
ret_dict = super()._to_dict()
if self.baseURL is not None:
ret_dict["baseURL"] = self.baseURL.unicode_string()
return ret_dict
| _Multi2VecJinaConfig |
python | ipython__ipython | IPython/core/completer.py | {
"start": 28633,
"end": 30548
} | class ____:
"""An object to split an input line in a manner similar to readline.
By having our own implementation, we can expose readline-like completion in
a uniform manner to all frontends. This object only needs to be given the
line of text to be split and the cursor position on said line, and it
returns the 'word' to be completed on at the cursor after splitting the
entire line.
What characters are used as splitting delimiters can be controlled by
setting the ``delims`` attribute (this is a property that internally
automatically builds the necessary regular expression)"""
# Private interface
# A string of delimiter characters. The default value makes sense for
# IPython's most typical usage patterns.
_delims = DELIMS
# The expression (a normal string) to be compiled into a regular expression
# for actual splitting. We store it as an attribute mostly for ease of
# debugging, since this type of code can be so tricky to debug.
_delim_expr = None
# The regular expression that does the actual splitting
_delim_re = None
def __init__(self, delims=None):
delims = CompletionSplitter._delims if delims is None else delims
self.delims = delims
@property
def delims(self):
"""Return the string of delimiter characters."""
return self._delims
@delims.setter
def delims(self, delims):
"""Set the delimiters for line splitting."""
expr = '[' + ''.join('\\'+ c for c in delims) + ']'
self._delim_re = re.compile(expr)
self._delims = delims
self._delim_expr = expr
def split_line(self, line, cursor_pos=None):
"""Split a line of text with a cursor at the given position.
"""
cut_line = line if cursor_pos is None else line[:cursor_pos]
return self._delim_re.split(cut_line)[-1]
| CompletionSplitter |
python | wandb__wandb | wandb/sdk/artifacts/_generated/update_artifact_sequence_type.py | {
"start": 259,
"end": 361
} | class ____(GQLResult):
result: Optional[UpdateArtifactSequenceTypeResult]
| UpdateArtifactSequenceType |
python | tiangolo__fastapi | docs_src/graphql/tutorial001.py | {
"start": 110,
"end": 168
} | class ____:
name: str
age: int
@strawberry.type
| User |
python | optuna__optuna | tests/test_deprecated.py | {
"start": 108,
"end": 6676
} | class ____:
def __init__(self, a: Any, b: Any, c: Any) -> None:
pass
def _method(self) -> None:
"""summary
detail
"""
pass
def _method_expected(self) -> None:
"""summary
detail
.. warning::
Deprecated in v1.1.0. This feature will be removed in the future. The removal of this
feature is currently scheduled for v3.0.0, but this schedule is subject to change.
See https://github.com/optuna/optuna/releases/tag/v1.1.0.
"""
pass
@pytest.mark.parametrize("deprecated_version", ["1.1", 100, None, "2.0.0"])
@pytest.mark.parametrize("removed_version", ["1.1", 10, "1.0.0"])
def test_deprecation_raises_error_for_invalid_version(
deprecated_version: Any, removed_version: Any
) -> None:
with pytest.raises(ValueError):
_deprecated.deprecated_func(deprecated_version, removed_version)
with pytest.raises(ValueError):
_deprecated.deprecated_class(deprecated_version, removed_version)
def test_deprecation_decorator() -> None:
deprecated_version = "1.1.0"
removed_version = "3.0.0"
decorator_deprecation = _deprecated.deprecated_func(deprecated_version, removed_version)
assert callable(decorator_deprecation)
def _func() -> int:
return 10
decorated_func = decorator_deprecation(_func)
assert decorated_func.__name__ == _func.__name__
assert decorated_func.__doc__ == _deprecated._DEPRECATION_NOTE_TEMPLATE.format(
d_ver=deprecated_version, r_ver=removed_version
)
with pytest.warns(FutureWarning):
decorated_func()
def test_deprecation_instance_method_decorator() -> None:
deprecated_version = "1.1.0"
removed_version = "3.0.0"
decorator_deprecation = _deprecated.deprecated_func(deprecated_version, removed_version)
assert callable(decorator_deprecation)
decorated_method = decorator_deprecation(_Sample._method)
assert decorated_method.__name__ == _Sample._method.__name__
assert decorated_method.__doc__ == _Sample._method_expected.__doc__
with pytest.warns(FutureWarning):
decorated_method(None) # type: ignore
def test_deprecation_class_decorator() -> None:
deprecated_version = "1.1.0"
removed_version = "3.0.0"
decorator_deprecation = _deprecated.deprecated_class(deprecated_version, removed_version)
assert callable(decorator_deprecation)
decorated_class = decorator_deprecation(_Sample)
assert decorated_class.__name__ == "_Sample"
assert decorated_class.__init__.__name__ == "__init__"
assert decorated_class.__doc__ == _deprecated._DEPRECATION_NOTE_TEMPLATE.format(
d_ver=deprecated_version, r_ver=removed_version
)
with pytest.warns(FutureWarning):
decorated_class("a", "b", "c")
def test_deprecation_class_decorator_name() -> None:
name = "foo"
decorator_deprecation = _deprecated.deprecated_class("1.1.0", "3.0.0", name=name)
decorated_sample = decorator_deprecation(_Sample)
with pytest.warns(FutureWarning) as record:
decorated_sample("a", "b", "c")
assert isinstance(record.list[0].message, Warning)
assert name in record.list[0].message.args[0]
def test_deprecation_decorator_name() -> None:
def _func() -> int:
return 10
name = "bar"
decorator_deprecation = _deprecated.deprecated_func("1.1.0", "3.0.0", name=name)
decorated_sample_func = decorator_deprecation(_func)
with pytest.warns(FutureWarning) as record:
decorated_sample_func()
assert isinstance(record.list[0].message, Warning)
assert name in record.list[0].message.args[0]
@pytest.mark.parametrize("text", [None, "", "test", "test" * 100])
def test_deprecation_text_specified(text: str | None) -> None:
def _func() -> int:
return 10
decorator_deprecation = _deprecated.deprecated_func("1.1.0", "3.0.0", text=text)
decorated_func = decorator_deprecation(_func)
expected_func_doc = _deprecated._DEPRECATION_NOTE_TEMPLATE.format(d_ver="1.1.0", r_ver="3.0.0")
if text is None:
pass
elif len(text) > 0:
expected_func_doc += "\n\n " + text + "\n"
else:
expected_func_doc += "\n\n\n"
assert decorated_func.__name__ == _func.__name__
assert decorated_func.__doc__ == expected_func_doc
with pytest.warns(FutureWarning) as record:
decorated_func()
assert isinstance(record.list[0].message, Warning)
expected_warning_message = _deprecated._DEPRECATION_WARNING_TEMPLATE.format(
name="_func", d_ver="1.1.0", r_ver="3.0.0"
)
if text is not None:
expected_warning_message += " " + text
assert record.list[0].message.args[0] == expected_warning_message
@pytest.mark.parametrize("text", [None, "", "test", "test" * 100])
def test_deprecation_class_text_specified(text: str | None) -> None:
class _Class:
def __init__(self, a: Any, b: Any, c: Any) -> None:
pass
decorator_deprecation = _deprecated.deprecated_class("1.1.0", "3.0.0", text=text)
decorated_class = decorator_deprecation(_Class)
expected_class_doc = _deprecated._DEPRECATION_NOTE_TEMPLATE.format(
d_ver="1.1.0", r_ver="3.0.0"
)
if text is None:
pass
elif len(text) > 0:
expected_class_doc += "\n\n " + text + "\n"
else:
expected_class_doc += "\n\n\n"
assert decorated_class.__name__ == _Class.__name__
assert decorated_class.__doc__ == expected_class_doc
with pytest.warns(FutureWarning) as record:
decorated_class(None, None, None)
assert isinstance(record.list[0].message, Warning)
expected_warning_message = _deprecated._DEPRECATION_WARNING_TEMPLATE.format(
name="_Class", d_ver="1.1.0", r_ver="3.0.0"
)
if text is not None:
expected_warning_message += " " + text
assert record.list[0].message.args[0] == expected_warning_message
def test_deprecation_decorator_default_removed_version() -> None:
deprecated_version = "1.1.0"
removed_version = "3.0.0"
decorator_deprecation = _deprecated.deprecated_func(deprecated_version, removed_version)
assert callable(decorator_deprecation)
def _func() -> int:
return 10
decorated_func = decorator_deprecation(_func)
assert decorated_func.__name__ == _func.__name__
assert decorated_func.__doc__ == _deprecated._DEPRECATION_NOTE_TEMPLATE.format(
d_ver=deprecated_version, r_ver=removed_version
)
with pytest.warns(FutureWarning):
decorated_func()
| _Sample |
python | catalyst-team__catalyst | catalyst/data/sampler.py | {
"start": 352,
"end": 3556
} | class ____(Sampler):
"""Allows you to create stratified sample on unbalanced classes.
Args:
labels: list of class label for each elem in the dataset
mode: Strategy to balance classes.
Must be one of [downsampling, upsampling]
Python API examples:
.. code-block:: python
import os
from torch import nn, optim
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.data import ToTensor, BalanceClassSampler
from catalyst.contrib.datasets import MNIST
train_data = MNIST(os.getcwd(), train=True, download=True, transform=ToTensor())
train_labels = train_data.targets.cpu().numpy().tolist()
train_sampler = BalanceClassSampler(train_labels, mode=5000)
valid_data = MNIST(os.getcwd(), train=False)
loaders = {
"train": DataLoader(train_data, sampler=train_sampler, batch_size=32),
"valid": DataLoader(valid_data, batch_size=32),
}
model = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 10))
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.02)
runner = dl.SupervisedRunner()
# model training
runner.train(
model=model,
criterion=criterion,
optimizer=optimizer,
loaders=loaders,
num_epochs=1,
logdir="./logs",
valid_loader="valid",
valid_metric="loss",
minimize_valid_metric=True,
verbose=True,
)
"""
def __init__(self, labels: List[int], mode: Union[str, int] = "downsampling"):
"""Sampler initialisation."""
super().__init__(labels)
labels = np.array(labels)
samples_per_class = {label: (labels == label).sum() for label in set(labels)}
self.lbl2idx = {
label: np.arange(len(labels))[labels == label].tolist()
for label in set(labels)
}
if isinstance(mode, str):
assert mode in ["downsampling", "upsampling"]
if isinstance(mode, int) or mode == "upsampling":
samples_per_class = (
mode if isinstance(mode, int) else max(samples_per_class.values())
)
else:
samples_per_class = min(samples_per_class.values())
self.labels = labels
self.samples_per_class = samples_per_class
self.length = self.samples_per_class * len(set(labels))
def __iter__(self) -> Iterator[int]:
"""
Returns:
iterator of indices of stratified sample
"""
indices = []
for key in sorted(self.lbl2idx):
replace_flag = self.samples_per_class > len(self.lbl2idx[key])
indices += np.random.choice(
self.lbl2idx[key], self.samples_per_class, replace=replace_flag
).tolist()
assert len(indices) == self.length
np.random.shuffle(indices)
return iter(indices)
def __len__(self) -> int:
"""
Returns:
length of result sample
"""
return self.length
| BalanceClassSampler |
python | getsentry__sentry | src/sentry/notifications/helpers.py | {
"start": 2844,
"end": 6382
} | class ____(TypedDict, total=False):
disabled: bool
reason: str
def get_subscription_from_attributes(
attrs: Mapping[str, Any],
) -> tuple[bool, SubscriptionDetails | None]:
subscription_details: SubscriptionDetails | None = None
is_disabled, is_subscribed, subscription = attrs["subscription"]
if is_disabled:
subscription_details = {"disabled": True}
elif subscription and subscription.is_active:
subscription_details = {
"reason": SUBSCRIPTION_REASON_MAP.get(subscription.reason, "unknown")
}
return is_subscribed, subscription_details
def collect_groups_by_project(groups: Iterable[Group]) -> Mapping[int, set[Group]]:
"""
Collect all of the projects to look up, and keep a set of groups that are
part of that project. (Note that the common -- but not only -- case here is
that all groups are part of the same project.)
"""
projects = defaultdict(set)
for group in groups:
projects[group.project_id].add(group)
return projects
def get_reason_context(extra_context: Mapping[str, Any]) -> MutableMapping[str, str]:
"""Get user-specific context. Do not call get_context() here."""
reason = extra_context.get("reason", 0)
return {
"reason": GroupSubscriptionReason.descriptions.get(reason, "are subscribed to this issue")
}
def recipient_is_user(
recipient: Actor | Team | RpcUser | User,
) -> TypeGuard[Actor | RpcUser | User]:
from sentry.users.models.user import User
if isinstance(recipient, Actor) and recipient.is_user:
return True
return isinstance(recipient, (RpcUser, User))
def recipient_is_team(recipient: Actor | Team | RpcUser | User) -> TypeGuard[Actor | Team]:
from sentry.models.team import Team
if isinstance(recipient, Actor) and recipient.is_team:
return True
return isinstance(recipient, Team)
def team_is_valid_recipient(team: Team | Actor) -> bool:
"""
A team is a valid recipient if it has a linked integration (ie. linked Slack channel)
for any one of the providers allowed for personal notifications.
"""
linked_integration = ExternalActorReplica.objects.filter(
team_id=team.id,
provider__in=PERSONAL_NOTIFICATION_PROVIDERS_AS_INT,
)
if linked_integration:
return True
return False
def get_team_members(team: Team | Actor) -> list[Actor]:
if recipient_is_team(team): # handles type error below
team_id = team.id
else: # team is either Team or Actor, so if recipient_is_team returns false it is because Actor has a different type
raise Exception(
"Actor team has ActorType %s, expected ActorType Team", team.actor_type # type: ignore[union-attr]
)
# get organization member IDs of all members in the team
team_members = OrganizationMemberTeamReplica.objects.filter(team_id=team_id)
# use the first member to get the org id + determine if there are any members to begin with
first_member = team_members.first()
if not first_member:
return []
org_id = first_member.organization_id
# get user IDs for all members in the team
members = OrganizationMemberMapping.objects.filter(
organization_id=org_id,
organizationmember_id__in=Subquery(team_members.values("organizationmember_id")),
)
return [
Actor(id=user_id, actor_type=ActorType.USER)
for user_id in members.values_list("user_id", flat=True)
if user_id
]
| SubscriptionDetails |
python | facebookresearch__faiss | tests/test_ivflib.py | {
"start": 1730,
"end": 2296
} | class ____(unittest.TestCase):
def test_sequential_scan(self):
d = 20
index = faiss.index_factory(d, 'IVF100,SQ8')
rs = np.random.RandomState(123)
xt = rs.rand(5000, d).astype('float32')
xb = rs.rand(10000, d).astype('float32')
index.train(xt)
index.add(xb)
k = 15
xq = rs.rand(200, d).astype('float32')
ref_D, ref_I = index.search(xq, k)
D, I = search_single_scan(index, xq, k, bs=10)
assert np.all(D == ref_D)
assert np.all(I == ref_I)
| TestSequentialScan |
python | crytic__slither | slither/core/declarations/contract.py | {
"start": 1668,
"end": 62849
} | class ____(SourceMapping): # pylint: disable=too-many-public-methods
"""
Contract class
"""
def __init__(self, compilation_unit: "SlitherCompilationUnit", scope: "FileScope") -> None:
super().__init__()
self._name: Optional[str] = None
self._id: Optional[int] = None
self._inheritance: List["Contract"] = [] # all contract inherited, c3 linearization
self._immediate_inheritance: List["Contract"] = [] # immediate inheritance
# Constructors called on contract's definition
# contract B is A(1) { ..
self._explicit_base_constructor_calls: List["Contract"] = []
self._enums: Dict[str, "EnumContract"] = {}
self._structures: Dict[str, "StructureContract"] = {}
self._events: Dict[str, "EventContract"] = {}
# map accessible variable from name -> variable
# do not contain private variables inherited from contract
self._variables: Dict[str, "StateVariable"] = {}
self._variables_ordered: List["StateVariable"] = []
# Reference id -> variable declaration (only available for compact AST)
self._state_variables_by_ref_id: Dict[int, "StateVariable"] = {}
self._modifiers: Dict[str, "Modifier"] = {}
self._functions: Dict[str, "FunctionContract"] = {}
self._linearizedBaseContracts: List[int] = []
self._custom_errors: Dict[str, "CustomErrorContract"] = {}
self._type_aliases: Dict[str, "TypeAliasContract"] = {}
# The only str is "*"
self._using_for: USING_FOR = {}
self._using_for_complete: Optional[USING_FOR] = None
self._kind: Optional[str] = None
self._is_interface: bool = False
self._is_library: bool = False
self._is_fully_implemented: bool = False
self._is_abstract: bool = False
self._signatures: Optional[List[str]] = None
self._signatures_declared: Optional[List[str]] = None
self._fallback_function: Optional["FunctionContract"] = None
self._receive_function: Optional["FunctionContract"] = None
self._is_upgradeable: Optional[bool] = None
self._is_upgradeable_proxy: Optional[bool] = None
self._upgradeable_version: Optional[str] = None
self._initial_state_variables: List["StateVariable"] = [] # ssa
self._is_incorrectly_parsed: bool = False
self._available_functions_as_dict: Optional[Dict[str, "Function"]] = None
self._all_functions_called: Optional[List["Function"]] = None
self.compilation_unit: "SlitherCompilationUnit" = compilation_unit
self.file_scope: "FileScope" = scope
# memoize
self._state_variables_used_in_reentrant_targets: Optional[
Dict["StateVariable", Set[Union["StateVariable", "Function"]]]
] = None
self._comments: Optional[str] = None
###################################################################################
###################################################################################
# region General's properties
###################################################################################
###################################################################################
    @property
    def name(self) -> str:
        """str: Name of the contract."""
        # The name is assigned during parsing; a missing name indicates a parser bug.
        assert self._name
        return self._name
    @name.setter
    def name(self, name: str) -> None:
        """Set the contract name."""
        self._name = name
    @property
    def id(self) -> int:
        """int: Unique id assigned by the parser."""
        assert self._id is not None
        return self._id
    @id.setter
    def id(self, new_id: int) -> None:
        """Set the unique id."""
        self._id = new_id
    @property
    def contract_kind(self) -> Optional[str]:
        """
        Optional[str]: Kind of the contract (e.g. "contract", "library", "interface").
        contract_kind can be None if the legacy ast format is used
        :return:
        """
        return self._kind
    @contract_kind.setter
    def contract_kind(self, kind: str) -> None:
        """Set the contract kind."""
        self._kind = kind
    @property
    def is_interface(self) -> bool:
        """bool: True if the contract is an interface."""
        return self._is_interface
    @is_interface.setter
    def is_interface(self, is_interface: bool) -> None:
        self._is_interface = is_interface
    @property
    def is_library(self) -> bool:
        """bool: True if the contract is a library."""
        return self._is_library
    @is_library.setter
    def is_library(self, is_library: bool) -> None:
        self._is_library = is_library
    @property
    def comments(self) -> Optional[str]:
        """
        Return the comments associated with the contract.
        When using comments, avoid strict text matching, as the solc behavior might change.
        For example, for old solc version, the first space after the * is not kept, i.e:
          * @title Test Contract
          * @dev Test comment
        Returns
        - " @title Test Contract\n @dev Test comment" for newest versions
        - "@title Test Contract\n@dev Test comment" for older versions
        Returns:
            the comment as a string
        """
        return self._comments
    @comments.setter
    def comments(self, comments: str) -> None:
        self._comments = comments
    @property
    def is_fully_implemented(self) -> bool:
        """
        bool: True if the contract defines all functions.
        In modern Solidity, virtual functions can lack an implementation.
        Prior to Solidity 0.6.0, functions like the following would be not fully implemented:
        ```solidity
        contract ImplicitAbstract{
            function f() public;
        }
        ```
        """
        return self._is_fully_implemented
    @is_fully_implemented.setter
    def is_fully_implemented(self, is_fully_implemented: bool) -> None:
        self._is_fully_implemented = is_fully_implemented
    @property
    def is_abstract(self) -> bool:
        """
        bool: True if the contract is abstract.
        Note: for Solidity < 0.6.0 it will always be False.
        """
        return self._is_abstract
    @is_abstract.setter
    def is_abstract(self, is_abstract: bool) -> None:
        self._is_abstract = is_abstract
# endregion
###################################################################################
###################################################################################
# region Structures
###################################################################################
###################################################################################
    @property
    def structures(self) -> List["StructureContract"]:
        """
        list(Structure): List of the structures (including inherited)
        """
        return list(self._structures.values())
    @property
    def structures_inherited(self) -> List["StructureContract"]:
        """
        list(Structure): List of the inherited structures
        """
        return [s for s in self.structures if s.contract != self]
    @property
    def structures_declared(self) -> List["StructureContract"]:
        """
        list(Structure): List of the structures declared within the contract (not inherited)
        """
        return [s for s in self.structures if s.contract == self]
    @property
    def structures_as_dict(self) -> Dict[str, "StructureContract"]:
        """dict(str: StructureContract): Structures indexed by name."""
        return self._structures
# endregion
###################################################################################
###################################################################################
# region Enums
###################################################################################
###################################################################################
@property
def enums(self) -> List["EnumContract"]:
return list(self._enums.values())
@property
def enums_inherited(self) -> List["EnumContract"]:
"""
list(Enum): List of the inherited enums
"""
return [e for e in self.enums if e.contract != self]
@property
def enums_declared(self) -> List["EnumContract"]:
"""
list(Enum): List of the enums declared within the contract (not inherited)
"""
return [e for e in self.enums if e.contract == self]
@property
def enums_as_dict(self) -> Dict[str, "EnumContract"]:
return self._enums
# endregion
###################################################################################
###################################################################################
# region Events
###################################################################################
###################################################################################
    @property
    def events(self) -> List["EventContract"]:
        """
        list(Event): List of the events (including inherited)
        """
        return list(self._events.values())
    @property
    def events_inherited(self) -> List["EventContract"]:
        """
        list(Event): List of the inherited events
        """
        return [e for e in self.events if e.contract != self]
    @property
    def events_declared(self) -> List["EventContract"]:
        """
        list(Event): List of the events declared within the contract (not inherited)
        """
        return [e for e in self.events if e.contract == self]
    @property
    def events_as_dict(self) -> Dict[str, "EventContract"]:
        """dict(str: EventContract): Events indexed by name."""
        return self._events
# endregion
###################################################################################
###################################################################################
# region Using for
###################################################################################
###################################################################################
    @property
    def using_for(self) -> USING_FOR:
        """USING_FOR: `using ... for ...` directives declared in this contract only."""
        return self._using_for
    @property
    def using_for_complete(self) -> USING_FOR:
        """
        USING_FOR: Dict of merged local using for directive with top level directive
        """
        # Memoized: merge the contract-level directives with every top-level
        # `using ... for ...` directive visible in this file scope.
        if self._using_for_complete is None:
            result = self.using_for
            top_level_using_for = self.file_scope.using_for_directives
            for uftl in top_level_using_for:
                result = merge_using_for(result, uftl.using_for)
            self._using_for_complete = result
        return self._using_for_complete
# endregion
###################################################################################
###################################################################################
# region Custom Errors
###################################################################################
###################################################################################
    @property
    def custom_errors(self) -> List["CustomErrorContract"]:
        """
        list(CustomErrorContract): List of the contract's custom errors (including inherited)
        """
        return list(self._custom_errors.values())
    @property
    def custom_errors_inherited(self) -> List["CustomErrorContract"]:
        """
        list(CustomErrorContract): List of the inherited custom errors
        """
        return [s for s in self.custom_errors if s.contract != self]
    @property
    def custom_errors_declared(self) -> List["CustomErrorContract"]:
        """
        list(CustomErrorContract): List of the custom errors declared within the contract (not inherited)
        """
        return [s for s in self.custom_errors if s.contract == self]
    @property
    def custom_errors_as_dict(self) -> Dict[str, "CustomErrorContract"]:
        """dict(str: CustomErrorContract): Custom errors indexed by name."""
        return self._custom_errors
# endregion
###################################################################################
###################################################################################
# region Custom Errors
###################################################################################
###################################################################################
    @property
    def type_aliases(self) -> List["TypeAliasContract"]:
        """
        list(TypeAliasContract): List of the contract's type aliases (including inherited)
        """
        return list(self._type_aliases.values())
    @property
    def type_aliases_inherited(self) -> List["TypeAliasContract"]:
        """
        list(TypeAliasContract): List of the inherited type aliases
        """
        return [s for s in self.type_aliases if s.contract != self]
    @property
    def type_aliases_declared(self) -> List["TypeAliasContract"]:
        """
        list(TypeAliasContract): List of the type aliases declared within the contract (not inherited)
        """
        return [s for s in self.type_aliases if s.contract == self]
    @property
    def type_aliases_as_dict(self) -> Dict[str, "TypeAliasContract"]:
        """dict(str: TypeAliasContract): Type aliases indexed by name."""
        return self._type_aliases
# endregion
###################################################################################
###################################################################################
# region Variables
###################################################################################
###################################################################################
    @property
    def state_variables_by_ref_id(self) -> Dict[int, "StateVariable"]:
        """
        Returns the state variables by reference id (only available for compact AST).
        """
        return self._state_variables_by_ref_id
    @property
    def variables(self) -> List["StateVariable"]:
        """
        Returns all the accessible variables (do not include private variable from inherited contract)
        list(StateVariable): List of the state variables. Alias to self.state_variables.
        """
        return list(self.state_variables)
    @property
    def variables_as_dict(self) -> Dict[str, "StateVariable"]:
        """dict(str: StateVariable): Accessible state variables indexed by name."""
        return self._variables
    @property
    def state_variables(self) -> List["StateVariable"]:
        """
        Returns all the accessible variables (do not include private variable from inherited contract).
        Use stored_state_variables_ordered for all the storage variables following the storage order
        Use transient_state_variables_ordered for all the transient variables following the storage order
        list(StateVariable): List of the state variables.
        """
        return list(self._variables.values())
    @property
    def state_variables_entry_points(self) -> List["StateVariable"]:
        """
        list(StateVariable): List of the state variables that are public.
        """
        return [var for var in self._variables.values() if var.visibility == "public"]
    @property
    def state_variables_ordered(self) -> List["StateVariable"]:
        """
        list(StateVariable): List of the state variables by order of declaration.
        """
        return self._variables_ordered
    def add_state_variables_ordered(self, new_vars: List["StateVariable"]) -> None:
        """Append newly parsed state variables, preserving declaration order."""
        self._variables_ordered += new_vars
    @property
    def storage_variables_ordered(self) -> List["StateVariable"]:
        """
        list(StateVariable): List of the state variables in storage location by order of declaration.
        """
        return [v for v in self._variables_ordered if v.is_stored]
    @property
    def transient_variables_ordered(self) -> List["StateVariable"]:
        """
        list(StateVariable): List of the state variables in transient location by order of declaration.
        """
        return [v for v in self._variables_ordered if v.is_transient]
    @property
    def state_variables_inherited(self) -> List["StateVariable"]:
        """
        list(StateVariable): List of the inherited state variables
        """
        return [s for s in self.state_variables if s.contract != self]
    @property
    def state_variables_declared(self) -> List["StateVariable"]:
        """
        list(StateVariable): List of the state variables declared within the contract (not inherited)
        """
        return [s for s in self.state_variables if s.contract == self]
    @property
    def slithir_variables(self) -> List["SlithIRVariable"]:
        """
        List all of the slithir variables (non SSA), deduplicated.
        """
        slithir_variabless = [f.slithir_variables for f in self.functions + self.modifiers]  # type: ignore
        slithir_variables = [item for sublist in slithir_variabless for item in sublist]
        return list(set(slithir_variables))
    @property
    def state_variables_used_in_reentrant_targets(
        self,
    ) -> Dict["StateVariable", Set[Union["StateVariable", "Function"]]]:
        """
        Returns the state variables used in reentrant targets. Heuristics:
        - Variable used (read/write) in entry points that are reentrant
        - State variables that are public
        The result maps each state variable to the set of functions (or the
        variable itself, for public variables) through which it is reachable.
        """
        # Local import to avoid a circular dependency at module load time.
        from slither.core.variables.state_variable import StateVariable
        if self._state_variables_used_in_reentrant_targets is None:
            reentrant_functions = [f for f in self.functions_entry_points if f.is_reentrant]
            variables_used: Dict[
                StateVariable, Set[Union[StateVariable, "Function"]]
            ] = defaultdict(set)
            # Collect every state variable touched by any IR operation of a
            # reentrant entry point, keyed to the function that touches it.
            for function in reentrant_functions:
                for ir in function.all_slithir_operations():
                    state_variables = [v for v in ir.used if isinstance(v, StateVariable)]
                    for state_variable in state_variables:
                        variables_used[state_variable].add(ir.node.function)
            # Public variables are externally readable, so count them as their own target.
            for variable in [v for v in self.state_variables if v.visibility == "public"]:
                variables_used[variable].add(variable)
            self._state_variables_used_in_reentrant_targets = variables_used
        return self._state_variables_used_in_reentrant_targets
# endregion
###################################################################################
###################################################################################
# region Constructors
###################################################################################
###################################################################################
    @property
    def constructor(self) -> Optional["Function"]:
        """
        Return the contract's immediate constructor.
        If there is no immediate constructor, returns the first constructor
        executed, following the c3 linearization
        Return None if there is no constructor.
        """
        cst = self.constructors_declared
        if cst:
            return cst
        # self.inheritance is ordered by the C3 linearization (first element
        # is the first parent executed), so the first match is the right one.
        for inherited_contract in self.inheritance:
            cst = inherited_contract.constructors_declared
            if cst:
                return cst
        return None
    @property
    def constructors_declared(self) -> Optional["Function"]:
        """Return the constructor declared by this contract itself, or None."""
        return next(
            (
                func
                for func in self.functions
                if func.is_constructor and func.contract_declarer == self
            ),
            None,
        )
    @property
    def constructors(self) -> List["FunctionContract"]:
        """
        Return the list of constructors (including inherited)
        """
        return [func for func in self.functions if func.is_constructor]
    @property
    def explicit_base_constructor_calls(self) -> List["Function"]:
        """
        list(Function): List of the base constructors called explicitly by this contract definition.
        Base constructors called by any constructor definition will not be included.
        Base constructors implicitly called by the contract definition (without
        parenthesis) will not be included.
        On "contract B is A(){..}" it returns the constructor of A
        """
        return [c.constructor for c in self._explicit_base_constructor_calls if c.constructor]
# endregion
###################################################################################
###################################################################################
# region Functions and Modifiers
###################################################################################
###################################################################################
    @property
    def functions_signatures(self) -> List[str]:
        """
        Return the signatures of all the public/external functions/state variables
        :return: list(string) the signatures of all the functions that can be called
        """
        # Memoized; public state variables count because their auto-generated
        # getters are externally callable.
        if self._signatures is None:
            sigs = [
                v.full_name for v in self.state_variables if v.visibility in ["public", "external"]
            ]
            sigs += {f.full_name for f in self.functions if f.visibility in ["public", "external"]}
            self._signatures = list(set(sigs))
        return self._signatures
    @property
    def functions_signatures_declared(self) -> List[str]:
        """
        Return the signatures of the public/external functions/state variables that are declared by this contract
        :return: list(string) the signatures of all the functions that can be called and are declared by this contract
        """
        if self._signatures_declared is None:
            sigs = [
                v.full_name
                for v in self.state_variables_declared
                if v.visibility in ["public", "external"]
            ]
            sigs += {
                f.full_name
                for f in self.functions_declared
                if f.visibility in ["public", "external"]
            }
            self._signatures_declared = list(set(sigs))
        return self._signatures_declared
    @property
    def functions(self) -> List["FunctionContract"]:
        """
        list(Function): List of the functions (including inherited)
        """
        return list(self._functions.values())
    def available_functions_as_dict(self) -> Dict[str, "Function"]:
        """dict(str: Function): Non-shadowed functions indexed by full name (memoized)."""
        if self._available_functions_as_dict is None:
            self._available_functions_as_dict = {
                f.full_name: f for f in self._functions.values() if not f.is_shadowed
            }
        return self._available_functions_as_dict
    def add_function(self, func: "FunctionContract") -> None:
        """Register a function, keyed by its canonical name."""
        self._functions[func.canonical_name] = func
    def set_functions(self, functions: Dict[str, "FunctionContract"]) -> None:
        """
        Set the functions
        :param functions: dict full_name -> function
        :return:
        """
        self._functions = functions
    @property
    def functions_inherited(self) -> List["FunctionContract"]:
        """
        list(Function): List of the inherited functions
        """
        return [f for f in self.functions if f.contract_declarer != self]
    @property
    def functions_declared(self) -> List["FunctionContract"]:
        """
        list(Function): List of the functions defined within the contract (not inherited)
        """
        return [f for f in self.functions if f.contract_declarer == self]
    @property
    def functions_entry_points(self) -> List["FunctionContract"]:
        """
        list(Functions): List of public and external functions
        """
        # NOTE: `and` binds tighter than `or`, so the filter reads
        # (public/external AND not shadowed) OR fallback — fallback functions
        # are always included regardless of shadowing.
        return [
            f
            for f in self.functions
            if f.visibility in ["public", "external"] and not f.is_shadowed or f.is_fallback
        ]
    @property
    def modifiers(self) -> List["Modifier"]:
        """
        list(Modifier): List of the modifiers (including inherited)
        """
        return list(self._modifiers.values())
    def available_modifiers_as_dict(self) -> Dict[str, "Modifier"]:
        """dict(str: Modifier): Non-shadowed modifiers indexed by full name."""
        return {m.full_name: m for m in self._modifiers.values() if not m.is_shadowed}
    def set_modifiers(self, modifiers: Dict[str, "Modifier"]) -> None:
        """
        Set the modifiers
        :param modifiers: dict full_name -> modifier
        :return:
        """
        self._modifiers = modifiers
    @property
    def modifiers_inherited(self) -> List["Modifier"]:
        """
        list(Modifier): List of the inherited modifiers
        """
        return [m for m in self.modifiers if m.contract_declarer != self]
    @property
    def modifiers_declared(self) -> List["Modifier"]:
        """
        list(Modifier): List of the modifiers defined within the contract (not inherited)
        """
        return [m for m in self.modifiers if m.contract_declarer == self]
    @property
    def functions_and_modifiers(self) -> List["Function"]:
        """
        list(Function|Modifier): List of the functions and modifiers
        """
        return self.functions + self.modifiers  # type: ignore
    @property
    def functions_and_modifiers_inherited(self) -> List["Function"]:
        """
        list(Function|Modifier): List of the inherited functions and modifiers
        """
        return self.functions_inherited + self.modifiers_inherited  # type: ignore
    @property
    def functions_and_modifiers_declared(self) -> List["Function"]:
        """
        list(Function|Modifier): List of the functions and modifiers defined within the contract (not inherited)
        """
        return self.functions_declared + self.modifiers_declared  # type: ignore
@property
def fallback_function(self) -> Optional["FunctionContract"]:
if self._fallback_function is None:
for f in self.functions:
if f.is_fallback:
self._fallback_function = f
break
return self._fallback_function
@property
def receive_function(self) -> Optional["FunctionContract"]:
if self._receive_function is None:
for f in self.functions:
if f.is_receive:
self._receive_function = f
break
return self._receive_function
    def available_elements_from_inheritances(
        self,
        elements: Dict[str, "Function"],
        getter_available: Callable[["Contract"], List["FunctionContract"]],
    ) -> Dict[str, "Function"]:
        """
        Resolve which inherited elements are accessible from this contract.
        :param elements: dict(canonical_name -> elements)
        :param getter_available: function taking a Contract and returning its available elements
        :return: dict(full_name -> element) of the accessible inherited elements
        """
        # keep track of the contracts visited
        # to prevent an override due to multiple inheritance of the same contract
        # A is B, C, D is C, --> the second C was already seen
        inherited_elements: Dict[str, "FunctionContract"] = {}
        accessible_elements = {}
        contracts_visited = []
        # Walk parents from least- to most-derived so later (more derived)
        # definitions overwrite earlier ones in inherited_elements.
        for father in self.inheritance_reverse:
            functions: Dict[str, "FunctionContract"] = {
                v.full_name: v
                for v in getter_available(father)
                if v.contract not in contracts_visited
                and v.function_language
                != FunctionLanguage.Yul  # Yul functions are not propagated in the inheritance
            }
            contracts_visited.append(father)
            inherited_elements.update(functions)
        # Map back to the caller-supplied elements via canonical names.
        for element in inherited_elements.values():
            accessible_elements[element.full_name] = elements[element.canonical_name]
        return accessible_elements
# endregion
###################################################################################
###################################################################################
# region Inheritance
###################################################################################
###################################################################################
    @property
    def inheritance(self) -> List["Contract"]:
        """
        list(Contract): Inheritance list. Order: the first elem is the first father to be executed
        """
        return list(self._inheritance)
    @property
    def immediate_inheritance(self) -> List["Contract"]:
        """
        list(Contract): List of contracts immediately inherited from (fathers). Order: order of declaration.
        """
        return list(self._immediate_inheritance)
    @property
    def inheritance_reverse(self) -> List["Contract"]:
        """
        list(Contract): Inheritance list. Order: the last elem is the first father to be executed
        """
        return list(reversed(self._inheritance))
    def set_inheritance(
        self,
        inheritance: List["Contract"],
        immediate_inheritance: List["Contract"],
        called_base_constructor_contracts: List["Contract"],
    ) -> None:
        """Record the (linearized) inheritance information computed by the parser."""
        self._inheritance = inheritance
        self._immediate_inheritance = immediate_inheritance
        self._explicit_base_constructor_calls = called_base_constructor_contracts
    @property
    def derived_contracts(self) -> List["Contract"]:
        """
        list(Contract): Return the list of contracts derived from self
        """
        candidates = self.compilation_unit.contracts
        return [c for c in candidates if self in c.inheritance]  # type: ignore
# endregion
###################################################################################
###################################################################################
# region Getters from/to object
###################################################################################
###################################################################################
    def get_functions_reading_from_variable(self, variable: "Variable") -> List["Function"]:
        """
        Return the functions reading the variable
        """
        return [f for f in self.functions if f.is_reading(variable)]
    def get_functions_writing_to_variable(self, variable: "Variable") -> List["Function"]:
        """
        Return the functions writing the variable
        """
        return [f for f in self.functions if f.is_writing(variable)]
    def get_function_from_full_name(self, full_name: str) -> Optional["Function"]:
        """
        Return a function from a full name
        The full name differs from the solidity's signature as the types are conserved
        For example contract type are kept, structure are not unrolled, etc
        Args:
            full_name (str): signature of the function (without return statement)
        Returns:
            Function
        """
        return next(
            (f for f in self.functions if f.full_name == full_name and not f.is_shadowed),
            None,
        )
    def get_function_from_signature(self, function_signature: str) -> Optional["Function"]:
        """
        Return a function from a signature
        Args:
            function_signature (str): signature of the function (without return statement)
        Returns:
            Function
        """
        return next(
            (
                f
                for f in self.functions
                if f.solidity_signature == function_signature and not f.is_shadowed
            ),
            None,
        )
    def get_modifier_from_signature(self, modifier_signature: str) -> Optional["Modifier"]:
        """
        Return a modifier from a signature
        :param modifier_signature: signature of the modifier
        """
        return next(
            (m for m in self.modifiers if m.full_name == modifier_signature and not m.is_shadowed),
            None,
        )
    def get_function_from_canonical_name(self, canonical_name: str) -> Optional["Function"]:
        """
        Return a function from a canonical name (contract.signature())
        Args:
            canonical_name (str): canonical name of the function (without return statement)
        Returns:
            Function
        """
        return next((f for f in self.functions if f.canonical_name == canonical_name), None)
    def get_modifier_from_canonical_name(self, canonical_name: str) -> Optional["Modifier"]:
        """
        Return a modifier from a canonical name (contract.signature())
        Args:
            canonical_name (str): canonical name of the modifier
        Returns:
            Modifier
        """
        return next((m for m in self.modifiers if m.canonical_name == canonical_name), None)
    def get_state_variable_from_name(self, variable_name: str) -> Optional["StateVariable"]:
        """
        Return a state variable from a name
        :param variable_name: name of the variable
        """
        return next((v for v in self.state_variables if v.name == variable_name), None)
    def get_state_variable_from_canonical_name(
        self, canonical_name: str
    ) -> Optional["StateVariable"]:
        """
        Return a state variable from a canonical_name
        Args:
            canonical_name (str): canonical name of the variable
        Returns:
            StateVariable
        """
        return next((v for v in self.state_variables if v.canonical_name == canonical_name), None)
    def get_structure_from_name(self, structure_name: str) -> Optional["StructureContract"]:
        """
        Return a structure from a name
        Args:
            structure_name (str): name of the structure
        Returns:
            StructureContract
        """
        return next((st for st in self.structures if st.name == structure_name), None)
    def get_structure_from_canonical_name(
        self, structure_name: str
    ) -> Optional["StructureContract"]:
        """
        Return a structure from a canonical name
        Args:
            structure_name (str): canonical name of the structure
        Returns:
            StructureContract
        """
        return next((st for st in self.structures if st.canonical_name == structure_name), None)
    def get_event_from_signature(self, event_signature: str) -> Optional["Event"]:
        """
        Return an event from a signature
        Args:
            event_signature (str): signature of the event
        Returns:
            Event
        """
        return next((e for e in self.events if e.full_name == event_signature), None)
    def get_event_from_canonical_name(self, event_canonical_name: str) -> Optional["Event"]:
        """
        Return an event from a canonical name
        Args:
            event_canonical_name (str): canonical name of the event
        Returns:
            Event
        """
        return next((e for e in self.events if e.canonical_name == event_canonical_name), None)
    def get_enum_from_name(self, enum_name: str) -> Optional["Enum"]:
        """
        Return an enum from a name
        Args:
            enum_name (str): name of the enum
        Returns:
            Enum
        """
        return next((e for e in self.enums if e.name == enum_name), None)
    def get_enum_from_canonical_name(self, enum_name: str) -> Optional["Enum"]:
        """
        Return an enum from a canonical name
        Args:
            enum_name (str): canonical name of the enum
        Returns:
            Enum
        """
        return next((e for e in self.enums if e.canonical_name == enum_name), None)
    def get_functions_overridden_by(self, function: "Function") -> List["Function"]:
        """
        Return the list of functions overridden by the function
        Args:
            (core.Function)
        Returns:
            list(core.Function)
        """
        return function.overrides
# endregion
###################################################################################
###################################################################################
# region Recursive getters
###################################################################################
###################################################################################
    @property
    def all_functions_called(self) -> List["Function"]:
        """
        list(Function): List of functions reachable from the contract
        Includes super, and private/internal functions not shadowed
        """
        # Local import to avoid a circular dependency at module load time.
        from slither.slithir.operations import Operation
        if self._all_functions_called is None:
            all_functions = [f for f in self.functions + self.modifiers if not f.is_shadowed]  # type: ignore
            all_callss = [f.all_internal_calls() for f in all_functions] + [list(all_functions)]
            # all_internal_calls() can yield IR Operation objects or callables
            # directly; normalize everything to the callee function.
            all_calls = [
                item.function if isinstance(item, Operation) else item
                for sublist in all_callss
                for item in sublist
            ]
            all_calls = list(set(all_calls))
            all_constructors = [c.constructor for c in self.inheritance if c.constructor]
            all_constructors = list(set(all_constructors))
            set_all_calls = set(all_calls + list(all_constructors))
            # Filter out non-Function callees (e.g. solidity built-ins/variables).
            self._all_functions_called = [c for c in set_all_calls if isinstance(c, Function)]
        return self._all_functions_called
@property
def all_state_variables_written(self) -> List["StateVariable"]:
"""
list(StateVariable): List all of the state variables written
"""
all_state_variables_writtens = [
f.all_state_variables_written() for f in self.functions + self.modifiers # type: ignore
]
all_state_variables_written = [
item for sublist in all_state_variables_writtens for item in sublist
]
return list(set(all_state_variables_written))
@property
def all_state_variables_read(self) -> List["StateVariable"]:
"""
list(StateVariable): List all of the state variables read
"""
all_state_variables_reads = [
f.all_state_variables_read() for f in self.functions + self.modifiers # type: ignore
]
all_state_variables_read = [
item for sublist in all_state_variables_reads for item in sublist
]
return list(set(all_state_variables_read))
    @property
    def all_library_calls(self) -> List["LibraryCall"]:
        """
        list(LibraryCall): List all of the library functions called (deduplicated)
        """
        all_high_level_callss = [f.all_library_calls() for f in self.functions + self.modifiers]  # type: ignore
        all_high_level_calls = [item for sublist in all_high_level_callss for item in sublist]
        return list(set(all_high_level_calls))
    @property
    def all_high_level_calls(self) -> List[Tuple["Contract", "HighLevelCall"]]:
        """
        list(Tuple("Contract", "HighLevelCall")): List all of the external high level calls (deduplicated)
        """
        all_high_level_callss = [f.all_high_level_calls() for f in self.functions + self.modifiers]  # type: ignore
        all_high_level_calls = [item for sublist in all_high_level_callss for item in sublist]
        return list(set(all_high_level_calls))
# endregion
###################################################################################
###################################################################################
# region Summary information
###################################################################################
###################################################################################
    def get_summary(
        self, include_shadowed: bool = True
    ) -> Tuple[str, List[str], List[str], List, List]:
        """Return the function summary
        :param include_shadowed: boolean to indicate if shadowed functions should be included (default True)
        Returns:
            (str, list, list, list, list): (name, inheritance, variables, function summaries, modifier summaries)
        """
        func_summaries = [
            f.get_summary() for f in self.functions if (not f.is_shadowed or include_shadowed)
        ]
        modif_summaries = [
            f.get_summary() for f in self.modifiers if (not f.is_shadowed or include_shadowed)
        ]
        return (
            self.name,
            [str(x) for x in self.inheritance],
            [str(x) for x in self.variables],
            func_summaries,
            modif_summaries,
        )
    def is_signature_only(self) -> bool:
        """Detect if the contract has only abstract (unimplemented) functions
        Returns:
            bool: true if the functions are abstract functions
        """
        return all((not f.is_implemented) for f in self.functions)
# endregion
###################################################################################
###################################################################################
# region ERC conformance
###################################################################################
###################################################################################
    def ercs(self) -> List[str]:
        """
        Return the ERC implemented
        :return: list of string
        """
        # NOTE(review): is_erc1155/is_erc4524 exist below but are not part of
        # this list — confirm whether that omission is intentional.
        all_erc = [
            ("ERC20", self.is_erc20),
            ("ERC165", self.is_erc165),
            ("ERC1820", self.is_erc1820),
            ("ERC223", self.is_erc223),
            ("ERC721", self.is_erc721),
            ("ERC777", self.is_erc777),
            ("ERC2612", self.is_erc2612),
            ("ERC1363", self.is_erc1363),
            ("ERC4626", self.is_erc4626),
        ]
        return [erc for erc, is_erc in all_erc if is_erc()]
    def is_erc20(self) -> bool:
        """
        Check if the contract is an erc20 token
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc20
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC20_signatures)
    def is_erc165(self) -> bool:
        """
        Check if the contract is an erc165 token
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc165
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC165_signatures)
    def is_erc1820(self) -> bool:
        """
        Check if the contract is an erc1820
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc1820
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC1820_signatures)
    def is_erc223(self) -> bool:
        """
        Check if the contract is an erc223 token
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc223
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC223_signatures)
    def is_erc721(self) -> bool:
        """
        Check if the contract is an erc721 token
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc721
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC721_signatures)
    def is_erc777(self) -> bool:
        """
        Check if the contract is an erc777
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc777
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC777_signatures)
    def is_erc1155(self) -> bool:
        """
        Check if the contract is an erc1155
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc1155
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC1155_signatures)
    def is_erc4626(self) -> bool:
        """
        Check if the contract is an erc4626
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc4626
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC4626_signatures)
    def is_erc2612(self) -> bool:
        """
        Check if the contract is an erc2612
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc2612
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC2612_signatures)
    def is_erc1363(self) -> bool:
        """
        Check if the contract is an erc1363
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc1363
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC1363_signatures)
    def is_erc4524(self) -> bool:
        """
        Check if the contract is an erc4524
        Note: it does not check for correct return values
        :return: Returns a true if the contract is an erc4524
        """
        full_names = self.functions_signatures
        return all(s in full_names for s in ERC4524_signatures)
    @property
    def is_token(self) -> bool:
        """
        Check if the contract follows one of the standard ERC token
        :return: True if any of the common token-standard checks pass
        """
        return (
            self.is_erc20()
            or self.is_erc721()
            or self.is_erc165()
            or self.is_erc223()
            or self.is_erc777()
            or self.is_erc1155()
        )
def is_possible_erc20(self) -> bool:
"""
Checks if the provided contract could be attempting to implement ERC20 standards.
:return: Returns a boolean indicating if the provided contract met the token standard.
"""
# We do not check for all the functions, as name(), symbol(), might give too many FPs
full_names = self.functions_signatures
return (
"transfer(address,uint256)" in full_names
or "transferFrom(address,address,uint256)" in full_names
or "approve(address,uint256)" in full_names
)
def is_possible_erc721(self) -> bool:
"""
Checks if the provided contract could be attempting to implement ERC721 standards.
:return: Returns a boolean indicating if the provided contract met the token standard.
"""
# We do not check for all the functions, as name(), symbol(), might give too many FPs
full_names = self.functions_signatures
return (
"ownerOf(uint256)" in full_names
or "safeTransferFrom(address,address,uint256,bytes)" in full_names
or "safeTransferFrom(address,address,uint256)" in full_names
or "setApprovalForAll(address,bool)" in full_names
or "getApproved(uint256)" in full_names
or "isApprovedForAll(address,address)" in full_names
)
@property
def is_possible_token(self) -> bool:
"""
Check if the contract is a potential token (it might not implement all the functions)
:return:
"""
return self.is_possible_erc20() or self.is_possible_erc721()
# endregion
###################################################################################
###################################################################################
# region Dependencies
###################################################################################
###################################################################################
def is_from_dependency(self) -> bool:
return self.compilation_unit.core.crytic_compile.is_dependency(
self.source_mapping.filename.absolute
)
# endregion
###################################################################################
###################################################################################
# region Test
###################################################################################
###################################################################################
@property
def is_truffle_migration(self) -> bool:
"""
Return true if the contract is the Migrations contract needed for Truffle
:return:
"""
if self.compilation_unit.core.crytic_compile.platform == PlatformType.TRUFFLE:
if self.name == "Migrations":
paths = Path(self.source_mapping.filename.absolute).parts
if len(paths) >= 2:
return paths[-2] == "contracts" and paths[-1] == "migrations.sol"
return False
@property
def is_test(self) -> bool:
return is_test_contract(self) or self.is_truffle_migration # type: ignore
# endregion
###################################################################################
###################################################################################
# region Function analyses
###################################################################################
###################################################################################
def update_read_write_using_ssa(self) -> None:
for function in self.functions + list(self.modifiers):
function.update_read_write_using_ssa()
# endregion
###################################################################################
###################################################################################
# region Upgradeability
###################################################################################
###################################################################################
@property
def is_upgradeable(self) -> bool:
if self._is_upgradeable is None:
self._is_upgradeable = False
initializable = self.file_scope.get_contract_from_name("Initializable")
if initializable:
if initializable in self.inheritance:
self._is_upgradeable = True
else:
for contract in self.inheritance + [self]:
# This might lead to false positive
# Not sure why pylint is having a trouble here
# pylint: disable=no-member
lower_name = contract.name.lower()
if "upgradeable" in lower_name or "upgradable" in lower_name:
self._is_upgradeable = True
break
if "initializable" in lower_name:
self._is_upgradeable = True
break
return self._is_upgradeable
@is_upgradeable.setter
def is_upgradeable(self, upgradeable: bool) -> None:
self._is_upgradeable = upgradeable
@property
def is_upgradeable_proxy(self) -> bool:
from slither.core.cfg.node import NodeType
from slither.slithir.operations import LowLevelCall
if self._is_upgradeable_proxy is None:
self._is_upgradeable_proxy = False
if "Proxy" in self.name:
self._is_upgradeable_proxy = True
return True
for f in self.functions:
if f.is_fallback:
for node in f.all_nodes():
for ir in node.irs:
if isinstance(ir, LowLevelCall) and ir.function_name == "delegatecall":
self._is_upgradeable_proxy = True
return self._is_upgradeable_proxy
if node.type == NodeType.ASSEMBLY:
inline_asm = node.inline_asm
if inline_asm:
if "delegatecall" in inline_asm:
self._is_upgradeable_proxy = True
return self._is_upgradeable_proxy
return self._is_upgradeable_proxy
@is_upgradeable_proxy.setter
def is_upgradeable_proxy(self, upgradeable_proxy: bool) -> None:
self._is_upgradeable_proxy = upgradeable_proxy
@property
def upgradeable_version(self) -> Optional[str]:
return self._upgradeable_version
@upgradeable_version.setter
def upgradeable_version(self, version_name: str) -> None:
self._upgradeable_version = version_name
# endregion
###################################################################################
###################################################################################
# region Internals
###################################################################################
###################################################################################
@property
def is_incorrectly_constructed(self) -> bool:
"""
Return true if there was an internal Slither's issue when analyzing the contract
:return:
"""
return self._is_incorrectly_parsed
@is_incorrectly_constructed.setter
def is_incorrectly_constructed(self, incorrect: bool) -> None:
self._is_incorrectly_parsed = incorrect
def add_constructor_variables(self) -> None:
from slither.core.declarations.function_contract import FunctionContract
if self.state_variables:
for (idx, variable_candidate) in enumerate(self.state_variables):
if variable_candidate.expression and not variable_candidate.is_constant:
constructor_variable = FunctionContract(self.compilation_unit)
constructor_variable.set_function_type(FunctionType.CONSTRUCTOR_VARIABLES)
constructor_variable.set_contract(self) # type: ignore
constructor_variable.set_contract_declarer(self) # type: ignore
constructor_variable.set_visibility("internal")
# For now, source mapping of the constructor variable is the whole contract
# Could be improved with a targeted source mapping
constructor_variable.set_offset(self.source_mapping, self.compilation_unit)
self._functions[constructor_variable.canonical_name] = constructor_variable
prev_node = self._create_node(
constructor_variable, 0, variable_candidate, constructor_variable
)
variable_candidate.node_initialization = prev_node
counter = 1
for v in self.state_variables[idx + 1 :]:
if v.expression and not v.is_constant:
next_node = self._create_node(
constructor_variable, counter, v, prev_node.scope
)
v.node_initialization = next_node
prev_node.add_son(next_node)
next_node.add_father(prev_node)
prev_node = next_node
counter += 1
break
for (idx, variable_candidate) in enumerate(self.state_variables):
if variable_candidate.expression and variable_candidate.is_constant:
constructor_variable = FunctionContract(self.compilation_unit)
constructor_variable.set_function_type(
FunctionType.CONSTRUCTOR_CONSTANT_VARIABLES
)
constructor_variable.set_contract(self) # type: ignore
constructor_variable.set_contract_declarer(self) # type: ignore
constructor_variable.set_visibility("internal")
# For now, source mapping of the constructor variable is the whole contract
# Could be improved with a targeted source mapping
constructor_variable.set_offset(self.source_mapping, self.compilation_unit)
self._functions[constructor_variable.canonical_name] = constructor_variable
prev_node = self._create_node(
constructor_variable, 0, variable_candidate, constructor_variable
)
variable_candidate.node_initialization = prev_node
counter = 1
for v in self.state_variables[idx + 1 :]:
if v.expression and v.is_constant:
next_node = self._create_node(
constructor_variable, counter, v, prev_node.scope
)
v.node_initialization = next_node
prev_node.add_son(next_node)
next_node.add_father(prev_node)
prev_node = next_node
counter += 1
break
def _create_node(
self, func: Function, counter: int, variable: "Variable", scope: Union[Scope, Function]
) -> "Node":
from slither.core.cfg.node import Node, NodeType
from slither.core.expressions import (
AssignmentOperationType,
AssignmentOperation,
Identifier,
)
# Function uses to create node for state variable declaration statements
node = Node(NodeType.OTHER_ENTRYPOINT, counter, scope, func.file_scope)
node.set_offset(variable.source_mapping, self.compilation_unit)
node.set_function(func)
func.add_node(node)
assert variable.expression
expression = AssignmentOperation(
Identifier(variable),
variable.expression,
AssignmentOperationType.ASSIGN,
variable.type,
)
expression.set_offset(variable.source_mapping, self.compilation_unit)
node.add_expression(expression)
return node
# endregion
###################################################################################
###################################################################################
# region SlithIR
###################################################################################
###################################################################################
def convert_expression_to_slithir_ssa(self) -> None:
"""
Assume generate_slithir_and_analyze was called on all functions
:return:
"""
from slither.slithir.variables import StateIRVariable
all_ssa_state_variables_instances = {}
for contract in self.inheritance:
for v in contract.state_variables_declared:
new_var = StateIRVariable(v)
all_ssa_state_variables_instances[v.canonical_name] = new_var
self._initial_state_variables.append(new_var)
for v in self.variables:
if v.contract == self:
new_var = StateIRVariable(v)
all_ssa_state_variables_instances[v.canonical_name] = new_var
self._initial_state_variables.append(new_var)
for func in self.functions + list(self.modifiers):
func.generate_slithir_ssa(all_ssa_state_variables_instances)
def fix_phi(self) -> None:
last_state_variables_instances: Dict[str, List["StateVariable"]] = {}
initial_state_variables_instances: Dict[str, "StateVariable"] = {}
for v in self._initial_state_variables:
last_state_variables_instances[v.canonical_name] = []
initial_state_variables_instances[v.canonical_name] = v
for func in self.functions + list(self.modifiers):
result = func.get_last_ssa_state_variables_instances()
for variable_name, instances in result.items():
# TODO: investigate the next operation
last_state_variables_instances[variable_name] += list(instances)
for func in self.functions + list(self.modifiers):
func.fix_phi(last_state_variables_instances, initial_state_variables_instances)
# endregion
###################################################################################
###################################################################################
# region Built in definitions
###################################################################################
###################################################################################
def __eq__(self, other: Any) -> bool:
if isinstance(other, str):
return other == self.name
return NotImplemented
def __neq__(self, other: Any) -> bool:
if isinstance(other, str):
return other != self.name
return NotImplemented
def __str__(self) -> str:
return self.name
def __hash__(self) -> int:
return self._id # type:ignore
# endregion
| Contract |
python | getsentry__sentry | tests/sentry/monitors/consumers/test_clock_tick_consumer.py | {
"start": 4363,
"end": 7984
} | class ____(TestCase):
@thread_leaks.thread_leak_allowlist(reason="monitors", issue=97032)
@override_settings(SENTRY_EVENTSTREAM="sentry.eventstream.kafka.KafkaEventStream")
def test_end_to_end(self) -> None:
ts = timezone.now().replace(second=0, microsecond=0)
broker: LocalBroker[KafkaPayload] = LocalBroker(MemoryMessageStorage())
clock_tick_topic = Topic("monitors-clock-tick")
clock_tasks_topic = Topic("monitors-clock-tasks")
broker.create_topic(clock_tick_topic, partitions=1)
broker.create_topic(clock_tasks_topic, partitions=1)
# Setup one monitor which should be marked as missed, and one check-in that
# should be marked as timed-out
monitor = Monitor.objects.create(
organization_id=self.organization.id,
project_id=self.project.id,
config={
"schedule": "* * * * *",
"schedule_type": ScheduleType.CRONTAB,
"checkin_margin": 1,
"max_runtime": 1,
},
)
monitor_environment = MonitorEnvironment.objects.create(
monitor=monitor,
environment_id=self.environment.id,
status=MonitorStatus.OK,
last_checkin=ts - timedelta(minutes=2),
next_checkin=ts - timedelta(minutes=1),
next_checkin_latest=ts,
)
checkin = MonitorCheckIn.objects.create(
monitor=monitor,
monitor_environment=monitor_environment,
project_id=self.project.id,
status=CheckInStatus.IN_PROGRESS,
date_added=ts - timedelta(minutes=1),
timeout_at=ts,
)
producer = broker.get_producer()
tick_consumer = broker.get_consumer("monitors-clock-tick")
tasks_consumer = broker.get_consumer("monitors-clock-tasks")
# Dispatch a clock tick
with mock.patch(
"sentry.monitors.clock_dispatch._clock_tick_producer",
producer,
):
try_monitor_clock_tick(ts, 0)
tick_processor = StreamProcessor(
consumer=tick_consumer,
topic=clock_tick_topic,
processor_factory=MonitorClockTickStrategyFactory(),
commit_policy=ONCE_PER_SECOND,
)
task_processor = StreamProcessor(
consumer=tasks_consumer,
topic=clock_tasks_topic,
processor_factory=MonitorClockTasksStrategyFactory(),
commit_policy=ONCE_PER_SECOND,
)
# Process the tick. This will produce two tasks, one for the missed
# check-in and one for the timed-out check-in. This will produce two
# tasks, one for the missed check-in and one for the timed-out check-in
with mock.patch(
"sentry.monitors.clock_tasks.producer._clock_task_producer",
producer,
):
tick_processor._run_once()
# process the two tasks
task_processor._run_once()
task_processor._run_once()
# Missed check-in was created
missed_checkin = MonitorCheckIn.objects.filter(
monitor_environment=monitor_environment, status=CheckInStatus.MISSED
)
assert missed_checkin.exists()
# The missed check-in date_added is set to when it should have been
# sent, not when we detect it
assert missed_checkin[0].date_added == ts - timedelta(minutes=1)
# Missed check-in was created
checkin.refresh_from_db()
assert checkin.status == CheckInStatus.TIMEOUT
| MonitorsClockTickEndToEndTest |
python | ray-project__ray | doc/source/serve/doc_code/quickstart_composed.py | {
"start": 517,
"end": 1434
} | class ____:
def __init__(
self,
adder1: DeploymentHandle,
adder2: DeploymentHandle,
combiner: DeploymentHandle,
):
self._adder1 = adder1
self._adder2 = adder2
self._combiner = combiner
async def __call__(self, request: starlette.requests.Request) -> Dict[str, float]:
input_json = await request.json()
final_result = await self._combiner.average.remote(
self._adder1.add.remote(input_json["val"]),
self._adder2.add.remote(input_json["val"]),
)
return {"result": final_result}
# 2. Build the application consisting of the models and ingress.
app = Ingress.bind(Adder.bind(increment=1), Adder.bind(increment=2), Combiner.bind())
serve.run(app)
# 3: Query the application and print the result.
print(requests.post("http://localhost:8000/", json={"val": 100.0}).json())
# {"result": 101.5}
| Ingress |
python | matplotlib__matplotlib | lib/matplotlib/font_manager.py | {
"start": 32971,
"end": 35056
} | class ____(json.JSONEncoder):
def default(self, o):
if isinstance(o, FontManager):
return dict(o.__dict__, __class__='FontManager')
elif isinstance(o, FontEntry):
d = dict(o.__dict__, __class__='FontEntry')
try:
# Cache paths of fonts shipped with Matplotlib relative to the
# Matplotlib data path, which helps in the presence of venvs.
d["fname"] = str(Path(d["fname"]).relative_to(mpl.get_data_path()))
except ValueError:
pass
return d
else:
return super().default(o)
def _json_decode(o):
cls = o.pop('__class__', None)
if cls is None:
return o
elif cls == 'FontManager':
r = FontManager.__new__(FontManager)
r.__dict__.update(o)
return r
elif cls == 'FontEntry':
if not os.path.isabs(o['fname']):
o['fname'] = os.path.join(mpl.get_data_path(), o['fname'])
r = FontEntry(**o)
return r
else:
raise ValueError("Don't know how to deserialize __class__=%s" % cls)
def json_dump(data, filename):
"""
Dump `FontManager` *data* as JSON to the file named *filename*.
See Also
--------
json_load
Notes
-----
File paths that are children of the Matplotlib data path (typically, fonts
shipped with Matplotlib) are stored relative to that data path (to remain
valid across virtualenvs).
This function temporarily locks the output file to prevent multiple
processes from overwriting one another's output.
"""
try:
with cbook._lock_path(filename), open(filename, 'w') as fh:
json.dump(data, fh, cls=_JSONEncoder, indent=2)
except OSError as e:
_log.warning('Could not save font_manager cache %s', e)
def json_load(filename):
"""
Load a `FontManager` from the JSON file named *filename*.
See Also
--------
json_dump
"""
with open(filename) as fh:
return json.load(fh, object_hook=_json_decode)
| _JSONEncoder |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-path-with-alternating-directions-ii.py | {
"start": 690,
"end": 1297
} | class ____(object):
def minCost(self, m, n, waitCost):
"""
:type m: int
:type n: int
:type waitCost: List[List[int]]
:rtype: int
"""
waitCost[0][0] = waitCost[m-1][n-1] = 0
dp = [0]*n
for i in xrange(m):
for j in xrange(n):
prev = 0 if (i, j) == (0, 0) else float("inf")
if i-1 >= 0:
prev = min(prev, dp[j])
if j-1 >= 0:
prev = min(prev, dp[j-1])
dp[j] = prev+waitCost[i][j]+(i+1)*(j+1)
return dp[n-1]
| Solution2 |
python | django__django | tests/select_related_onetoone/models.py | {
"start": 139,
"end": 315
} | class ____(models.Model):
user = models.OneToOneField(User, models.CASCADE)
city = models.CharField(max_length=100)
state = models.CharField(max_length=2)
| UserProfile |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/expressions/rolling.py | {
"start": 1420,
"end": 2777
} | class ____(UnaryOp):
pass
def to_request(
value: expr.Expr, orderby: Column, df: DataFrame
) -> plc.rolling.RollingRequest:
"""
Produce a rolling request for evaluation with pylibcudf.
Parameters
----------
value
The expression to perform the rolling aggregation on.
orderby
Orderby column, used as input to the request when the aggregation is Len.
df
DataFrame used to evaluate the inputs to the aggregation.
"""
min_periods = 1
if isinstance(value, expr.Len):
# A count aggregation, we need a column so use the orderby column
col = orderby
elif isinstance(value, expr.Agg):
child = value.children[0]
col = child.evaluate(df, context=ExecutionContext.ROLLING)
if value.name == "var":
# Polars variance produces null if nvalues <= ddof
# libcudf produces NaN. However, we can get the polars
# behaviour by setting the minimum window size to ddof +
# 1.
min_periods = value.options + 1
else:
col = value.evaluate(
df, context=ExecutionContext.ROLLING
) # pragma: no cover; raise before we get here because we
# don't do correct handling of empty groups
return plc.rolling.RollingRequest(col.obj, min_periods, value.agg_request)
| CumSumOp |
python | great-expectations__great_expectations | great_expectations/data_context/store/gx_cloud_store_backend.py | {
"start": 986,
"end": 1113
} | class ____(TypedDict):
code: Optional[str]
detail: Optional[str]
source: Union[str, Dict[str, str], None]
| ErrorDetail |
python | realpython__materials | inheritance-and-composition/inheritance/productivity.py | {
"start": 306,
"end": 409
} | class ____:
def work(self, hours):
return f"screams and yells for {hours} hours."
| ManagerRole |
python | coleifer__peewee | peewee.py | {
"start": 260995,
"end": 262084
} | class ____(_ModelWriteQueryHelper, Insert):
default_row_type = ROW.TUPLE
def __init__(self, *args, **kwargs):
super(ModelInsert, self).__init__(*args, **kwargs)
if self._returning is None and self.model._meta.database is not None:
if self.model._meta.database.returning_clause:
self._returning = self.model._meta.get_primary_keys()
def returning(self, *returning):
# By default ModelInsert will yield a `tuple` containing the
# primary-key of the newly inserted row. But if we are explicitly
# specifying a returning clause and have not set a row type, we will
# default to returning model instances instead.
if returning and self._row_type is None:
self._row_type = ROW.MODEL
return super(ModelInsert, self).returning(*returning)
def get_default_data(self):
return self.model._meta.defaults
def get_default_columns(self):
fields = self.model._meta.sorted_fields
return fields[1:] if self.model._meta.auto_increment else fields
| ModelInsert |
python | celery__celery | t/unit/tasks/test_canvas.py | {
"start": 1169,
"end": 2691
} | class ____:
def setup_method(self):
@self.app.task(shared=False)
def add(x, y):
return x + y
self.add = add
@self.app.task(shared=False)
def mul(x, y):
return x * y
self.mul = mul
@self.app.task(shared=False)
def div(x, y):
return x / y
self.div = div
@self.app.task(shared=False)
def xsum(numbers):
return sum(sum(num) if isinstance(num, Iterable) else num for num in numbers)
self.xsum = xsum
@self.app.task(shared=False, bind=True)
def replaced(self, x, y):
return self.replace(add.si(x, y))
self.replaced = replaced
@self.app.task(shared=False, bind=True)
def replaced_group(self, x, y):
return self.replace(group(add.si(x, y), mul.si(x, y)))
self.replaced_group = replaced_group
@self.app.task(shared=False, bind=True)
def replace_with_group(self, x, y):
return self.replace(group(add.si(x, y), mul.si(x, y)))
self.replace_with_group = replace_with_group
@self.app.task(shared=False, bind=True)
def replace_with_chain(self, x, y):
return self.replace(group(add.si(x, y) | mul.s(y), add.si(x, y)))
self.replace_with_chain = replace_with_chain
@self.app.task(shared=False)
def xprod(numbers):
return math.prod(numbers)
self.xprod = xprod
@Signature.register_type()
| CanvasCase |
python | huggingface__transformers | tests/models/switch_transformers/test_modeling_switch_transformers.py | {
"start": 1660,
"end": 21387
} | class ____:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
encoder_seq_length=7,
decoder_seq_length=9,
# For common tests
is_training=True,
use_attention_mask=True,
use_labels=True,
hidden_size=32,
num_hidden_layers=2,
num_attention_heads=4,
d_ff=37,
relative_attention_num_buckets=8,
dropout_rate=0.1,
initializer_factor=0.002,
eos_token_id=1,
pad_token_id=0,
decoder_start_token_id=0,
decoder_layers=None,
sparse_step=1,
num_sparse_decoder_layers=2,
num_sparse_encoder_layers=2,
expert_capacity=100,
router_jitter_noise=0.0,
):
self.parent = parent
self.batch_size = batch_size
self.encoder_seq_length = encoder_seq_length
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.d_ff = d_ff
self.relative_attention_num_buckets = relative_attention_num_buckets
self.dropout_rate = dropout_rate
self.initializer_factor = initializer_factor
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.scope = None
self.decoder_layers = decoder_layers
self.sparse_step = sparse_step
self.num_sparse_decoder_layers = num_sparse_decoder_layers
self.num_sparse_encoder_layers = num_sparse_encoder_layers
self.expert_capacity = expert_capacity
self.router_jitter_noise = router_jitter_noise
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
decoder_attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.encoder_seq_length], vocab_size=2)
decoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = self.get_config()
return (
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
)
def get_pipeline_config(self):
return SwitchTransformersConfig(
vocab_size=166, # switch_transformers forces 100 extra tokens
d_model=self.hidden_size,
d_ff=self.d_ff,
d_kv=self.hidden_size // self.num_attention_heads,
num_layers=self.num_hidden_layers,
num_decoder_layers=self.decoder_layers,
num_heads=self.num_attention_heads,
relative_attention_num_buckets=self.relative_attention_num_buckets,
dropout_rate=self.dropout_rate,
initializer_factor=self.initializer_factor,
eos_token_id=self.eos_token_id,
bos_token_id=self.pad_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
expert_capacity=self.expert_capacity,
router_jitter_noise=self.router_jitter_noise,
)
def get_config(self):
return SwitchTransformersConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
d_ff=self.d_ff,
d_kv=self.hidden_size // self.num_attention_heads,
num_layers=self.num_hidden_layers,
num_decoder_layers=self.decoder_layers,
num_heads=self.num_attention_heads,
relative_attention_num_buckets=self.relative_attention_num_buckets,
dropout_rate=self.dropout_rate,
initializer_factor=self.initializer_factor,
eos_token_id=self.eos_token_id,
bos_token_id=self.pad_token_id,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
sparse_step=self.sparse_step,
num_sparse_encoder_layers=self.num_sparse_encoder_layers,
num_sparse_decoder_layers=self.num_sparse_decoder_layers,
)
def check_prepare_lm_labels_via_shift_left(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = SwitchTransformersModel(config=config)
model.to(torch_device)
model.eval()
# make sure that lm_labels are correctly padded from the right
lm_labels.masked_fill_((lm_labels == self.decoder_start_token_id), self.eos_token_id)
# add casaul pad token mask
triangular_mask = torch.tril(lm_labels.new_ones(lm_labels.shape)).logical_not()
lm_labels.masked_fill_(triangular_mask, self.pad_token_id)
decoder_input_ids = model._shift_right(lm_labels)
for i, (decoder_input_ids_slice, lm_labels_slice) in enumerate(zip(decoder_input_ids, lm_labels)):
# first item
self.parent.assertEqual(decoder_input_ids_slice[0].item(), self.decoder_start_token_id)
if i < decoder_input_ids_slice.shape[-1]:
if i < decoder_input_ids.shape[-1] - 1:
# items before diagonal
self.parent.assertListEqual(
decoder_input_ids_slice[1 : i + 1].tolist(), lm_labels_slice[:i].tolist()
)
# pad items after diagonal
if i < decoder_input_ids.shape[-1] - 2:
self.parent.assertListEqual(
decoder_input_ids_slice[i + 2 :].tolist(), lm_labels_slice[i + 1 : -1].tolist()
)
else:
# all items after square
self.parent.assertListEqual(decoder_input_ids_slice[1:].tolist(), lm_labels_slice[:-1].tolist())
def create_and_check_model(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = SwitchTransformersModel(config=config)
model.to(torch_device)
model.eval()
result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
decoder_output = result.last_hidden_state
decoder_past = result.past_key_values
encoder_output = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(decoder_past), config.num_layers)
def create_and_check_with_lm_head(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = SwitchTransformersForConditionalGeneration(config=config).to(torch_device).eval()
outputs = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
labels=lm_labels,
output_attentions=True,
output_router_logits=True,
output_hidden_states=True,
)
self.parent.assertEqual(len(outputs), 13)
self.parent.assertEqual(outputs["logits"].size(), (self.batch_size, self.decoder_seq_length, self.vocab_size))
self.parent.assertEqual(outputs["loss"].size(), ())
def create_and_check_decoder_model_past(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = SwitchTransformersModel(config=config).get_decoder().to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True, output_router_logits=False)
outputs_use_cache_conf = model(input_ids, output_router_logits=False)
outputs_no_past = model(input_ids, use_cache=False, output_router_logits=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
output, past_key_values = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids, output_router_logits=False)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values, output_router_logits=False)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = SwitchTransformersModel(config=config).get_decoder()
model.to(torch_device)
model.eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
output, past_key_values = model(
input_ids, attention_mask=attn_mask, use_cache=True, output_router_logits=False
).to_tuple()
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask, output_router_logits=False)[
"last_hidden_state"
]
output_from_past = model(
next_tokens, past_key_values=past_key_values, attention_mask=attn_mask, output_router_logits=False
)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def create_and_check_decoder_model_past_large_inputs(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = SwitchTransformersModel(config=config).get_decoder().to(torch_device).eval()
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True, output_router_logits=False)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_router_logits=False)[
"last_hidden_state"
]
output_from_past = model(
next_tokens,
attention_mask=next_attention_mask,
past_key_values=past_key_values,
output_router_logits=False,
)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
@slow
def create_and_check_generate_with_past_key_values(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
r"""
This test does not pass for small models due to precision errors. It is therefore only run for slightly larger models.
"""
model = (
SwitchTransformersForConditionalGeneration.from_pretrained("google/switch-base-8").to(torch_device).eval()
)
torch.manual_seed(0)
output_without_past_cache = model.generate(
input_ids[:1], num_beams=2, max_length=5, do_sample=True, use_cache=False
)
torch.manual_seed(0)
output_with_past_cache = model.generate(input_ids[:1], num_beams=2, max_length=5, do_sample=True)
self.parent.assertTrue(torch.all(output_with_past_cache == output_without_past_cache))
def create_and_check_model_fp16_forward(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
model = SwitchTransformersModel(config=config).to(torch_device).half().eval()
output = model(input_ids, decoder_input_ids=input_ids, attention_mask=attention_mask)["last_hidden_state"]
self.parent.assertFalse(torch.isnan(output).any().item())
def create_and_check_encoder_decoder_shared_weights(
self,
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
):
for model_class in [SwitchTransformersModel, SwitchTransformersForConditionalGeneration]:
torch.manual_seed(0)
model = model_class(config=config).to(torch_device).eval()
# load state dict copies weights but does not tie them
model.encoder.load_state_dict(model.decoder.state_dict(), strict=False)
torch.manual_seed(0)
tied_config = copy.deepcopy(config)
tied_config.tie_encoder_decoder = True
tied_model = model_class(config=tied_config).to(torch_device).eval()
model_result = model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
# check that outputs are equal
self.parent.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx], tied_model_result[0][0, :, random_slice_idx], atol=1e-4
)
)
# check that outputs after saving and loading are equal
with tempfile.TemporaryDirectory() as tmpdirname:
tied_model.save_pretrained(tmpdirname)
tied_model = model_class.from_pretrained(tmpdirname)
tied_model.to(torch_device)
tied_model.eval()
random_slice_idx = ids_tensor((1,), model_result[0].shape[-1]).item()
tied_model_result = tied_model(
input_ids=input_ids,
decoder_input_ids=decoder_input_ids,
attention_mask=attention_mask,
decoder_attention_mask=decoder_attention_mask,
)
# check that outputs are equal
self.parent.assertTrue(
torch.allclose(
model_result[0][0, :, random_slice_idx],
tied_model_result[0][0, :, random_slice_idx],
atol=1e-4,
)
)
def check_resize_embeddings_switch_transformers_v1_1(
self,
config,
):
prev_vocab_size = config.vocab_size
config.tie_word_embeddings = False
model = SwitchTransformersForConditionalGeneration(config=config).to(torch_device).eval()
model.resize_token_embeddings(prev_vocab_size - 10)
self.parent.assertEqual(model.get_input_embeddings().weight.shape[0], prev_vocab_size - 10)
self.parent.assertEqual(model.get_output_embeddings().weight.shape[0], prev_vocab_size - 10)
self.parent.assertEqual(model.config.vocab_size, prev_vocab_size - 10)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
decoder_input_ids,
attention_mask,
decoder_attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"use_cache": False,
"output_router_logits": False,
}
return config, inputs_dict
@require_torch
| SwitchTransformersModelTester |
python | ray-project__ray | ci/ray_ci/bisect/macos_validator.py | {
"start": 212,
"end": 762
} | class ____(Validator):
def run(self, test: Test, revision: str) -> bool:
env = os.environ.copy()
# We need to unset PYTHONPATH to avoid conflicts with the Python from the
# Bazel runfiles.
env.update({"RAYCI_BISECT_RUN": "1", "PYTHONPATH": ""})
return (
subprocess.run(
[f"{bazel_runfile(TEST_SCRIPT)}", "run_tests", test.get_target()],
cwd=os.environ["RAYCI_CHECKOUT_DIR"],
env=env,
).returncode
== 0
)
| MacOSValidator |
python | getsentry__sentry | tests/sentry/workflow_engine/models/test_data_condition.py | {
"start": 388,
"end": 452
} | class ____(IntEnum):
FOO = 1
BAR = 2
| MockDataConditionEnum |
python | plotly__plotly.py | plotly/graph_objs/table/cells/_font.py | {
"start": 233,
"end": 17063
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "table.cells"
_path_str = "table.cells.font"
_valid_props = {
"color",
"colorsrc",
"family",
"familysrc",
"lineposition",
"linepositionsrc",
"shadow",
"shadowsrc",
"size",
"sizesrc",
"style",
"stylesrc",
"textcase",
"textcasesrc",
"variant",
"variantsrc",
"weight",
"weightsrc",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def colorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `color`.
The 'colorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["colorsrc"]
@colorsrc.setter
def colorsrc(self, val):
self["colorsrc"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def familysrc(self):
"""
Sets the source reference on Chart Studio Cloud for `family`.
The 'familysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["familysrc"]
@familysrc.setter
def familysrc(self, val):
self["familysrc"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def linepositionsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`lineposition`.
The 'linepositionsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["linepositionsrc"]
@linepositionsrc.setter
def linepositionsrc(self, val):
self["linepositionsrc"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def shadowsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `shadow`.
The 'shadowsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["shadowsrc"]
@shadowsrc.setter
def shadowsrc(self, val):
self["shadowsrc"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|float|numpy.ndarray
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def sizesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `size`.
The 'sizesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["sizesrc"]
@sizesrc.setter
def sizesrc(self, val):
self["sizesrc"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def stylesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `style`.
The 'stylesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["stylesrc"]
@stylesrc.setter
def stylesrc(self, val):
self["stylesrc"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def textcasesrc(self):
"""
Sets the source reference on Chart Studio Cloud for `textcase`.
The 'textcasesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["textcasesrc"]
@textcasesrc.setter
def textcasesrc(self, val):
self["textcasesrc"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def variantsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `variant`.
The 'variantsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["variantsrc"]
@variantsrc.setter
def variantsrc(self, val):
self["variantsrc"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def weightsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `weight`.
The 'weightsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["weightsrc"]
@weightsrc.setter
def weightsrc(self, val):
self["weightsrc"] = val
@property
def _prop_descriptions(self):
return """\
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
"""
def __init__(
self,
arg=None,
color=None,
colorsrc=None,
family=None,
familysrc=None,
lineposition=None,
linepositionsrc=None,
shadow=None,
shadowsrc=None,
size=None,
sizesrc=None,
style=None,
stylesrc=None,
textcase=None,
textcasesrc=None,
variant=None,
variantsrc=None,
weight=None,
weightsrc=None,
**kwargs,
):
"""
Construct a new Font object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.table.cells.Font`
color
colorsrc
Sets the source reference on Chart Studio Cloud for
`color`.
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
familysrc
Sets the source reference on Chart Studio Cloud for
`family`.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud for
`lineposition`.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
shadowsrc
Sets the source reference on Chart Studio Cloud for
`shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud for
`size`.
style
Sets whether a font should be styled with a normal or
italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud for
`style`.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud for
`textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud for
`variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud for
`weight`.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.table.cells.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.table.cells.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("colorsrc", arg, colorsrc)
self._set_property("family", arg, family)
self._set_property("familysrc", arg, familysrc)
self._set_property("lineposition", arg, lineposition)
self._set_property("linepositionsrc", arg, linepositionsrc)
self._set_property("shadow", arg, shadow)
self._set_property("shadowsrc", arg, shadowsrc)
self._set_property("size", arg, size)
self._set_property("sizesrc", arg, sizesrc)
self._set_property("style", arg, style)
self._set_property("stylesrc", arg, stylesrc)
self._set_property("textcase", arg, textcase)
self._set_property("textcasesrc", arg, textcasesrc)
self._set_property("variant", arg, variant)
self._set_property("variantsrc", arg, variantsrc)
self._set_property("weight", arg, weight)
self._set_property("weightsrc", arg, weightsrc)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | doocs__leetcode | solution/3300-3399/3398.Smallest Substring With Identical Characters I/Solution.py | {
"start": 0,
"end": 631
} | class ____:
def minLength(self, s: str, numOps: int) -> int:
def check(m: int) -> bool:
cnt = 0
if m == 1:
t = "01"
cnt = sum(c == t[i & 1] for i, c in enumerate(s))
cnt = min(cnt, n - cnt)
else:
k = 0
for i, c in enumerate(s):
k += 1
if i == len(s) - 1 or c != s[i + 1]:
cnt += k // (m + 1)
k = 0
return cnt <= numOps
n = len(s)
return bisect_left(range(n), True, lo=1, key=check)
| Solution |
python | pytorch__pytorch | torch/_logging/_internal.py | {
"start": 1810,
"end": 5335
} | class ____:
# shorthand name to log qualified name
# Note: this only contains loggers registered
# from register_log
# e.g. "dynamo" -> "torch._dynamo"
log_alias_to_log_qnames: dict[str, list[str]] = field(default_factory=dict)
# artifact logger qualified names,
# this is populated lazily, as calls to getArtifactLogger
# currently formatted as <module>.__<artifact_name>
# e.g. "torch._dynamo.convert_frame.__guards"
artifact_log_qnames: set[str] = field(default_factory=set)
# child logs of registered logs if specified via open
# registration by the user (ie placing "torch._dynamo.output_graph" in the env var)
# these need to be tracked so their levels can be reset properly
# e.g. "torch._dynamo.output_graph"
child_log_qnames: set[str] = field(default_factory=set)
# artifact names, populated by register_artifact
# e.g. "guards"
artifact_names: set[str] = field(default_factory=set)
# Artifacts that should be visible by default in the error message
visible_artifacts: set[str] = field(default_factory=set)
# A short description of each artifact
artifact_descriptions: dict[str, str] = field(default_factory=dict)
# artifacts which are not displayed unless explicitly named in the
# settings. Ex. output_code is NOT displayed even if the inductor
# log level is set to DEBUG. It must be explicitly named in the settings
off_by_default_artifact_names: set[str] = field(default_factory=set)
# logging format string for artifacts
artifact_log_formatters: dict[str, logging.Formatter] = field(default_factory=dict)
def is_artifact(self, name):
return name in self.artifact_names
def is_log(self, alias):
return alias in self.log_alias_to_log_qnames
# register a log with an alias
def register_log(self, alias, log_qnames: Union[str, list[str]]) -> None:
if isinstance(log_qnames, str):
log_qnames = [log_qnames]
self.log_alias_to_log_qnames[alias] = log_qnames
# register an artifact name
def register_artifact_name(
self, name, description, visible, off_by_default, log_format
) -> None:
self.artifact_names.add(name)
if visible:
self.visible_artifacts.add(name)
self.artifact_descriptions[name] = description
# if off by default, don't enable it
# when log_name's log_level is set to DEBUG
if off_by_default:
self.off_by_default_artifact_names.add(name)
if log_format is not None:
self.artifact_log_formatters[name] = logging.Formatter(log_format)
# register the qualified name of an artifact log
# this is needed to know which logs need to be reset
# whenever the log_state is changed
def register_artifact_log(self, artifact_log_qname) -> None:
self.artifact_log_qnames.add(artifact_log_qname)
def register_child_log(self, log_qname) -> None:
self.child_log_qnames.add(log_qname)
# flattens all the qnames together (TODO: consider memoizing?)
def get_log_qnames(self) -> set[str]:
return set(itertools.chain.from_iterable(self.log_alias_to_log_qnames.values()))
def get_artifact_log_qnames(self):
return set(self.artifact_log_qnames)
def get_child_log_qnames(self):
return set(self.child_log_qnames)
def is_off_by_default(self, artifact_qname):
return artifact_qname in self.off_by_default_artifact_names
@dataclass
| LogRegistry |
python | django-extensions__django-extensions | django_extensions/mongodb/fields/__init__.py | {
"start": 6142,
"end": 6522
} | class ____(DateTimeField):
"""
CreationDateTimeField
By default, sets editable=False, blank=True, default=datetime.now
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault("default", datetime.datetime.now)
DateTimeField.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "DateTimeField"
| CreationDateTimeField |
python | pydantic__pydantic | pydantic/networks.py | {
"start": 25740,
"end": 25961
} | class ____(AnyUrl):
"""A type that will accept any AMQP DSN.
* User info required
* TLD not required
* Host not required
"""
_constraints = UrlConstraints(allowed_schemes=['amqp', 'amqps'])
| AmqpDsn |
python | sqlalchemy__sqlalchemy | test/engine/test_deprecations.py | {
"start": 15172,
"end": 15728
} | class ____(fixtures.TestBase):
__backend__ = True
@testing.combinations(True, False, None, argnames="implicit_returning")
def test_implicit_returning_engine_parameter(self, implicit_returning):
if implicit_returning is None:
engines.testing_engine()
else:
with assertions.expect_deprecated(ce_implicit_returning):
engines.testing_engine(
options={"implicit_returning": implicit_returning}
)
# parameter has no effect
| ImplicitReturningFlagTest |
python | mlflow__mlflow | mlflow/pyfunc/model.py | {
"start": 61469,
"end": 66208
} | class ____(PythonModel):
"""
A PythonModel wrapper for invoking an MLflow Deployments endpoint.
This class is particularly used for running evaluation against an MLflow Deployments endpoint.
"""
def __init__(self, endpoint, params):
self.endpoint = endpoint
self.params = params
def predict(self, context, model_input: pd.DataFrame | dict[str, Any] | list[dict[str, Any]]):
"""
Run prediction on the input data.
Args:
context: A :class:`~PythonModelContext` instance containing artifacts that the model
can use to perform inference.
model_input: The input data for prediction, either of the following:
- Pandas DataFrame: If the default evaluator is used, input is a DF
that contains the multiple request payloads in a single column.
- A dictionary: If the model_type is "databricks-agents" and the
Databricks RAG evaluator is used, this PythonModel can be invoked
with a single dict corresponding to the ChatCompletionsRequest schema.
- A list of dictionaries: Currently we don't have any evaluator that
gives this input format, but we keep this for future use cases and
compatibility with normal pyfunc models.
Return:
The prediction result. The return type will be consistent with the model input type,
e.g., if the input is a Pandas DataFrame, the return will be a Pandas Series.
"""
if isinstance(model_input, dict):
return self._predict_single(model_input)
elif isinstance(model_input, list) and all(isinstance(data, dict) for data in model_input):
return [self._predict_single(data) for data in model_input]
elif isinstance(model_input, pd.DataFrame):
if len(model_input.columns) != 1:
raise MlflowException(
f"The number of input columns must be 1, but got {model_input.columns}. "
"Multi-column input is not supported for evaluating an MLflow Deployments "
"endpoint. Please include the input text or payload in a single column.",
error_code=INVALID_PARAMETER_VALUE,
)
input_column = model_input.columns[0]
predictions = [self._predict_single(data) for data in model_input[input_column]]
return pd.Series(predictions)
else:
raise MlflowException(
f"Invalid input data type: {type(model_input)}. The input data must be either "
"a Pandas DataFrame, a dictionary, or a list of dictionaries containing the "
"request payloads for evaluating an MLflow Deployments endpoint.",
error_code=INVALID_PARAMETER_VALUE,
)
def _predict_single(self, data: str | dict[str, Any]) -> dict[str, Any]:
"""
Send a single prediction request to the MLflow Deployments endpoint.
Args:
data: The single input data for prediction. If the input data is a string, we will
construct the request payload from it. If the input data is a dictionary, we
will directly use it as the request payload.
Returns:
The prediction result from the MLflow Deployments endpoint as a dictionary.
"""
from mlflow.metrics.genai.model_utils import call_deployments_api, get_endpoint_type
endpoint_type = get_endpoint_type(f"endpoints:/{self.endpoint}")
if isinstance(data, str):
# If the input payload is string, MLflow needs to construct the JSON
# payload based on the endpoint type. If the endpoint type is not
# set on the endpoint, we will default to chat format.
endpoint_type = endpoint_type or "llm/v1/chat"
prediction = call_deployments_api(self.endpoint, data, self.params, endpoint_type)
elif isinstance(data, dict):
# If the input is dictionary, we assume the input is already in the
# compatible format for the endpoint.
prediction = call_deployments_api(self.endpoint, data, self.params, endpoint_type)
else:
raise MlflowException(
f"Invalid input data type: {type(data)}. The feature column of the evaluation "
"dataset must contain only strings or dictionaries containing the request "
"payload for evaluating an MLflow Deployments endpoint.",
error_code=INVALID_PARAMETER_VALUE,
)
return prediction
| ModelFromDeploymentEndpoint |
python | getsentry__sentry | src/sentry/hybridcloud/rpc/sig.py | {
"start": 819,
"end": 6729
} | class ____:
"""Represent a function's parameters and return type for serialization."""
def __init__(self, base_function: Callable[..., Any], is_instance_method: bool = False) -> None:
super().__init__()
self.base_function = base_function
self.is_instance_method = is_instance_method
self._parameter_model = self._create_parameter_model()
self._return_model = self._create_return_model()
def get_name_segments(self) -> Sequence[str]:
return (self.base_function.__name__,)
def generate_name(self, joiner: str, suffix: str | None = None) -> str:
segments: Iterable[str] = self.get_name_segments()
if suffix is not None:
segments = itertools.chain(segments, (suffix,))
return joiner.join(segments)
def _validate_type_token(self, value_label: str, token: Any) -> None:
"""Check whether a type token is usable.
Strings as type annotations, which Mypy can use if their types are imported
in an `if TYPE_CHECKING` block, can't be used for (de)serialization. Raise an
exception if the given token is one of these.
We can check only on a best-effort basis. String tokens may still be nested
in type parameters (e.g., `Optional["RpcThing"]`), which this won't catch.
Such a state would cause an exception when we attempt to use the signature
object to (de)serialize something.
"""
if isinstance(token, str):
raise SerializableFunctionSignatureSetupException(
self,
f"Invalid type token on {value_label} "
"(serializable functions must use concrete type tokens, not strings)",
)
def _create_parameter_model(self) -> type[pydantic.BaseModel]:
"""Dynamically create a Pydantic model class representing the parameters."""
def create_field(param: inspect.Parameter) -> tuple[Any, Any]:
if param.annotation is param.empty:
raise SerializableFunctionSignatureSetupException(
self, "Type annotations are required to serialize"
)
self._validate_type_token(f"parameter `{param.name}`", param.annotation)
default_value = ... if param.default is param.empty else param.default
return param.annotation, default_value
model_name = self.generate_name("__", "ParameterModel")
parameters = list(inspect.signature(self.base_function).parameters.values())
if self.is_instance_method:
parameters = parameters[1:] # exclude `self` argument
field_definitions = {p.name: create_field(p) for p in parameters}
return pydantic.create_model(model_name, **field_definitions) # type: ignore[call-overload]
_RETURN_MODEL_ATTR = "value"
def _create_return_model(self) -> type[pydantic.BaseModel]:
"""Dynamically create a Pydantic model class representing the return value.
The created model has a single attribute containing the return value. This
extra abstraction is necessary in order to have Pydantic handle generic
return annotations such as `Optional[RpcOrganization]` or `List[RpcUser]`,
where we can't directly access an RpcModel class on which to call `parse_obj`.
"""
model_name = self.generate_name("__", "ReturnModel")
return_type = inspect.signature(self.base_function).return_annotation
if return_type is None:
return_type = type(None)
self._validate_type_token("return type", return_type)
field_definitions = {self._RETURN_MODEL_ATTR: (return_type, ...)}
return pydantic.create_model(model_name, **field_definitions) # type: ignore[call-overload]
@staticmethod
def _unwrap_lazy_django_object(arg: Any) -> Any:
"""Unwrap any lazy objects before attempting to serialize.
It's possible to receive a SimpleLazyObject initialized by the Django
framework and pass it to an RPC (typically `request.user` as an RpcUser
argument). These objects are supposed to behave seamlessly like the
underlying type, but don't play nice with the reflection that Pydantic uses
to serialize. So, we manually check and force them to unwrap.
"""
if isinstance(arg, LazyObject):
return getattr(arg, "_wrapped")
else:
return arg
def serialize_arguments(self, raw_arguments: ArgumentDict) -> ArgumentDict:
raw_arguments = {
key: self._unwrap_lazy_django_object(arg) for (key, arg) in raw_arguments.items()
}
try:
model_instance = self._parameter_model(**raw_arguments)
except Exception as e:
raise SerializableFunctionValueException(self, "Could not serialize arguments") from e
return model_instance.dict()
def deserialize_arguments(self, serial_arguments: ArgumentDict) -> pydantic.BaseModel:
try:
return self._parameter_model.parse_obj(serial_arguments)
except Exception as e:
raise SerializableFunctionValueException(self, "Could not deserialize arguments") from e
def deserialize_return_value(self, value: Any) -> Any:
parsed = self._return_model.parse_obj({self._RETURN_MODEL_ATTR: value})
return getattr(parsed, self._RETURN_MODEL_ATTR)
def get_schemas(self) -> tuple[type[pydantic.BaseModel], type[pydantic.BaseModel]]:
"""Access the schema representations directly.
This generally should be needed only for reflective operations such as
checking for cross-version compatibility. Routine operations on the parameter
and return values should be done through the "serialize" and "deserialize"
methods.
"""
return self._parameter_model, self._return_model
| SerializableFunctionSignature |
python | django__django | tests/multiple_database/models.py | {
"start": 1823,
"end": 1994
} | class ____(models.Model):
name = models.CharField(max_length=100)
owner = models.ForeignKey(Person, models.CASCADE)
class Meta:
ordering = ("name",)
| Pet |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/base.py | {
"start": 11064,
"end": 12815
} | class ____(MutableMapping[str, Any]):
"""A dictionary view of dialect-level arguments in the form
<dialectname>_<argument_name>.
"""
__slots__ = ("obj",)
def __init__(self, obj: DialectKWArgs) -> None:
self.obj = obj
def _key(self, key: str) -> Tuple[str, str]:
try:
dialect, value_key = key.split("_", 1)
except ValueError as err:
raise KeyError(key) from err
else:
return dialect, value_key
def __getitem__(self, key: str) -> Any:
dialect, value_key = self._key(key)
try:
opt = self.obj.dialect_options[dialect]
except exc.NoSuchModuleError as err:
raise KeyError(key) from err
else:
return opt[value_key]
def __setitem__(self, key: str, value: Any) -> None:
try:
dialect, value_key = self._key(key)
except KeyError as err:
raise exc.ArgumentError(
"Keys must be of the form <dialectname>_<argname>"
) from err
else:
self.obj.dialect_options[dialect][value_key] = value
def __delitem__(self, key: str) -> None:
dialect, value_key = self._key(key)
del self.obj.dialect_options[dialect][value_key]
def __len__(self) -> int:
return sum(
len(args._non_defaults)
for args in self.obj.dialect_options.values()
)
def __iter__(self) -> Generator[str, None, None]:
return (
"%s_%s" % (dialect_name, value_name)
for dialect_name in self.obj.dialect_options
for value_name in self.obj.dialect_options[
dialect_name
]._non_defaults
)
| _DialectArgView |
python | pytorch__pytorch | torch/_inductor/comm_analysis.py | {
"start": 578,
"end": 3325
} | class ____(IntEnum):
VOLTA = 0
AMPERE = 1
HOPPER = 2
@functools.lru_cache
def get_gpu_type() -> NVIDIA_GPU_TYPE:
gpu_info = torch.utils.collect_env.get_gpu_info(torch.utils.collect_env.run) or ""
if "V100" in gpu_info:
return NVIDIA_GPU_TYPE.VOLTA
elif "A100" in gpu_info:
return NVIDIA_GPU_TYPE.AMPERE
elif "H100" in gpu_info:
return NVIDIA_GPU_TYPE.HOPPER
else:
# for other gpu types, assume Ampere
return NVIDIA_GPU_TYPE.AMPERE
def get_collective_type_from_kernel_name(kernel_name: str) -> NCCL_COLL:
assert kernel_name is not None
if "all_reduce" in kernel_name:
return NCCL_COLL.ALL_REDUCE
elif "all_gather" in kernel_name:
return NCCL_COLL.ALL_GATHER
elif "reduce_scatter" in kernel_name:
return NCCL_COLL.REDUCE_SCATTER
elif any(comm in kernel_name for comm in ("all_to_all", "alltoall")):
return NCCL_COLL.ALL_TO_ALL
else:
return NCCL_COLL.UNSUPPORTED
def get_collective_type(node: ir.IRNode) -> NCCL_COLL:
if not isinstance(node, ir._CollectiveKernel):
raise ValueError(f"node is not a collective kernel: {node}")
name = node.python_kernel_name
assert name is not None
return get_collective_type_from_kernel_name(name)
def get_ir_node_size_numel(size: torch.Size, fallback: int = 4096 * 4096) -> int:
numel = sympy_product(size)
if isinstance(numel, sympy.Integer):
return int(numel)
return V.graph.sizevars.size_hint(numel, fallback=fallback)
def get_fx_node_size_numel(size: torch.Size, fallback: int = 4096 * 4096) -> int:
numel = functools.reduce(operator.mul, size, 1)
result = hint_int(numel, fallback=fallback)
return result
def get_collective_input_size_bytes(node: ir.IRNode) -> int:
sz_bytes = 0
for inp in node.inputs: # type: ignore[attr-defined]
numel = get_ir_node_size_numel(inp.layout.size)
sz_bytes += numel * get_dtype_size(inp.layout.dtype)
return sz_bytes
def get_collective_group_size(node: ir.IRNode) -> int:
if isinstance(node, ir._CollectiveKernel) and not isinstance(node, ir._WaitKernel):
from torch.distributed.distributed_c10d import _get_group_size_by_name
return _get_group_size_by_name(node.constant_args[-1])
else:
raise TypeError(f"Unsupported collective type: {node}")
####################################################################################################################
# The following code and constants are adapted from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc #
####################################################################################################################
| NVIDIA_GPU_TYPE |
python | google__jax | jax/_src/interpreters/mlir.py | {
"start": 26892,
"end": 31674
} | class ____:
"""Module-wide context information for MLIR lowering."""
context: ir.Context
module: ir.Module
ip: ir.InsertionPoint
symbol_table: ir.SymbolTable
# The lowering platforms for the module. Can be more than one only when
# exporting.
platforms: Sequence[str]
# See ModuleContext.get_backend() for backend and platforms usage.
backend: xb.XlaBackend | None
axis_context: AxisContext
keepalives: list[Any]
channel_iterator: Iterator[int]
host_callbacks: list[Any]
# Keep state for the lowering of shape polymorphism
shape_poly_state: ShapePolyLoweringState
all_default_mem_kind: bool
# Cached primitive lowerings.
lowering_cache: dict[LoweringCacheKey, LoweringCacheValue]
cached_primitive_lowerings: dict[Any, func_dialect.FuncOp]
# Cached traceback information.
traceback_caches: TracebackCaches
lowering_parameters: LoweringParameters
@property
def axis_env(self) -> sharding_impls.AxisEnv:
return self.axis_context.axis_env
def __init__(
self,
*,
platforms: Sequence[str],
backend: xb.XlaBackend | None,
axis_context: AxisContext,
keepalives: list[Any],
channel_iterator: Iterator[int],
host_callbacks: list[Any],
lowering_parameters: LoweringParameters,
context: ir.Context | None = None,
module: ir.Module | None = None,
ip: ir.InsertionPoint | None = None,
symbol_table: ir.SymbolTable | None = None,
lowering_cache: None | dict[LoweringCacheKey, Any] = None,
cached_primitive_lowerings: None | dict[Any, func_dialect.FuncOp] = None,
traceback_caches: None | TracebackCaches = None,
shape_poly_state = None,
all_default_mem_kind: bool = True):
self.context = context or make_ir_context()
self.module = module or ir.Module.create(loc=ir.Location.unknown(self.context))
self.ip = ip or ir.InsertionPoint(self.module.body)
self.symbol_table = symbol_table or ir.SymbolTable(self.module.operation)
self.backend = backend
self.platforms = platforms
self.axis_context = axis_context
self.lowering_cache = ({} if lowering_cache is None else lowering_cache)
self.cached_primitive_lowerings = ({} if cached_primitive_lowerings is None
else cached_primitive_lowerings)
with self.context:
self.traceback_caches = (TracebackCaches() if traceback_caches is None
else traceback_caches)
self.channel_iterator = channel_iterator
self.keepalives = keepalives
self.host_callbacks = host_callbacks
self.shape_poly_state = (
shape_poly_state or ShapePolyLoweringState((), tuple(platforms)))
self.all_default_mem_kind = all_default_mem_kind
self.lowering_parameters = lowering_parameters
def get_backend(self, optional: bool = False) -> xb.XlaBackend | None:
if len(self.platforms) > 1:
if optional:
return None
raise NotImplementedError(
"accessing .backend in multi-lowering setting. This can occur when "
"lowering a primitive that has not been adapted to multi-platform "
"lowering")
if self.backend is not None:
if xb.canonicalize_platform(self.backend.platform) != self.platforms[0]:
if optional:
return None
raise ValueError(
"the platform for the specified backend "
f"{xb.canonicalize_platform(self.backend.platform)} is different "
f"from the lowering platform {self.platforms[0]}")
return self.backend
return xb.get_backend(self.platforms[0])
def new_channel(self) -> int:
channel = next(self.channel_iterator)
# `xla::HostCallback` requires a 16-bit channel ID.
if channel >= (1 << 16):
raise RuntimeError(
"Host callback lowering created too many channels. PjRt does not"
" support more than 65535 channels")
return channel
# Adds an IFRT host callback object to the context. A reference to these
# callbacks will be provided to IFRT during compilation so it can do things
# like serialize them and keep them alive.
def add_host_callback(self, host_callback: Any) -> None:
self.host_callbacks.append(host_callback)
# Keeps a value alive as long as the Python executable is alive.
# TODO(phawkins): this feature is problematic, because you almost certainly
# want to keep alive values as long as the underlying runtime executable is
# still alive/executing. The Python executable object may have a shorter
# lifetime, so it's highly likely any caller of this method is buggy.
def add_keepalive(self, keepalive: Any) -> None:
self.keepalives.append(keepalive)
def replace(self, **kw): return dataclasses.replace(self, **kw)
@dataclasses.dataclass
| ModuleContext |
python | scipy__scipy | scipy/sparse/linalg/_eigen/tests/test_svds.py | {
"start": 34153,
"end": 34449
} | class ____:
@pytest.mark.parametrize("solver", ['ekki', object])
def test_svds_input_validation_solver(self, solver):
message = "solver must be one of"
with pytest.raises(ValueError, match=message):
svds(np.ones((3, 4)), k=2, solver=solver, rng=0)
| Test_SVDS_once |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass3.py | {
"start": 528,
"end": 554
} | class ____(type): ...
| Meta10 |
python | Textualize__textual | src/textual/getters.py | {
"start": 1801,
"end": 4518
} | class ____(Generic[QueryType]):
"""Create a query one property.
A query one property calls [Widget.query_one][textual.dom.DOMNode.query_one] when accessed, and returns
a widget. If the widget doesn't exist, then the property will raise the same exceptions as `Widget.query_one`.
Example:
```python
from textual import getters
class MyScreen(screen):
# Note this is at the class level
output_log = getters.query_one("#output", RichLog)
def compose(self) -> ComposeResult:
with containers.Vertical():
yield RichLog(id="output")
def on_mount(self) -> None:
self.output_log.write("Screen started")
# Equivalent to the following line:
# self.query_one("#output", RichLog).write("Screen started")
```
Args:
selector: A TCSS selector, e.g. "#mywidget". Or a widget type, i.e. `Input`.
expect_type: The type of the expected widget, e.g. `Input`, if the first argument is a selector.
"""
selector: str
expect_type: type["Widget"]
@overload
def __init__(self, selector: str) -> None:
"""
Args:
selector: A TCSS selector, e.g. "#mywidget"
"""
@overload
def __init__(self, selector: type[QueryType]) -> None: ...
@overload
def __init__(self, selector: str, expect_type: type[QueryType]) -> None: ...
@overload
def __init__(
self, selector: type[QueryType], expect_type: type[QueryType]
) -> None: ...
def __init__(
self,
selector: str | type[QueryType],
expect_type: type[QueryType] | None = None,
) -> None:
if expect_type is None:
from textual.widget import Widget
self.expect_type = Widget
else:
self.expect_type = expect_type
if isinstance(selector, str):
self.selector = selector
else:
self.selector = selector.__name__
self.expect_type = selector
@overload
def __get__(
self: "query_one[QueryType]", obj: DOMNode, obj_type: type[DOMNode]
) -> QueryType: ...
@overload
def __get__(
self: "query_one[QueryType]", obj: None, obj_type: type[DOMNode]
) -> "query_one[QueryType]": ...
def __get__(
self: "query_one[QueryType]", obj: DOMNode | None, obj_type: type[DOMNode]
) -> QueryType | Widget | "query_one":
"""Get the widget matching the selector and/or type."""
if obj is None:
return self
query_node = obj.query_one(self.selector, self.expect_type)
return query_node
| query_one |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 4802,
"end": 5482
} | class ____(TypeEngineMixin):
"""A mixin that marks a type as supporting indexing operations,
such as array or JSON structures.
"""
class Comparator(TypeEngine.Comparator[_T]):
__slots__ = ()
def _setup_getitem(self, index):
raise NotImplementedError()
def __getitem__(self, index):
(
adjusted_op,
adjusted_right_expr,
result_type,
) = self._setup_getitem(index)
return self.operate(
adjusted_op, adjusted_right_expr, result_type=result_type
)
comparator_factory: _ComparatorFactory[Any] = Comparator
| Indexable |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-iterable/source_iterable/streams.py | {
"start": 16178,
"end": 16271
} | class ____(IterableExportStreamAdjustableRange):
data_field = "emailSendSkip"
| EmailSendSkip |
python | matplotlib__matplotlib | lib/matplotlib/transforms.py | {
"start": 37747,
"end": 40080
} | class ____(BboxBase):
"""
A `Bbox` that is automatically transformed by a given
transform. When either the child bounding box or transform
changes, the bounds of this bbox will update accordingly.
"""
def __init__(self, bbox, transform, **kwargs):
"""
Parameters
----------
bbox : `Bbox`
transform : `Transform`
"""
_api.check_isinstance(BboxBase, bbox=bbox)
_api.check_isinstance(Transform, transform=transform)
if transform.input_dims != 2 or transform.output_dims != 2:
raise ValueError(
"The input and output dimensions of 'transform' must be 2")
super().__init__(**kwargs)
self._bbox = bbox
self._transform = transform
self.set_children(bbox, transform)
self._points = None
__str__ = _make_str_method("_bbox", "_transform")
def get_points(self):
# docstring inherited
if self._invalid:
p = self._bbox.get_points()
# Transform all four points, then make a new bounding box
# from the result, taking care to make the orientation the
# same.
points = self._transform.transform(
[[p[0, 0], p[0, 1]],
[p[1, 0], p[0, 1]],
[p[0, 0], p[1, 1]],
[p[1, 0], p[1, 1]]])
points = np.ma.filled(points, 0.0)
xs = min(points[:, 0]), max(points[:, 0])
if p[0, 0] > p[1, 0]:
xs = xs[::-1]
ys = min(points[:, 1]), max(points[:, 1])
if p[0, 1] > p[1, 1]:
ys = ys[::-1]
self._points = np.array([
[xs[0], ys[0]],
[xs[1], ys[1]]
])
self._invalid = 0
return self._points
if DEBUG:
_get_points = get_points
def get_points(self):
points = self._get_points()
self._check(points)
return points
def contains(self, x, y):
# Docstring inherited.
return self._bbox.contains(*self._transform.inverted().transform((x, y)))
def fully_contains(self, x, y):
# Docstring inherited.
return self._bbox.fully_contains(*self._transform.inverted().transform((x, y)))
| TransformedBbox |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/django/toystore/models.py | {
"start": 4070,
"end": 4311
} | class ____(models.IntegerField):
def pre_save(self, model_instance, add):
value = getattr(model_instance, self.attname)
value += 1
setattr(model_instance, self.attname, value)
return value
| SelfModifyingField |
python | pytorch__pytorch | torch/_inductor/fuzzer.py | {
"start": 14667,
"end": 19862
} | class ____:
"""
The mapping of the combo strings to the result status after running the config fuzzer.
"""
_vals: dict[ComboType, Status]
def __repr__(self) -> str:
return f"ResultType[{self._vals}]"
def __init__(self) -> None:
self._vals = {}
def __len__(self) -> int:
return len(self._vals)
def num_ran(self) -> int:
"""
Returns how many combos actually ran (weren't skipped).
"""
ret = len(self._vals)
for status in self._vals.values():
if status == Status.SKIPPED:
ret -= 1
return ret
def set(self, combo: ComboType, status: Status) -> None:
combo = tuple(sorted(combo))
self._vals[combo] = status
def lookup(self, combo: ComboType) -> Optional[Status]:
combo = tuple(sorted(combo))
return self._vals.get(combo, None)
def keys(self) -> KeysView[ComboType]:
return self._vals.keys()
# Type that maps config strings to their default value
ConfigType = dict[str, Any]
# Callable that returns a bool
FactoryOutputType = Callable[[], bool]
# input function factory
FactoryType = Callable[[], FactoryOutputType]
# Why are some configs disabled by default? Because if we don't the fuzzer produces uninteresting results.
# It will always hone-in on these failures, even with the most basic model, making it useless for
# debugging more complex models.
#
# More explicit explanations are below:
# Out of Scope: We can't fuzz, say, the cuda version because that comes from the environment and will
# produce a failure if not aligned with env.
# Known Failure: Disabled due to known failure. Hopefully re-enable. Known failures are listed in the
# docstring of this file.
# Required: Required for the fuzzer to operate (removing caching, etc.)
# FSDP: Flag meant for FSDP that fails in non FSDP envs. Re-enable these if you're testing FSDP.
# Typing: disabled because the type annotation of the config isn't constrained enough to produce
# meaningful fuzz values. These could be improved.
# Timing: These take too long to compile, feel free to enable.
MODULE_DEFAULTS: dict[str, ConfigType] = {
"torch._inductor.config": {
"force_disable_caches": True, # Required
"cpp.cxx": DEFAULT, # Out of Scope
"TYPE_CHECKING": DEFAULT, # Not a config
"max_autotune_pointwise": DEFAULT, # Timing
"max_autotune_gemm": DEFAULT, # Timing, re-enable when autotune speed improvements merged.
"max_autotune_gemm_backends": DEFAULT, # Timing
"max_autotune_conv_backends": DEFAULT, # Timing
"max_autotune_gemm_search_space": DEFAULT, # Timing
"max_autotune_subproc_result_timeout_seconds": DEFAULT, # Timing
"max_autotune_subproc_graceful_timeout_seconds": DEFAULT, # Timing
"max_autotune_subproc_terminate_timeout_seconds": DEFAULT, # Timing
"aot_inductor.presets": DEFAULT, # Typing
"cuda.arch": DEFAULT, # Out of Scope
"cuda.version": DEFAULT, # Out of Scope
"cuda.cutlass_dir": DEFAULT, # Out of Scope
"cuda.cuda_cxx": DEFAULT, # Out of Scope
"rocm.arch": DEFAULT, # Out of Scope
"rocm.ck_supported_arch": DEFAULT, # Out of Scope
"rocm.ck_dir": DEFAULT, # Out of Scope
"rocm.rocm_home": DEFAULT, # Out of Scope
"check_stack_no_cycles_TESTING_ONLY": DEFAULT, # Testing
"sleep_sec_TESTING_ONLY": DEFAULT, # Testing
"triton.inject_relu_bug_TESTING_ONLY": DEFAULT, # Testing
"reorder_for_compute_comm_overlap": DEFAULT, # FSDP
"enabled_metric_tables": DEFAULT, # Typing
"triton.debug_sync_graph": DEFAULT, # Known Failure
"triton.debug_sync_kernel": DEFAULT, # Known Failure
"profile_bandwidth_regex": DEFAULT, # Known Failure
"disable_cpp_codegen": DEFAULT, # Known Failure
"trace.save_real_tensors": DEFAULT, # Known Failure
"pre_grad_fusion_options": DEFAULT, # Typing
"external_matmul": DEFAULT, # Typing, need to add this to type overrides or type exemplars.
"test_configs.autotune_choice_name_regex": DEFAULT, # Typing
"test_configs.autotune_choice_desc_regex": DEFAULT, # Typing
"cpp.enable_floating_point_contract_flag": DEFAULT, # Typing
"post_grad_custom_pre_pass": DEFAULT, # Typing
"post_grad_custom_post_pass": DEFAULT, # Typing
"reorder_for_compute_comm_overlap_passes": DEFAULT, # Typing
"joint_custom_post_pass": DEFAULT, # Typing
"joint_custom_pre_pass": DEFAULT, # Typing
"pre_grad_custom_pass": DEFAULT, # Typing
"custom_partitioner_fn": DEFAULT, # Typing
"inductor_choices_class": DEFAULT, # Typing
},
"torch._dynamo.config": {
"traceable_tensor_subclasses": DEFAULT, # Typing
"nontraceable_tensor_subclasses": DEFAULT, # Typing
"compiled_autograd_kwargs_override": DEFAULT, # Typing
"fail_on_recompile_limit_hit": DEFAULT, # fails in combo with suppress_errors
"suppress_errors": DEFAULT,
"caching_precompile": False, # Required
},
}
| ResultType |
python | django__django | tests/check_framework/test_files.py | {
"start": 176,
"end": 1173
} | class ____(SimpleTestCase):
def test_file_upload_temp_dir(self):
tests = [
None,
"",
Path.cwd(),
str(Path.cwd()),
]
for setting in tests:
with self.subTest(setting), self.settings(FILE_UPLOAD_TEMP_DIR=setting):
self.assertEqual(check_setting_file_upload_temp_dir(None), [])
def test_file_upload_temp_dir_nonexistent(self):
for setting in ["nonexistent", Path("nonexistent")]:
with self.subTest(setting), self.settings(FILE_UPLOAD_TEMP_DIR=setting):
self.assertEqual(
check_setting_file_upload_temp_dir(None),
[
Error(
"The FILE_UPLOAD_TEMP_DIR setting refers to the "
"nonexistent directory 'nonexistent'.",
id="files.E001",
),
],
)
| FilesCheckTests |
python | Netflix__metaflow | test/unit/inheritance/flows/mutator_with_base_config_base.py | {
"start": 2271,
"end": 2535
} | class ____(BaseA):
"""
Middle class with mutator that uses config from BaseA.
The mutator reads mutator_config from BaseA and injects parameters accordingly.
"""
middle_param = Parameter("middle_param", help="Middle parameter", default=100)
| BaseB |
python | django__django | django/contrib/gis/db/models/functions.py | {
"start": 20718,
"end": 21085
} | class ____(Scale):
def as_sqlite(self, compiler, connection, **extra_context):
clone = self.copy()
if len(self.source_expressions) < 4:
# Always provide the z parameter for ST_Translate
clone.source_expressions.append(Value(0))
return super(Translate, clone).as_sqlite(compiler, connection, **extra_context)
| Translate |
python | google__flatbuffers | python/flatbuffers/builder.py | {
"start": 1240,
"end": 1382
} | class ____(RuntimeError):
"""Error caused by using a Builder to write Object data when not inside
an Object.
"""
pass
| IsNotNestedError |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.