language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | mlflow__mlflow | mlflow/gateway/providers/cohere.py | {
"start": 11157,
"end": 16164
} | class ____(BaseProvider):
NAME = "Cohere"
CONFIG_TYPE = CohereConfig
def __init__(self, config: EndpointConfig) -> None:
super().__init__(config)
warnings.warn(
"Cohere provider is deprecated and will be removed in a future MLflow version.",
category=FutureWarning,
stacklevel=2,
)
if config.model.config is None or not isinstance(config.model.config, CohereConfig):
raise TypeError(f"Unexpected config type {config.model.config}")
self.cohere_config: CohereConfig = config.model.config
@property
def headers(self) -> dict[str, str]:
return {"Authorization": f"Bearer {self.cohere_config.cohere_api_key}"}
@property
def base_url(self) -> str:
return "https://api.cohere.ai/v1"
@property
def adapter_class(self) -> type[ProviderAdapter]:
return CohereAdapter
def get_endpoint_url(self, route_type: str) -> str:
if route_type == "llm/v1/chat":
return f"{self.base_url}/chat"
elif route_type == "llm/v1/completions":
return f"{self.base_url}/generate"
elif route_type == "llm/v1/embeddings":
return f"{self.base_url}/embed"
else:
raise ValueError(f"Invalid route type {route_type}")
async def _request(self, path: str, payload: dict[str, Any]) -> dict[str, Any]:
return await send_request(
headers=self.headers,
base_url=self.base_url,
path=path,
payload=payload,
)
def _stream_request(self, path: str, payload: dict[str, Any]) -> AsyncGenerator[bytes, None]:
return send_stream_request(
headers=self.headers,
base_url=self.base_url,
path=path,
payload=payload,
)
async def chat_stream(
self, payload: chat.RequestPayload
) -> AsyncIterable[chat.StreamResponsePayload]:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
stream = self._stream_request(
"chat",
{
"model": self.config.model.name,
**CohereAdapter.chat_streaming_to_model(payload, self.config),
},
)
async for chunk in stream:
if not chunk:
continue
resp = json.loads(chunk)
if resp["event_type"] == "stream-start":
continue
yield CohereAdapter.model_to_chat_streaming(resp, self.config)
async def chat(self, payload: chat.RequestPayload) -> chat.ResponsePayload:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
resp = await self._request(
"chat",
{
"model": self.config.model.name,
**CohereAdapter.chat_to_model(payload, self.config),
},
)
return CohereAdapter.model_to_chat(resp, self.config)
async def completions_stream(
self, payload: completions.RequestPayload
) -> AsyncIterable[completions.StreamResponsePayload]:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
stream = self._stream_request(
"generate",
{
"model": self.config.model.name,
**CohereAdapter.completions_streaming_to_model(payload, self.config),
},
)
async for chunk in stream:
if not chunk:
continue
resp = json.loads(chunk)
yield CohereAdapter.model_to_completions_streaming(resp, self.config)
async def completions(self, payload: completions.RequestPayload) -> completions.ResponsePayload:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
resp = await self._request(
"generate",
{
"model": self.config.model.name,
**CohereAdapter.completions_to_model(payload, self.config),
},
)
return CohereAdapter.model_to_completions(resp, self.config)
async def embeddings(self, payload: embeddings.RequestPayload) -> embeddings.ResponsePayload:
from fastapi.encoders import jsonable_encoder
payload = jsonable_encoder(payload, exclude_none=True)
self.check_for_model_field(payload)
resp = await self._request(
"embed",
{
"model": self.config.model.name,
**CohereAdapter.embeddings_to_model(payload, self.config),
},
)
return CohereAdapter.model_to_embeddings(resp, self.config)
| CohereProvider |
python | donnemartin__interactive-coding-challenges | sorting_searching/search_sorted_matrix/test_search_sorted_matrix.py | {
"start": 18,
"end": 638
} | class ____(unittest.TestCase):
def test_find_val(self):
matrix = [[20, 40, 63, 80],
[30, 50, 80, 90],
[40, 60, 110, 110],
[50, 65, 105, 150]]
sorted_matrix = SortedMatrix()
self.assertRaises(TypeError, sorted_matrix.find_val, None, None)
self.assertEqual(sorted_matrix.find_val(matrix, 1000), None)
self.assertEqual(sorted_matrix.find_val(matrix, 60), (2, 1))
print('Success: test_find_val')
def main():
test = TestSortedMatrix()
test.test_find_val()
if __name__ == '__main__':
main()
| TestSortedMatrix |
python | keon__algorithms | tests/test_maths.py | {
"start": 5744,
"end": 6352
} | class ____(unittest.TestCase):
"""[summary]
Test for the file modular_Exponential.py
Arguments:
unittest {[type]} -- [description]
"""
def test_modular_inverse(self):
# checks if x * x_inv == 1 (mod m)
self.assertEqual(1, 2 * modular_inverse.modular_inverse(2, 19) % 19)
self.assertEqual(1, 53 * modular_inverse.modular_inverse(53, 91) % 91)
self.assertEqual(1, 2 * modular_inverse.modular_inverse(2, 1000000007)
% 1000000007)
self.assertRaises(ValueError, modular_inverse.modular_inverse, 2, 20)
| TestModularInverse |
python | getsentry__sentry | tests/sentry/models/test_groupowner.py | {
"start": 225,
"end": 5761
} | class ____(TestCase):
def setUp(self) -> None:
super().setUp()
self.timestamp = before_now(minutes=10)
self.c = self.create_commit(
project=self.project,
repo=self.create_repo(self.project),
)
self.lookup_kwargs = {
"group_id": self.group.id,
"type": GroupOwnerType.SUSPECT_COMMIT.value,
"user_id": self.user.id,
"project_id": self.project.id,
"organization_id": self.organization.id,
}
self.scm_extra_lookup = {"context__asjsonb__commitId": self.c.id}
self.defaults = {
"date_added": self.timestamp,
}
self.scm_context_defaults = {
"commitId": self.c.id,
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
}
self.rb_context_defaults = {
"suspectCommitStrategy": SuspectCommitStrategy.RELEASE_BASED,
}
def _make_scm_lookup_kwargs(self) -> None:
"""
scm_based lookup_kwargs include an additional filter: context__contains,
release_based group owners don't have this field in context.
"""
self.lookup_kwargs.update(self.scm_extra_lookup)
def test_update_or_create_and_preserve_context_create_then_update_scm(self) -> None:
assert GroupOwner.objects.filter(**self.lookup_kwargs).exists() is False
self._make_scm_lookup_kwargs()
obj, created = GroupOwner.objects.update_or_create_and_preserve_context(
lookup_kwargs=self.lookup_kwargs,
defaults=self.defaults,
context_defaults=self.scm_context_defaults,
)
assert GroupOwner.objects.filter(**self.lookup_kwargs).exists() is True
assert created is True
assert obj.group_id == self.group.id
assert obj.type == GroupOwnerType.SUSPECT_COMMIT.value
assert obj.user_id == self.user.id
assert obj.project_id == self.project.id
assert obj.organization_id == self.organization.id
assert obj.date_added == self.timestamp
assert obj.context == self.scm_context_defaults
now = timezone.now()
obj, created = GroupOwner.objects.update_or_create_and_preserve_context(
lookup_kwargs=self.lookup_kwargs,
defaults={
"date_added": now,
},
context_defaults=self.scm_context_defaults,
)
assert created is False
assert obj.group_id == self.group.id
assert obj.type == GroupOwnerType.SUSPECT_COMMIT.value
assert obj.user_id == self.user.id
assert obj.project_id == self.project.id
assert obj.organization_id == self.organization.id
assert obj.date_added == now
assert obj.context == self.scm_context_defaults
def test_update_or_create_and_preserve_context_update_scm(self) -> None:
original_obj = GroupOwner.objects.create(
context={
"commitId": self.c.id,
"something": "else",
},
**self.lookup_kwargs,
**self.defaults,
)
self._make_scm_lookup_kwargs()
obj, created = GroupOwner.objects.update_or_create_and_preserve_context(
lookup_kwargs=self.lookup_kwargs,
defaults=self.defaults,
context_defaults=self.scm_context_defaults,
)
assert created is False
assert original_obj.id == obj.id
assert obj.group_id == self.group.id
assert obj.type == GroupOwnerType.SUSPECT_COMMIT.value
assert obj.user_id == self.user.id
assert obj.project_id == self.project.id
assert obj.organization_id == self.organization.id
assert obj.date_added == self.timestamp
assert obj.context == {
"commitId": self.c.id,
"something": "else",
"suspectCommitStrategy": SuspectCommitStrategy.SCM_BASED,
}
def test_update_or_create_and_preserve_context_create_then_update_rb(self) -> None:
assert GroupOwner.objects.filter(**self.lookup_kwargs).exists() is False
obj, created = GroupOwner.objects.update_or_create_and_preserve_context(
lookup_kwargs=self.lookup_kwargs,
defaults=self.defaults,
context_defaults=self.rb_context_defaults,
)
assert GroupOwner.objects.filter(**self.lookup_kwargs).exists() is True
assert created is True
assert obj.group_id == self.group.id
assert obj.type == GroupOwnerType.SUSPECT_COMMIT.value
assert obj.user_id == self.user.id
assert obj.project_id == self.project.id
assert obj.organization_id == self.organization.id
assert obj.date_added == self.timestamp
assert obj.context == self.rb_context_defaults
now = timezone.now()
obj, created = GroupOwner.objects.update_or_create_and_preserve_context(
lookup_kwargs=self.lookup_kwargs,
defaults={
"date_added": now,
},
context_defaults=self.rb_context_defaults,
)
assert created is False
assert obj.group_id == self.group.id
assert obj.type == GroupOwnerType.SUSPECT_COMMIT.value
assert obj.user_id == self.user.id
assert obj.project_id == self.project.id
assert obj.organization_id == self.organization.id
assert obj.date_added == now
assert obj.context == self.rb_context_defaults
| GroupOwnerTest |
python | pypa__packaging | src/packaging/pylock.py | {
"start": 8238,
"end": 8402
} | class ____(PylockValidationError):
"""Raised when encountering an unsupported `lock_version`."""
@dataclass(frozen=True, init=False)
| PylockUnsupportedVersionError |
python | google__jax | tests/lax_numpy_test.py | {
"start": 251505,
"end": 252560
} | class ____(jtu.JaxTestCase):
@parameterized.parameters(itertools.chain.from_iterable(
jtu.sample_product_testcases([dict(name=name)],
arg_dtypes=_dtypes_for_ufunc(name))
for name in _all_numpy_ufuncs()
))
def testUfuncInputTypes(self, name, arg_dtypes):
if name in ['arctanh', 'atanh'] and jnp.issubdtype(arg_dtypes[0], jnp.complexfloating):
self.skipTest("np.arctanh & jnp.arctanh have mismatched NaNs for complex input.")
jnp_op = getattr(jnp, name)
np_op = getattr(np, name)
np_op = jtu.ignore_warning(category=RuntimeWarning,
message="(divide by zero|invalid value)")(np_op)
args_maker = lambda: tuple(np.ones(1, dtype=dtype) for dtype in arg_dtypes)
with jtu.strict_promotion_if_dtypes_match(arg_dtypes):
# large tol comes from the fact that numpy returns float16 in places
# that jnp returns float32. e.g. np.cos(np.uint8(0))
self._CheckAgainstNumpy(np_op, jnp_op, args_maker, check_dtypes=False, tol=1E-2)
| NumpyUfuncTests |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/base.py | {
"start": 2926,
"end": 90853
} | class ____(ConnectionEventsTarget, inspection.Inspectable["Inspector"]):
"""Provides high-level functionality for a wrapped DB-API connection.
The :class:`_engine.Connection` object is procured by calling the
:meth:`_engine.Engine.connect` method of the :class:`_engine.Engine`
object, and provides services for execution of SQL statements as well
as transaction control.
The Connection object is **not** thread-safe. While a Connection can be
shared among threads using properly synchronized access, it is still
possible that the underlying DBAPI connection may not support shared
access between threads. Check the DBAPI documentation for details.
The Connection object represents a single DBAPI connection checked out
from the connection pool. In this state, the connection pool has no
affect upon the connection, including its expiration or timeout state.
For the connection pool to properly manage connections, connections
should be returned to the connection pool (i.e. ``connection.close()``)
whenever the connection is not in use.
.. index::
single: thread safety; Connection
"""
dialect: Dialect
dispatch: dispatcher[ConnectionEventsTarget]
_sqla_logger_namespace = "sqlalchemy.engine.Connection"
# used by sqlalchemy.engine.util.TransactionalContext
_trans_context_manager: Optional[TransactionalContext] = None
# legacy as of 2.0, should be eventually deprecated and
# removed. was used in the "pre_ping" recipe that's been in the docs
# a long time
should_close_with_result = False
_dbapi_connection: Optional[PoolProxiedConnection]
_execution_options: _ExecuteOptions
_transaction: Optional[RootTransaction]
_nested_transaction: Optional[NestedTransaction]
def __init__(
self,
engine: Engine,
connection: Optional[PoolProxiedConnection] = None,
_has_events: Optional[bool] = None,
_allow_revalidate: bool = True,
_allow_autobegin: bool = True,
):
"""Construct a new Connection."""
self.engine = engine
self.dialect = dialect = engine.dialect
if connection is None:
try:
self._dbapi_connection = engine.raw_connection()
except dialect.loaded_dbapi.Error as err:
Connection._handle_dbapi_exception_noconnection(
err, dialect, engine
)
raise
else:
self._dbapi_connection = connection
self._transaction = self._nested_transaction = None
self.__savepoint_seq = 0
self.__in_begin = False
self.__can_reconnect = _allow_revalidate
self._allow_autobegin = _allow_autobegin
self._echo = self.engine._should_log_info()
if _has_events is None:
# if _has_events is sent explicitly as False,
# then don't join the dispatch of the engine; we don't
# want to handle any of the engine's events in that case.
self.dispatch = self.dispatch._join(engine.dispatch)
self._has_events = _has_events or (
_has_events is None and engine._has_events
)
self._execution_options = engine._execution_options
if self._has_events or self.engine._has_events:
self.dispatch.engine_connect(self)
# this can be assigned differently via
# characteristics.LoggingTokenCharacteristic
_message_formatter: Any = None
def _log_info(self, message: str, *arg: Any, **kw: Any) -> None:
fmt = self._message_formatter
if fmt:
message = fmt(message)
if log.STACKLEVEL:
kw["stacklevel"] = 1 + log.STACKLEVEL_OFFSET
self.engine.logger.info(message, *arg, **kw)
def _log_debug(self, message: str, *arg: Any, **kw: Any) -> None:
fmt = self._message_formatter
if fmt:
message = fmt(message)
if log.STACKLEVEL:
kw["stacklevel"] = 1 + log.STACKLEVEL_OFFSET
self.engine.logger.debug(message, *arg, **kw)
@property
def _schema_translate_map(self) -> Optional[SchemaTranslateMapType]:
schema_translate_map: Optional[SchemaTranslateMapType] = (
self._execution_options.get("schema_translate_map", None)
)
return schema_translate_map
def schema_for_object(self, obj: HasSchemaAttr) -> Optional[str]:
"""Return the schema name for the given schema item taking into
account current schema translate map.
"""
name = obj.schema
schema_translate_map: Optional[SchemaTranslateMapType] = (
self._execution_options.get("schema_translate_map", None)
)
if (
schema_translate_map
and name in schema_translate_map
and obj._use_schema_map
):
return schema_translate_map[name]
else:
return name
def __enter__(self) -> Connection:
return self
def __exit__(self, type_: Any, value: Any, traceback: Any) -> None:
self.close()
@overload
def execution_options(
self,
*,
compiled_cache: Optional[CompiledCacheType] = ...,
logging_token: str = ...,
isolation_level: IsolationLevel = ...,
no_parameters: bool = False,
stream_results: bool = False,
max_row_buffer: int = ...,
yield_per: int = ...,
insertmanyvalues_page_size: int = ...,
schema_translate_map: Optional[SchemaTranslateMapType] = ...,
preserve_rowcount: bool = False,
driver_column_names: bool = False,
**opt: Any,
) -> Connection: ...
@overload
def execution_options(self, **opt: Any) -> Connection: ...
def execution_options(self, **opt: Any) -> Connection:
r"""Set non-SQL options for the connection which take effect
during execution.
This method modifies this :class:`_engine.Connection` **in-place**;
the return value is the same :class:`_engine.Connection` object
upon which the method is called. Note that this is in contrast
to the behavior of the ``execution_options`` methods on other
objects such as :meth:`_engine.Engine.execution_options` and
:meth:`_sql.Executable.execution_options`. The rationale is that many
such execution options necessarily modify the state of the base
DBAPI connection in any case so there is no feasible means of
keeping the effect of such an option localized to a "sub" connection.
.. versionchanged:: 2.0 The :meth:`_engine.Connection.execution_options`
method, in contrast to other objects with this method, modifies
the connection in-place without creating copy of it.
As discussed elsewhere, the :meth:`_engine.Connection.execution_options`
method accepts any arbitrary parameters including user defined names.
All parameters given are consumable in a number of ways including
by using the :meth:`_engine.Connection.get_execution_options` method.
See the examples at :meth:`_sql.Executable.execution_options`
and :meth:`_engine.Engine.execution_options`.
The keywords that are currently recognized by SQLAlchemy itself
include all those listed under :meth:`.Executable.execution_options`,
as well as others that are specific to :class:`_engine.Connection`.
:param compiled_cache: Available on: :class:`_engine.Connection`,
:class:`_engine.Engine`.
A dictionary where :class:`.Compiled` objects
will be cached when the :class:`_engine.Connection`
compiles a clause
expression into a :class:`.Compiled` object. This dictionary will
supersede the statement cache that may be configured on the
:class:`_engine.Engine` itself. If set to None, caching
is disabled, even if the engine has a configured cache size.
Note that the ORM makes use of its own "compiled" caches for
some operations, including flush operations. The caching
used by the ORM internally supersedes a cache dictionary
specified here.
:param logging_token: Available on: :class:`_engine.Connection`,
:class:`_engine.Engine`, :class:`_sql.Executable`.
Adds the specified string token surrounded by brackets in log
messages logged by the connection, i.e. the logging that's enabled
either via the :paramref:`_sa.create_engine.echo` flag or via the
``logging.getLogger("sqlalchemy.engine")`` logger. This allows a
per-connection or per-sub-engine token to be available which is
useful for debugging concurrent connection scenarios.
.. versionadded:: 1.4.0b2
.. seealso::
:ref:`dbengine_logging_tokens` - usage example
:paramref:`_sa.create_engine.logging_name` - adds a name to the
name used by the Python logger object itself.
:param isolation_level: Available on: :class:`_engine.Connection`,
:class:`_engine.Engine`.
Set the transaction isolation level for the lifespan of this
:class:`_engine.Connection` object.
Valid values include those string
values accepted by the :paramref:`_sa.create_engine.isolation_level`
parameter passed to :func:`_sa.create_engine`. These levels are
semi-database specific; see individual dialect documentation for
valid levels.
The isolation level option applies the isolation level by emitting
statements on the DBAPI connection, and **necessarily affects the
original Connection object overall**. The isolation level will remain
at the given setting until explicitly changed, or when the DBAPI
connection itself is :term:`released` to the connection pool, i.e. the
:meth:`_engine.Connection.close` method is called, at which time an
event handler will emit additional statements on the DBAPI connection
in order to revert the isolation level change.
.. note:: The ``isolation_level`` execution option may only be
established before the :meth:`_engine.Connection.begin` method is
called, as well as before any SQL statements are emitted which
would otherwise trigger "autobegin", or directly after a call to
:meth:`_engine.Connection.commit` or
:meth:`_engine.Connection.rollback`. A database cannot change the
isolation level on a transaction in progress.
.. note:: The ``isolation_level`` execution option is implicitly
reset if the :class:`_engine.Connection` is invalidated, e.g. via
the :meth:`_engine.Connection.invalidate` method, or if a
disconnection error occurs. The new connection produced after the
invalidation will **not** have the selected isolation level
re-applied to it automatically.
.. seealso::
:ref:`dbapi_autocommit`
:meth:`_engine.Connection.get_isolation_level`
- view current actual level
:param no_parameters: Available on: :class:`_engine.Connection`,
:class:`_sql.Executable`.
When ``True``, if the final parameter
list or dictionary is totally empty, will invoke the
statement on the cursor as ``cursor.execute(statement)``,
not passing the parameter collection at all.
Some DBAPIs such as psycopg2 and mysql-python consider
percent signs as significant only when parameters are
present; this option allows code to generate SQL
containing percent signs (and possibly other characters)
that is neutral regarding whether it's executed by the DBAPI
or piped into a script that's later invoked by
command line tools.
:param stream_results: Available on: :class:`_engine.Connection`,
:class:`_sql.Executable`.
Indicate to the dialect that results should be "streamed" and not
pre-buffered, if possible. For backends such as PostgreSQL, MySQL
and MariaDB, this indicates the use of a "server side cursor" as
opposed to a client side cursor. Other backends such as that of
Oracle Database may already use server side cursors by default.
The usage of
:paramref:`_engine.Connection.execution_options.stream_results` is
usually combined with setting a fixed number of rows to to be fetched
in batches, to allow for efficient iteration of database rows while
at the same time not loading all result rows into memory at once;
this can be configured on a :class:`_engine.Result` object using the
:meth:`_engine.Result.yield_per` method, after execution has
returned a new :class:`_engine.Result`. If
:meth:`_engine.Result.yield_per` is not used,
the :paramref:`_engine.Connection.execution_options.stream_results`
mode of operation will instead use a dynamically sized buffer
which buffers sets of rows at a time, growing on each batch
based on a fixed growth size up until a limit which may
be configured using the
:paramref:`_engine.Connection.execution_options.max_row_buffer`
parameter.
When using the ORM to fetch ORM mapped objects from a result,
:meth:`_engine.Result.yield_per` should always be used with
:paramref:`_engine.Connection.execution_options.stream_results`,
so that the ORM does not fetch all rows into new ORM objects at once.
For typical use, the
:paramref:`_engine.Connection.execution_options.yield_per` execution
option should be preferred, which sets up both
:paramref:`_engine.Connection.execution_options.stream_results` and
:meth:`_engine.Result.yield_per` at once. This option is supported
both at a core level by :class:`_engine.Connection` as well as by the
ORM :class:`_engine.Session`; the latter is described at
:ref:`orm_queryguide_yield_per`.
.. seealso::
:ref:`engine_stream_results` - background on
:paramref:`_engine.Connection.execution_options.stream_results`
:paramref:`_engine.Connection.execution_options.max_row_buffer`
:paramref:`_engine.Connection.execution_options.yield_per`
:ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel`
describing the ORM version of ``yield_per``
:param max_row_buffer: Available on: :class:`_engine.Connection`,
:class:`_sql.Executable`. Sets a maximum
buffer size to use when the
:paramref:`_engine.Connection.execution_options.stream_results`
execution option is used on a backend that supports server side
cursors. The default value if not specified is 1000.
.. seealso::
:paramref:`_engine.Connection.execution_options.stream_results`
:ref:`engine_stream_results`
:param yield_per: Available on: :class:`_engine.Connection`,
:class:`_sql.Executable`. Integer value applied which will
set the :paramref:`_engine.Connection.execution_options.stream_results`
execution option and invoke :meth:`_engine.Result.yield_per`
automatically at once. Allows equivalent functionality as
is present when using this parameter with the ORM.
.. versionadded:: 1.4.40
.. seealso::
:ref:`engine_stream_results` - background and examples
on using server side cursors with Core.
:ref:`orm_queryguide_yield_per` - in the :ref:`queryguide_toplevel`
describing the ORM version of ``yield_per``
:param insertmanyvalues_page_size: Available on: :class:`_engine.Connection`,
:class:`_engine.Engine`. Number of rows to format into an
INSERT statement when the statement uses "insertmanyvalues" mode,
which is a paged form of bulk insert that is used for many backends
when using :term:`executemany` execution typically in conjunction
with RETURNING. Defaults to 1000. May also be modified on a
per-engine basis using the
:paramref:`_sa.create_engine.insertmanyvalues_page_size` parameter.
.. versionadded:: 2.0
.. seealso::
:ref:`engine_insertmanyvalues`
:param schema_translate_map: Available on: :class:`_engine.Connection`,
:class:`_engine.Engine`, :class:`_sql.Executable`.
A dictionary mapping schema names to schema names, that will be
applied to the :paramref:`_schema.Table.schema` element of each
:class:`_schema.Table`
encountered when SQL or DDL expression elements
are compiled into strings; the resulting schema name will be
converted based on presence in the map of the original name.
.. seealso::
:ref:`schema_translating`
:param preserve_rowcount: Boolean; when True, the ``cursor.rowcount``
attribute will be unconditionally memoized within the result and
made available via the :attr:`.CursorResult.rowcount` attribute.
Normally, this attribute is only preserved for UPDATE and DELETE
statements. Using this option, the DBAPIs rowcount value can
be accessed for other kinds of statements such as INSERT and SELECT,
to the degree that the DBAPI supports these statements. See
:attr:`.CursorResult.rowcount` for notes regarding the behavior
of this attribute.
.. versionadded:: 2.0.28
.. seealso::
:meth:`_engine.Engine.execution_options`
:meth:`.Executable.execution_options`
:meth:`_engine.Connection.get_execution_options`
:ref:`orm_queryguide_execution_options` - documentation on all
ORM-specific execution options
:param driver_column_names: When True, the returned
:class:`_engine.CursorResult` will use the column names as written in
``cursor.description`` to set up the keys for the result set,
including the names of columns for the :class:`_engine.Row` object as
well as the dictionary keys when using :attr:`_engine.Row._mapping`.
On backends that use "name normalization" such as Oracle Database to
correct for lower case names being converted to all uppercase, this
behavior is turned off and the raw UPPERCASE names in
cursor.description will be present.
.. versionadded:: 2.1
""" # noqa
if self._has_events or self.engine._has_events:
self.dispatch.set_connection_execution_options(self, opt)
self._execution_options = self._execution_options.union(opt)
self.dialect.set_connection_execution_options(self, opt)
return self
def get_execution_options(self) -> _ExecuteOptions:
"""Get the non-SQL options which will take effect during execution.
.. seealso::
:meth:`_engine.Connection.execution_options`
"""
return self._execution_options
@property
def _still_open_and_dbapi_connection_is_valid(self) -> bool:
pool_proxied_connection = self._dbapi_connection
return (
pool_proxied_connection is not None
and pool_proxied_connection.is_valid
)
@property
def closed(self) -> bool:
"""Return True if this connection is closed."""
return self._dbapi_connection is None and not self.__can_reconnect
@property
def invalidated(self) -> bool:
"""Return True if this connection was invalidated.
This does not indicate whether or not the connection was
invalidated at the pool level, however
"""
# prior to 1.4, "invalid" was stored as a state independent of
# "closed", meaning an invalidated connection could be "closed",
# the _dbapi_connection would be None and closed=True, yet the
# "invalid" flag would stay True. This meant that there were
# three separate states (open/valid, closed/valid, closed/invalid)
# when there is really no reason for that; a connection that's
# "closed" does not need to be "invalid". So the state is now
# represented by the two facts alone.
pool_proxied_connection = self._dbapi_connection
return pool_proxied_connection is None and self.__can_reconnect
@property
def connection(self) -> PoolProxiedConnection:
"""The underlying DB-API connection managed by this Connection.
This is a SQLAlchemy connection-pool proxied connection
which then has the attribute
:attr:`_pool._ConnectionFairy.dbapi_connection` that refers to the
actual driver connection.
.. seealso::
:ref:`dbapi_connections`
"""
if self._dbapi_connection is None:
try:
return self._revalidate_connection()
except (exc.PendingRollbackError, exc.ResourceClosedError):
raise
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
else:
return self._dbapi_connection
def get_isolation_level(self) -> IsolationLevel:
"""Return the current **actual** isolation level that's present on
the database within the scope of this connection.
This attribute will perform a live SQL operation against the database
in order to procure the current isolation level, so the value returned
is the actual level on the underlying DBAPI connection regardless of
how this state was set. This will be one of the four actual isolation
modes ``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE READ``,
``SERIALIZABLE``. It will **not** include the ``AUTOCOMMIT`` isolation
level setting. Third party dialects may also feature additional
isolation level settings.
.. note:: This method **will not report** on the ``AUTOCOMMIT``
isolation level, which is a separate :term:`dbapi` setting that's
independent of **actual** isolation level. When ``AUTOCOMMIT`` is
in use, the database connection still has a "traditional" isolation
mode in effect, that is typically one of the four values
``READ UNCOMMITTED``, ``READ COMMITTED``, ``REPEATABLE READ``,
``SERIALIZABLE``.
Compare to the :attr:`_engine.Connection.default_isolation_level`
accessor which returns the isolation level that is present on the
database at initial connection time.
.. seealso::
:attr:`_engine.Connection.default_isolation_level`
- view default level
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
"""
dbapi_connection = self.connection.dbapi_connection
assert dbapi_connection is not None
try:
return self.dialect.get_isolation_level(dbapi_connection)
except BaseException as e:
self._handle_dbapi_exception(e, None, None, None, None)
@property
def default_isolation_level(self) -> Optional[IsolationLevel]:
"""The initial-connection time isolation level associated with the
:class:`_engine.Dialect` in use.
This value is independent of the
:paramref:`.Connection.execution_options.isolation_level` and
:paramref:`.Engine.execution_options.isolation_level` execution
options, and is determined by the :class:`_engine.Dialect` when the
first connection is created, by performing a SQL query against the
database for the current isolation level before any additional commands
have been emitted.
Calling this accessor does not invoke any new SQL queries.
.. seealso::
:meth:`_engine.Connection.get_isolation_level`
- view current actual isolation level
:paramref:`_sa.create_engine.isolation_level`
- set per :class:`_engine.Engine` isolation level
:paramref:`.Connection.execution_options.isolation_level`
- set per :class:`_engine.Connection` isolation level
"""
return self.dialect.default_isolation_level
def _invalid_transaction(self) -> NoReturn:
raise exc.PendingRollbackError(
"Can't reconnect until invalid %stransaction is rolled "
"back. Please rollback() fully before proceeding"
% ("savepoint " if self._nested_transaction is not None else ""),
code="8s2b",
)
def _revalidate_connection(self) -> PoolProxiedConnection:
if self.__can_reconnect and self.invalidated:
if self._transaction is not None:
self._invalid_transaction()
self._dbapi_connection = self.engine.raw_connection()
return self._dbapi_connection
raise exc.ResourceClosedError("This Connection is closed")
@property
def info(self) -> _InfoType:
"""Info dictionary associated with the underlying DBAPI connection
referred to by this :class:`_engine.Connection`, allowing user-defined
data to be associated with the connection.
The data here will follow along with the DBAPI connection including
after it is returned to the connection pool and used again
in subsequent instances of :class:`_engine.Connection`.
"""
return self.connection.info
def invalidate(self, exception: Optional[BaseException] = None) -> None:
"""Invalidate the underlying DBAPI connection associated with
this :class:`_engine.Connection`.
An attempt will be made to close the underlying DBAPI connection
immediately; however if this operation fails, the error is logged
but not raised. The connection is then discarded whether or not
close() succeeded.
Upon the next use (where "use" typically means using the
:meth:`_engine.Connection.execute` method or similar),
this :class:`_engine.Connection` will attempt to
procure a new DBAPI connection using the services of the
:class:`_pool.Pool` as a source of connectivity (e.g.
a "reconnection").
If a transaction was in progress (e.g. the
:meth:`_engine.Connection.begin` method has been called) when
:meth:`_engine.Connection.invalidate` method is called, at the DBAPI
level all state associated with this transaction is lost, as
the DBAPI connection is closed. The :class:`_engine.Connection`
will not allow a reconnection to proceed until the
:class:`.Transaction` object is ended, by calling the
:meth:`.Transaction.rollback` method; until that point, any attempt at
continuing to use the :class:`_engine.Connection` will raise an
:class:`~sqlalchemy.exc.InvalidRequestError`.
This is to prevent applications from accidentally
continuing an ongoing transactional operations despite the
fact that the transaction has been lost due to an
invalidation.
The :meth:`_engine.Connection.invalidate` method,
just like auto-invalidation,
will at the connection pool level invoke the
:meth:`_events.PoolEvents.invalidate` event.
:param exception: an optional ``Exception`` instance that's the
reason for the invalidation. is passed along to event handlers
and logging functions.
.. seealso::
:ref:`pool_connection_invalidation`
"""
if self.invalidated:
return
if self.closed:
raise exc.ResourceClosedError("This Connection is closed")
if self._still_open_and_dbapi_connection_is_valid:
pool_proxied_connection = self._dbapi_connection
assert pool_proxied_connection is not None
pool_proxied_connection.invalidate(exception)
self._dbapi_connection = None
def detach(self) -> None:
"""Detach the underlying DB-API connection from its connection pool.
E.g.::
with engine.connect() as conn:
conn.detach()
conn.execute(text("SET search_path TO schema1, schema2"))
# work with connection
# connection is fully closed (since we used "with:", can
# also call .close())
This :class:`_engine.Connection` instance will remain usable.
When closed
(or exited from a context manager context as above),
the DB-API connection will be literally closed and not
returned to its originating pool.
This method can be used to insulate the rest of an application
from a modified state on a connection (such as a transaction
isolation level or similar).
"""
if self.closed:
raise exc.ResourceClosedError("This Connection is closed")
pool_proxied_connection = self._dbapi_connection
if pool_proxied_connection is None:
raise exc.InvalidRequestError(
"Can't detach an invalidated Connection"
)
pool_proxied_connection.detach()
def _autobegin(self) -> None:
if self._allow_autobegin and not self.__in_begin:
self.begin()
def begin(self) -> RootTransaction:
"""Begin a transaction prior to autobegin occurring.
E.g.::
with engine.connect() as conn:
with conn.begin() as trans:
conn.execute(table.insert(), {"username": "sandy"})
The returned object is an instance of :class:`_engine.RootTransaction`.
This object represents the "scope" of the transaction,
which completes when either the :meth:`_engine.Transaction.rollback`
or :meth:`_engine.Transaction.commit` method is called; the object
also works as a context manager as illustrated above.
The :meth:`_engine.Connection.begin` method begins a
transaction that normally will be begun in any case when the connection
is first used to execute a statement. The reason this method might be
used would be to invoke the :meth:`_events.ConnectionEvents.begin`
event at a specific time, or to organize code within the scope of a
connection checkout in terms of context managed blocks, such as::
with engine.connect() as conn:
with conn.begin():
conn.execute(...)
conn.execute(...)
with conn.begin():
conn.execute(...)
conn.execute(...)
The above code is not fundamentally any different in its behavior than
the following code which does not use
:meth:`_engine.Connection.begin`; the below style is known
as "commit as you go" style::
with engine.connect() as conn:
conn.execute(...)
conn.execute(...)
conn.commit()
conn.execute(...)
conn.execute(...)
conn.commit()
From a database point of view, the :meth:`_engine.Connection.begin`
method does not emit any SQL or change the state of the underlying
DBAPI connection in any way; the Python DBAPI does not have any
concept of explicit transaction begin.
.. seealso::
:ref:`tutorial_working_with_transactions` - in the
:ref:`unified_tutorial`
:meth:`_engine.Connection.begin_nested` - use a SAVEPOINT
:meth:`_engine.Connection.begin_twophase` -
use a two phase /XID transaction
:meth:`_engine.Engine.begin` - context manager available from
:class:`_engine.Engine`
"""
if self._transaction is None:
self._transaction = RootTransaction(self)
return self._transaction
else:
raise exc.InvalidRequestError(
"This connection has already initialized a SQLAlchemy "
"Transaction() object via begin() or autobegin; can't "
"call begin() here unless rollback() or commit() "
"is called first."
)
def begin_nested(self) -> NestedTransaction:
"""Begin a nested transaction (i.e. SAVEPOINT) and return a transaction
handle that controls the scope of the SAVEPOINT.
E.g.::
with engine.begin() as connection:
with connection.begin_nested():
connection.execute(table.insert(), {"username": "sandy"})
The returned object is an instance of
:class:`_engine.NestedTransaction`, which includes transactional
methods :meth:`_engine.NestedTransaction.commit` and
:meth:`_engine.NestedTransaction.rollback`; for a nested transaction,
these methods correspond to the operations "RELEASE SAVEPOINT <name>"
and "ROLLBACK TO SAVEPOINT <name>". The name of the savepoint is local
to the :class:`_engine.NestedTransaction` object and is generated
automatically. Like any other :class:`_engine.Transaction`, the
:class:`_engine.NestedTransaction` may be used as a context manager as
illustrated above which will "release" or "rollback" corresponding to
if the operation within the block were successful or raised an
exception.
Nested transactions require SAVEPOINT support in the underlying
database, else the behavior is undefined. SAVEPOINT is commonly used to
run operations within a transaction that may fail, while continuing the
outer transaction. E.g.::
from sqlalchemy import exc
with engine.begin() as connection:
trans = connection.begin_nested()
try:
connection.execute(table.insert(), {"username": "sandy"})
trans.commit()
except exc.IntegrityError: # catch for duplicate username
trans.rollback() # rollback to savepoint
# outer transaction continues
connection.execute(...)
If :meth:`_engine.Connection.begin_nested` is called without first
calling :meth:`_engine.Connection.begin` or
:meth:`_engine.Engine.begin`, the :class:`_engine.Connection` object
will "autobegin" the outer transaction first. This outer transaction
may be committed using "commit-as-you-go" style, e.g.::
with engine.connect() as connection: # begin() wasn't called
with connection.begin_nested(): # will auto-"begin()" first
connection.execute(...)
# savepoint is released
connection.execute(...)
# explicitly commit outer transaction
connection.commit()
# can continue working with connection here
.. versionchanged:: 2.0
:meth:`_engine.Connection.begin_nested` will now participate
in the connection "autobegin" behavior that is new as of
2.0 / "future" style connections in 1.4.
.. seealso::
:meth:`_engine.Connection.begin`
:ref:`session_begin_nested` - ORM support for SAVEPOINT
"""
if self._transaction is None:
self._autobegin()
return NestedTransaction(self)
def begin_twophase(self, xid: Optional[Any] = None) -> TwoPhaseTransaction:
"""Begin a two-phase or XA transaction and return a transaction
handle.
The returned object is an instance of :class:`.TwoPhaseTransaction`,
which in addition to the methods provided by
:class:`.Transaction`, also provides a
:meth:`~.TwoPhaseTransaction.prepare` method.
:param xid: the two phase transaction id. If not supplied, a
random id will be generated.
.. seealso::
:meth:`_engine.Connection.begin`
:meth:`_engine.Connection.begin_twophase`
"""
if self._transaction is not None:
raise exc.InvalidRequestError(
"Cannot start a two phase transaction when a transaction "
"is already in progress."
)
if xid is None:
xid = self.engine.dialect.create_xid()
return TwoPhaseTransaction(self, xid)
def commit(self) -> None:
"""Commit the transaction that is currently in progress.
This method commits the current transaction if one has been started.
If no transaction was started, the method has no effect, assuming
the connection is in a non-invalidated state.
A transaction is begun on a :class:`_engine.Connection` automatically
whenever a statement is first executed, or when the
:meth:`_engine.Connection.begin` method is called.
.. note:: The :meth:`_engine.Connection.commit` method only acts upon
the primary database transaction that is linked to the
:class:`_engine.Connection` object. It does not operate upon a
SAVEPOINT that would have been invoked from the
:meth:`_engine.Connection.begin_nested` method; for control of a
SAVEPOINT, call :meth:`_engine.NestedTransaction.commit` on the
:class:`_engine.NestedTransaction` that is returned by the
:meth:`_engine.Connection.begin_nested` method itself.
"""
if self._transaction:
self._transaction.commit()
def rollback(self) -> None:
"""Roll back the transaction that is currently in progress.
This method rolls back the current transaction if one has been started.
If no transaction was started, the method has no effect. If a
transaction was started and the connection is in an invalidated state,
the transaction is cleared using this method.
A transaction is begun on a :class:`_engine.Connection` automatically
whenever a statement is first executed, or when the
:meth:`_engine.Connection.begin` method is called.
.. note:: The :meth:`_engine.Connection.rollback` method only acts
upon the primary database transaction that is linked to the
:class:`_engine.Connection` object. It does not operate upon a
SAVEPOINT that would have been invoked from the
:meth:`_engine.Connection.begin_nested` method; for control of a
SAVEPOINT, call :meth:`_engine.NestedTransaction.rollback` on the
:class:`_engine.NestedTransaction` that is returned by the
:meth:`_engine.Connection.begin_nested` method itself.
"""
if self._transaction:
self._transaction.rollback()
    def recover_twophase(self) -> List[Any]:
        """Return a list of recoverable two-phase transaction ids, as
        reported by the dialect's ``do_recover_twophase`` routine."""
        return self.engine.dialect.do_recover_twophase(self)
    def rollback_prepared(self, xid: Any, recover: bool = False) -> None:
        """Roll back a prepared two-phase transaction.

        :param xid: the two-phase transaction id.
        :param recover: passed through to the dialect's two-phase rollback
         routine; presumably indicates a recovered xid — TODO confirm.
        """
        self.engine.dialect.do_rollback_twophase(self, xid, recover=recover)
    def commit_prepared(self, xid: Any, recover: bool = False) -> None:
        """Commit a prepared two-phase transaction.

        :param xid: the two-phase transaction id.
        :param recover: passed through to the dialect's two-phase commit
         routine; presumably indicates a recovered xid — TODO confirm.
        """
        self.engine.dialect.do_commit_twophase(self, xid, recover=recover)
def in_transaction(self) -> bool:
"""Return True if a transaction is in progress."""
return self._transaction is not None and self._transaction.is_active
def in_nested_transaction(self) -> bool:
"""Return True if a transaction is in progress."""
return (
self._nested_transaction is not None
and self._nested_transaction.is_active
)
def _is_autocommit_isolation(self) -> bool:
opt_iso = self._execution_options.get("isolation_level", None)
return bool(
opt_iso == "AUTOCOMMIT"
or (
opt_iso is None
and self.engine.dialect._on_connect_isolation_level
== "AUTOCOMMIT"
)
)
def _get_required_transaction(self) -> RootTransaction:
trans = self._transaction
if trans is None:
raise exc.InvalidRequestError("connection is not in a transaction")
return trans
def _get_required_nested_transaction(self) -> NestedTransaction:
trans = self._nested_transaction
if trans is None:
raise exc.InvalidRequestError(
"connection is not in a nested transaction"
)
return trans
    def get_transaction(self) -> Optional[RootTransaction]:
        """Return the current root transaction in progress, if any.

        .. versionadded:: 1.4

        """
        # None when no transaction has been begun or autobegun
        return self._transaction
    def get_nested_transaction(self) -> Optional[NestedTransaction]:
        """Return the current nested transaction in progress, if any.

        .. versionadded:: 1.4

        """
        # None when no SAVEPOINT transaction is present
        return self._nested_transaction
    def _begin_impl(self, transaction: RootTransaction) -> None:
        # Internal BEGIN for a RootTransaction: log, fire the begin event,
        # then invoke the dialect's do_begin() on the raw connection.
        # NOTE: the ``transaction`` argument itself is not used here.
        if self._echo:
            if self._is_autocommit_isolation():
                self._log_info(
                    "BEGIN (implicit; DBAPI should not BEGIN due to "
                    "autocommit mode)"
                )
            else:
                self._log_info("BEGIN (implicit)")
        # guard so that _autobegin() does not re-enter while we're here
        self.__in_begin = True
        if self._has_events or self.engine._has_events:
            self.dispatch.begin(self)
        try:
            self.engine.dialect.do_begin(self.connection)
        except BaseException as e:
            self._handle_dbapi_exception(e, None, None, None, None)
        finally:
            self.__in_begin = False
    def _rollback_impl(self) -> None:
        # Internal ROLLBACK: fire the rollback event, then emit the DBAPI
        # rollback, but only while the connection is still open and its
        # DBAPI connection is valid.
        if self._has_events or self.engine._has_events:
            self.dispatch.rollback(self)
        if self._still_open_and_dbapi_connection_is_valid:
            if self._echo:
                if self._is_autocommit_isolation():
                    if self.dialect.skip_autocommit_rollback:
                        self._log_info(
                            "ROLLBACK will be skipped by "
                            "skip_autocommit_rollback"
                        )
                    else:
                        self._log_info(
                            "ROLLBACK using DBAPI connection.rollback(); "
                            "set skip_autocommit_rollback to prevent fully"
                        )
                else:
                    self._log_info("ROLLBACK")
            try:
                self.engine.dialect.do_rollback(self.connection)
            except BaseException as e:
                self._handle_dbapi_exception(e, None, None, None, None)
    def _commit_impl(self) -> None:
        # Internal COMMIT: fire the commit event, log (noting commit is a
        # no-op under autocommit isolation), then emit the DBAPI commit
        # via the dialect.
        if self._has_events or self.engine._has_events:
            self.dispatch.commit(self)
        if self._echo:
            if self._is_autocommit_isolation():
                self._log_info(
                    "COMMIT using DBAPI connection.commit(), "
                    "has no effect due to autocommit mode"
                )
            else:
                self._log_info("COMMIT")
        try:
            self.engine.dialect.do_commit(self.connection)
        except BaseException as e:
            self._handle_dbapi_exception(e, None, None, None, None)
    def _savepoint_impl(self, name: Optional[str] = None) -> str:
        # Internal SAVEPOINT: fire the event, auto-generate a name from a
        # per-connection counter when none is given, then emit via the
        # dialect.  Returns the savepoint name actually used.
        if self._has_events or self.engine._has_events:
            self.dispatch.savepoint(self, name)
        if name is None:
            self.__savepoint_seq += 1
            name = "sa_savepoint_%s" % self.__savepoint_seq
        self.engine.dialect.do_savepoint(self, name)
        return name
    def _rollback_to_savepoint_impl(self, name: str) -> None:
        # Internal ROLLBACK TO SAVEPOINT; skipped when the connection or
        # its DBAPI connection is no longer usable.
        if self._has_events or self.engine._has_events:
            self.dispatch.rollback_savepoint(self, name, None)
        if self._still_open_and_dbapi_connection_is_valid:
            self.engine.dialect.do_rollback_to_savepoint(self, name)
    def _release_savepoint_impl(self, name: str) -> None:
        # Internal RELEASE SAVEPOINT; unlike rollback-to-savepoint, this
        # is emitted unconditionally.
        if self._has_events or self.engine._has_events:
            self.dispatch.release_savepoint(self, name, None)
        self.engine.dialect.do_release_savepoint(self, name)
    def _begin_twophase_impl(self, transaction: TwoPhaseTransaction) -> None:
        # Internal two-phase BEGIN: log, fire the event with the xid, then
        # invoke the dialect, guarding re-entry via __in_begin.
        if self._echo:
            self._log_info("BEGIN TWOPHASE (implicit)")
        if self._has_events or self.engine._has_events:
            self.dispatch.begin_twophase(self, transaction.xid)
        self.__in_begin = True
        try:
            self.engine.dialect.do_begin_twophase(self, transaction.xid)
        except BaseException as e:
            self._handle_dbapi_exception(e, None, None, None, None)
        finally:
            self.__in_begin = False
    def _prepare_twophase_impl(self, xid: Any) -> None:
        # Internal two-phase PREPARE; only valid when the current
        # transaction is a TwoPhaseTransaction.
        if self._has_events or self.engine._has_events:
            self.dispatch.prepare_twophase(self, xid)
        assert isinstance(self._transaction, TwoPhaseTransaction)
        try:
            self.engine.dialect.do_prepare_twophase(self, xid)
        except BaseException as e:
            self._handle_dbapi_exception(e, None, None, None, None)
    def _rollback_twophase_impl(self, xid: Any, is_prepared: bool) -> None:
        # Internal two-phase ROLLBACK; emitted only while the connection
        # is still open and its DBAPI connection valid.
        if self._has_events or self.engine._has_events:
            self.dispatch.rollback_twophase(self, xid, is_prepared)
        if self._still_open_and_dbapi_connection_is_valid:
            assert isinstance(self._transaction, TwoPhaseTransaction)
            try:
                self.engine.dialect.do_rollback_twophase(
                    self, xid, is_prepared
                )
            except BaseException as e:
                self._handle_dbapi_exception(e, None, None, None, None)
    def _commit_twophase_impl(self, xid: Any, is_prepared: bool) -> None:
        # Internal two-phase COMMIT; unlike two-phase rollback, this is
        # not conditional on connection validity.
        if self._has_events or self.engine._has_events:
            self.dispatch.commit_twophase(self, xid, is_prepared)
        assert isinstance(self._transaction, TwoPhaseTransaction)
        try:
            self.engine.dialect.do_commit_twophase(self, xid, is_prepared)
        except BaseException as e:
            self._handle_dbapi_exception(e, None, None, None, None)
    def close(self) -> None:
        """Close this :class:`_engine.Connection`.

        This results in a release of the underlying database
        resources, that is, the DBAPI connection referenced
        internally. The DBAPI connection is typically restored
        back to the connection-holding :class:`_pool.Pool` referenced
        by the :class:`_engine.Engine` that produced this
        :class:`_engine.Connection`. Any transactional state present on
        the DBAPI connection is also unconditionally released via
        the DBAPI connection's ``rollback()`` method, regardless
        of any :class:`.Transaction` object that may be
        outstanding with regards to this :class:`_engine.Connection`.

        This has the effect of also calling :meth:`_engine.Connection.rollback`
        if any transaction is in place.

        After :meth:`_engine.Connection.close` is called, the
        :class:`_engine.Connection` is permanently in a closed state,
        and will allow no further operations.
        """
        if self._transaction:
            # ending the Transaction handles the transactional reset;
            # remember so we don't reset a second time below
            self._transaction.close()
            skip_reset = True
        else:
            skip_reset = False
        if self._dbapi_connection is not None:
            conn = self._dbapi_connection
            # as we just closed the transaction, close the connection
            # pool connection without doing an additional reset
            if skip_reset:
                cast("_ConnectionFairy", conn)._close_special(
                    transaction_reset=True
                )
            else:
                conn.close()
            # There is a slight chance that conn.close() may have
            # triggered an invalidation here in which case
            # _dbapi_connection would already be None, however usually
            # it will be non-None here and in a "closed" state.
            self._dbapi_connection = None
        # permanently closed: _revalidate_connection() will now refuse
        self.__can_reconnect = False
    @overload
    def scalar(
        self,
        statement: TypedReturnsRows[_T],
        parameters: Optional[_CoreSingleExecuteParams] = None,
        *,
        execution_options: Optional[CoreExecuteOptionsParameter] = None,
    ) -> Optional[_T]: ...
    @overload
    def scalar(
        self,
        statement: Executable,
        parameters: Optional[_CoreSingleExecuteParams] = None,
        *,
        execution_options: Optional[CoreExecuteOptionsParameter] = None,
    ) -> Any: ...
    def scalar(
        self,
        statement: Executable,
        parameters: Optional[_CoreSingleExecuteParams] = None,
        *,
        execution_options: Optional[CoreExecuteOptionsParameter] = None,
    ) -> Any:
        r"""Executes a SQL statement construct and returns a scalar object.

        This method is shorthand for invoking the
        :meth:`_engine.Result.scalar` method after invoking the
        :meth:`_engine.Connection.execute` method.  Parameters are
        equivalent.

        :return: a scalar Python value representing the first column of the
         first row returned.

        """
        distilled_parameters = _distill_params_20(parameters)
        try:
            # EAFP: any object implementing the executable protocol
            # carries _execute_on_scalar
            meth = statement._execute_on_scalar
        except AttributeError as err:
            raise exc.ObjectNotExecutableError(statement) from err
        else:
            return meth(
                self,
                distilled_parameters,
                execution_options or NO_OPTIONS,
            )
@overload
def scalars(
self,
statement: TypedReturnsRows[_T],
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> ScalarResult[_T]: ...
@overload
def scalars(
self,
statement: Executable,
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> ScalarResult[Any]: ...
def scalars(
self,
statement: Executable,
parameters: Optional[_CoreAnyExecuteParams] = None,
*,
execution_options: Optional[CoreExecuteOptionsParameter] = None,
) -> ScalarResult[Any]:
"""Executes and returns a scalar result set, which yields scalar values
from the first column of each row.
This method is equivalent to calling :meth:`_engine.Connection.execute`
to receive a :class:`_result.Result` object, then invoking the
:meth:`_result.Result.scalars` method to produce a
:class:`_result.ScalarResult` instance.
:return: a :class:`_result.ScalarResult`
.. versionadded:: 1.4.24
"""
return self.execute(
statement, parameters, execution_options=execution_options
).scalars()
    @overload
    def execute(
        self,
        statement: TypedReturnsRows[Unpack[_Ts]],
        parameters: Optional[_CoreAnyExecuteParams] = None,
        *,
        execution_options: Optional[CoreExecuteOptionsParameter] = None,
    ) -> CursorResult[Unpack[_Ts]]: ...
    @overload
    def execute(
        self,
        statement: Executable,
        parameters: Optional[_CoreAnyExecuteParams] = None,
        *,
        execution_options: Optional[CoreExecuteOptionsParameter] = None,
    ) -> CursorResult[Unpack[TupleAny]]: ...
    def execute(
        self,
        statement: Executable,
        parameters: Optional[_CoreAnyExecuteParams] = None,
        *,
        execution_options: Optional[CoreExecuteOptionsParameter] = None,
    ) -> CursorResult[Unpack[TupleAny]]:
        r"""Executes a SQL statement construct and returns a
        :class:`_engine.CursorResult`.

        :param statement: The statement to be executed.  This is always
         an object that is in both the :class:`_expression.ClauseElement` and
         :class:`_expression.Executable` hierarchies, including:

         * :class:`_expression.Select`
         * :class:`_expression.Insert`, :class:`_expression.Update`,
           :class:`_expression.Delete`
         * :class:`_expression.TextClause` and
           :class:`_expression.TextualSelect`
         * :class:`_schema.DDL` and objects which inherit from
           :class:`_schema.ExecutableDDLElement`

        :param parameters: parameters which will be bound into the statement.
         This may be either a dictionary of parameter names to values,
         or a mutable sequence (e.g. a list) of dictionaries.  When a
         list of dictionaries is passed, the underlying statement execution
         will make use of the DBAPI ``cursor.executemany()`` method.
         When a single dictionary is passed, the DBAPI ``cursor.execute()``
         method will be used.

        :param execution_options: optional dictionary of execution options,
         which will be associated with the statement execution.  This
         dictionary can provide a subset of the options that are accepted
         by :meth:`_engine.Connection.execution_options`.

        :return: a :class:`_engine.Result` object.

        """
        distilled_parameters = _distill_params_20(parameters)
        try:
            # EAFP: any object implementing the executable protocol
            # carries _execute_on_connection
            meth = statement._execute_on_connection
        except AttributeError as err:
            raise exc.ObjectNotExecutableError(statement) from err
        else:
            return meth(
                self,
                distilled_parameters,
                execution_options or NO_OPTIONS,
            )
def _execute_function(
self,
func: FunctionElement[Any],
distilled_parameters: _CoreMultiExecuteParams,
execution_options: CoreExecuteOptionsParameter,
) -> CursorResult[Unpack[TupleAny]]:
"""Execute a sql.FunctionElement object."""
return self._execute_clauseelement(
func.select(), distilled_parameters, execution_options
)
    def _execute_default(
        self,
        default: DefaultGenerator,
        distilled_parameters: _CoreMultiExecuteParams,
        execution_options: CoreExecuteOptionsParameter,
    ) -> Any:
        """Execute a schema.ColumnDefault object."""
        exec_opts = self._execution_options.merge_with(execution_options)
        event_multiparams: Optional[_CoreMultiExecuteParams]
        event_params: Optional[_CoreAnyExecuteParams]
        # note for event handlers, the "distilled parameters" which is always
        # a list of dicts is broken out into separate "multiparams" and
        # "params" collections, which allows the handler to distinguish
        # between an executemany and execute style set of parameters.
        if self._has_events or self.engine._has_events:
            (
                default,
                distilled_parameters,
                event_multiparams,
                event_params,
            ) = self._invoke_before_exec_event(
                default, distilled_parameters, exec_opts
            )
        else:
            event_multiparams = event_params = None
        try:
            conn = self._dbapi_connection
            if conn is None:
                # connection was invalidated; attempt a reconnection
                conn = self._revalidate_connection()
            dialect = self.dialect
            ctx = dialect.execution_ctx_cls._init_default(
                dialect, self, conn, exec_opts
            )
        except (exc.PendingRollbackError, exc.ResourceClosedError):
            # already meaningful to the caller; don't re-wrap
            raise
        except BaseException as e:
            self._handle_dbapi_exception(e, None, None, None, None)
        ret = ctx._exec_default(None, default, None)
        if self._has_events or self.engine._has_events:
            self.dispatch.after_execute(
                self,
                default,
                event_multiparams,
                event_params,
                exec_opts,
                ret,
            )
        return ret
    def _execute_ddl(
        self,
        ddl: ExecutableDDLElement,
        distilled_parameters: _CoreMultiExecuteParams,
        execution_options: CoreExecuteOptionsParameter,
    ) -> CursorResult[Unpack[TupleAny]]:
        """Execute a schema.DDL object."""
        # options attached to the DDL element take lowest precedence,
        # then connection-level options, then per-call options
        exec_opts = ddl._execution_options.merge_with(
            self._execution_options, execution_options
        )
        event_multiparams: Optional[_CoreMultiExecuteParams]
        event_params: Optional[_CoreSingleExecuteParams]
        if self._has_events or self.engine._has_events:
            (
                ddl,
                distilled_parameters,
                event_multiparams,
                event_params,
            ) = self._invoke_before_exec_event(
                ddl, distilled_parameters, exec_opts
            )
        else:
            event_multiparams = event_params = None
        schema_translate_map = exec_opts.get("schema_translate_map", None)
        dialect = self.dialect
        # DDL is compiled directly (no compiled-statement cache involved)
        compiled = ddl.compile(
            dialect=dialect, schema_translate_map=schema_translate_map
        )
        ret = self._execute_context(
            dialect,
            dialect.execution_ctx_cls._init_ddl,
            compiled,
            None,
            exec_opts,
            compiled,
        )
        if self._has_events or self.engine._has_events:
            self.dispatch.after_execute(
                self,
                ddl,
                event_multiparams,
                event_params,
                exec_opts,
                ret,
            )
        return ret
    def _invoke_before_exec_event(
        self,
        elem: Any,
        distilled_params: _CoreMultiExecuteParams,
        execution_options: _ExecuteOptions,
    ) -> Tuple[
        Any,
        _CoreMultiExecuteParams,
        _CoreMultiExecuteParams,
        _CoreSingleExecuteParams,
    ]:
        # Run before_execute event handlers.  Parameters are presented to
        # handlers as either "multiparams" (executemany style) or "params"
        # (single-execute style); whatever the handlers return is then
        # re-distilled into the canonical list-of-dicts form.
        event_multiparams: _CoreMultiExecuteParams
        event_params: _CoreSingleExecuteParams
        if len(distilled_params) == 1:
            event_multiparams, event_params = [], distilled_params[0]
        else:
            event_multiparams, event_params = distilled_params, {}
        for fn in self.dispatch.before_execute:
            elem, event_multiparams, event_params = fn(
                self,
                elem,
                event_multiparams,
                event_params,
                execution_options,
            )
        if event_multiparams:
            distilled_params = list(event_multiparams)
            if event_params:
                # ambiguous: handler may not populate both forms
                raise exc.InvalidRequestError(
                    "Event handler can't return non-empty multiparams "
                    "and params at the same time"
                )
        elif event_params:
            distilled_params = [event_params]
        else:
            distilled_params = []
        return elem, distilled_params, event_multiparams, event_params
    def _execute_clauseelement(
        self,
        elem: Executable,
        distilled_parameters: _CoreMultiExecuteParams,
        execution_options: CoreExecuteOptionsParameter,
    ) -> CursorResult[Unpack[TupleAny]]:
        """Execute a sql.ClauseElement object."""
        # element-level options take lowest precedence, then
        # connection-level, then per-call options
        exec_opts = elem._execution_options.merge_with(
            self._execution_options, execution_options
        )
        has_events = self._has_events or self.engine._has_events
        if has_events:
            (
                elem,
                distilled_parameters,
                event_multiparams,
                event_params,
            ) = self._invoke_before_exec_event(
                elem, distilled_parameters, exec_opts
            )
        if distilled_parameters:
            # ensure we don't retain a link to the view object for keys()
            # which links to the values, which we don't want to cache
            keys = sorted(distilled_parameters[0])
            for_executemany = len(distilled_parameters) > 1
        else:
            keys = []
            for_executemany = False
        dialect = self.dialect
        schema_translate_map = exec_opts.get("schema_translate_map", None)
        compiled_cache: Optional[CompiledCacheType] = exec_opts.get(
            "compiled_cache", self.engine._compiled_cache
        )
        # compile (or fetch from the compiled-statement cache) the element
        compiled_sql, extracted_params, param_dict, cache_hit = (
            elem._compile_w_cache(
                dialect=dialect,
                compiled_cache=compiled_cache,
                column_keys=keys,
                for_executemany=for_executemany,
                schema_translate_map=schema_translate_map,
                linting=self.dialect.compiler_linting | compiler.WARN_LINTING,
            )
        )
        ret = self._execute_context(
            dialect,
            dialect.execution_ctx_cls._init_compiled,
            compiled_sql,
            distilled_parameters,
            exec_opts,
            compiled_sql,
            distilled_parameters,
            elem,
            extracted_params,
            cache_hit=cache_hit,
            param_dict=param_dict,
        )
        if has_events:
            self.dispatch.after_execute(
                self,
                elem,
                event_multiparams,
                event_params,
                exec_opts,
                ret,
            )
        return ret
    def exec_driver_sql(
        self,
        statement: str,
        parameters: Optional[_DBAPIAnyExecuteParams] = None,
        execution_options: Optional[CoreExecuteOptionsParameter] = None,
    ) -> CursorResult[Unpack[TupleAny]]:
        r"""Executes a string SQL statement on the DBAPI cursor directly,
        without any SQL compilation steps.

        This can be used to pass any string directly to the
        ``cursor.execute()`` method of the DBAPI in use.

        :param statement: The statement str to be executed.  Bound parameters
         must use the underlying DBAPI's paramstyle, such as "qmark",
         "pyformat", "format", etc.

        :param parameters: represent bound parameter values to be used in the
         execution.  The format is one of: a dictionary of named parameters,
         a tuple of positional parameters, or a list containing either
         dictionaries or tuples for multiple-execute support.

        :param execution_options: optional dictionary of execution options,
         merged with those already present on this
         :class:`_engine.Connection`.

        :return: a :class:`_engine.CursorResult`.

        E.g. multiple dictionaries::

            conn.exec_driver_sql(
                "INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
                [{"id": 1, "value": "v1"}, {"id": 2, "value": "v2"}],
            )

        Single dictionary::

            conn.exec_driver_sql(
                "INSERT INTO table (id, value) VALUES (%(id)s, %(value)s)",
                dict(id=1, value="v1"),
            )

        Single tuple::

            conn.exec_driver_sql(
                "INSERT INTO table (id, value) VALUES (?, ?)", (1, "v1")
            )

        .. note:: The :meth:`_engine.Connection.exec_driver_sql` method does
            not participate in the
            :meth:`_events.ConnectionEvents.before_execute` and
            :meth:`_events.ConnectionEvents.after_execute` events.  To
            intercept calls to :meth:`_engine.Connection.exec_driver_sql`, use
            :meth:`_events.ConnectionEvents.before_cursor_execute` and
            :meth:`_events.ConnectionEvents.after_cursor_execute`.

        .. seealso::

            :pep:`249`

        """
        # raw params: tuples/dicts in DBAPI paramstyle, not SQLAlchemy's
        distilled_parameters = _distill_raw_params(parameters)
        exec_opts = self._execution_options.merge_with(execution_options)
        dialect = self.dialect
        ret = self._execute_context(
            dialect,
            dialect.execution_ctx_cls._init_statement,
            statement,
            None,
            exec_opts,
            statement,
            distilled_parameters,
        )
        return ret
    def _execute_context(
        self,
        dialect: Dialect,
        constructor: Callable[..., ExecutionContext],
        statement: Union[str, Compiled],
        parameters: Optional[_AnyMultiExecuteParams],
        execution_options: _ExecuteOptions,
        *args: Any,
        **kw: Any,
    ) -> CursorResult[Unpack[TupleAny]]:
        """Create an :class:`.ExecutionContext` and execute, returning
        a :class:`_engine.CursorResult`."""
        if execution_options:
            # yield_per implies streaming results with a matching buffer
            yp = execution_options.get("yield_per", None)
            if yp:
                execution_options = execution_options.union(
                    {"stream_results": True, "max_row_buffer": yp}
                )
        try:
            conn = self._dbapi_connection
            if conn is None:
                # connection was invalidated; attempt a reconnection
                conn = self._revalidate_connection()
            context = constructor(
                dialect, self, conn, execution_options, *args, **kw
            )
        except (exc.PendingRollbackError, exc.ResourceClosedError):
            # already meaningful to the caller; don't re-wrap
            raise
        except BaseException as e:
            self._handle_dbapi_exception(
                e, str(statement), parameters, None, None
            )
        # a deactivated (sub)transaction must be rolled back before reuse
        if (
            self._transaction
            and not self._transaction.is_active
            or (
                self._nested_transaction
                and not self._nested_transaction.is_active
            )
        ):
            self._invalid_transaction()
        elif self._trans_context_manager:
            TransactionalContext._trans_ctx_check(self)
        if self._transaction is None:
            self._autobegin()
        context.pre_exec()
        if context.execute_style is ExecuteStyle.INSERTMANYVALUES:
            return self._exec_insertmany_context(dialect, context)
        else:
            return self._exec_single_context(
                dialect, context, statement, parameters
            )
def _exec_single_context(
self,
dialect: Dialect,
context: ExecutionContext,
statement: Union[str, Compiled],
parameters: Optional[_AnyMultiExecuteParams],
) -> CursorResult[Unpack[TupleAny]]:
"""continue the _execute_context() method for a single DBAPI
cursor.execute() or cursor.executemany() call.
"""
if dialect.bind_typing is BindTyping.SETINPUTSIZES:
generic_setinputsizes = context._prepare_set_input_sizes()
if generic_setinputsizes:
try:
dialect.do_set_input_sizes(
context.cursor, generic_setinputsizes, context
)
except BaseException as e:
self._handle_dbapi_exception(
e, str(statement), parameters, None, context
)
cursor, str_statement, parameters = (
context.cursor,
context.statement,
context.parameters,
)
effective_parameters: Optional[_AnyExecuteParams]
if not context.executemany:
effective_parameters = parameters[0]
else:
effective_parameters = parameters
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
str_statement, effective_parameters = fn(
self,
cursor,
str_statement,
effective_parameters,
context,
context.executemany,
)
if self._echo:
self._log_info(str_statement)
stats = context._get_cache_stats()
if not self.engine.hide_parameters:
self._log_info(
"[%s] %r",
stats,
sql_util._repr_params(
effective_parameters,
batches=10,
ismulti=context.executemany,
),
)
else:
self._log_info(
"[%s] [SQL parameters hidden due to hide_parameters=True]",
stats,
)
evt_handled: bool = False
try:
if context.execute_style is ExecuteStyle.EXECUTEMANY:
effective_parameters = cast(
"_CoreMultiExecuteParams", effective_parameters
)
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_executemany:
if fn(
cursor,
str_statement,
effective_parameters,
context,
):
evt_handled = True
break
if not evt_handled:
self.dialect.do_executemany(
cursor,
str_statement,
effective_parameters,
context,
)
elif not effective_parameters and context.no_parameters:
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute_no_params:
if fn(cursor, str_statement, context):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute_no_params(
cursor, str_statement, context
)
else:
effective_parameters = cast(
"_CoreSingleExecuteParams", effective_parameters
)
if self.dialect._has_events:
for fn in self.dialect.dispatch.do_execute:
if fn(
cursor,
str_statement,
effective_parameters,
context,
):
evt_handled = True
break
if not evt_handled:
self.dialect.do_execute(
cursor, str_statement, effective_parameters, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self,
cursor,
str_statement,
effective_parameters,
context,
context.executemany,
)
context.post_exec()
result = context._setup_result_proxy()
except BaseException as e:
self._handle_dbapi_exception(
e, str_statement, effective_parameters, cursor, context
)
return result
def _exec_insertmany_context(
self,
dialect: Dialect,
context: ExecutionContext,
) -> CursorResult[Unpack[TupleAny]]:
"""continue the _execute_context() method for an "insertmanyvalues"
operation, which will invoke DBAPI
cursor.execute() one or more times with individual log and
event hook calls.
"""
if dialect.bind_typing is BindTyping.SETINPUTSIZES:
generic_setinputsizes = context._prepare_set_input_sizes()
else:
generic_setinputsizes = None
cursor, str_statement, parameters = (
context.cursor,
context.statement,
context.parameters,
)
effective_parameters = parameters
engine_events = self._has_events or self.engine._has_events
if self.dialect._has_events:
do_execute_dispatch: Iterable[Any] = (
self.dialect.dispatch.do_execute
)
else:
do_execute_dispatch = ()
if self._echo:
stats = context._get_cache_stats() + " (insertmanyvalues)"
preserve_rowcount = context.execution_options.get(
"preserve_rowcount", False
)
rowcount = 0
for imv_batch in dialect._deliver_insertmanyvalues_batches(
self,
cursor,
str_statement,
effective_parameters,
generic_setinputsizes,
context,
):
if imv_batch.processed_setinputsizes:
try:
dialect.do_set_input_sizes(
context.cursor,
imv_batch.processed_setinputsizes,
context,
)
except BaseException as e:
self._handle_dbapi_exception(
e,
sql_util._long_statement(imv_batch.replaced_statement),
imv_batch.replaced_parameters,
None,
context,
is_sub_exec=True,
)
sub_stmt = imv_batch.replaced_statement
sub_params = imv_batch.replaced_parameters
if engine_events:
for fn in self.dispatch.before_cursor_execute:
sub_stmt, sub_params = fn(
self,
cursor,
sub_stmt,
sub_params,
context,
True,
)
if self._echo:
self._log_info(sql_util._long_statement(sub_stmt))
imv_stats = f""" {imv_batch.batchnum}/{
imv_batch.total_batches
} ({
'ordered'
if imv_batch.rows_sorted else 'unordered'
}{
'; batch not supported'
if imv_batch.is_downgraded
else ''
})"""
if imv_batch.batchnum == 1:
stats += imv_stats
else:
stats = f"insertmanyvalues{imv_stats}"
if not self.engine.hide_parameters:
self._log_info(
"[%s] %r",
stats,
sql_util._repr_params(
sub_params,
batches=10,
ismulti=False,
),
)
else:
self._log_info(
"[%s] [SQL parameters hidden due to "
"hide_parameters=True]",
stats,
)
try:
for fn in do_execute_dispatch:
if fn(
cursor,
sub_stmt,
sub_params,
context,
):
break
else:
dialect.do_execute(
cursor,
sub_stmt,
sub_params,
context,
)
except BaseException as e:
self._handle_dbapi_exception(
e,
sql_util._long_statement(sub_stmt),
sub_params,
cursor,
context,
is_sub_exec=True,
)
if engine_events:
self.dispatch.after_cursor_execute(
self,
cursor,
str_statement,
effective_parameters,
context,
context.executemany,
)
if preserve_rowcount:
rowcount += imv_batch.current_batch_size
try:
context.post_exec()
if preserve_rowcount:
context._rowcount = rowcount # type: ignore[attr-defined]
result = context._setup_result_proxy()
except BaseException as e:
self._handle_dbapi_exception(
e, str_statement, effective_parameters, cursor, context
)
return result
def _cursor_execute(
self,
cursor: DBAPICursor,
statement: str,
parameters: _DBAPISingleExecuteParams,
context: Optional[ExecutionContext] = None,
) -> None:
"""Execute a statement + params on the given cursor.
Adds appropriate logging and exception handling.
This method is used by DefaultDialect for special-case
executions, such as for sequences and column defaults.
The path of statement execution in the majority of cases
terminates at _execute_context().
"""
if self._has_events or self.engine._has_events:
for fn in self.dispatch.before_cursor_execute:
statement, parameters = fn(
self, cursor, statement, parameters, context, False
)
if self._echo:
self._log_info(statement)
self._log_info("[raw sql] %r", parameters)
try:
for fn in (
()
if not self.dialect._has_events
else self.dialect.dispatch.do_execute
):
if fn(cursor, statement, parameters, context):
break
else:
self.dialect.do_execute(cursor, statement, parameters, context)
except BaseException as e:
self._handle_dbapi_exception(
e, statement, parameters, cursor, context
)
if self._has_events or self.engine._has_events:
self.dispatch.after_cursor_execute(
self, cursor, statement, parameters, context, False
)
def _safe_close_cursor(self, cursor: DBAPICursor) -> None:
"""Close the given cursor, catching exceptions
and turning into log warnings.
"""
try:
cursor.close()
except Exception:
# log the error through the connection pool's logger.
self.engine.pool.logger.error(
"Error closing cursor", exc_info=True
)
_reentrant_error = False
_is_disconnect = False
def _handle_dbapi_exception(
self,
e: BaseException,
statement: Optional[str],
parameters: Optional[_AnyExecuteParams],
cursor: Optional[DBAPICursor],
context: Optional[ExecutionContext],
is_sub_exec: bool = False,
) -> NoReturn:
exc_info = sys.exc_info()
is_exit_exception = util.is_exit_exception(e)
if not self._is_disconnect:
self._is_disconnect = (
isinstance(e, self.dialect.loaded_dbapi.Error)
and not self.closed
and self.dialect.is_disconnect(
e,
self._dbapi_connection if not self.invalidated else None,
cursor,
)
) or (is_exit_exception and not self.closed)
invalidate_pool_on_disconnect = not is_exit_exception
ismulti: bool = (
not is_sub_exec and context.executemany
if context is not None
else False
)
if self._reentrant_error:
raise exc.DBAPIError.instance(
statement,
parameters,
e,
self.dialect.loaded_dbapi.Error,
hide_parameters=self.engine.hide_parameters,
dialect=self.dialect,
ismulti=ismulti,
).with_traceback(exc_info[2]) from e
self._reentrant_error = True
try:
# non-DBAPI error - if we already got a context,
# or there's no string statement, don't wrap it
should_wrap = isinstance(e, self.dialect.loaded_dbapi.Error) or (
statement is not None
and context is None
and not is_exit_exception
)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
statement,
parameters,
cast(Exception, e),
self.dialect.loaded_dbapi.Error,
hide_parameters=self.engine.hide_parameters,
connection_invalidated=self._is_disconnect,
dialect=self.dialect,
ismulti=ismulti,
)
else:
sqlalchemy_exception = None
newraise = None
if (self.dialect._has_events) and not self._execution_options.get(
"skip_user_error_events", False
):
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
self.engine,
self.dialect,
self,
cursor,
statement,
parameters,
context,
self._is_disconnect,
invalidate_pool_on_disconnect,
False,
)
for fn in self.dialect.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if self._is_disconnect != ctx.is_disconnect:
self._is_disconnect = ctx.is_disconnect
if sqlalchemy_exception:
sqlalchemy_exception.connection_invalidated = (
ctx.is_disconnect
)
# set up potentially user-defined value for
# invalidate pool.
invalidate_pool_on_disconnect = (
ctx.invalidate_pool_on_disconnect
)
if should_wrap and context:
context.handle_dbapi_exception(e)
if not self._is_disconnect:
if cursor:
self._safe_close_cursor(cursor)
# "autorollback" was mostly relevant in 1.x series.
# It's very unlikely to reach here, as the connection
# does autobegin so when we are here, we are usually
# in an explicit / semi-explicit transaction.
# however we have a test which manufactures this
# scenario in any case using an event handler.
# test/engine/test_execute.py-> test_actual_autorollback
if not self.in_transaction():
self._rollback_impl()
if newraise:
raise newraise.with_traceback(exc_info[2]) from e
elif should_wrap:
assert sqlalchemy_exception is not None
raise sqlalchemy_exception.with_traceback(exc_info[2]) from e
else:
assert exc_info[1] is not None
raise exc_info[1].with_traceback(exc_info[2])
finally:
del self._reentrant_error
if self._is_disconnect:
del self._is_disconnect
if not self.invalidated:
dbapi_conn_wrapper = self._dbapi_connection
assert dbapi_conn_wrapper is not None
if invalidate_pool_on_disconnect:
self.engine.pool._invalidate(dbapi_conn_wrapper, e)
self.invalidate(e)
@classmethod
def _handle_dbapi_exception_noconnection(
cls,
e: BaseException,
dialect: Dialect,
engine: Optional[Engine] = None,
is_disconnect: Optional[bool] = None,
invalidate_pool_on_disconnect: bool = True,
is_pre_ping: bool = False,
) -> NoReturn:
exc_info = sys.exc_info()
if is_disconnect is None:
is_disconnect = isinstance(
e, dialect.loaded_dbapi.Error
) and dialect.is_disconnect(e, None, None)
should_wrap = isinstance(e, dialect.loaded_dbapi.Error)
if should_wrap:
sqlalchemy_exception = exc.DBAPIError.instance(
None,
None,
cast(Exception, e),
dialect.loaded_dbapi.Error,
hide_parameters=(
engine.hide_parameters if engine is not None else False
),
connection_invalidated=is_disconnect,
dialect=dialect,
)
else:
sqlalchemy_exception = None
newraise = None
if dialect._has_events:
ctx = ExceptionContextImpl(
e,
sqlalchemy_exception,
engine,
dialect,
None,
None,
None,
None,
None,
is_disconnect,
invalidate_pool_on_disconnect,
is_pre_ping,
)
for fn in dialect.dispatch.handle_error:
try:
# handler returns an exception;
# call next handler in a chain
per_fn = fn(ctx)
if per_fn is not None:
ctx.chained_exception = newraise = per_fn
except Exception as _raised:
# handler raises an exception - stop processing
newraise = _raised
break
if sqlalchemy_exception and is_disconnect != ctx.is_disconnect:
sqlalchemy_exception.connection_invalidated = ctx.is_disconnect
if newraise:
raise newraise.with_traceback(exc_info[2]) from e
elif should_wrap:
assert sqlalchemy_exception is not None
raise sqlalchemy_exception.with_traceback(exc_info[2]) from e
else:
assert exc_info[1] is not None
raise exc_info[1].with_traceback(exc_info[2])
def _run_ddl_visitor(
self,
visitorcallable: Type[InvokeDDLBase],
element: SchemaVisitable,
**kwargs: Any,
) -> None:
"""run a DDL visitor.
This method is only here so that the MockConnection can change the
options given to the visitor so that "checkfirst" is skipped.
"""
visitorcallable(
dialect=self.dialect, connection=self, **kwargs
).traverse_single(element)
| Connection |
python | justquick__django-activity-stream | actstream/tests/test_gfk.py | {
"start": 260,
"end": 4256
} | class ____(TestCase):
def setUp(self):
User = get_user_model()
self.user_ct = ContentType.objects.get_for_model(User)
self.group_ct = ContentType.objects.get_for_model(Group)
self.group, _ = Group.objects.get_or_create(name='CoolGroup')
self.user1, _ = User.objects.get_or_create(username='admin')
self.user2, _ = User.objects.get_or_create(username='Two')
self.user3, _ = User.objects.get_or_create(username='Three')
self.user4, _ = User.objects.get_or_create(username='Four')
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='followed',
target_content_type=self.user_ct,
target_object_id=self.user2.id
)
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='followed',
target_content_type=self.user_ct,
target_object_id=self.user3.id
)
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='followed',
target_content_type=self.user_ct,
target_object_id=self.user4.id
)
Action.objects.get_or_create(
actor_content_type=self.user_ct,
actor_object_id=self.user1.id,
verb='joined',
target_content_type=self.group_ct,
target_object_id=self.group.id
)
def test_fetch_generic_relations(self):
# baseline without fetch_generic_relations
_actions = Action.objects.filter(actor_content_type=self.user_ct,
actor_object_id=self.user1.id)
def actions(): return _actions._clone()
num_content_types = len(set(actions().values_list(
'target_content_type_id', flat=True)))
n = actions().count()
# compare to fetching only 1 generic relation
full, generic = actions(), actions().fetch_generic_relations('target')
self.assertNumQueries(LTE(n + 1),
lambda: [a.target for a in full])
self.assertNumQueries(LTE(num_content_types + 2),
lambda: [a.target for a in generic])
action_targets = [(a.id, a.target) for a in actions()]
action_targets_fetch_generic = [
(a.id, a.target)
for a in actions().fetch_generic_relations('target')]
self.assertEqual(action_targets, action_targets_fetch_generic)
# compare to fetching all generic relations
num_content_types = len(set(sum(actions().values_list(
'actor_content_type_id', 'target_content_type_id'), ())))
full, generic = actions(), actions().fetch_generic_relations()
self.assertNumQueries(LTE(2 * n + 1),
lambda: [(a.actor, a.target) for a in full])
self.assertNumQueries(LTE(num_content_types + 2),
lambda: [(a.actor, a.target) for a in generic])
action_actor_targets = [(a.id, a.actor, a.target) for a in actions()]
action_actor_targets_fetch_generic_all = [
(a.id, a.actor, a.target)
for a in actions().fetch_generic_relations()]
self.assertEqual(action_actor_targets,
action_actor_targets_fetch_generic_all)
# fetch only 1 generic relation, but access both gfks
def generic():
return actions().fetch_generic_relations('target')
self.assertNumQueries(LTE(n + num_content_types + 2), lambda: [
(a.actor, a.target) for a in generic()])
action_actor_targets_fetch_generic_target = [
(a.id, a.actor, a.target) for a in generic()]
self.assertEqual(action_actor_targets,
action_actor_targets_fetch_generic_target)
| GFKManagerTestCase |
python | PyCQA__bandit | bandit/formatters/custom.py | {
"start": 756,
"end": 5363
} | class ____(dict):
"""Safe mapper to handle format key errors"""
@classmethod # To prevent PEP8 warnings in the test suite
def __missing__(cls, key):
return "{%s}" % key
@test_properties.accepts_baseline
def report(manager, fileobj, sev_level, conf_level, template=None):
"""Prints issues in custom format
:param manager: the bandit manager object
:param fileobj: The output file object, which may be sys.stdout
:param sev_level: Filtering severity level
:param conf_level: Filtering confidence level
:param template: Output template with non-terminal tags <N>
(default: '{abspath}:{line}:
{test_id}[bandit]: {severity}: {msg}')
"""
machine_output = {"results": [], "errors": []}
for fname, reason in manager.get_skipped():
machine_output["errors"].append({"filename": fname, "reason": reason})
results = manager.get_issue_list(
sev_level=sev_level, conf_level=conf_level
)
msg_template = template
if template is None:
msg_template = "{abspath}:{line}: {test_id}[bandit]: {severity}: {msg}"
# Dictionary of non-terminal tags that will be expanded
tag_mapper = {
"abspath": lambda issue: os.path.abspath(issue.fname),
"relpath": lambda issue: os.path.relpath(issue.fname),
"line": lambda issue: issue.lineno,
"col": lambda issue: issue.col_offset,
"end_col": lambda issue: issue.end_col_offset,
"test_id": lambda issue: issue.test_id,
"severity": lambda issue: issue.severity,
"msg": lambda issue: issue.text,
"confidence": lambda issue: issue.confidence,
"range": lambda issue: issue.linerange,
"cwe": lambda issue: issue.cwe,
}
# Create dictionary with tag sets to speed up search for similar tags
tag_sim_dict = {tag: set(tag) for tag, _ in tag_mapper.items()}
# Parse the format_string template and check the validity of tags
try:
parsed_template_orig = list(string.Formatter().parse(msg_template))
# of type (literal_text, field_name, fmt_spec, conversion)
# Check the format validity only, ignore keys
string.Formatter().vformat(msg_template, (), SafeMapper(line=0))
except ValueError as e:
LOG.error("Template is not in valid format: %s", e.args[0])
sys.exit(2)
tag_set = {t[1] for t in parsed_template_orig if t[1] is not None}
if not tag_set:
LOG.error("No tags were found in the template. Are you missing '{}'?")
sys.exit(2)
def get_similar_tag(tag):
similarity_list = [
(len(set(tag) & t_set), t) for t, t_set in tag_sim_dict.items()
]
return sorted(similarity_list)[-1][1]
tag_blacklist = []
for tag in tag_set:
# check if the tag is in dictionary
if tag not in tag_mapper:
similar_tag = get_similar_tag(tag)
LOG.warning(
"Tag '%s' was not recognized and will be skipped, "
"did you mean to use '%s'?",
tag,
similar_tag,
)
tag_blacklist += [tag]
# Compose the message template back with the valid values only
msg_parsed_template_list = []
for literal_text, field_name, fmt_spec, conversion in parsed_template_orig:
if literal_text:
# if there is '{' or '}', double it to prevent expansion
literal_text = re.sub("{", "{{", literal_text)
literal_text = re.sub("}", "}}", literal_text)
msg_parsed_template_list.append(literal_text)
if field_name is not None:
if field_name in tag_blacklist:
msg_parsed_template_list.append(field_name)
continue
# Append the fmt_spec part
params = [field_name, fmt_spec, conversion]
markers = ["", ":", "!"]
msg_parsed_template_list.append(
["{"]
+ [f"{m + p}" if p else "" for m, p in zip(markers, params)]
+ ["}"]
)
msg_parsed_template = (
"".join([item for lst in msg_parsed_template_list for item in lst])
+ "\n"
)
with fileobj:
for defect in results:
evaluated_tags = SafeMapper(
(k, v(defect)) for k, v in tag_mapper.items()
)
output = msg_parsed_template.format(**evaluated_tags)
fileobj.write(output)
if fileobj.name != sys.stdout.name:
LOG.info("Result written to file: %s", fileobj.name)
| SafeMapper |
python | walkccc__LeetCode | solutions/1797. Design Authentication Manager/1797.py | {
"start": 41,
"end": 942
} | class ____:
def __init__(self, timeToLive: int):
self.timeToLive = timeToLive
self.tokenIdToExpiryTime = {}
self.times = SortedSet()
def generate(self, tokenId: str, currentTime: int) -> None:
self.tokenIdToExpiryTime[tokenId] = currentTime
self.times.add(currentTime)
def renew(self, tokenId: str, currentTime: int) -> None:
if (tokenId not in self.tokenIdToExpiryTime or
currentTime >= self.tokenIdToExpiryTime[tokenId] + self.timeToLive):
return
self.times.remove(self.tokenIdToExpiryTime[tokenId])
self.tokenIdToExpiryTime[tokenId] = currentTime
self.times.add(currentTime)
def countUnexpiredTokens(self, currentTime: int) -> int:
i = self.times.bisect_left(currentTime - self.timeToLive + 1)
# Remove expired tokens.
for _ in range(i):
self.times.remove(self.times[0])
return len(self.times)
| AuthenticationManager |
python | catalyst-team__catalyst | catalyst/contrib/layers/common.py | {
"start": 801,
"end": 1322
} | class ____(nn.Module):
"""Performs :math:`L_p` normalization of inputs over specified dimension.
@TODO: Docs (add `Example`). Contribution is welcome.
"""
def __init__(self, **normalize_kwargs):
"""
Args:
**normalize_kwargs: see ``torch.nn.functional.normalize`` params
"""
super().__init__()
self.normalize_kwargs = normalize_kwargs
def forward(self, x):
"""Forward call."""
return F.normalize(x, **self.normalize_kwargs)
| Normalize |
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/message_create_params.py | {
"start": 1329,
"end": 11150
} | class ____(TypedDict, total=False):
max_tokens: Required[int]
"""The maximum number of tokens to generate before stopping.
Note that our models may stop _before_ reaching this maximum. This parameter
only specifies the absolute maximum number of tokens to generate.
Different models have different maximum values for this parameter. See
[models](https://docs.claude.com/en/docs/models-overview) for details.
"""
messages: Required[Iterable[BetaMessageParam]]
"""Input messages.
Our models are trained to operate on alternating `user` and `assistant`
conversational turns. When creating a new `Message`, you specify the prior
conversational turns with the `messages` parameter, and the model then generates
the next `Message` in the conversation. Consecutive `user` or `assistant` turns
in your request will be combined into a single turn.
Each input message must be an object with a `role` and `content`. You can
specify a single `user`-role message, or you can include multiple `user` and
`assistant` messages.
If the final message uses the `assistant` role, the response content will
continue immediately from the content in that message. This can be used to
constrain part of the model's response.
Example with a single `user` message:
```json
[{ "role": "user", "content": "Hello, Claude" }]
```
Example with multiple conversational turns:
```json
[
{ "role": "user", "content": "Hello there." },
{ "role": "assistant", "content": "Hi, I'm Claude. How can I help you?" },
{ "role": "user", "content": "Can you explain LLMs in plain English?" }
]
```
Example with a partially-filled response from Claude:
```json
[
{
"role": "user",
"content": "What's the Greek name for Sun? (A) Sol (B) Helios (C) Sun"
},
{ "role": "assistant", "content": "The best answer is (" }
]
```
Each input message `content` may be either a single `string` or an array of
content blocks, where each block has a specific `type`. Using a `string` for
`content` is shorthand for an array of one content block of type `"text"`. The
following input messages are equivalent:
```json
{ "role": "user", "content": "Hello, Claude" }
```
```json
{ "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
```
See [input examples](https://docs.claude.com/en/api/messages-examples).
Note that if you want to include a
[system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
top-level `system` parameter — there is no `"system"` role for input messages in
the Messages API.
There is a limit of 100,000 messages in a single request.
"""
model: Required[ModelParam]
"""
The model that will complete your prompt.\n\nSee
[models](https://docs.anthropic.com/en/docs/models-overview) for additional
details and options.
"""
container: Optional[Container]
"""Container identifier for reuse across requests."""
context_management: Optional[BetaContextManagementConfigParam]
"""Context management configuration.
This allows you to control how Claude manages context across multiple requests,
such as whether to clear function results or not.
"""
mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam]
"""MCP servers to be utilized in this request"""
metadata: BetaMetadataParam
"""An object describing metadata about the request."""
output_config: BetaOutputConfigParam
"""Configuration options for the model's output.
Controls aspects like how much effort the model puts into its response.
"""
output_format: Optional[BetaJSONOutputFormatParam]
"""A schema to specify Claude's output format in responses."""
service_tier: Literal["auto", "standard_only"]
"""
Determines whether to use priority capacity (if available) or standard capacity
for this request.
Anthropic offers different levels of service for your API requests. See
[service-tiers](https://docs.claude.com/en/api/service-tiers) for details.
"""
stop_sequences: SequenceNotStr[str]
"""Custom text sequences that will cause the model to stop generating.
Our models will normally stop when they have naturally completed their turn,
which will result in a response `stop_reason` of `"end_turn"`.
If you want the model to stop generating when it encounters custom strings of
text, you can use the `stop_sequences` parameter. If the model encounters one of
the custom sequences, the response `stop_reason` value will be `"stop_sequence"`
and the response `stop_sequence` value will contain the matched stop sequence.
"""
system: Union[str, Iterable[BetaTextBlockParam]]
"""System prompt.
A system prompt is a way of providing context and instructions to Claude, such
as specifying a particular goal or role. See our
[guide to system prompts](https://docs.claude.com/en/docs/system-prompts).
"""
temperature: float
"""Amount of randomness injected into the response.
Defaults to `1.0`. Ranges from `0.0` to `1.0`. Use `temperature` closer to `0.0`
for analytical / multiple choice, and closer to `1.0` for creative and
generative tasks.
Note that even with `temperature` of `0.0`, the results will not be fully
deterministic.
"""
thinking: BetaThinkingConfigParam
"""Configuration for enabling Claude's extended thinking.
When enabled, responses include `thinking` content blocks showing Claude's
thinking process before the final answer. Requires a minimum budget of 1,024
tokens and counts towards your `max_tokens` limit.
See
[extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
for details.
"""
tool_choice: BetaToolChoiceParam
"""How the model should use the provided tools.
The model can use a specific tool, any available tool, decide by itself, or not
use tools at all.
"""
tools: Iterable[BetaToolUnionParam]
"""Definitions of tools that the model may use.
If you include `tools` in your API request, the model may return `tool_use`
content blocks that represent the model's use of those tools. You can then run
those tools using the tool input generated by the model and then optionally
return results back to the model using `tool_result` content blocks.
There are two types of tools: **client tools** and **server tools**. The
behavior described below applies to client tools. For
[server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
see their individual documentation as each has its own behavior (e.g., the
[web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
Each tool definition includes:
- `name`: Name of the tool.
- `description`: Optional, but strongly-recommended description of the tool.
- `input_schema`: [JSON schema](https://json-schema.org/draft/2020-12) for the
tool `input` shape that the model will produce in `tool_use` output content
blocks.
For example, if you defined `tools` as:
```json
[
{
"name": "get_stock_price",
"description": "Get the current stock price for a given ticker symbol.",
"input_schema": {
"type": "object",
"properties": {
"ticker": {
"type": "string",
"description": "The stock ticker symbol, e.g. AAPL for Apple Inc."
}
},
"required": ["ticker"]
}
}
]
```
And then asked the model "What's the S&P 500 at today?", the model might produce
`tool_use` content blocks in the response like this:
```json
[
{
"type": "tool_use",
"id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"name": "get_stock_price",
"input": { "ticker": "^GSPC" }
}
]
```
You might then run your `get_stock_price` tool with `{"ticker": "^GSPC"}` as an
input, and return the following back to the model in a subsequent `user`
message:
```json
[
{
"type": "tool_result",
"tool_use_id": "toolu_01D7FLrfh4GYq7yT1ULFeyMV",
"content": "259.75 USD"
}
]
```
Tools can be used for workflows that include running client-side tools and
functions, or more generally whenever you want the model to produce a particular
JSON structure of output.
See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.
"""
top_k: int
"""Only sample from the top K options for each subsequent token.
Used to remove "long tail" low probability responses.
[Learn more technical details here](https://towardsdatascience.com/how-to-sample-from-language-models-682bceb97277).
Recommended for advanced use cases only. You usually only need to use
`temperature`.
"""
top_p: float
"""Use nucleus sampling.
In nucleus sampling, we compute the cumulative distribution over all the options
for each subsequent token in decreasing probability order and cut it off once it
reaches a particular probability specified by `top_p`. You should either alter
`temperature` or `top_p`, but not both.
Recommended for advanced use cases only. You usually only need to use
`temperature`.
"""
betas: Annotated[List[AnthropicBetaParam], PropertyInfo(alias="anthropic-beta")]
"""Optional header to specify the beta version(s) you want to use."""
Container: TypeAlias = Union[BetaContainerParams, str]
| MessageCreateParamsBase |
python | huggingface__transformers | tests/models/imagegpt/test_image_processing_imagegpt.py | {
"start": 3189,
"end": 15582
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
fast_image_processing_class = ImageGPTImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = ImageGPTImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "clusters"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
def test_image_processor_from_dict_with_kwargs(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size, {"height": 18, "width": 18})
image_processor = image_processing_class.from_dict(self.image_processor_dict, size=42)
self.assertEqual(image_processor.size, {"height": 42, "width": 42})
def test_image_processor_to_json_string(self):
for image_processing_class in self.image_processor_list:
image_processor = image_processing_class(**self.image_processor_dict)
obj = json.loads(image_processor.to_json_string())
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(value, obj[key]))
else:
self.assertEqual(obj[key], value)
def test_image_processor_to_json_file(self):
for image_processing_class in self.image_processor_list:
image_processor_first = image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname, "image_processor.json")
image_processor_first.to_json_file(json_file_path)
image_processor_second = image_processing_class.from_json_file(json_file_path).to_dict()
image_processor_first = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(value, image_processor_second[key]))
else:
self.assertEqual(image_processor_first[key], value)
def test_image_processor_from_and_save_pretrained(self):
for image_processing_class in self.image_processor_list:
image_processor_first = image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(tmpdirname)
image_processor_second = image_processing_class.from_pretrained(tmpdirname).to_dict()
image_processor_first = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(value, image_processor_second[key]))
else:
self.assertEqual(value, value)
def test_image_processor_save_load_with_autoimageprocessor(self):
for image_processing_class in self.image_processor_list:
image_processor_first = image_processing_class(**self.image_processor_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = image_processor_first.save_pretrained(tmpdirname)[0]
check_json_file_has_correct_format(saved_file)
image_processor_second = AutoImageProcessor.from_pretrained(tmpdirname)
image_processor_first = image_processor_first.to_dict()
image_processor_second = image_processor_second.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(value, image_processor_second[key]))
else:
self.assertEqual(value, value)
@unittest.skip(reason="ImageGPT requires clusters at initialization")
def test_init_without_params(self):
pass
# Override the test from ImageProcessingTestMixin as ImageGPT model takes input_ids as input
def test_call_pil(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PIL images
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False)
for image in image_inputs:
self.assertIsInstance(image, Image.Image)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").input_ids
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(encoded_images)
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").input_ids
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
# Override the test from ImageProcessingTestMixin as ImageGPT model takes input_ids as input
def test_call_numpy(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random numpy tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, numpify=True)
for image in image_inputs:
self.assertIsInstance(image, np.ndarray)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").input_ids
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(encoded_images)
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").input_ids
self.assertEqual(
tuple(encoded_images.shape), (self.image_processor_tester.batch_size, *expected_output_image_shape)
)
@unittest.skip(reason="ImageGPT assumes clusters for 3 channels")
def test_call_numpy_4_channels(self):
pass
# Override the test from ImageProcessingTestMixin as ImageGPT model takes input_ids as input
def test_call_pytorch(self):
for image_processing_class in self.image_processor_list:
# Initialize image_processing
image_processing = image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
image_inputs = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
expected_output_image_shape = self.image_processor_tester.expected_output_image_shape(image_inputs)
for image in image_inputs:
self.assertIsInstance(image, torch.Tensor)
# Test not batched input
encoded_images = image_processing(image_inputs[0], return_tensors="pt").input_ids
self.assertEqual(tuple(encoded_images.shape), (1, *expected_output_image_shape))
# Test batched
encoded_images = image_processing(image_inputs, return_tensors="pt").input_ids
self.assertEqual(
tuple(encoded_images.shape),
(self.image_processor_tester.batch_size, *expected_output_image_shape),
)
# For quantization-based processors, use absolute tolerance only to avoid infinity issues
@require_vision
@require_torch
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image = Image.open(
requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(
encoding_slow.input_ids.float(), encoding_fast.input_ids.float(), atol=1.0, rtol=0
)
@require_vision
@require_torch
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(
encoding_slow.input_ids.float(), encoding_fast.input_ids.float(), atol=1.0, rtol=0
)
@slow
@require_torch_accelerator
@require_vision
@pytest.mark.torch_compile_test
def test_can_compile_fast_image_processor(self):
if self.fast_image_processing_class is None:
self.skipTest("Skipping compilation test as fast image processor is not defined")
if version.parse(torch.__version__) < version.parse("2.3"):
self.skipTest(reason="This test requires torch >= 2.3 to run.")
torch.compiler.reset()
input_image = torch.randint(0, 255, (3, 224, 224), dtype=torch.uint8)
image_processor = self.fast_image_processing_class(**self.image_processor_dict)
output_eager = image_processor(input_image, device=torch_device, return_tensors="pt")
image_processor = torch.compile(image_processor, mode="reduce-overhead")
output_compiled = image_processor(input_image, device=torch_device, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(
output_eager.input_ids.float(), output_compiled.input_ids.float(), atol=1.0, rtol=0
)
def prepare_images():
# we use revision="refs/pr/1" until the PR is merged
# https://hf.co/datasets/hf-internal-testing/fixtures_image_utils/discussions/1
dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test", revision="refs/pr/1")
image1 = dataset[4]["image"]
image2 = dataset[5]["image"]
images = [image1, image2]
return images
@require_vision
@require_torch
| ImageGPTImageProcessingTest |
python | PrefectHQ__prefect | src/prefect/events/actions.py | {
"start": 3109,
"end": 3664
} | class ____(Action):
"""Changes the state of a flow run associated with the trigger"""
type: Literal["change-flow-run-state"] = "change-flow-run-state"
name: Optional[str] = Field(
None,
description="The name of the state to change the flow run to",
)
state: StateType = Field(
...,
description="The type of the state to change the flow run to",
)
message: Optional[str] = Field(
None,
description="An optional message to associate with the state change",
)
| ChangeFlowRunState |
python | vyperlang__vyper | vyper/builtins/functions.py | {
"start": 2604,
"end": 3142
} | class ____(FoldedFunctionT):
# Base class for builtin functions that:
# (1) take a typename as the only argument; and
# (2) should always be folded.
_inputs = [("typename", TYPE_T.any())]
def fetch_call_return(self, node):
type_ = self.infer_arg_types(node)[0].typedef
return type_
def infer_arg_types(self, node, expected_return_typ=None):
validate_call_args(node, 1)
input_typedef = TYPE_T(type_from_annotation(node.args[0]))
return [input_typedef]
| TypenameFoldedFunctionT |
python | pallets__jinja | src/jinja2/nodes.py | {
"start": 11592,
"end": 11902
} | class ____(Stmt):
"""Like a macro without a name but a call instead. `call` is called with
the unnamed macro as `caller` argument this node holds.
"""
fields = ("call", "args", "defaults", "body")
call: "Call"
args: list["Name"]
defaults: list["Expr"]
body: list[Node]
| CallBlock |
python | realpython__materials | python-double-underscore/cart.py | {
"start": 0,
"end": 262
} | class ____:
def __init__(self):
self.products = []
def add_product(self, product):
self.products.append(product)
def get_products(self):
return self.products
def __len__(self):
return len(self.products)
| ShoppingCart |
python | sqlalchemy__sqlalchemy | test/orm/test_transaction.py | {
"start": 31521,
"end": 33764
} | class ____(FixtureTest):
run_setup_mappers = "once"
run_inserts = None
@classmethod
def setup_mappers(cls):
User, Address = cls.classes.User, cls.classes.Address
users, addresses = cls.tables.users, cls.tables.addresses
cls.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(
Address,
backref="user",
cascade="all, delete-orphan",
order_by=addresses.c.id,
)
},
)
cls.mapper_registry.map_imperatively(Address, addresses)
def subtransaction_recipe_one(self):
@contextlib.contextmanager
def transaction(session):
if session.in_transaction():
outermost = False
else:
outermost = True
session.begin()
try:
yield
except:
if session.in_transaction():
session.rollback()
raise
else:
if outermost and session.in_transaction():
session.commit()
return transaction
def subtransaction_recipe_two(self):
# shorter recipe
@contextlib.contextmanager
def transaction(session):
if not session.in_transaction():
with session.begin():
yield
else:
yield
return transaction
def subtransaction_recipe_three(self):
@contextlib.contextmanager
def transaction(session):
if not session.in_transaction():
session.begin()
try:
yield
except:
if session.in_transaction():
session.rollback()
else:
session.commit()
else:
try:
yield
except:
if session.in_transaction():
session.rollback()
raise
return transaction
@testing.combinations(
(subtransaction_recipe_one, True),
(subtransaction_recipe_two, False),
(subtransaction_recipe_three, True),
argnames="target_recipe,recipe_rollsback_early",
id_="ns",
)
| _LocalFixture |
python | ansible__ansible | test/lib/ansible_test/_internal/debugging.py | {
"start": 14417,
"end": 15563
} | class ____:
"""Options detected from the debugpy instance hosting this process."""
port: int
adapter_access_token: str | None
@cache
def detect_debugpy_options() -> DebugpyOptions | None:
"""Return the options for the debugpy instance hosting this process, or `None` if not detected."""
if "debugpy" not in sys.modules:
return None
import debugpy # pylint: disable=import-error
# get_cli_options is the new public API introduced after debugpy 1.8.15.
# We should remove the debugpy.server cli fallback once the new version is
# released.
if hasattr(debugpy, 'get_cli_options'):
opts = debugpy.get_cli_options()
else:
from debugpy.server import cli # pylint: disable=import-error
opts = cli.options
# address can be None if the debugger is not configured through the CLI as
# we expected.
if not opts.address:
return None
port = opts.address[1]
display.info(f'Detected debugpy debugger port {port}.', verbosity=1)
return DebugpyOptions(
port=port,
adapter_access_token=opts.adapter_access_token,
)
| DebugpyOptions |
python | pypa__warehouse | tests/unit/email/ses/test_models.py | {
"start": 333,
"end": 7436
} | class ____:
def test_starts_out_accepted(self, db_session):
em = EmailStatus(EmailMessage()).save()
assert em.status is EmailStatuses.Accepted
def test_can_deliver(self, db_session):
email = EmailFactory.create()
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.deliver()
assert status.save().status is EmailStatuses.Delivered
assert email.unverify_reason is None
def test_delivery_resets_transient_bounces(self, db_session):
email = EmailFactory.create(transient_bounces=3)
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.deliver()
assert status.save().status is EmailStatuses.Delivered
assert email.transient_bounces == 0
def test_delivery_without_an_email_obj(self, db_session):
em = EmailMessageFactory.create()
status = EmailStatus.load(em)
status.deliver()
assert em.missing
def test_soft_bounce_increments_transient_bounces(self, db_session):
email = EmailFactory.create(transient_bounces=3)
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.soft_bounce()
assert status.save().status is EmailStatuses.SoftBounced
assert email.transient_bounces == 4
def test_soft_bounced_unverifies_when_going_over(self, db_session):
email = EmailFactory.create(transient_bounces=MAX_TRANSIENT_BOUNCES)
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.soft_bounce()
assert status.save().status is EmailStatuses.SoftBounced
assert email.transient_bounces == MAX_TRANSIENT_BOUNCES + 1
assert not email.verified
assert email.unverify_reason is UnverifyReasons.SoftBounce
def test_soft_bounce_after_delivery_does_nothing(self, db_session):
email = EmailFactory.create(transient_bounces=3)
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.deliver()
status.soft_bounce()
assert status.save().status is EmailStatuses.Delivered
assert email.transient_bounces == 0
def test_duplicate_delivery_does_nothing(self, db_session):
email = EmailFactory.create()
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.deliver()
status.deliver()
assert status.save().status is EmailStatuses.Delivered
def test_delivery_after_soft_bounce(self, db_session):
email = EmailFactory.create()
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.soft_bounce()
assert status.save().status is EmailStatuses.SoftBounced
assert email.transient_bounces == 1
status.deliver()
assert status.save().status is EmailStatuses.Delivered
assert email.transient_bounces == 0
def test_soft_bounce_without_an_email_obj(self, db_session):
em = EmailMessageFactory.create()
status = EmailStatus.load(em)
status.soft_bounce()
assert em.missing
def test_hard_bounce_unverifies_email(self, db_session):
email = EmailFactory.create()
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.bounce()
assert status.save().status is EmailStatuses.Bounced
assert not email.verified
assert email.unverify_reason is UnverifyReasons.HardBounce
def test_hard_bounce_resets_transient_bounces(self, db_session):
email = EmailFactory.create(transient_bounces=3)
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.bounce()
assert email.transient_bounces == 0
def test_hard_bounce_after_delivery_unverifies_email(self, db_session):
email = EmailFactory.create()
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.deliver()
status.bounce()
assert status.save().status is EmailStatuses.Bounced
assert not email.verified
assert email.unverify_reason is UnverifyReasons.HardBounce
def test_hard_bounce_after_delivery_resets_transient_bounces(self, db_session):
email = EmailFactory.create(transient_bounces=3)
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.deliver()
status.bounce()
assert email.transient_bounces == 0
def test_hard_bounce_without_an_email_obj(self, db_session):
em = EmailMessageFactory.create()
status = EmailStatus.load(em)
status.bounce()
assert em.missing
def test_hard_bounce_after_delivery_without_email_obj(self, db_session):
em = EmailMessageFactory.create()
status = EmailStatus.load(em)
status.deliver()
EmailFactory.create(email=em.to)
status.bounce()
assert em.missing
def test_complain_unverifies_email(self, db_session):
email = EmailFactory.create()
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.deliver()
status.complain()
assert status.save().status is EmailStatuses.Complained
assert not email.verified
assert email.unverify_reason is UnverifyReasons.SpamComplaint
def test_complain_resets_transient_bounces(self, db_session):
email = EmailFactory.create(transient_bounces=3)
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.deliver()
status.complain()
assert email.transient_bounces == 0
def test_complain_without_an_email_obj(self, db_session):
em = EmailMessageFactory.create()
status = EmailStatus.load(em)
status.deliver()
status.complain()
assert em.missing
def test_complain_without_email_obj_and_with(self, db_session):
em = EmailMessageFactory.create()
status = EmailStatus.load(em)
status.deliver()
EmailFactory.create(email=em.to)
status.complain()
assert em.missing
def test_delivery_after_hard_bounce(self, db_session):
email = EmailFactory.create(transient_bounces=3)
em = EmailMessageFactory.create(to=email.email)
status = EmailStatus.load(em)
status.bounce()
status.deliver()
assert email.transient_bounces == 0
assert not email.verified
@pytest.mark.parametrize(
"start_status",
["Accepted", "Delivered", "Bounced", "Soft Bounced", "Complained"],
)
def test_load(self, start_status, db_session):
em = EmailMessageFactory.create(status=EmailStatuses(start_status))
status = EmailStatus.load(em)
assert status.save().status == EmailStatuses(start_status)
| TestEmailStatus |
python | dask__distributed | distributed/deploy/adaptive_core.py | {
"start": 406,
"end": 6415
} | class ____(ABC):
"""
The core logic for adaptive deployments, with none of the cluster details
This class controls our adaptive scaling behavior. It is intended to be
used as a super-class or mixin. It expects the following state and methods:
**State**
plan: set
A set of workers that we think should exist.
Here and below worker is just a token, often an address or name string
requested: set
A set of workers that the cluster class has successfully requested from
the resource manager. We expect that resource manager to work to make
these exist.
observed: set
A set of workers that have successfully checked in with the scheduler
These sets are not necessarily equivalent. Often plan and requested will
be very similar (requesting is usually fast) but there may be a large delay
between requested and observed (often resource managers don't give us what
we want).
**Functions**
target : -> int
Returns the target number of workers that should exist.
This is often obtained by querying the scheduler
workers_to_close : int -> Set[worker]
Given a target number of workers,
returns a set of workers that we should close when we're scaling down
scale_up : int -> None
Scales the cluster up to a target number of workers, presumably
changing at least ``plan`` and hopefully eventually also ``requested``
scale_down : Set[worker] -> None
Closes the provided set of workers
Parameters
----------
minimum: int
The minimum number of allowed workers
maximum: int | inf
The maximum number of allowed workers
wait_count: int
The number of scale-down requests we should receive before actually
scaling down
interval: str
The amount of time, like ``"1s"`` between checks
"""
minimum: int
maximum: int | float
wait_count: int
close_counts: defaultdict[WorkerState, int]
log: deque[tuple[float, dict]]
_adapting: bool
def __init__(
self,
minimum: int = 0,
maximum: int | float = math.inf,
wait_count: int = 3,
):
if not isinstance(maximum, int) and not math.isinf(maximum):
raise ValueError(f"maximum must be int or inf; got {maximum}")
self.minimum = minimum
self.maximum = maximum
self.wait_count = wait_count
# internal state
self.close_counts = defaultdict(int)
self._adapting = False
self.log = deque(
maxlen=dask.config.get("distributed.admin.low-level-log-length")
)
@property
@abstractmethod
def plan(self) -> set[WorkerState]: ...
@property
@abstractmethod
def requested(self) -> set[WorkerState]: ...
@property
@abstractmethod
def observed(self) -> set[WorkerState]: ...
@abstractmethod
async def target(self) -> int:
"""The target number of workers that should exist"""
...
async def workers_to_close(self, target: int) -> list:
"""
Give a list of workers to close that brings us down to target workers
"""
# TODO, improve me with something that thinks about current load
return list(self.observed)[target:]
async def safe_target(self) -> int:
"""Used internally, like target, but respects minimum/maximum"""
n = await self.target()
if n > self.maximum:
n = cast(int, self.maximum)
if n < self.minimum:
n = self.minimum
return n
@abstractmethod
async def scale_down(self, n: int) -> None: ...
@abstractmethod
async def scale_up(self, workers: Iterable) -> None: ...
async def recommendations(self, target: int) -> dict:
"""
Make scale up/down recommendations based on current state and target
"""
plan = self.plan
requested = self.requested
observed = self.observed
if target == len(plan):
self.close_counts.clear()
return {"status": "same"}
if target > len(plan):
self.close_counts.clear()
return {"status": "up", "n": target}
# target < len(plan)
not_yet_arrived = requested - observed
to_close = set()
if not_yet_arrived:
to_close.update(toolz.take(len(plan) - target, not_yet_arrived))
if target < len(plan) - len(to_close):
L = await self.workers_to_close(target=target)
to_close.update(L)
firmly_close = set()
for w in to_close:
self.close_counts[w] += 1
if self.close_counts[w] >= self.wait_count:
firmly_close.add(w)
for k in list(self.close_counts): # clear out unseen keys
if k in firmly_close or k not in to_close:
del self.close_counts[k]
if firmly_close:
return {"status": "down", "workers": list(firmly_close)}
else:
return {"status": "same"}
async def adapt(self) -> None:
"""
Check the current state, make recommendations, call scale
This is the main event of the system
"""
if self._adapting: # Semaphore to avoid overlapping adapt calls
return
self._adapting = True
status = None
try:
target = await self.safe_target()
recommendations = await self.recommendations(target)
if recommendations["status"] != "same":
self.log.append((time(), dict(recommendations)))
status = recommendations.pop("status")
if status == "same":
return
if status == "up":
await self.scale_up(**recommendations)
if status == "down":
await self.scale_down(**recommendations)
finally:
self._adapting = False
| AdaptiveCore |
python | huggingface__transformers | src/transformers/models/kosmos2/modeling_kosmos2.py | {
"start": 22246,
"end": 24731
} | class ____(nn.Module):
# Copied from transformers.models.altclip.modeling_altclip.AltCLIPVisionTransformer.__init__ with AltCLIPVision->Kosmos2Vision,ALTCLIP_VISION->KOSMOS2_VISION,AltCLIP->Kosmos2Vision
def __init__(self, config: Kosmos2VisionConfig):
super().__init__()
self.config = config
embed_dim = config.hidden_size
self.embeddings = Kosmos2VisionEmbeddings(config)
self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
self.encoder = Kosmos2VisionEncoder(config)
self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
def forward(
self,
pixel_values: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
interpolate_pos_encoding: bool = False,
return_dict: Optional[bool] = None,
) -> Union[tuple, BaseModelOutputWithPooling]:
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)
hidden_states = self.pre_layrnorm(hidden_states)
encoder_outputs = self.encoder(
inputs_embeds=hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
last_hidden_state = encoder_outputs[0]
pooled_output = last_hidden_state[:, 0, :]
pooled_output = self.post_layernorm(pooled_output)
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPooling(
last_hidden_state=last_hidden_state,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
# Similar to `transformers.models.m2m_100.modeling_m2m_100.M2M100SinusoidalPositionalEmbedding` but allowing to pass `position_ids`
| Kosmos2VisionTransformer |
python | google__jax | tests/debugging_primitives_test.py | {
"start": 22624,
"end": 28133
} | class ____(jtu.JaxTestCase):
def _assertLinesEqual(self, text1, text2):
def _count(lines):
return collections.Counter(lines)
self.assertDictEqual(_count(text1.split("\n")), _count(text2.split("\n")))
def test_ordered_print_not_supported_in_pmap(self):
@jax.pmap
def f(x):
debug_print("{}", x, ordered=True)
if config.pmap_shmap_merge.value:
if jax.device_count() == 1:
self.skipTest("This test won't raise with 1 device.")
if jtu.device_under_test() == "gpu":
self.skipTest("Test does not raise under GPU.")
if jtu.device_under_test() == "tpu" and jtu.get_tpu_version() > 3:
self.skipTest("Test does not raise under TPU v4+.")
regex = "The following ordered effects are not supported for more than 1 device:*"
else:
regex = "Ordered effects not supported in `pmap`."
with self.assertRaisesRegex(ValueError, regex):
f(jnp.arange(jax.local_device_count()))
def test_unordered_print_works_in_pmap(self):
if jax.device_count() < 2:
raise unittest.SkipTest("Test requires >= 2 devices.")
@jax.pmap
def f(x):
debug_print("hello: {}", x, ordered=False)
with jtu.capture_stdout() as output:
f(jnp.arange(jax.local_device_count()))
jax.effects_barrier()
lines = [f"hello: {i}\n" for i in range(jax.local_device_count())]
self._assertLinesEqual(output(), "".join(lines))
@jax.pmap
def f2(x):
debug_print('hello: {}', x)
debug_print('hello: {}', x + 2)
with jtu.capture_stdout() as output:
f2(jnp.arange(2))
jax.effects_barrier()
self._assertLinesEqual(output(), "hello: 0\nhello: 1\nhello: 2\nhello: 3\n")
def test_unordered_print_with_jit(self):
def f(x):
debug_print("{}", x, ordered=False)
return x
mesh = jax.sharding.Mesh(np.array(jax.devices()), ['dev'])
spec = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('dev'))
out_spec = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec())
f = jax.jit(f, in_shardings=spec, out_shardings=out_spec)
with jax.set_mesh(mesh):
with jtu.capture_stdout() as output:
f(np.arange(8, dtype=jnp.int32))
jax.effects_barrier()
self.assertEqual(output(), "[0 1 2 3 4 5 6 7]\n")
def f2(x):
y = x.dot(x)
debug_print("{}", y, ordered=False)
return y
f2 = jax.jit(f2, in_shardings=spec, out_shardings=out_spec)
with jax.set_mesh(mesh):
with jtu.capture_stdout() as output:
f2(np.arange(8, dtype=jnp.int32))
jax.effects_barrier()
self.assertEqual(output(), "140\n")
def test_nested_jit_debug_print(self):
def f(x):
debug_print("{}", x)
return x
with jtu.capture_stdout() as output:
jax.jit(jax.jit(f))(jnp.arange(8))
jax.effects_barrier()
self.assertEqual(output(), "[0 1 2 3 4 5 6 7]\n")
def test_unordered_print_of_jit_of_while(self):
def f(x):
def cond(carry):
i, *_ = carry
return i < 5
def body(carry):
i, x = carry
debug_print("{}", x, ordered=False)
x = x + 1
return (i + 1, x)
return lax.while_loop(cond, body, (0, x))[1]
mesh = jax.sharding.Mesh(np.array(jax.devices()), ['dev'])
spec = jax.sharding.NamedSharding(mesh, jax.sharding.PartitionSpec('dev'))
f = jax.jit(f, in_shardings=spec, out_shardings=spec)
with jax.set_mesh(mesh):
with jtu.capture_stdout() as output:
f(np.arange(8, dtype=jnp.int32))
jax.effects_barrier()
self.assertEqual(output(),
"[0 1 2 3 4 5 6 7]\n"
"[1 2 3 4 5 6 7 8]\n"
"[2 3 4 5 6 7 8 9]\n"
"[ 3 4 5 6 7 8 9 10]\n"
"[ 4 5 6 7 8 9 10 11]\n")
def test_unordered_print_works_in_pmap_of_while(self):
if jax.device_count() < 2:
raise unittest.SkipTest("Test requires >= 2 devices.")
@jax.pmap
def f(x):
def cond(x):
return x < 3
def body(x):
debug_print("hello: {}", x, ordered=False)
return x + 1
return lax.while_loop(cond, body, x)
with jtu.capture_stdout() as output:
f(jnp.arange(2))
jax.effects_barrier()
self._assertLinesEqual(
output(), "hello: 0\nhello: 1\nhello: 2\n"
"hello: 1\nhello: 2\n")
def test_incorrectly_formatted_string(self):
@jax.jit
def f(x):
debug_print("hello: {x}", x)
return x
with self.assertRaises(KeyError):
f(jnp.arange(2))
jax.effects_barrier()
@jax.jit
def f(x):
debug_print("hello: {}", x=x)
return x
with self.assertRaises(IndexError):
f(jnp.arange(2))
jax.effects_barrier()
def test_format_string_errors_with_unused_args(self):
@jax.jit
def f(x):
debug_print("hello: {x}", x=x, y=x)
return x
with self.assertRaisesRegex(ValueError, "Unused keyword arguments"):
f(jnp.arange(2))
jax.effects_barrier()
@jax.jit
def g(x):
debug_print("hello", x)
return x
with self.assertRaisesRegex(ValueError, "Unused positional arguments"):
g(jnp.arange(2))
jax.effects_barrier()
def test_accidental_fstring(self):
@jax.jit
def f(x):
debug_print(f"hello: {x}", x=x)
return x
with self.assertRaisesRegex(ValueError, "You may be passing an f-string"):
f(jnp.arange(2))
jax.effects_barrier()
@jtu.thread_unsafe_test_class() # logging isn't thread-safe
| DebugPrintParallelTest |
python | eventlet__eventlet | tests/mock.py | {
"start": 12711,
"end": 12842
} | class ____:
_mock_return_value = DEFAULT
_mock_side_effect = None
def __init__(self, *args, **kwargs):
pass
| Base |
python | PrefectHQ__prefect | src/prefect/client/schemas/responses.py | {
"start": 4243,
"end": 4473
} | class ____(PrefectBaseModel, Generic[T]):
"""
A container for the output of state orchestration.
"""
state: Optional[objects.State[T]]
status: SetStateStatus
details: StateResponseDetails
| OrchestrationResult |
python | kamyu104__LeetCode-Solutions | Python/find-array-given-subset-sums.py | {
"start": 4125,
"end": 5679
} | class ____(object):
def recoverArray(self, n, sums):
"""
:type n: int
:type sums: List[int]
:rtype: List[int]
"""
dp = {k: v for k, v in collections.Counter(sums).iteritems()}
total = reduce(operator.ior, dp.itervalues(), 0)
basis = total&-total # find rightmost bit 1
if basis > 1:
for k in dp.iterkeys():
dp[k] //= basis
sorted_sums = sorted(dp.iterkeys()) # Time: O(2^n * log(2^n)) = O(n * 2^n)
shift = 0
result = [0]*(basis.bit_length()-1)
for _ in xrange(n-len(result)): # log(2^n) times, each time costs O(2^(n-len(result))), Total Time: O(2^n)
new_dp = {}
new_sorted_sums = []
new_shift = sorted_sums[0]-sorted_sums[1]
assert(new_shift < 0)
for x in sorted_sums:
if not dp[x]:
continue
dp[x-new_shift] -= dp[x]
new_dp[x-new_shift] = dp[x]
new_sorted_sums.append(x-new_shift)
dp = new_dp
sorted_sums = new_sorted_sums
if shift in dp: # contain 0, choose this side
result.append(new_shift)
else: # contain no 0, choose another side and shift 0 offset
result.append(-new_shift)
shift -= new_shift
return result
# Time: O(n * 2^n), len(sums) = 2^n
# Space: O(2^n)
import collections
# optimized from solution4 (not using OrderedDict), runtime: 1024 ms
| Solution3 |
python | Textualize__textual | docs/examples/tutorial/stopwatch.py | {
"start": 233,
"end": 1468
} | class ____(Digits):
"""A widget to display elapsed time."""
start_time = reactive(monotonic)
time = reactive(0.0)
total = reactive(0.0)
def on_mount(self) -> None:
"""Event handler called when widget is added to the app."""
self.update_timer = self.set_interval(1 / 60, self.update_time, pause=True)
def update_time(self) -> None:
"""Method to update time to current."""
self.time = self.total + (monotonic() - self.start_time)
def watch_time(self, time: float) -> None:
"""Called when the time attribute changes."""
minutes, seconds = divmod(time, 60)
hours, minutes = divmod(minutes, 60)
self.update(f"{hours:02,.0f}:{minutes:02.0f}:{seconds:05.2f}")
def start(self) -> None:
"""Method to start (or resume) time updating."""
self.start_time = monotonic()
self.update_timer.resume()
def stop(self):
"""Method to stop the time display updating."""
self.update_timer.pause()
self.total += monotonic() - self.start_time
self.time = self.total
def reset(self):
"""Method to reset the time display to zero."""
self.total = 0
self.time = 0
| TimeDisplay |
python | getsentry__sentry | tests/sentry/grouping/test_enhancer_dart_flutter_javascript.py | {
"start": 744,
"end": 3001
} | class ____(_BaseJavaScriptDartFlutterEnhancerTest):
"""Tests that are expected to run with platform="javascript" only."""
# ------------------------------------------------------------------
# Dart SDK
# ------------------------------------------------------------------
def test_dart_sdk_not_in_app(self) -> None:
"""All frames coming from the Dart SDK must be out-of-app."""
sdk_paths = [
"org-dartlang-sdk:///sdk/lib/core/object.dart",
"org-dartlang-sdk:///sdk/lib/async/future.dart",
"org-dartlang-sdk:///sdk/lib/collection/list.dart",
"org-dartlang-sdk:///flutter/lib/ui/window.dart",
]
for path in sdk_paths:
frame = {"abs_path": path}
result = self.apply_rules(frame)
assert result["in_app"] is False
# ------------------------------------------------------------------
# Flutter framework (compiled to JS)
# ------------------------------------------------------------------
def test_flutter_packages_not_in_app(self) -> None:
"""Flutter framework modules compiled to JS (dart2js) are out-of-app."""
frame = {"module": "packages/flutter/src/widgets/framework.dart"}
result = self.apply_rules(frame)
assert result["in_app"] is False
# Another example module
frame = {"module": "packages/flutter/src/widgets/container.dart"}
result = self.apply_rules(frame)
assert result["in_app"] is False
# ------------------------------------------------------------------
# Ensure native-specific rules do not leak into JS
# ------------------------------------------------------------------
def test_android_app_rule_does_not_apply_on_javascript(self) -> None:
"""The APK rule is native-specific and must not affect JS frames."""
frame = {
"package": "/data/app/com.example.myapp-1/base.apk",
"abs_path": "package:myapp/main.dart",
}
result = self.apply_rules(frame)
# The JS family definitions have no such rule → in_app should be untouched
assert result.get("in_app") is None, f"{frame['abs_path']} should be untouched"
| TestDartFlutterEnhancerJavaScript |
python | kamyu104__LeetCode-Solutions | Python/check-if-a-string-is-a-valid-sequence-from-root-to-leaves-path-in-a-binary-tree.py | {
"start": 66,
"end": 236
} | class ____(object):
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
# bfs solution
| TreeNode |
python | PrefectHQ__prefect | tests/runner/test_runner.py | {
"start": 109759,
"end": 110903
} | class ____:
async def test_healthcheck_fails_as_expected(self):
runner = Runner()
runner.last_polled = now("UTC") - datetime.timedelta(minutes=5)
health_check = perform_health_check(runner)
assert health_check().status_code == status.HTTP_503_SERVICE_UNAVAILABLE
runner.last_polled = now("UTC")
assert health_check().status_code == status.HTTP_200_OK
@pytest.mark.skip("This test is flaky and needs to be fixed")
@pytest.mark.parametrize("enabled", [True, False])
async def test_webserver_start_flag(self, enabled: bool):
with temporary_settings(updates={PREFECT_RUNNER_SERVER_ENABLE: enabled}):
with mock.patch("prefect.runner.runner.threading.Thread") as mocked_thread:
runner = Runner()
await runner.start(run_once=True)
if enabled:
mocked_thread.assert_called_once()
mocked_thread.return_value.start.assert_called_once()
if not enabled:
mocked_thread.assert_not_called()
mocked_thread.return_value.start.assert_not_called()
| TestServer |
python | Farama-Foundation__Gymnasium | gymnasium/envs/mujoco/humanoidstandup_v4.py | {
"start": 268,
"end": 2522
} | class ____(MujocoEnv, utils.EzPickle):
metadata = {
"render_modes": [
"human",
"rgb_array",
"depth_array",
"rgbd_tuple",
],
"render_fps": 67,
}
def __init__(self, **kwargs):
observation_space = Box(
low=-np.inf, high=np.inf, shape=(376,), dtype=np.float64
)
MujocoEnv.__init__(
self,
"humanoidstandup.xml",
5,
observation_space=observation_space,
default_camera_config=DEFAULT_CAMERA_CONFIG,
**kwargs,
)
utils.EzPickle.__init__(self, **kwargs)
def _get_obs(self):
data = self.data
return np.concatenate(
[
data.qpos.flat[2:],
data.qvel.flat,
data.cinert.flat,
data.cvel.flat,
data.qfrc_actuator.flat,
data.cfrc_ext.flat,
]
)
def step(self, a):
self.do_simulation(a, self.frame_skip)
pos_after = self.data.qpos[2]
data = self.data
uph_cost = (pos_after - 0) / self.model.opt.timestep
quad_ctrl_cost = 0.1 * np.square(data.ctrl).sum()
quad_impact_cost = 0.5e-6 * np.square(data.cfrc_ext).sum()
quad_impact_cost = min(quad_impact_cost, 10)
reward = uph_cost - quad_ctrl_cost - quad_impact_cost + 1
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return (
self._get_obs(),
reward,
False,
False,
dict(
reward_linup=uph_cost,
reward_quadctrl=-quad_ctrl_cost,
reward_impact=-quad_impact_cost,
),
)
def reset_model(self):
c = 0.01
self.set_state(
self.init_qpos + self.np_random.uniform(low=-c, high=c, size=self.model.nq),
self.init_qvel
+ self.np_random.uniform(
low=-c,
high=c,
size=self.model.nv,
),
)
return self._get_obs()
| HumanoidStandupEnv |
python | pandas-dev__pandas | pandas/tests/arithmetic/test_datetime64.py | {
"start": 13586,
"end": 29396
} | class ____:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = date_range("2020-01-01", periods=10)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
pytest.skip(f"{type(other).__name__} is not tz aware")
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
tm.assert_equal(NaT == rhs, expected)
expected = np.array([True, True, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs != NaT, expected)
tm.assert_equal(NaT != lhs, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs < NaT, expected)
tm.assert_equal(NaT > lhs, expected)
def test_dti_cmp_nat_behaves_like_float_cmp_nan(self):
fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0])
fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0])
didx1 = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
didx2 = DatetimeIndex(
["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"]
)
darr = np.array(
[
np.datetime64("2014-02-01 00:00"),
np.datetime64("2014-03-01 00:00"),
np.datetime64("nat"),
np.datetime64("nat"),
np.datetime64("2014-06-01 00:00"),
np.datetime64("2014-07-01 00:00"),
]
)
cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)]
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, np.nan), (didx1, NaT)]:
result = idx1 < val
expected = np.array([False, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, True, True, True, True])
tm.assert_numpy_array_equal(result, expected)
# Check pd.NaT is handles as the same as np.nan
with tm.assert_produces_warning(None):
for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]:
result = idx1 < val
expected = np.array([True, False, False, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 > val
expected = np.array([False, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= val
expected = np.array([True, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 >= val
expected = np.array([False, False, True, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == val
expected = np.array([False, False, True, False, False, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != val
expected = np.array([True, True, False, True, True, True])
tm.assert_numpy_array_equal(result, expected)
def test_comparison_tzawareness_compat(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
box = box_with_array
dr = date_range("2016-01-01", periods=6, unit="ns")
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box)
dz = tm.box_expected(dz, box)
if box is pd.DataFrame:
tolist = lambda x: x.astype(object).values.tolist()[0]
else:
tolist = list
if op not in [operator.eq, operator.ne]:
msg = (
r"Invalid comparison between dtype=datetime64\[ns.*\] "
"and (Timestamp|DatetimeArray|list|ndarray)"
)
with pytest.raises(TypeError, match=msg):
op(dr, dz)
with pytest.raises(TypeError, match=msg):
op(dr, tolist(dz))
with pytest.raises(TypeError, match=msg):
op(dr, np.array(tolist(dz), dtype=object))
with pytest.raises(TypeError, match=msg):
op(dz, dr)
with pytest.raises(TypeError, match=msg):
op(dz, tolist(dr))
with pytest.raises(TypeError, match=msg):
op(dz, np.array(tolist(dr), dtype=object))
# The aware==aware and naive==naive comparisons should *not* raise
assert np.all(dr == dr)
assert np.all(dr == tolist(dr))
assert np.all(tolist(dr) == dr)
assert np.all(np.array(tolist(dr), dtype=object) == dr)
assert np.all(dr == np.array(tolist(dr), dtype=object))
assert np.all(dz == dz)
assert np.all(dz == tolist(dz))
assert np.all(tolist(dz) == dz)
assert np.all(np.array(tolist(dz), dtype=object) == dz)
assert np.all(dz == np.array(tolist(dz), dtype=object))
def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array):
# GH#18162
op = comparison_op
dr = date_range("2016-01-01", periods=6, unit="ns")
dz = dr.tz_localize("US/Pacific")
dr = tm.box_expected(dr, box_with_array)
dz = tm.box_expected(dz, box_with_array)
# Check comparisons against scalar Timestamps
ts = Timestamp("2000-03-14 01:59")
ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam")
assert np.all(dr > ts)
msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp"
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dr, ts_tz)
assert np.all(dz > ts_tz)
if op not in [operator.eq, operator.ne]:
with pytest.raises(TypeError, match=msg):
op(dz, ts)
if op not in [operator.eq, operator.ne]:
# GH#12601: Check comparison against Timestamps and DatetimeIndex
with pytest.raises(TypeError, match=msg):
op(ts, dz)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
# Bug in NumPy? https://github.com/numpy/numpy/issues/13841
# Raising in __eq__ will fallback to NumPy, which warns, fails,
# then re-raises the original exception. So we just need to ignore.
@pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning")
def test_scalar_comparison_tzawareness(
self, comparison_op, other, tz_aware_fixture, box_with_array
):
op = comparison_op
tz = tz_aware_fixture
dti = date_range("2016-01-01", periods=2, tz=tz, unit="ns")
dtarr = tm.box_expected(dti, box_with_array)
xbox = get_upcast_box(dtarr, other, True)
if op in [operator.eq, operator.ne]:
exbool = op is operator.ne
expected = np.array([exbool, exbool], dtype=bool)
expected = tm.box_expected(expected, xbox)
result = op(dtarr, other)
tm.assert_equal(result, expected)
result = op(other, dtarr)
tm.assert_equal(result, expected)
else:
msg = (
r"Invalid comparison between dtype=datetime64\[ns, .*\] "
f"and {type(other).__name__}"
)
with pytest.raises(TypeError, match=msg):
op(dtarr, other)
with pytest.raises(TypeError, match=msg):
op(other, dtarr)
def test_nat_comparison_tzawareness(self, comparison_op):
# GH#19276
# tzaware DatetimeIndex should not raise when compared to NaT
op = comparison_op
dti = DatetimeIndex(
["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"]
)
expected = np.array([op == operator.ne] * len(dti))
result = op(dti, NaT)
tm.assert_numpy_array_equal(result, expected)
result = op(dti.tz_localize("US/Pacific"), NaT)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_str(self, tz_naive_fixture):
# GH#22074
# regardless of tz, we expect these comparisons are valid
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
other = "1/1/2000"
result = rng == other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng != other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng < other
expected = np.array([False] * 10)
tm.assert_numpy_array_equal(result, expected)
result = rng <= other
expected = np.array([True] + [False] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng > other
expected = np.array([False] + [True] * 9)
tm.assert_numpy_array_equal(result, expected)
result = rng >= other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
def test_dti_cmp_list(self):
rng = date_range("1/1/2000", periods=10)
result = rng == list(rng)
expected = rng == rng
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
pd.timedelta_range("1D", periods=10),
pd.timedelta_range("1D", periods=10).to_series(),
pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"),
],
ids=lambda x: type(x).__name__,
)
def test_dti_cmp_tdi_tzawareness(self, other):
# GH#22074
# reversion test that we _don't_ call _assert_tzawareness_compat
# when comparing against TimedeltaIndex
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
result = dti == other
expected = np.array([False] * 10)
if isinstance(other, Series):
tm.assert_series_equal(result, Series(expected, index=other.index))
else:
tm.assert_numpy_array_equal(result, expected)
result = dti != other
expected = np.array([True] * 10)
if isinstance(other, Series):
tm.assert_series_equal(result, Series(expected, index=other.index))
else:
tm.assert_numpy_array_equal(result, expected)
msg = "Invalid comparison between"
with pytest.raises(TypeError, match=msg):
dti < other
with pytest.raises(TypeError, match=msg):
dti <= other
with pytest.raises(TypeError, match=msg):
dti > other
with pytest.raises(TypeError, match=msg):
dti >= other
def test_dti_cmp_object_dtype(self):
# GH#22074
dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo")
other = dti.astype("O")
result = dti == other
expected = np.array([True] * 10)
tm.assert_numpy_array_equal(result, expected)
other = dti.tz_localize(None)
result = dti != other
tm.assert_numpy_array_equal(result, expected)
other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5)
result = dti == other
expected = np.array([True] * 5 + [False] * 5)
tm.assert_numpy_array_equal(result, expected)
msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'"
with pytest.raises(TypeError, match=msg):
dti >= other
# ------------------------------------------------------------------
# Arithmetic
| TestDatetimeIndexComparisons |
python | kamyu104__LeetCode-Solutions | Python/decoded-string-at-index.py | {
"start": 29,
"end": 518
} | class ____(object):
def decodeAtIndex(self, S, K):
"""
:type S: str
:type K: int
:rtype: str
"""
i = 0
for c in S:
if c.isdigit():
i *= int(c)
else:
i += 1
for c in reversed(S):
K %= i
if K == 0 and c.isalpha():
return c
if c.isdigit():
i /= int(c)
else:
i -= 1
| Solution |
python | ansible__ansible | test/lib/ansible_test/_internal/dev/container_probe.py | {
"start": 714,
"end": 921
} | class ____(enum.Enum):
"""The expected state of a cgroup related mount point."""
HOST = enum.auto()
PRIVATE = enum.auto()
SHADOWED = enum.auto()
@dataclasses.dataclass(frozen=True)
| CGroupState |
python | walkccc__LeetCode | solutions/3563. Lexicographically Smallest String After Adjacent Removals/3563.py | {
"start": 0,
"end": 818
} | class ____:
def lexicographicallySmallestString(self, s: str) -> str:
n = len(s)
# dp[i][j]: the lexicographically smallest string by removing adjacent
# letters from s[i..j)
dp = [[''] * (n + 1) for _ in range(n + 1)]
for d in range(1, n + 1):
for i in range(n - d + 1):
j = i + d
# 1. Keep s[i].
minString = s[i] + dp[i + 1][j]
# 2. Remove s[i] and s[k] if possible.
for k in range(i + 1, j):
if self._isConsecutive(s[i], s[k]) and dp[i + 1][k] == '':
candidate = dp[k + 1][j]
if candidate < minString:
minString = candidate
dp[i][j] = minString
return dp[0][n]
def _isConsecutive(self, a: str, b: str) -> bool:
return abs(ord(a) - ord(b)) == 1 or abs(ord(a) - ord(b)) == 25
| Solution |
python | ray-project__ray | python/ray/serve/_private/proxy_request_response.py | {
"start": 2902,
"end": 5441
} | class ____(ProxyRequest):
"""ProxyRequest implementation to wrap gRPC request protobuf and metadata."""
def __init__(
self,
request_proto: Any,
context: grpc._cython.cygrpc._ServicerContext,
service_method: str,
stream: bool,
):
self._request_proto = request_proto
self.context = context
self.service_method = service_method
self.stream = stream
self.app_name = ""
self.request_id = None
self.method_name = "__call__"
self.multiplexed_model_id = DEFAULT.VALUE
# ray_serve_grpc_context is a class implemented by us to be able to serialize
# the object and pass it into the deployment.
self.ray_serve_grpc_context = RayServegRPCContext(context)
self.setup_variables()
def setup_variables(self):
if not self.is_route_request and not self.is_health_request:
service_method_split = self.service_method.split("/")
self.method_name = service_method_split[-1]
for key, value in self.context.invocation_metadata():
if key == "application":
self.app_name = value
elif key == "request_id":
self.request_id = value
elif key == "multiplexed_model_id":
self.multiplexed_model_id = value
@property
def request_type(self) -> str:
return "grpc"
@property
def method(self) -> str:
return self.service_method
@property
def route_path(self) -> str:
return self.app_name
@property
def is_route_request(self) -> bool:
return self.service_method == "/ray.serve.RayServeAPIService/ListApplications"
@property
def is_health_request(self) -> bool:
return self.service_method == "/ray.serve.RayServeAPIService/Healthz"
def send_request_id(self, request_id: str):
# Setting the trailing metadata on the ray_serve_grpc_context object, so it's
# not overriding the ones set from the user and will be sent back to the
# client altogether.
self.ray_serve_grpc_context.set_trailing_metadata([("request_id", request_id)])
def serialized_replica_arg(self) -> bytes:
# NOTE(edoakes): it's important that the request is sent as raw bytes to
# skip the Ray cloudpickle serialization codepath for performance.
return pickle.dumps(gRPCRequest(user_request_proto=self._request_proto))
@dataclass(frozen=True)
| gRPCProxyRequest |
python | pyodide__pyodide | src/py/_pyodide/_core_docs.py | {
"start": 28069,
"end": 31604
} | class ____(JsAsyncIterable[T_co], Generic[T_co, T_contra, V_co]):
"""A JavaScript :js:class:`AsyncGenerator`
A JavaScript object is treated as an async generator if it's
:js:data:`Symbol.toStringTag` is ``"AsyncGenerator"``. Most likely this will
be because it is a true async generator produced by the JavaScript runtime,
but it may be a custom object trying hard to pretend to be an async
generator. It should have :js:meth:`~AsyncGenerator.next`,
:js:meth:`~AsyncGenerator.return`, and :js:meth:`~AsyncGenerator.throw`
methods.
"""
_js_type_flags = ["IS_ASYNC_GENERATOR"]
def __anext__(self) -> Awaitable[T_co]:
raise NotImplementedError
def __aiter__(self) -> "JsAsyncGenerator[T_co, T_contra, V_co]":
raise NotImplementedError
def asend(self, value: T_contra, /) -> Awaitable[T_co]:
"""Resumes the execution and "sends" a value into the async generator
function.
The ``value`` argument becomes the result of the current yield
expression. The awaitable returned by the ``asend()`` method will return
the next value yielded by the generator or raises
:py:exc:`StopAsyncIteration` if the asynchronous generator returns. If the
generator returned a value, this value is discarded (because in Python
async generators cannot return a value).
When ``asend()`` is called to start the generator, the argument will be
ignored. Unlike in Python, we cannot detect that the generator hasn't
started yet, and no error will be thrown if the argument of a
not-started generator is not ``None``.
"""
raise NotImplementedError
@overload
def athrow(
self,
typ: type[BaseException],
val: BaseException | object = ...,
tb: TracebackType | None = ...,
/,
) -> Awaitable[T_co]: ...
@overload
def athrow(
self,
typ: BaseException,
val: None = ...,
tb: TracebackType | None = ...,
/,
) -> Awaitable[T_co]: ...
@docs_argspec("(self, error: BaseException, /) -> T_co")
def athrow(self, value: Any, *args: Any) -> Awaitable[T_co]:
"""Resumes the execution and raises an exception at the point where the
generator was paused.
The awaitable returned by ``athrow()`` method will return the next value
yielded by the generator or raises :py:exc:`StopAsyncIteration` if the
asynchronous generator returns. If the generator returned a value, this
value is discarded (because in Python async generators cannot return a
value). If the generator function does not catch the passed-in
exception, or raises a different exception, then that exception
propagates to the caller.
"""
raise NotImplementedError
def aclose(self) -> Awaitable[None]:
"""Raises a :py:exc:`GeneratorExit` at the point where the generator
function was paused.
If the generator function then exits gracefully, is already closed, or
raises :py:exc:`GeneratorExit` (by not catching the exception),
``aclose()`` returns to its caller. If the generator yields a value, a
:py:exc:`RuntimeError` is raised. If the generator raises any other
exception, it is propagated to the caller. ``aclose()`` does nothing if
the generator has already exited due to an exception or normal exit.
"""
raise NotImplementedError
| JsAsyncGenerator |
python | tensorflow__tensorflow | tensorflow/python/distribute/distribute_utils.py | {
"start": 16565,
"end": 19774
} | class ____(threading.local):
"""Class for maintaining thread local state for caching scope."""
def __init__(self):
super(CachingScopeLocal, self).__init__()
self.new_cache_scope_count = 0
self.cache_scope_exited_count = 0
def enter_scope(self):
self.new_cache_scope_count += 1
def exit_scope(self):
self.cache_scope_exited_count += 1
def in_caching_scope(self):
return self.new_cache_scope_count > self.cache_scope_exited_count
caching_scope_local = CachingScopeLocal()
@contextlib.contextmanager
def cache_variable_reads():
"""Scope for caching variable reads for AggregatingVariable.
The variable reads for AggregatingVariable inside this scope are cached. i.e.
the first read of variable reads the value from possibly remote handle, but
subsequent reads are returned using local cached value.
For example:
strategy = ParameterServerStrategy...
with strategy.scope():
# Variable v is of AggregatingVariable type with actual variable residing
# on PS.
v = tf.Variable(1.0)
with distribute_utils.cache_variable_reads():
v.read_value() # Reads value 1.0
v.assign(constant_op.constant(5.0)) # v changes to 5.0
t1 = v.read_value()
t2 = v.read_value() # Both t1 & t2 return cached value 1.0 from local CPU.
Notes about cache_variable_reads scope:
1. Nesting of scope cache_variable_reads() is not supported
2. And when caching scope is enabled, the thread enabling the cache and
mirrored_run._MirroredReplicaThread threads spawned from it will have
caching enabled.
Yields:
A context for caching variables.
"""
try:
if caching_scope_local.in_caching_scope():
# There is nested cache scope, which is not supported.
raise ValueError("cache_variable_reads scope cannot be nested")
caching_scope_local.enter_scope()
yield
finally:
caching_scope_local.exit_scope()
# The following mapping indicates the policy that you must use for a given
# variable `synchronization` and `aggregation` pair.
# OnWritePolicy is used for:
# (synchronization=Auto, aggregation=NONE,SUM,MEAN,ONLY_FIRST_REPLICA)
# (synchronization=ON_WRITE, aggregation=NONE,SUM,MEAN,ONLY_FIRST_REPLICA)
# OnReadPolicy is used for:
# (synchronization=ON_READ, aggregation=NONE,SUM,MEAN,ONLY_FIRST_REPLICA)
VARIABLE_POLICY_MAPPING = {
vs.VariableSynchronization.ON_WRITE: values_lib.OnWritePolicy,
vs.VariableSynchronization.ON_READ: values_lib.OnReadPolicy,
}
VARIABLE_CLASS_MAPPING = {
"VariableClass": values_lib.DistributedVariable,
vs.VariableSynchronization.ON_WRITE: values_lib.MirroredVariable,
vs.VariableSynchronization.ON_READ: values_lib.SyncOnReadVariable,
}
TPU_VARIABLE_POLICY_MAPPING = {
vs.VariableSynchronization.ON_WRITE: tpu_values_lib.TPUOnWritePolicy,
vs.VariableSynchronization.ON_READ: tpu_values_lib.TPUOnReadPolicy,
}
TPU_VARIABLE_CLASS_MAPPING = {
"VariableClass": tpu_values_lib.TPUDistributedVariable,
"LazyVariableClass": tpu_values_lib.TPULazyDistributedVariable,
vs.VariableSynchronization.ON_WRITE: tpu_values_lib.TPUMirroredVariable,
vs.VariableSynchronization.ON_READ: tpu_values_lib.TPUSyncOnReadVariable,
}
| CachingScopeLocal |
python | ApeWorX__ape | src/ape/exceptions.py | {
"start": 15548,
"end": 15656
} | class ____(ApeException):
"""
Raised when a problem occurs when using providers.
"""
| ProviderError |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_requests/oauth_request_builder.py | {
"start": 196,
"end": 1612
} | class ____(AmazonAdsRequestBuilder):
@classmethod
def oauth_endpoint(cls, client_id: str, client_secred: str, refresh_token: str) -> "OAuthRequestBuilder":
return cls("auth/o2/token").with_client_id(client_id).with_client_secret(client_secred).with_refresh_token(refresh_token)
def __init__(self, resource: str) -> None:
self._resource: str = resource
self._client_id: str = None
self._client_secret: str = None
self._refresh_token: str = None
@property
def url(self) -> str:
return f"{BASE_OAUTH_URL}/{self._resource}"
@property
def query_params(self) -> Dict[str, Any]:
return {}
@property
def headers(self) -> Dict[str, Any]:
return {}
@property
def request_body(self) -> Optional[str]:
return (
f"grant_type=refresh_token&client_id={self._client_id}&client_secret={self._client_secret}&refresh_token={self._refresh_token}"
)
def with_client_id(self, client_id: str) -> "OAuthRequestBuilder":
self._client_id: str = client_id
return self
def with_client_secret(self, client_secret: str) -> "OAuthRequestBuilder":
self._client_secret: str = client_secret
return self
def with_refresh_token(self, refresh_token: str) -> "OAuthRequestBuilder":
self._refresh_token: str = refresh_token
return self
| OAuthRequestBuilder |
python | lepture__mistune | tests/test_directives.py | {
"start": 1082,
"end": 1211
} | class ____(TableOfContents):
def generate_heading_id(self, token, i):
return "t-" + str(i + 1)
| CustomizeTableOfContents |
python | imageio__imageio | imageio/core/format.py | {
"start": 21366,
"end": 30885
class ____(object):
    """
    The FormatManager is a singleton plugin factory.

    The format manager supports getting a format object using indexing (by
    format name or extension). When used as an iterator, this object
    yields all registered format objects.

    See also :func:`.help`.
    """

    @property
    def _formats(self):
        # Only legacy-style plugin configs whose backing module imports
        # successfully count as "available".
        available_formats = list()
        for config in known_plugins.values():
            with contextlib.suppress(ImportError):
                # if an exception is raised, then format not installed
                if config.is_legacy and config.format is not None:
                    available_formats.append(config)
        return available_formats

    def __repr__(self):
        return f"<imageio.FormatManager with {len(self._formats)} registered formats>"

    def __iter__(self):
        # Yield Format objects, not the PluginConfig wrappers.
        return iter(x.format for x in self._formats)

    def __len__(self):
        return len(self._formats)

    def __str__(self):
        # One human-readable line per available format:
        # "NAME - description [extensions]".
        ss = []
        for config in self._formats:
            ext = config.legacy_args["extensions"]
            desc = config.legacy_args["description"]
            s = f"{config.name} - {desc} [{ext}]"
            ss.append(s)
        return "\n".join(ss)

    def __getitem__(self, name):
        """Look up a Format by name, extension, or existing file path.

        Deprecated: emits a DeprecationWarning; use ``iio.imopen`` instead.

        Raises
        ------
        ValueError
            If *name* is not a string or is empty.
        ImportError
            If the matching format's backing module is not installed.
        """
        warnings.warn(
            "The usage of `FormatManager` is deprecated and it will be "
            "removed in Imageio v3. Use `iio.imopen` instead.",
            DeprecationWarning,
            stacklevel=2,
        )

        if not isinstance(name, str):
            raise ValueError(
                "Looking up a format should be done by name or by extension."
            )

        if name == "":
            raise ValueError("No format matches the empty string.")

        # Test if name is existing file
        if Path(name).is_file():
            # legacy compatibility - why test reading here??
            try:
                return imopen(name, "r", legacy_mode=True)._format
            except ValueError:
                # no plugin can read the file
                pass

        config = _get_config(name.upper())
        try:
            return config.format
        except ImportError:
            raise ImportError(
                f"The `{config.name}` format is not installed. "
                f"Use `pip install imageio[{config.install_name}]` to install it."
            )

    def sort(self, *names):
        """sort(name1, name2, name3, ...)

        Sort the formats based on zero or more given names; a format with
        a name that matches one of the given names will take precedence
        over other formats. A match means an equal name, or ending with
        that name (though the former counts higher). Case insensitive.

        Format preference will match the order of the given names: using
        ``sort('TIFF', '-FI', '-PIL')`` would prefer the FreeImage formats
        over the Pillow formats, but prefer TIFF even more. Each time
        this is called, the starting point is the default format order,
        and calling ``sort()`` with no arguments will reset the order.

        Be aware that using the function can affect the behavior of
        other code that makes use of imageio.

        Also see the ``IMAGEIO_FORMAT_ORDER`` environment variable.
        """
        warnings.warn(
            "`FormatManager` is deprecated and it will be removed in ImageIO v3."
            " Migrating `FormatManager.sort` depends on your use-case:\n"
            "\t- modify `iio.config.known_plugins` to specify the search order for "
            "unrecognized formats.\n"
            "\t- modify `iio.config.known_extensions[<extension>].priority`"
            " to control a specific extension.",
            DeprecationWarning,
            stacklevel=2,
        )

        # Check and sanitize input
        for name in names:
            if not isinstance(name, str):
                raise TypeError("formats.sort() accepts only string names.")
            if any(c in name for c in ".,"):
                raise ValueError(
                    "Names given to formats.sort() should not "
                    "contain dots `.` or commas `,`."
                )

        should_reset = len(names) == 0
        if should_reset:
            names = _original_order

        sane_names = [name.strip().upper() for name in names if name != ""]

        # enforce order for every extension that uses it
        flat_extensions = [
            ext for ext_list in known_extensions.values() for ext in ext_list
        ]
        for extension in flat_extensions:
            if should_reset:
                extension.reset()
                continue
            # Walk the names in reverse so the first requested name ends up
            # with the highest priority after repeated insert(0, ...).
            for name in reversed(sane_names):
                for plugin in [x for x in extension.default_priority]:
                    if plugin.endswith(name):
                        extension.priority.remove(plugin)
                        extension.priority.insert(0, plugin)

        # Rebuild known_plugins so matched names come first while the
        # relative order of the remaining plugins is preserved.
        old_order = known_plugins.copy()
        known_plugins.clear()
        for name in sane_names:
            plugin = old_order.pop(name, None)
            if plugin is not None:
                known_plugins[name] = plugin
        known_plugins.update(old_order)

    def add_format(self, iio_format, overwrite=False):
        """add_format(format, overwrite=False)

        Register a format, so that imageio can use it. If a format with the
        same name already exists, an error is raised, unless overwrite is True,
        in which case the current format is replaced.
        """
        warnings.warn(
            "`FormatManager` is deprecated and it will be removed in ImageIO v3."
            "To migrate `FormatManager.add_format` add the plugin directly to "
            "`iio.config.known_plugins`.",
            DeprecationWarning,
            stacklevel=2,
        )

        if not isinstance(iio_format, Format):
            raise ValueError("add_format needs argument to be a Format object")
        elif not overwrite and iio_format.name in self.get_format_names():
            raise ValueError(
                f"A Format named {iio_format.name} is already registered, use"
                " `overwrite=True` to replace."
            )

        # Wrap the Format in a legacy-style PluginConfig record.
        config = PluginConfig(
            name=iio_format.name.upper(),
            class_name=iio_format.__class__.__name__,
            module_name=iio_format.__class__.__module__,
            is_legacy=True,
            install_name="unknown",
            legacy_args={
                "name": iio_format.name,
                "description": iio_format.description,
                "extensions": " ".join(iio_format.extensions),
                "modes": iio_format.modes,
            },
        )

        known_plugins[config.name] = config

        for extension in iio_format.extensions:
            # be conservative and always treat it as a unique file format
            ext = FileExtension(
                extension=extension,
                priority=[config.name],
                name="Unique Format",
                description="A format inserted at runtime."
                f" It is being read by the `{config.name}` plugin.",
            )
            known_extensions.setdefault(extension, list()).append(ext)

    def search_read_format(self, request):
        """search_read_format(request)

        Search a format that can read a file according to the given request.
        Returns None if no appropriate format was found. (used internally)
        """
        try:
            # in legacy_mode imopen returns a LegacyPlugin
            return imopen(request, request.mode.io_mode, legacy_mode=True)._format
        except AttributeError:
            # A v3 plugin was selected; it has no `_format` attribute.
            warnings.warn(
                "ImageIO now uses a v3 plugin when reading this format."
                " Please migrate to the v3 API (preferred) or use imageio.v2.",
                DeprecationWarning,
                stacklevel=2,
            )
            return None
        except ValueError:
            # no plugin can read this request
            # but the legacy API doesn't raise
            return None

    def search_write_format(self, request):
        """search_write_format(request)

        Search a format that can write a file according to the given request.
        Returns None if no appropriate format was found. (used internally)
        """
        try:
            # in legacy_mode imopen returns a LegacyPlugin
            return imopen(request, request.mode.io_mode, legacy_mode=True)._format
        except AttributeError:
            # A v3 plugin was selected; it has no `_format` attribute.
            warnings.warn(
                "ImageIO now uses a v3 plugin when writing this format."
                " Please migrate to the v3 API (preferred) or use imageio.v2.",
                DeprecationWarning,
                stacklevel=2,
            )
            return None
        except ValueError:
            # no plugin can write this request
            # but the legacy API doesn't raise
            return None

    def get_format_names(self):
        """Get the names of all registered formats."""
        warnings.warn(
            "`FormatManager` is deprecated and it will be removed in ImageIO v3."
            "To migrate `FormatManager.get_format_names` use `iio.config.known_plugins.keys()` instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return [f.name for f in self._formats]

    def show(self):
        """Show a nicely formatted list of available formats"""
        print(self)
| FormatManager |
python | spyder-ide__spyder | spyder/utils/color_system.py | {
"start": 900,
"end": 1241
class ____:
    """Color ramp of hex RGB shades, anchored at pure black (B0) and
    pure white (B150); intermediate steps are orange hues, darkest to
    lightest as the index grows."""
    B0 = '#000000'
    B10 = '#471D06'
    B20 = '#692907'
    B30 = '#AB3E00'
    B40 = '#CE4B01'
    B50 = '#E05E15'
    B60 = '#E57004'
    B70 = '#F37E12'
    B80 = '#FF993B'
    B90 = '#FFB950'
    B100 = '#FFCF84'
    B110 = '#FFDDA7'
    B120 = '#FFEACA'
    B130 = '#FFF3E2'
    B140 = '#FFFBF5'
    B150 = '#FFFFFF'
| Orange |
python | matplotlib__matplotlib | lib/matplotlib/_mathtext.py | {
"start": 49825,
"end": 50632
class ____(Box):
    """
    A solid black rectangle.

    It has *width*, *depth*, and *height* fields just as in an `Hlist`.
    However, if any of these dimensions is inf, the actual value will be
    determined by running the rule up to the boundary of the innermost
    enclosing box. This is called a "running dimension". The width is never
    running in an `Hlist`; the height and depth are never running in a `Vlist`.
    """

    def __init__(self, width: float, height: float, depth: float, state: ParserState):
        super().__init__(width, height, depth)
        # Keep the fontset so render() can delegate the actual drawing.
        self.fontset = state.fontset

    def render(self, output: Output,  # type: ignore[override]
               x: float, y: float, w: float, h: float) -> None:
        """Draw the rule as a filled rectangle at (x, y) with size (w, h)."""
        self.fontset.render_rect_filled(output, x, y, x + w, y + h)
| Rule |
python | spyder-ide__spyder | spyder/plugins/updatemanager/workers.py | {
"start": 8445,
"end": 9508
class ____(QObject):
    """Base worker class for the updater"""

    # Qt signals below; the bare strings following each declaration are the
    # project's existing convention for documenting signals.

    sig_ready = Signal(bool)
    """
    Signal to inform that the worker has finished.

    Parameters
    ----------
    success : bool
        Whether the worker was successful (True) or not (False).
    """

    sig_exception_occurred = Signal(dict)
    """
    Send untracked exceptions to the error reporter

    Parameters
    ----------
    error_data: dict
        The dictionary containing error data. The allowed keys are:
        text: str
            Error text to display. This may be a translated string or
            formatted exception string.
        is_traceback: bool
            Whether `text` is plain text or an error traceback.
        repo: str
            Customized display of repo in GitHub error submission report.
        title: str
            Customized display of title in GitHub error submission report.
        label: str
            Customized content of the error dialog.
        steps: str
            Customized content of the error dialog.
    """
| BaseWorker |
python | pytorch__pytorch | test/test_shape_ops.py | {
"start": 2215,
"end": 35245
} | class ____(TestCase):
# TODO: update to work on CUDA, too
@onlyCPU
def test_unbind(self, device):
    """torch.unbind / Tensor.unbind return size(dim) slices, each equal to
    Tensor.select along that dim."""
    x = torch.rand(2, 3, 4, 5)
    for dim in range(4):
        res = torch.unbind(x, dim)
        res2 = x.unbind(dim)
        self.assertEqual(x.size(dim), len(res))
        self.assertEqual(x.size(dim), len(res2))
        # BUGFIX: iterate over every slice along `dim`. The old loop used
        # `range(dim)`, which checked no slices at all for dim == 0 and
        # only a prefix of the slices for the other dims.
        for i in range(x.size(dim)):
            self.assertEqual(x.select(dim, i), res[i])
            self.assertEqual(x.select(dim, i), res2[i])
# TODO: update to work on CUDA, too?
@skipIfTorchDynamo("TorchDynamo fails with an unknown error")
@onlyCPU
def test_tolist(self, device):
list0D = []
tensor0D = torch.tensor(list0D)
self.assertEqual(tensor0D.tolist(), list0D)
table1D = [1.0, 2.0, 3.0]
tensor1D = torch.tensor(table1D)
storage = torch.Storage(table1D)
self.assertEqual(tensor1D.tolist(), table1D)
self.assertEqual(storage.tolist(), table1D)
self.assertEqual(tensor1D.tolist(), table1D)
self.assertEqual(storage.tolist(), table1D)
table2D = [[1, 2], [3, 4]]
tensor2D = torch.tensor(table2D)
self.assertEqual(tensor2D.tolist(), table2D)
tensor3D = torch.tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])
tensorNonContig = tensor3D.select(1, 1)
self.assertFalse(tensorNonContig.is_contiguous())
self.assertEqual(tensorNonContig.tolist(), [[3, 4], [7, 8]])
@dtypes(torch.int64, torch.float, torch.complex128)
def test_movedim_invalid(self, device, dtype):
shape = self._rand_shape(4, min_size=5, max_size=10)
x = _generate_input(shape, dtype, device, False)
for fn in [torch.movedim, torch.moveaxis]:
# Invalid `source` and `destination` dimension
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
fn(x, 5, 0)
with self.assertRaisesRegex(IndexError, "Dimension out of range"):
fn(x, 0, 5)
# Mismatch in size of `source` and `destination`
with self.assertRaisesRegex(
RuntimeError, "movedim: Invalid source or destination dims:"
):
fn(x, (1, 0), (0,))
with self.assertRaisesRegex(
RuntimeError, "movedim: repeated dim in `source`"
):
fn(x, (0, 0), (0, 1))
with self.assertRaisesRegex(
RuntimeError, "movedim: repeated dim in `source`"
):
fn(x, (0, 1, 0), (0, 1, 2))
with self.assertRaisesRegex(
RuntimeError, "movedim: repeated dim in `destination`"
):
fn(x, (0, 1), (1, 1))
with self.assertRaisesRegex(
RuntimeError, "movedim: repeated dim in `destination`"
):
fn(x, (0, 1, 2), (1, 0, 1))
@dtypes(torch.int64, torch.float, torch.complex128)
def test_movedim(self, device, dtype):
    """Compare torch.movedim/moveaxis against np.moveaxis for scalar and
    sequence source/destination dims, including randomly negated indices."""
    for fn in [torch.moveaxis, torch.movedim]:
        for nd in range(5):
            shape = self._rand_shape(nd, min_size=5, max_size=10)
            x = _generate_input(shape, dtype, device, with_extremal=False)
            for random_negative in [True, False]:
                for src_dim, dst_dim in permutations(range(nd), r=2):
                    random_prob = random.random()

                    if random_negative and random_prob > 0.66:
                        src_dim = src_dim - nd
                    elif random_negative and random_prob > 0.33:
                        dst_dim = dst_dim - nd
                    elif random_negative:
                        src_dim = src_dim - nd
                        dst_dim = dst_dim - nd

                    # Integer `source` and `destination`
                    torch_fn = partial(fn, source=src_dim, destination=dst_dim)
                    np_fn = partial(
                        np.moveaxis, source=src_dim, destination=dst_dim
                    )
                    self.compare_with_numpy(
                        torch_fn, np_fn, x, device=None, dtype=None
                    )

                if nd == 0:
                    continue

                def make_index_negative(sequence, idx):
                    # BUGFIX: operate on the *parameters* instead of the
                    # enclosing-scope names. The old body mutated a local
                    # copy but returned `tuple(src_sequence)` from the
                    # closure, so calls that were meant to negate an index
                    # of `dst_sequence` silently replaced it with the
                    # (unmodified) source sequence.
                    sequence = list(sequence)
                    sequence[idx] = sequence[idx] - nd
                    return tuple(sequence)

                for src_sequence in permutations(
                    range(nd), r=random.randint(1, nd)
                ):
                    # Sequence `source` and `destination`
                    dst_sequence = tuple(
                        random.sample(range(nd), len(src_sequence))
                    )

                    # Randomly change a dim to a negative dim representation of itself.
                    random_prob = random.random()
                    if random_negative and random_prob > 0.66:
                        random_idx = random.randint(0, len(src_sequence) - 1)
                        src_sequence = make_index_negative(src_sequence, random_idx)
                    elif random_negative and random_prob > 0.33:
                        random_idx = random.randint(0, len(src_sequence) - 1)
                        dst_sequence = make_index_negative(dst_sequence, random_idx)
                    elif random_negative:
                        random_idx = random.randint(0, len(src_sequence) - 1)
                        dst_sequence = make_index_negative(dst_sequence, random_idx)
                        random_idx = random.randint(0, len(src_sequence) - 1)
                        src_sequence = make_index_negative(src_sequence, random_idx)

                    torch_fn = partial(
                        fn, source=src_sequence, destination=dst_sequence
                    )
                    np_fn = partial(
                        np.moveaxis, source=src_sequence, destination=dst_sequence
                    )
                    self.compare_with_numpy(
                        torch_fn, np_fn, x, device=None, dtype=None
                    )

    # Move dim to same position
    x = torch.randn(2, 3, 5, 7, 11)
    torch_fn = partial(fn, source=(0, 1), destination=(0, 1))
    np_fn = partial(np.moveaxis, source=(0, 1), destination=(0, 1))
    self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
    torch_fn = partial(fn, source=1, destination=1)
    np_fn = partial(np.moveaxis, source=1, destination=1)
    self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)

    # Empty Sequence
    torch_fn = partial(fn, source=(), destination=())
    np_fn = partial(np.moveaxis, source=(), destination=())
    self.compare_with_numpy(torch_fn, np_fn, x, device=None, dtype=None)
@dtypes(torch.float, torch.bool)
def test_diag(self, device, dtype):
if dtype is torch.bool:
x = torch.rand(100, 100, device=device) >= 0.5
else:
x = torch.rand(100, 100, dtype=dtype, device=device)
res1 = torch.diag(x)
res2 = torch.tensor((), dtype=dtype, device=device)
torch.diag(x, out=res2)
self.assertEqual(res1, res2)
def test_diagonal(self, device):
x = torch.randn((100, 100), device=device)
result = torch.diagonal(x)
expected = torch.diag(x)
self.assertEqual(result, expected)
x = torch.randn((100, 100), device=device)
result = torch.diagonal(x, 17)
expected = torch.diag(x, 17)
self.assertEqual(result, expected)
@onlyCPU
@dtypes(torch.float)
def test_diagonal_multidim(self, device, dtype):
x = torch.randn(10, 11, 12, 13, dtype=dtype, device=device)
xn = x.numpy()
for args in [(2, 2, 3), (2,), (-2, 1, 2), (0, -2, -1)]:
result = torch.diagonal(x, *args)
expected = xn.diagonal(*args)
self.assertEqual(expected.shape, result.shape)
self.assertEqual(expected, result)
# test non-contiguous
xp = x.permute(1, 2, 3, 0)
result = torch.diagonal(xp, 0, -2, -1)
expected = xp.numpy().diagonal(0, -2, -1)
self.assertEqual(expected.shape, result.shape)
self.assertEqual(expected, result)
@onlyNativeDeviceTypes
@dtypes(*all_types())
@dtypesIfCUDA(*all_types_and(torch.half))
@dtypesIfXPU(*all_types_and(torch.half))
def test_trace(self, device, dtype):
def test(shape):
tensor = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9)
expected_dtype = tensor.sum().dtype
expected_dtype = torch_to_numpy_dtype_dict[expected_dtype]
result = np.trace(tensor.cpu().numpy(), dtype=expected_dtype)
expected = torch.tensor(result, device=device)
self.assertEqual(tensor.trace(), expected)
shapes = (
[10, 1],
[1, 10],
[100, 100],
[20, 100],
[100, 20],
)
for shape in shapes:
test(shape)
def generate_clamp_baseline(self, device, dtype, *, min_vals, max_vals, with_nans):
"""
Creates a random tensor for a given device and dtype, and computes the expected clamped
values given the min_vals and/or max_vals.
If with_nans is provided, then some values are randomly set to nan.
"""
X = torch.rand(100, device=device).mul(50).add(-25) # uniform in [-25, 25]
X = X.to(dtype)
if with_nans:
mask = torch.randint(0, 2, X.shape, dtype=torch.bool, device=device)
X[mask] = nan
if isinstance(min_vals, torch.Tensor):
min_vals = min_vals.cpu().numpy()
if isinstance(max_vals, torch.Tensor):
max_vals = max_vals.cpu().numpy()
# Use NumPy implementation as reference
X_clamped = torch.tensor(
np.clip(X.cpu().numpy(), a_min=min_vals, a_max=max_vals), device=device
)
return X, X_clamped
# Tests clamp and its alias, clip
@dtypes(torch.int64, torch.float32)
def test_clamp(self, device, dtype):
op_list = (
torch.clamp,
torch.Tensor.clamp,
torch.Tensor.clamp_,
torch.clip,
torch.Tensor.clip,
torch.Tensor.clip_,
)
# min/max argument product
args = product((-10, None), (10, None))
for op in op_list:
for min_val, max_val in args:
if min_val is None and max_val is None:
continue
X, Y_expected = self.generate_clamp_baseline(
device, dtype, min_vals=min_val, max_vals=max_val, with_nans=False
)
# Test op
X1 = X.clone() # So that the in-place ops do not change X
Y_actual = op(X1, min_val, max_val)
self.assertEqual(Y_expected, Y_actual)
# Test op-out behavior (out does not exist for method versions)
if op in (torch.clamp, torch.clip):
Y_out = torch.empty_like(X)
op(X, min=min_val, max=max_val, out=Y_out)
self.assertEqual(Y_expected, Y_out)
def test_clamp_propagates_nans(self, device):
op_list = (
torch.clamp,
torch.Tensor.clamp,
torch.Tensor.clamp_,
torch.clip,
torch.Tensor.clip,
torch.Tensor.clip_,
)
# min/max argument product
args = product((-10, None), (10, None))
for op in op_list:
for min_val, max_val in args:
if min_val is None and max_val is None:
continue
X, Y_expected = self.generate_clamp_baseline(
device,
torch.float,
min_vals=min_val,
max_vals=max_val,
with_nans=True,
)
Y_expected = torch.isnan(Y_expected)
# Test op
X1 = X.clone() # So that the in-place ops do not change X
Y_actual = op(X1, min_val, max_val)
self.assertEqual(Y_expected, torch.isnan(Y_actual))
# Test op-out behavior (out does not exist for method versions)
if op in (torch.clamp, torch.clip):
Y_out = torch.empty_like(X)
op(X, min_val, max_val, out=Y_out)
self.assertEqual(Y_expected, torch.isnan(Y_out))
def test_clamp_raises_arg_errors(self, device):
    """clamp / clamp_ / torch.clamp must reject calls where both `min` and
    `max` are omitted."""
    X = torch.randn(100, dtype=torch.float, device=device)
    error_msg = "At least one of 'min' or 'max' must not be None"
    with self.assertRaisesRegex(RuntimeError, error_msg):
        X.clamp()
    with self.assertRaisesRegex(RuntimeError, error_msg):
        X.clamp_()
    with self.assertRaisesRegex(RuntimeError, error_msg):
        torch.clamp(X)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip(self, device, dtype):
make_from_data = partial(torch.tensor, device=device, dtype=dtype)
make_from_size = partial(make_tensor, device=device, dtype=dtype)
def test_flip_impl(input_t, dims, output_t):
def all_t():
yield input_t, output_t
if dtype is torch.float:
# We generate quantized versions as well
for qdtype in (torch.quint8, torch.qint8, torch.qint32):
qinput_t = torch.quantize_per_tensor(input_t, 0.1, 5, qdtype)
qoutput_t = torch.quantize_per_tensor(output_t, 0.1, 5, qdtype)
yield qinput_t, qoutput_t
for in_t, out_t in all_t():
self.assertEqual(in_t.flip(dims), out_t)
n = in_t.ndim
if not isinstance(dims, tuple):
# Wrap dim
self.assertEqual(in_t.flip(-n + dims), out_t)
else:
# Permute dimensions
for p_dims in permutations(dims):
self.assertEqual(in_t.flip(p_dims), out_t)
if len(p_dims) > 0:
# Wrap 1st dim
self.assertEqual(
in_t.flip((-n + p_dims[0],) + p_dims[1:]), out_t
)
def gen_data():
# Basic tests
data = make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2)
nonctg = make_from_size((2, 2, 2), noncontiguous=True).copy_(data)
dims_result = (
(0, make_from_data([5, 6, 7, 8, 1, 2, 3, 4]).view(2, 2, 2)),
(1, make_from_data([3, 4, 1, 2, 7, 8, 5, 6]).view(2, 2, 2)),
(2, make_from_data([2, 1, 4, 3, 6, 5, 8, 7]).view(2, 2, 2)),
((0, 1), make_from_data([7, 8, 5, 6, 3, 4, 1, 2]).view(2, 2, 2)),
((0, 1, 2), make_from_data([8, 7, 6, 5, 4, 3, 2, 1]).view(2, 2, 2)),
)
for in_tensor, (dims, out_tensor) in product((data, nonctg), dims_result):
yield in_tensor, dims, out_tensor
# Expanded
in_t = make_from_data([1, 2, 3]).view(3, 1).expand(3, 2)
dims = 0
out_t = make_from_data([3, 3, 2, 2, 1, 1]).view(3, 2)
yield in_t, dims, out_t
# Noop on expanded dimension
yield in_t, 1, in_t
# Transposed
in_t = (
make_from_data([1, 2, 3, 4, 5, 6, 7, 8]).view(2, 2, 2).transpose(0, 1)
)
dims = (0, 1, 2)
out_t = make_from_data([8, 7, 4, 3, 6, 5, 2, 1]).view(2, 2, 2)
yield in_t, dims, out_t
# Rectangular case
in_t = make_from_data([1, 2, 3, 4, 5, 6]).view(2, 3)
dims = 0
out_t = make_from_data([[4, 5, 6], [1, 2, 3]])
yield in_t, dims, out_t
dims = 1
out_t = make_from_data([[3, 2, 1], [6, 5, 4]])
yield in_t, dims, out_t
# vectorized NCHW cases (images)
if device == "cpu" and dtype != torch.bfloat16:
for mf in [torch.contiguous_format, torch.channels_last]:
for c in [2, 3, 8, 16]:
in_t = make_from_size((2, c, 32, 32)).contiguous(
memory_format=mf
)
np_in_t = in_t.numpy()
np_out_t = np_in_t[:, :, :, ::-1].copy()
out_t = torch.from_numpy(np_out_t)
yield in_t, 3, out_t
np_out_t = np_in_t[:, :, ::-1, :].copy()
out_t = torch.from_numpy(np_out_t)
yield in_t, 2, out_t
# non-contig cases
in_tt = in_t[..., ::2, :]
np_in_t = in_tt.numpy()
np_out_t = np_in_t[:, :, :, ::-1].copy()
out_t = torch.from_numpy(np_out_t)
yield in_tt, 3, out_t
in_tt = in_t[..., ::2]
np_in_t = in_tt.numpy()
np_out_t = np_in_t[:, :, :, ::-1].copy()
out_t = torch.from_numpy(np_out_t)
yield in_tt, 3, out_t
# Noops (edge cases)
# Size 0
in_t = make_from_data(())
yield in_t, 0, in_t
yield in_t, (), in_t
# dims = ()
in_t = make_from_size((3, 2, 1))
yield in_t, (), in_t
# Zero elements, non-zero size
in_t = make_from_size((3, 0, 2))
for i in range(in_t.ndim):
yield in_t, i, in_t
# Size 1
in_t = make_from_size(())
yield in_t, 0, in_t
in_t = make_from_size((1,))
yield in_t, 0, in_t
for in_tensor, dims, out_tensor in gen_data():
test_flip_impl(in_tensor, dims, out_tensor)
# test for shape
size = [2, 3, 4]
data = make_from_size(size)
possible_dims = range(len(size))
test_dims = chain(
combinations(possible_dims, 1), combinations(possible_dims, 2)
)
for dims in test_dims:
self.assertEqual(size, list(data.flip(dims).size()))
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip_errors(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
data = make_arg((2, 2, 2))
# not allow flip on the same dim more than once
self.assertRaises(RuntimeError, lambda: data.flip(0, 1, 1))
# not allow empty list as input
self.assertRaises(TypeError, lambda: data.flip())
# not allow dim > max dim
self.assertRaises(IndexError, lambda: data.flip(0, 1, 2, 3))
self.assertRaises(IndexError, lambda: data.flip(3))
def _rand_shape(self, dim, min_size, max_size):
    """Return a random shape tuple of length ``dim`` with each size drawn
    uniformly from [min_size, max_size]."""
    sizes = torch.randint(min_size, max_size + 1, (dim,))
    return tuple(sizes)
@dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16))
def test_flip_numpy(self, device, dtype):
make_arg = partial(make_tensor, dtype=dtype, device=device)
for ndim in [3, 4]:
shape = self._rand_shape(ndim, 5, 10)
data = make_arg(shape)
# Axis to sample for given shape.
for i in range(1, ndim + 1):
# Check all combinations of `i` axis.
for flip_dim in combinations(range(ndim), i):
torch_fn = partial(torch.flip, dims=flip_dim)
np_fn = partial(np.flip, axis=flip_dim)
self.compare_with_numpy(torch_fn, np_fn, data)
@onlyOn(["cuda", "xpu"]) # CPU is too slow
@largeTensorTest("17GB") # 4 tensors of 4GB (in, out) x (torch, numpy) + 1GB
@largeTensorTest(
"81GB", "cpu"
) # even for CUDA test, sufficient system memory is required
@unittest.skipIf(IS_JETSON, "Too large for Jetson")
def test_flip_large_tensor(self, device):
t_in = torch.empty(2**32 + 1, dtype=torch.uint8).random_()
torch_fn = partial(torch.flip, dims=(0,))
np_fn = partial(np.flip, axis=0)
self.compare_with_numpy(torch_fn, np_fn, t_in)
del t_in
@onlyCPU
@unittest.expectedFailure
@dtypes(torch.quint4x2, torch.quint2x4)
def test_flip_unsupported_dtype(self, dtype):
scale, zero_point = 0.1, 5
qt = torch.quantize_per_tensor(
torch.randn(16, 16), scale=scale, zero_point=zero_point, dtype=dtype
)
torch.flip(qt, dims=(0,))
def _test_fliplr_flipud(self, torch_fn, np_fn, min_dim, max_dim, device, dtype):
for dim in range(min_dim, max_dim + 1):
shape = self._rand_shape(dim, 5, 10)
# Randomly scale the input
if dtype.is_floating_point or dtype.is_complex:
data = torch.randn(*shape, device=device, dtype=dtype)
else:
data = torch.randint(0, 10, shape, device=device, dtype=dtype)
self.compare_with_numpy(torch_fn, np_fn, data)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_fliplr(self, device, dtype):
self._test_fliplr_flipud(torch.fliplr, np.fliplr, 2, 4, device, dtype)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_fliplr_invalid(self, device, dtype):
x = torch.randn(42).to(dtype)
with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
torch.fliplr(x)
with self.assertRaisesRegex(RuntimeError, "Input must be >= 2-d."):
torch.fliplr(torch.tensor(42, device=device, dtype=dtype))
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_flipud(self, device, dtype):
self._test_fliplr_flipud(torch.flipud, np.flipud, 1, 4, device, dtype)
@dtypes(torch.int64, torch.double, torch.cdouble)
def test_flipud_invalid(self, device, dtype):
with self.assertRaisesRegex(RuntimeError, "Input must be >= 1-d."):
torch.flipud(torch.tensor(42, device=device, dtype=dtype))
def test_rot90(self, device):
data = torch.arange(1, 5, device=device).view(2, 2)
self.assertEqual(torch.tensor([1, 2, 3, 4]).view(2, 2), data.rot90(0, [0, 1]))
self.assertEqual(torch.tensor([2, 4, 1, 3]).view(2, 2), data.rot90(1, [0, 1]))
self.assertEqual(torch.tensor([4, 3, 2, 1]).view(2, 2), data.rot90(2, [0, 1]))
self.assertEqual(torch.tensor([3, 1, 4, 2]).view(2, 2), data.rot90(3, [0, 1]))
# test for default args k=1, dims=[0, 1]
self.assertEqual(data.rot90(), data.rot90(1, [0, 1]))
# test for reversed order of dims
self.assertEqual(data.rot90(3, [0, 1]), data.rot90(1, [1, 0]))
# test for modulo of k
self.assertEqual(data.rot90(5, [0, 1]), data.rot90(1, [0, 1]))
self.assertEqual(data.rot90(3, [0, 1]), data.rot90(-1, [0, 1]))
self.assertEqual(data.rot90(-5, [0, 1]), data.rot90(-1, [0, 1]))
# test for dims out-of-range error
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, -3]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 2]))
# test tensor with more than 2D
data = torch.arange(1, 9, device=device).view(2, 2, 2)
self.assertEqual(
torch.tensor([2, 4, 1, 3, 6, 8, 5, 7]).view(2, 2, 2), data.rot90(1, [1, 2])
)
self.assertEqual(data.rot90(1, [1, -1]), data.rot90(1, [1, 2]))
# test for errors
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 3]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [1, 1]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0, 1, 2]))
self.assertRaises(RuntimeError, lambda: data.rot90(1, [0]))
@skipIfTorchDynamo("TorchDynamo fails with an unknown error")
@dtypes(torch.cfloat, torch.cdouble)
def test_complex_rot90(self, device, dtype):
shape = self._rand_shape(random.randint(2, 4), 5, 10)
for rot_times in range(4):
data = torch.randn(*shape, device=device, dtype=dtype)
torch_fn = partial(torch.rot90, k=rot_times, dims=[0, 1])
np_fn = partial(np.rot90, k=rot_times, axes=[0, 1])
self.compare_with_numpy(torch_fn, np_fn, data)
# TODO: update once warning flag is available to always trigger ONCE warnings
# Ensures nonzero does not throw a warning, even when the as_tuple argument
# is not provided
def test_nonzero_no_warning(self, device):
t = torch.randn((2, 2), device=device)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
torch.nonzero(t)
t.nonzero()
self.assertEqual(len(w), 0)
@dtypes(*all_types_and(torch.half, torch.bool, torch.bfloat16))
def test_nonzero(self, device, dtype):
shapes = [
torch.Size((12,)),
torch.Size((12, 1)),
torch.Size((1, 12)),
torch.Size((6, 2)),
torch.Size((3, 2, 2)),
torch.Size((5, 5, 5)),
]
def gen_nontrivial_input(shape, dtype, device):
if dtype != torch.bfloat16:
return torch.randint(2, shape, device=device, dtype=dtype)
else:
# windows does not work for bfloat16 randing
return torch.randint(2, shape, device=device, dtype=torch.float).to(
dtype
)
for shape in shapes:
tensor = gen_nontrivial_input(shape, dtype, device)
dst1 = torch.nonzero(tensor, as_tuple=False)
dst2 = tensor.nonzero(as_tuple=False)
dst3 = torch.empty([], dtype=torch.long, device=device)
torch.nonzero(tensor, out=dst3)
if self.device_type != "xla":
# xla does not raise runtime error
self.assertRaisesRegex(
RuntimeError,
"scalar type Long",
lambda: torch.nonzero(
tensor, out=torch.empty([], dtype=torch.float, device=device)
),
)
if (
self.device_type == "cuda"
or self.device_type == "xpu"
or self.device_type == TEST_PRIVATEUSE1_DEVICE_TYPE
):
self.assertRaisesRegex(
RuntimeError,
"on the same device",
lambda: torch.nonzero(
tensor, out=torch.empty([], dtype=torch.long)
),
)
np_array = (
tensor.cpu().numpy()
if dtype != torch.bfloat16
else tensor.float().cpu().numpy()
)
np_result = torch.from_numpy(np.stack(np_array.nonzero())).t()
self.assertEqual(dst1.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst2.cpu(), np_result, atol=0, rtol=0)
self.assertEqual(dst3.cpu(), np_result, atol=0, rtol=0)
tup1 = torch.nonzero(tensor, as_tuple=True)
tup2 = tensor.nonzero(as_tuple=True)
tup1 = torch.stack(tup1).t().cpu()
tup2 = torch.stack(tup2).t().cpu()
self.assertEqual(tup1, np_result, atol=0, rtol=0)
self.assertEqual(tup2, np_result, atol=0, rtol=0)
def test_nonzero_astuple_out(self, device):
t = torch.randn((3, 3, 3), device=device)
out = torch.empty_like(t, dtype=torch.long)
with self.assertRaises(RuntimeError):
torch.nonzero(t, as_tuple=True, out=out)
self.assertEqual(
torch.nonzero(t, as_tuple=False, out=out), torch.nonzero(t, out=out)
)
# Verifies that JIT script cannot handle the as_tuple kwarg
# See Issue https://github.com/pytorch/pytorch/issues/45499.
def _foo(t):
tuple_result = torch.nonzero(t, as_tuple=True)
nontuple_result = torch.nonzero(t, as_tuple=False)
out = torch.empty_like(nontuple_result)
torch.nonzero(t, as_tuple=False, out=out)
return tuple_result, nontuple_result, out
with self.assertRaises(RuntimeError):
torch.jit.script(_foo)
# Verifies that JIT tracing works fine
traced_foo = torch.jit.trace(_foo, t)
traced_tuple, traced_nontuple, traced_out = traced_foo(t)
expected_tuple = torch.nonzero(t, as_tuple=True)
expected_nontuple = torch.nonzero(t)
self.assertEqual(traced_tuple, expected_tuple)
self.assertEqual(traced_nontuple, expected_nontuple)
self.assertEqual(traced_out, expected_nontuple)
@onlyNativeDeviceTypes
def test_nonzero_discontiguous(self, device):
shape = (4, 4)
tensor = torch.randint(2, shape, device=device)
tensor_nc = torch.empty(shape[0], shape[1] * 2, device=device)[:, ::2].copy_(
tensor
)
dst1 = tensor.nonzero(as_tuple=False)
dst2 = tensor_nc.nonzero(as_tuple=False)
self.assertEqual(dst1, dst2, atol=0, rtol=0)
dst3 = torch.empty_like(dst1)
data_ptr = dst3.data_ptr()
# expect dst3 storage to be reused
torch.nonzero(tensor, out=dst3)
self.assertEqual(data_ptr, dst3.data_ptr())
self.assertEqual(dst1, dst3, atol=0, rtol=0)
# discontiguous out
dst4 = torch.empty(
dst1.size(0), dst1.size(1) * 2, dtype=torch.long, device=device
)[:, ::2]
data_ptr = dst4.data_ptr()
strides = dst4.stride()
torch.nonzero(tensor, out=dst4)
self.assertEqual(data_ptr, dst4.data_ptr())
self.assertEqual(dst1, dst4, atol=0, rtol=0)
self.assertEqual(strides, dst4.stride())
def test_nonzero_non_diff(self, device):
    """nonzero() produces integer indices and must not propagate requires_grad."""
    x = torch.randn(10, requires_grad=True)
    nz = x.nonzero()
    self.assertFalse(nz.requires_grad)
@dtypes(torch.int64, torch.float, torch.complex128)
def test_sparse_dense_dim(self, device, dtype):
for shape in [(), (2,), (2, 3)]:
if dtype.is_complex or dtype.is_floating_point:
x = torch.rand(shape, device=device, dtype=dtype)
else:
x = torch.randint(-9, 9, shape, device=device, dtype=dtype)
self.assertEqual(x.sparse_dim(), 0)
self.assertEqual(x.dense_dim(), len(shape))
def test_unfold_all_devices_and_dtypes(self, device):
for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16):
if dt == torch.bool:
x = torch.empty((0, 1, 3, 0), dtype=dt, device=device)
self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)
else:
x = torch.empty((0, 1, 3, 0), dtype=dt, device=device)
self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape)
def test_unfold_scalars(self, device):
x = torch.tensor(0.5, device=device)
# unfold on a 0-dimensional tensor should always return a 1-d dimensional
# tensor of shape [size] (i.e., the second parameter to unfold)
self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 1))
self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 2))
self.assertEqual(torch.tensor([0.5], device=device), x.unfold(0, 1, 1))
def test_unfold_errors(self, device):
x = torch.arange(1.0, 8, device=device)
with self.assertRaisesRegex(RuntimeError, "size is -1 but must be >= 0"):
x.unfold(0, -1, 1)
with self.assertRaisesRegex(RuntimeError, "step is -1 but must be > 0"):
x.unfold(0, 1, -1)
def test_unfold_backward_errors(self, device):
grad_in = torch.randn(2, 3, device=device)
input_sizes = [6]
with self.assertRaisesRegex(ValueError, "step is 0 but must be > 0"):
torch.ops.aten.unfold_backward(grad_in, input_sizes, 0, 3, 0)
with self.assertRaisesRegex(RuntimeError, "size is -1 but must be >= 0"):
torch.ops.aten.unfold_backward(grad_in, input_sizes, 0, -1, 1)
instantiate_device_type_tests(TestShapeOps, globals())
if __name__ == "__main__":
run_tests()
| TestShapeOps |
python | pytorch__pytorch | test/fx/test_fx_split.py | {
"start": 360,
"end": 494
} | class ____:
a: int
b: int
c: int
@torch.fx.wrap
def wrapped_add(_dataclass, y):
return _dataclass.c + y
| DummyDataClass |
python | astropy__astropy | astropy/utils/masked/tests/test_masked.py | {
"start": 13386,
"end": 16081
} | class ____(ArraySetup):
def test_whole_mask_setting_simple(self):
ma = Masked(self.a)
assert ma.mask.shape == ma.shape
assert not ma.mask.any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask.all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, np.array([[True] * 3, [False] * 3]))
ma.mask = self.mask_a
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_a)
assert ma.mask is not self.mask_a
assert np.may_share_memory(ma.mask, self.mask_a)
def test_whole_mask_setting_structured(self):
ma = Masked(self.sa)
assert ma.mask.shape == ma.shape
assert not ma.mask["a"].any() and not ma.mask["b"].any()
ma.mask = True
assert ma.mask.shape == ma.shape
assert ma.mask["a"].all() and ma.mask["b"].all()
ma.mask = [[True], [False]]
assert ma.mask.shape == ma.shape
assert_array_equal(
ma.mask,
np.array([[(True, True)] * 2, [(False, False)] * 2], dtype=self.mask_sdt),
)
ma.mask = self.mask_sa
assert ma.mask.shape == ma.shape
assert_array_equal(ma.mask, self.mask_sa)
assert ma.mask is not self.mask_sa
assert np.may_share_memory(ma.mask, self.mask_sa)
@pytest.mark.parametrize("item", VARIOUS_ITEMS)
def test_part_mask_setting(self, item):
ma = Masked(self.a)
ma.mask[item] = True
expected = np.zeros(ma.shape, bool)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, bool))
# Mask propagation
mask = np.zeros(self.a.shape, bool)
ma = Masked(self.a, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
@pytest.mark.parametrize("item", ["a"] + VARIOUS_ITEMS)
def test_part_mask_setting_structured(self, item):
ma = Masked(self.sa)
ma.mask[item] = True
expected = np.zeros(ma.shape, self.mask_sdt)
expected[item] = True
assert_array_equal(ma.mask, expected)
ma.mask[item] = False
assert_array_equal(ma.mask, np.zeros(ma.shape, self.mask_sdt))
# Mask propagation
mask = np.zeros(self.sa.shape, self.mask_sdt)
ma = Masked(self.sa, mask)
ma.mask[item] = True
assert np.may_share_memory(ma.mask, mask)
assert_array_equal(ma.mask, mask)
# Following are tests where we trust the initializer works.
| TestMaskSetting |
python | walkccc__LeetCode | solutions/2318. Number of Distinct Roll Sequences/2318.py | {
"start": 0,
"end": 568
} | class ____:
def distinctSequences(self, n: int) -> int:
MOD = 1_000_000_007
@functools.lru_cache(None)
def dp(n: int, prev: int, prevPrev: int) -> int:
"""
Returns the number of distinct sequences for n dices with `prev` and
`prevPrev`.
"""
if n == 0:
return 1
res = 0
for dice in range(1, 7):
if (dice not in (prev, prevPrev) and
(prev == 0 or math.gcd(dice, prev) == 1)):
res += dp(n - 1, dice, prev)
res %= MOD
return res
return dp(n, 0, 0)
| Solution |
python | celery__celery | t/unit/backends/test_cassandra.py | {
"start": 339,
"end": 8944
} | class ____:
def setup_method(self):
self.app.conf.update(
cassandra_servers=['example.com'],
cassandra_keyspace='celery',
cassandra_table='task_results',
)
@pytest.mark.patched_module(*CASSANDRA_MODULES)
def test_init_no_cassandra(self, module):
# should raise ImproperlyConfigured when no python-driver
# installed.
from celery.backends import cassandra as mod
prev, mod.cassandra = mod.cassandra, None
try:
with pytest.raises(ImproperlyConfigured):
mod.CassandraBackend(app=self.app)
finally:
mod.cassandra = prev
@pytest.mark.patched_module(*CASSANDRA_MODULES)
def test_init_with_and_without_LOCAL_QUROM(self, module):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
cons = mod.cassandra.ConsistencyLevel = Bunch(
LOCAL_QUORUM='foo',
)
self.app.conf.cassandra_read_consistency = 'LOCAL_FOO'
self.app.conf.cassandra_write_consistency = 'LOCAL_FOO'
mod.CassandraBackend(app=self.app)
cons.LOCAL_FOO = 'bar'
mod.CassandraBackend(app=self.app)
# no servers and no bundle_path raises ImproperlyConfigured
with pytest.raises(ImproperlyConfigured):
self.app.conf.cassandra_servers = None
self.app.conf.cassandra_secure_bundle_path = None
mod.CassandraBackend(
app=self.app, keyspace='b', column_family='c',
)
# both servers no bundle_path raises ImproperlyConfigured
with pytest.raises(ImproperlyConfigured):
self.app.conf.cassandra_servers = ['localhost']
self.app.conf.cassandra_secure_bundle_path = (
'/home/user/secure-connect-bundle.zip')
mod.CassandraBackend(
app=self.app, keyspace='b', column_family='c',
)
def test_init_with_cloud(self):
# Tests behavior when Cluster.connect works properly
# and cluster is created with 'cloud' param instead of 'contact_points'
from celery.backends import cassandra as mod
class DummyClusterWithBundle:
def __init__(self, *args, **kwargs):
if args != ():
# this cluster is supposed to be created with 'cloud=...'
raise ValueError('I should be created with kwargs only')
pass
def connect(self, *args, **kwargs):
return Mock()
mod.cassandra = Mock()
mod.cassandra.cluster = Mock()
mod.cassandra.cluster.Cluster = DummyClusterWithBundle
self.app.conf.cassandra_secure_bundle_path = '/path/to/bundle.zip'
self.app.conf.cassandra_servers = None
x = mod.CassandraBackend(app=self.app)
x._get_connection()
assert isinstance(x._cluster, DummyClusterWithBundle)
@pytest.mark.patched_module(*CASSANDRA_MODULES)
@pytest.mark.usefixtures('depends_on_current_app')
def test_reduce(self, module):
from celery.backends.cassandra import CassandraBackend
assert loads(dumps(CassandraBackend(app=self.app)))
@pytest.mark.patched_module(*CASSANDRA_MODULES)
def test_get_task_meta_for(self, module):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
x = mod.CassandraBackend(app=self.app)
session = x._session = Mock()
execute = session.execute = Mock()
result_set = Mock()
result_set.one.return_value = [
states.SUCCESS, '1', datetime.now(), b'', b''
]
execute.return_value = result_set
x.decode = Mock()
meta = x._get_task_meta_for('task_id')
assert meta['status'] == states.SUCCESS
result_set.one.return_value = []
x._session.execute.return_value = result_set
meta = x._get_task_meta_for('task_id')
assert meta['status'] == states.PENDING
def test_as_uri(self):
# Just ensure as_uri works properly
from celery.backends import cassandra as mod
mod.cassandra = Mock()
x = mod.CassandraBackend(app=self.app)
x.as_uri()
x.as_uri(include_password=False)
@pytest.mark.patched_module(*CASSANDRA_MODULES)
def test_store_result(self, module):
from celery.backends import cassandra as mod
mod.cassandra = Mock()
x = mod.CassandraBackend(app=self.app)
session = x._session = Mock()
session.execute = Mock()
x._store_result('task_id', 'result', states.SUCCESS)
def test_timeouting_cluster(self):
# Tests behavior when Cluster.connect raises
# cassandra.OperationTimedOut.
from celery.backends import cassandra as mod
class OTOExc(Exception):
pass
class VeryFaultyCluster:
def __init__(self, *args, **kwargs):
pass
def connect(self, *args, **kwargs):
raise OTOExc()
def shutdown(self):
pass
mod.cassandra = Mock()
mod.cassandra.OperationTimedOut = OTOExc
mod.cassandra.cluster = Mock()
mod.cassandra.cluster.Cluster = VeryFaultyCluster
x = mod.CassandraBackend(app=self.app)
with pytest.raises(OTOExc):
x._store_result('task_id', 'result', states.SUCCESS)
assert x._cluster is None
assert x._session is None
def test_create_result_table(self):
# Tests behavior when session.execute raises
# cassandra.AlreadyExists.
from celery.backends import cassandra as mod
class OTOExc(Exception):
pass
class FaultySession:
def __init__(self, *args, **kwargs):
pass
def execute(self, *args, **kwargs):
raise OTOExc()
class DummyCluster:
def __init__(self, *args, **kwargs):
pass
def connect(self, *args, **kwargs):
return FaultySession()
mod.cassandra = Mock()
mod.cassandra.cluster = Mock()
mod.cassandra.cluster.Cluster = DummyCluster
mod.cassandra.AlreadyExists = OTOExc
x = mod.CassandraBackend(app=self.app)
x._get_connection(write=True)
assert x._session is not None
def test_init_session(self):
# Tests behavior when Cluster.connect works properly
from celery.backends import cassandra as mod
class DummyCluster:
def __init__(self, *args, **kwargs):
pass
def connect(self, *args, **kwargs):
return Mock()
mod.cassandra = Mock()
mod.cassandra.cluster = Mock()
mod.cassandra.cluster.Cluster = DummyCluster
x = mod.CassandraBackend(app=self.app)
assert x._session is None
x._get_connection(write=True)
assert x._session is not None
s = x._session
x._get_connection()
assert s is x._session
def test_auth_provider(self):
# Ensure valid auth_provider works properly, and invalid one raises
# ImproperlyConfigured exception.
from celery.backends import cassandra as mod
class DummyAuth:
ValidAuthProvider = Mock()
mod.cassandra = Mock()
mod.cassandra.auth = DummyAuth
# Valid auth_provider
self.app.conf.cassandra_auth_provider = 'ValidAuthProvider'
self.app.conf.cassandra_auth_kwargs = {
'username': 'stuff'
}
mod.CassandraBackend(app=self.app)
# Invalid auth_provider
self.app.conf.cassandra_auth_provider = 'SpiderManAuth'
self.app.conf.cassandra_auth_kwargs = {
'username': 'Jack'
}
with pytest.raises(ImproperlyConfigured):
mod.CassandraBackend(app=self.app)
def test_options(self):
# Ensure valid options works properly
from celery.backends import cassandra as mod
mod.cassandra = Mock()
# Valid options
self.app.conf.cassandra_options = {
'cql_version': '3.2.1',
'protocol_version': 3
}
self.app.conf.cassandra_port = None
x = mod.CassandraBackend(app=self.app)
# Default port is 9042
assert x.port == 9042
# Valid options with port specified
self.app.conf.cassandra_port = 1234
x = mod.CassandraBackend(app=self.app)
assert x.port == 1234
| test_CassandraBackend |
python | getsentry__sentry | src/sentry/api/endpoints/organization_events_trends_v2.py | {
"start": 1651,
"end": 15636
} | class ____(OrganizationEventsV2EndpointBase):
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
}
enforce_rate_limit = True
rate_limits = RateLimitConfig(
limit_overrides={
"GET": {
RateLimitCategory.IP: RateLimit(
DEFAULT_RATE_LIMIT, DEFAULT_RATE_LIMIT_WINDOW, DEFAULT_CONCURRENT_RATE_LIMIT
),
RateLimitCategory.USER: RateLimit(
DEFAULT_RATE_LIMIT, DEFAULT_RATE_LIMIT_WINDOW, DEFAULT_CONCURRENT_RATE_LIMIT
),
RateLimitCategory.ORGANIZATION: RateLimit(
ORGANIZATION_RATE_LIMIT, DEFAULT_RATE_LIMIT_WINDOW, ORGANIZATION_RATE_LIMIT
),
}
}
)
def has_feature(self, organization, request):
return features.has(
"organizations:performance-new-trends", organization, actor=request.user
)
def get(self, request: Request, organization: Organization) -> Response:
if not self.has_feature(organization, request):
return Response(status=404)
try:
snuba_params = self.get_snuba_params(request, organization)
except NoProjects:
return Response([])
trend_type = request.GET.get("trendType", REGRESSION)
if trend_type not in TREND_TYPES:
raise ParseError(detail=f"{trend_type} is not a supported trend type")
trend_function = request.GET.get("trendFunction", "p50()")
selected_columns = ["project_id", "transaction"]
query = request.GET.get("query")
query_source = self.get_request_source(request)
def get_top_events(user_query, snuba_params, event_limit, referrer):
top_event_columns = selected_columns[:]
top_event_columns.append("count()")
# Granularity is set to 1d - the highest granularity possible
# in order to optimize the top event query since we don't care
# about having exact counts.
return metrics_query(
top_event_columns,
query=user_query,
snuba_params=snuba_params,
orderby=["-count()"],
limit=event_limit,
referrer=referrer,
auto_aggregations=True,
use_aggregate_conditions=True,
granularity=DAY_GRANULARITY_IN_SECONDS,
query_source=query_source,
)
def generate_top_transaction_query(events):
pairs = [
(event["project_id"], escape_transaction(event["transaction"])) for event in events
]
conditions = [
f'(project_id:{project_id} transaction:"{transaction}")'
for project_id, transaction in pairs
]
return " OR ".join(conditions)
def get_timeseries(top_events, _, rollup, zerofill_results):
# Split top events into multiple queries for bulk timeseries query
data = top_events["data"]
queries = [
generate_top_transaction_query(chunk) for chunk in chunked(data, EVENTS_PER_QUERY)
]
timeseries_columns = selected_columns[:]
timeseries_columns.append(trend_function)
# When all projects or my projects options selected,
# keep only projects that top events belong to to reduce query cardinality
used_project_ids = set({event["project_id"] for event in data})
# Get new params with pruned projects
pruned_snuba_params = self.get_snuba_params(request, organization)
pruned_snuba_params.projects = [
project
for project in pruned_snuba_params.projects
if project.id in used_project_ids
]
result = metrics_performance.bulk_timeseries_query(
timeseries_columns,
queries,
snuba_params=pruned_snuba_params,
rollup=rollup,
zerofill_results=zerofill_results,
referrer=Referrer.API_TRENDS_GET_EVENT_STATS_V2_TIMESERIES.value,
groupby=[Column("project_id"), Column("transaction")],
apply_formatting=False,
query_source=query_source,
)
# Parse results
translated_groupby = ["project_id", "transaction"]
results = {}
formatted_results = {}
for index, item in enumerate(data):
result_key = create_result_key(item, translated_groupby, {})
results[result_key] = {
"order": index,
"data": [],
"project_id": item["project_id"],
}
discarded = 0
for row in result.get("data", []):
result_key = create_result_key(row, translated_groupby, {})
if result_key in results:
results[result_key]["data"].append(row)
else:
discarded += 1
# TODO: filter out entries that don't have transaction or trend_function
logger.warning(
"trends.top-events.timeseries.key-mismatch",
extra={
"result_key": result_key,
"top_event_keys": list(results.keys()),
},
)
# If we discard any rows, there's a chance we have a bad query and it'll
# most likely be a transaction name being parsed in an unexpected way in
# the search.
# A common side effect of this is that we return data for the same series
# in more than 1 query which can lead to a validation error in seer.
if discarded > 0:
logger.warning(
"trends.top-events.timeseries.discarded-rows",
extra={
"discarded": discarded,
"transactions": [event["transaction"] for event in data],
},
)
sentry_sdk.capture_message("Possibility of bad trends query")
for key, item in results.items():
formatted_results[key] = SnubaTSResult(
{
"data": (
zerofill(
item["data"],
pruned_snuba_params.start_date,
pruned_snuba_params.end_date,
rollup,
["time"],
)
if zerofill_results
else item["data"]
),
"project": item["project_id"],
"isMetricsData": True,
"order": item["order"],
},
pruned_snuba_params.start,
pruned_snuba_params.end,
rollup,
)
return formatted_results
def get_event_stats_metrics(_, user_query, snuba_params, rollup, zerofill_results, __):
top_event_limit = min(
int(request.GET.get("topEvents", DEFAULT_TOP_EVENTS_LIMIT)),
MAX_TOP_EVENTS_LIMIT,
)
# Fetch transactions names with the highest event count
top_trending_transactions = get_top_events(
user_query=user_query,
snuba_params=snuba_params,
event_limit=top_event_limit,
referrer=Referrer.API_TRENDS_GET_EVENT_STATS_V2_TOP_EVENTS.value,
)
sentry_sdk.set_tag(
"performance.trendsv2.top_events",
top_trending_transactions.get("data", None) is not None,
)
if len(top_trending_transactions.get("data", [])) == 0:
return {}
# Fetch timeseries for each top transaction name
return get_timeseries(top_trending_transactions, snuba_params, rollup, zerofill_results)
def format_start_end(data):
# format start and end
data_start = data[1].pop("start", "")
data_end = data[1].pop("end", "")
# data start and end that analysis is ran on
data[1]["data_start"] = data_start
data[1]["data_end"] = data_end
# user requested start and end
data[1]["request_start"] = int(snuba_params.start_date.timestamp())
data[1]["request_end"] = data_end
return data
def get_trends_data(stats_data, request):
stats_data = dict(
[format_start_end(data) for data in list(stats_data.items()) if data[1] is not None]
)
trend_sort = "" if trend_type == ANY else request.GET.get("sort", "trend_percentage()")
trend_function = request.GET.get("trendFunction", "p50()")
# list of requests to send to microservice async
trends_requests = [
{
"data": dict(chunk),
"sort": trend_sort,
"trendFunction": trend_function,
}
for chunk in chunked(stats_data.items(), EVENTS_PER_QUERY)
]
# send the data to microservice
with ThreadPoolExecutor(thread_name_prefix=__name__) as query_thread_pool:
results = list(query_thread_pool.map(detect_breakpoints, trends_requests))
trend_results = []
# append all the results
for result in results:
output_dict = result["data"]
trend_results += output_dict
# sort the results into trending events list
if trend_sort == "trend_percentage()":
trending_events = sorted(trend_results, key=lambda d: d["trend_percentage"])
elif trend_sort == "-trend_percentage()":
trending_events = sorted(
trend_results, key=lambda d: d["trend_percentage"], reverse=True
)
else:
trending_events = sorted(
trend_results, key=lambda d: d["absolute_percentage_change"], reverse=True
)
sentry_sdk.set_tag("performance.trendsv2.trends", len(trending_events) > 0)
return trending_events, trends_requests
def paginate_trending_events(offset, limit):
return {"data": trending_events[offset : limit + offset]}
def get_stats_data_for_trending_events(results):
trending_transaction_names_stats = {}
if request.GET.get("withTimeseries", False):
trending_transaction_names_stats = stats_data
else:
for t in results["data"]:
transaction_name = t["transaction"]
project = t["project"]
t_p_key = f"{project},{transaction_name}"
if t_p_key in stats_data:
selected_stats_data = stats_data[t_p_key]
idx = next(
i
for i, data in enumerate(selected_stats_data["data"])
if data[0] >= snuba_params.start_date.timestamp()
)
parsed_stats_data = selected_stats_data["data"][idx:]
selected_stats_data["data"] = parsed_stats_data
trending_transaction_names_stats[t_p_key] = selected_stats_data
else:
logger.warning(
"trends.trends-request.timeseries.key-mismatch",
extra={"result_key": t_p_key, "timeseries_keys": stats_data.keys()},
)
return {
"events": self.handle_results_with_meta(
request,
organization,
snuba_params.project_ids,
{"data": results["data"], "meta": {"isMetricsData": True}},
True,
),
"stats": trending_transaction_names_stats,
}
with handle_query_errors():
stats_data = self.get_event_stats_data(
request,
organization,
get_event_stats_metrics,
top_events=EVENTS_PER_QUERY,
query_column=trend_function,
snuba_params=snuba_params,
query=query,
)
sentry_sdk.set_tag("performance.trendsv2.stats_data", bool(stats_data))
# Handle empty response
if not bool(stats_data):
return Response(
{
"events": self.handle_results_with_meta(
request,
organization,
snuba_params.project_ids,
{"data": [], "meta": {"isMetricsData": True}},
True,
),
"stats": {},
},
status=200,
)
(
trending_events,
trends_requests,
) = get_trends_data(stats_data, request)
return self.paginate(
request=request,
paginator=GenericOffsetPaginator(data_fn=paginate_trending_events),
on_results=get_stats_data_for_trending_events,
default_per_page=5,
max_per_page=5,
)
| OrganizationEventsNewTrendsStatsEndpoint |
python | GoogleCloudPlatform__python-docs-samples | firestore/cloud-async-client/snippets.py | {
"start": 3050,
"end": 26568
} | class ____:
def __init__(self, name, state, country, capital=False, population=0, regions=[]):
self.name = name
self.state = state
self.country = country
self.capital = capital
self.population = population
self.regions = regions
@staticmethod
def from_dict(source):
# [START_EXCLUDE]
city = City(source["name"], source["state"], source["country"])
if "capital" in source:
city.capital = source["capital"]
if "population" in source:
city.population = source["population"]
if "regions" in source:
city.regions = source["regions"]
return city
# [END_EXCLUDE]
def to_dict(self):
# [START_EXCLUDE]
dest = {"name": self.name, "state": self.state, "country": self.country}
if self.capital:
dest["capital"] = self.capital
if self.population:
dest["population"] = self.population
if self.regions:
dest["regions"] = self.regions
return dest
# [END_EXCLUDE]
def __repr__(self):
return f"City(\
name={self.name}, \
country={self.country}, \
population={self.population}, \
capital={self.capital}, \
regions={self.regions}\
)"
# [END firestore_data_custom_type_definition_async]
async def add_example_data():
db = firestore.AsyncClient()
# [START firestore_data_get_dataset_async]
cities_ref = db.collection("cities")
await cities_ref.document("BJ").set(
City("Beijing", None, "China", True, 21500000, ["hebei"]).to_dict()
)
await cities_ref.document("SF").set(
City(
"San Francisco", "CA", "USA", False, 860000, ["west_coast", "norcal"]
).to_dict()
)
await cities_ref.document("LA").set(
City(
"Los Angeles", "CA", "USA", False, 3900000, ["west_coast", "socal"]
).to_dict()
)
await cities_ref.document("DC").set(
City("Washington D.C.", None, "USA", True, 680000, ["east_coast"]).to_dict()
)
await cities_ref.document("TOK").set(
City("Tokyo", None, "Japan", True, 9000000, ["kanto", "honshu"]).to_dict()
)
# [END firestore_data_get_dataset_async]
async def add_custom_class_with_id():
db = firestore.AsyncClient()
# [START firestore_data_set_from_custom_type_async]
city = City(name="Los Angeles", state="CA", country="USA")
await db.collection("cities").document("LA").set(city.to_dict())
# [END firestore_data_set_from_custom_type_async]
async def add_data_with_id():
db = firestore.AsyncClient()
data = {}
# [START firestore_data_set_id_specified_async]
await db.collection("cities").document("new-city-id").set(data)
# [END firestore_data_set_id_specified_async]
async def add_custom_class_generated_id():
db = firestore.AsyncClient()
# [START firestore_data_set_id_random_collection_async]
city = City(name="Tokyo", state=None, country="Japan")
await db.collection("cities").add(city.to_dict())
# [END firestore_data_set_id_random_collection_async]
async def add_new_doc():
db = firestore.AsyncClient()
# [START firestore_data_set_id_random_document_ref_async]
new_city_ref = db.collection("cities").document()
# later...
await new_city_ref.set(
{
# ...
}
)
# [END firestore_data_set_id_random_document_ref_async]
async def get_check_exists():
db = firestore.AsyncClient()
# [START firestore_data_get_as_map_async]
doc_ref = db.collection("cities").document("SF")
doc = await doc_ref.get()
if doc.exists:
print(f"Document data: {doc.to_dict()}")
else:
print("No such document!")
# [END firestore_data_get_as_map_async]
async def get_custom_class():
db = firestore.AsyncClient()
# [START firestore_data_get_as_custom_type_async]
doc_ref = db.collection("cities").document("BJ")
doc = await doc_ref.get()
city = City.from_dict(doc.to_dict())
print(city)
# [END firestore_data_get_as_custom_type_async]
async def get_simple_query():
db = firestore.AsyncClient()
# [START firestore_data_query_async]
# Note: Use of CollectionRef stream() is prefered to get()
docs = (
db.collection("cities")
.where(filter=FieldFilter("capital", "==", True))
.stream()
)
async for doc in docs:
print(f"{doc.id} => {doc.to_dict()}")
# [END firestore_data_query_async]
async def array_contains_filter():
db = firestore.AsyncClient()
# [START firestore_query_filter_array_contains_async]
cities_ref = db.collection("cities")
query = cities_ref.where(
filter=FieldFilter("regions", "array_contains", "west_coast")
)
# [END firestore_query_filter_array_contains_async]
docs = query.stream()
async for doc in docs:
print(f"{doc.id} => {doc.to_dict()}")
async def get_full_collection():
db = firestore.AsyncClient()
# [START firestore_data_get_all_documents_async]
docs = db.collection("cities").stream()
async for doc in docs:
print(f"{doc.id} => {doc.to_dict()}")
# [END firestore_data_get_all_documents_async]
async def structure_doc_ref():
db = firestore.AsyncClient()
# [START firestore_data_reference_document_async]
a_lovelace_ref = db.collection("users").document("alovelace")
# [END firestore_data_reference_document_async]
print(a_lovelace_ref)
async def structure_collection_ref():
db = firestore.AsyncClient()
# [START firestore_data_reference_collection_async]
users_ref = db.collection("users")
# [END firestore_data_reference_collection_async]
print(users_ref)
async def structure_doc_ref_alternate():
db = firestore.AsyncClient()
# [START firestore_data_reference_document_path_async]
a_lovelace_ref = db.document("users/alovelace")
# [END firestore_data_reference_document_path_async]
return a_lovelace_ref
async def structure_subcollection_ref():
db = firestore.AsyncClient()
# [START firestore_data_reference_subcollection_async]
room_a_ref = db.collection("rooms").document("roomA")
message_ref = room_a_ref.collection("messages").document("message1")
# [END firestore_data_reference_subcollection_async]
print(message_ref)
async def update_doc():
db = firestore.AsyncClient()
await db.collection("cities").document("DC").set(
City("Washington D.C.", None, "USA", True, 680000, ["east_coast"]).to_dict()
)
# [START firestore_data_set_field_async]
city_ref = db.collection("cities").document("DC")
# Set the capital field
await city_ref.update({"capital": True})
# [END firestore_data_set_field_async]
async def update_doc_array():
db = firestore.AsyncClient()
await db.collection("cities").document("DC").set(
City("Washington D.C.", None, "USA", True, 680000, ["east_coast"]).to_dict()
)
# [START firestore_data_set_array_operations_async]
city_ref = db.collection("cities").document("DC")
# Atomically add a new region to the 'regions' array field.
await city_ref.update({"regions": firestore.ArrayUnion(["greater_virginia"])})
# // Atomically remove a region from the 'regions' array field.
await city_ref.update({"regions": firestore.ArrayRemove(["east_coast"])})
# [END firestore_data_set_array_operations_async]
city = await city_ref.get()
print(f"Updated the regions field of the DC. {city.to_dict()}")
async def update_multiple():
db = firestore.AsyncClient()
await db.collection("cities").document("DC").set(
City("Washington D.C.", None, "USA", True, 680000, ["east_coast"]).to_dict()
)
# [START firestore_update_multiple_async]
doc_ref = db.collection("cities").document("DC")
await doc_ref.update({"name": "Washington D.C.", "country": "USA", "capital": True})
# [END firestore_update_multiple_async]
async def update_create_if_missing():
db = firestore.AsyncClient()
# [START firestore_data_set_doc_upsert_async]
city_ref = db.collection("cities").document("BJ")
await city_ref.set({"capital": True}, merge=True)
# [END firestore_data_set_doc_upsert_async]
async def update_nested():
db = firestore.AsyncClient()
# [START firestore_data_set_nested_fields_async]
# Create an initial document to update
frank_ref = db.collection("users").document("frank")
await frank_ref.set(
{
"name": "Frank",
"favorites": {"food": "Pizza", "color": "Blue", "subject": "Recess"},
"age": 12,
}
)
# Update age and favorite color
await frank_ref.update({"age": 13, "favorites.color": "Red"})
# [END firestore_data_set_nested_fields_async]
async def update_server_timestamp():
db = firestore.AsyncClient()
# [START firestore_data_set_server_timestamp_async]
city_ref = db.collection("objects").document("some-id")
await city_ref.update({"timestamp": firestore.SERVER_TIMESTAMP})
# [END firestore_data_set_server_timestamp_async]
async def update_data_transaction():
db = firestore.AsyncClient()
# [START firestore_transaction_document_update_async]
transaction = db.transaction()
city_ref = db.collection("cities").document("SF")
@firestore.async_transactional
async def update_in_transaction(transaction, city_ref):
snapshot = await city_ref.get(transaction=transaction)
transaction.update(city_ref, {"population": snapshot.get("population") + 1})
await update_in_transaction(transaction, city_ref)
# [END firestore_transaction_document_update_async]
async def update_data_transaction_result():
db = firestore.AsyncClient()
# [START firestore_transaction_document_update_conditional_async]
transaction = db.transaction()
city_ref = db.collection("cities").document("SF")
@firestore.async_transactional
async def update_in_transaction(transaction, city_ref):
snapshot = await city_ref.get(transaction=transaction)
new_population = snapshot.get("population") + 1
if new_population < 1000000:
transaction.update(city_ref, {"population": new_population})
return True
else:
return False
result = await update_in_transaction(transaction, city_ref)
if result:
print("Population updated")
else:
print("Sorry! Population is too big.")
# [END firestore_transaction_document_update_conditional_async]
async def update_data_batch():
db = firestore.AsyncClient()
# [START firestore_data_batch_writes_async]
batch = db.batch()
# Set the data for NYC
nyc_ref = db.collection("cities").document("NYC")
batch.set(nyc_ref, {"name": "New York City"})
# Update the population for SF
sf_ref = db.collection("cities").document("SF")
batch.update(sf_ref, {"population": 1000000})
# Delete DEN
den_ref = db.collection("cities").document("DEN")
batch.delete(den_ref)
# Commit the batch
await batch.commit()
# [END firestore_data_batch_writes_async]
async def compound_query_example():
db = firestore.AsyncClient()
# [START firestore_query_filter_eq_string_async]
# Create a reference to the cities collection
cities_ref = db.collection("cities")
# Create a query against the collection
query_ref = cities_ref.where(filter=FieldFilter("state", "==", "CA"))
# [END firestore_query_filter_eq_string_async]
return query_ref
async def compound_query_simple():
db = firestore.AsyncClient()
# [START firestore_query_filter_eq_boolean_async]
cities_ref = db.collection("cities")
query = cities_ref.where(filter=FieldFilter("capital", "==", True))
# [END firestore_query_filter_eq_boolean_async]
print(query)
async def compound_query_single_clause():
db = firestore.AsyncClient()
# [START firestore_query_filter_single_examples_async]
cities_ref = db.collection("cities")
cities_ref.where(filter=FieldFilter("state", "==", "CA"))
cities_ref.where(filter=FieldFilter("population", "<", 1000000))
cities_ref.where(filter=FieldFilter("name", ">=", "San Francisco"))
# [END firestore_query_filter_single_examples_async]
async def compound_query_valid_multi_clause():
db = firestore.AsyncClient()
# [START firestore_query_filter_compound_multi_eq_async]
cities_ref = db.collection("cities")
denver_query = cities_ref.where(filter=FieldFilter("state", "==", "CO")).where(
filter=FieldFilter("name", "==", "Denver")
)
large_us_cities_query = cities_ref.where(
filter=FieldFilter("state", "==", "CA")
).where(filter=FieldFilter("population", ">", 1000000))
# [END firestore_query_filter_compound_multi_eq_async]
print(denver_query)
print(large_us_cities_query)
async def compound_query_valid_single_field():
db = firestore.AsyncClient()
# [START firestore_query_filter_range_valid_async]
cities_ref = db.collection("cities")
cities_ref.where(filter=FieldFilter("state", ">=", "CA")).where(
filter=FieldFilter("state", "<=", "IN")
)
# [END firestore_query_filter_range_valid_async]
async def compound_query_invalid_multi_field():
db = firestore.AsyncClient()
# [START firestore_query_filter_range_invalid_async]
cities_ref = db.collection("cities")
cities_ref.where(filter=FieldFilter("state", ">=", "CA")).where(
filter=FieldFilter("population", ">=", 1000000)
)
# [END firestore_query_filter_range_invalid_async]
async def order_simple_limit():
db = firestore.AsyncClient()
# [START firestore_order_simple_limit_async]
db.collection("cities").order_by("name").limit(3).stream()
# [END firestore_order_simple_limit_async]
async def order_simple_limit_desc():
db = firestore.AsyncClient()
# [START firestore_query_order_desc_limit_async]
cities_ref = db.collection("cities")
query = cities_ref.order_by("name", direction=firestore.Query.DESCENDING).limit(3)
results = query.stream()
# [END firestore_query_order_desc_limit_async]
print(results)
async def order_multiple():
db = firestore.AsyncClient()
# [START firestore_query_order_multi_async]
cities_ref = db.collection("cities")
cities_ref.order_by("state").order_by(
"population", direction=firestore.Query.DESCENDING
)
# [END firestore_query_order_multi_async]
async def order_where_limit():
db = firestore.AsyncClient()
# [START firestore_query_order_limit_field_valid_async]
cities_ref = db.collection("cities")
query = (
cities_ref.where(filter=FieldFilter("population", ">", 2500000))
.order_by("population")
.limit(2)
)
results = query.stream()
# [END firestore_query_order_limit_field_valid_async]
print([d async for d in results])
async def order_limit_to_last():
db = firestore.AsyncClient()
# [START firestore_query_order_limit_async]
cities_ref = db.collection("cities")
query = cities_ref.order_by("name").limit_to_last(2)
results = await query.get()
# [END firestore_query_order_limit_async]
print(results)
async def order_where_valid():
db = firestore.AsyncClient()
# [START firestore_query_order_with_filter_async]
cities_ref = db.collection("cities")
query = cities_ref.where(filter=FieldFilter("population", ">", 2500000)).order_by(
"population"
)
results = query.stream()
# [END firestore_query_order_with_filter_async]
print([d async for d in results])
async def order_where_invalid():
db = firestore.AsyncClient()
# [START firestore_query_order_field_invalid_async]
cities_ref = db.collection("cities")
query = cities_ref.where(filter=FieldFilter("population", ">", 2500000)).order_by(
"country"
)
results = query.stream()
# [END firestore_query_order_field_invalid_async]
print(results)
async def cursor_simple_start_at():
db = firestore.AsyncClient()
# [START firestore_query_cursor_start_at_field_value_single_async]
cities_ref = db.collection("cities")
query_start_at = cities_ref.order_by("population").start_at({"population": 1000000})
# [END firestore_query_cursor_start_at_field_value_single_async]
return query_start_at
async def cursor_simple_end_at():
db = firestore.AsyncClient()
# [START firestore_query_cursor_end_at_field_value_single_async]
cities_ref = db.collection("cities")
query_end_at = cities_ref.order_by("population").end_at({"population": 1000000})
# [END firestore_query_cursor_end_at_field_value_single_async]
return query_end_at
async def snapshot_cursors():
db = firestore.AsyncClient()
# [START firestore_query_cursor_start_at_document_async]
doc_ref = db.collection("cities").document("SF")
snapshot = await doc_ref.get()
start_at_snapshot = (
db.collection("cities").order_by("population").start_at(snapshot)
)
# [END firestore_query_cursor_start_at_document_async]
results = start_at_snapshot.limit(10).stream()
async for doc in results:
print(f"{doc.id}")
return results
async def cursor_paginate():
db = firestore.AsyncClient()
# [START firestore_query_cursor_pagination_async]
cities_ref = db.collection("cities")
first_query = cities_ref.order_by("population").limit(3)
# Get the last document from the results
docs = [d async for d in first_query.stream()]
last_doc = list(docs)[-1]
# Construct a new query starting at this document
# Note: this will not have the desired effect if
# multiple cities have the exact same population value
last_pop = last_doc.to_dict()["population"]
next_query = (
cities_ref.order_by("population").start_after({"population": last_pop}).limit(3)
)
# Use the query for pagination
# ...
# [END firestore_query_cursor_pagination_async]
return next_query
async def cursor_multiple_conditions():
db = firestore.AsyncClient()
# [START firestore_query_cursor_start_at_field_value_multi_async]
start_at_name = (
db.collection("cities")
.order_by("name")
.order_by("state")
.start_at({"name": "Springfield"})
)
start_at_name_and_state = (
db.collection("cities")
.order_by("name")
.order_by("state")
.start_at({"name": "Springfield", "state": "Missouri"})
)
# [END firestore_query_cursor_start_at_field_value_multi_async]
return start_at_name, start_at_name_and_state
async def delete_single_doc():
db = firestore.AsyncClient()
# [START firestore_data_delete_doc_async]
await db.collection("cities").document("DC").delete()
# [END firestore_data_delete_doc_async]
async def delete_field():
db = firestore.AsyncClient()
# [START firestore_data_delete_field_async]
city_ref = db.collection("cities").document("BJ")
await city_ref.update({"capital": firestore.DELETE_FIELD})
# [END firestore_data_delete_field_async]
async def delete_full_collection():
db = firestore.AsyncClient()
# [START firestore_data_delete_collection_async]
async def delete_collection(coll_ref, batch_size):
docs = coll_ref.limit(batch_size).stream()
deleted = 0
async for doc in docs:
print(f"Deleting doc {doc.id} => {doc.to_dict()}")
await doc.reference.delete()
deleted = deleted + 1
if deleted >= batch_size:
return delete_collection(coll_ref, batch_size)
# [END firestore_data_delete_collection_async]
await delete_collection(db.collection("cities"), 10)
await delete_collection(db.collection("data"), 10)
await delete_collection(db.collection("objects"), 10)
await delete_collection(db.collection("users"), 10)
async def collection_group_query(db):
# [START firestore_query_collection_group_dataset_async]
cities = db.collection("cities")
sf_landmarks = cities.document("SF").collection("landmarks")
await sf_landmarks.document().set({"name": "Golden Gate Bridge", "type": "bridge"})
await sf_landmarks.document().set({"name": "Legion of Honor", "type": "museum"})
la_landmarks = cities.document("LA").collection("landmarks")
await la_landmarks.document().set({"name": "Griffith Park", "type": "park"})
await la_landmarks.document().set({"name": "The Getty", "type": "museum"})
dc_landmarks = cities.document("DC").collection("landmarks")
await dc_landmarks.document().set({"name": "Lincoln Memorial", "type": "memorial"})
await dc_landmarks.document().set(
{"name": "National Air and Space Museum", "type": "museum"}
)
tok_landmarks = cities.document("TOK").collection("landmarks")
await tok_landmarks.document().set({"name": "Ueno Park", "type": "park"})
await tok_landmarks.document().set(
{"name": "National Museum of Nature and Science", "type": "museum"}
)
bj_landmarks = cities.document("BJ").collection("landmarks")
await bj_landmarks.document().set({"name": "Jingshan Park", "type": "park"})
await bj_landmarks.document().set(
{"name": "Beijing Ancient Observatory", "type": "museum"}
)
# [END firestore_query_collection_group_dataset_async]
# [START firestore_query_collection_group_filter_eq_async]
museums = db.collection_group("landmarks").where(
filter=FieldFilter("type", "==", "museum")
)
docs = museums.stream()
async for doc in docs:
print(f"{doc.id} => {doc.to_dict()}")
# [END firestore_query_collection_group_filter_eq_async]
return docs
async def array_contains_any_queries(db):
# [START firestore_query_filter_array_contains_any_async]
cities_ref = db.collection("cities")
query = cities_ref.where(
filter=FieldFilter(
"regions", "array_contains_any", ["west_coast", "east_coast"]
)
)
return query
# [END firestore_query_filter_array_contains_any_async]
async def in_query_without_array(db):
# [START firestore_query_filter_in_async]
cities_ref = db.collection("cities")
query = cities_ref.where(filter=FieldFilter("country", "in", ["USA", "Japan"]))
return query
# [END firestore_query_filter_in_async]
async def in_query_with_array(db):
# [START firestore_query_filter_in_with_array_async]
cities_ref = db.collection("cities")
query = cities_ref.where(
filter=FieldFilter("regions", "in", [["west_coast"], ["east_coast"]])
)
return query
# [END firestore_query_filter_in_with_array_async]
async def update_document_increment(db):
# [START firestore_data_set_numeric_increment_async]
washington_ref = db.collection("cities").document("DC")
await washington_ref.update({"population": firestore.Increment(50)})
# [END firestore_data_set_numeric_increment_async]
async def list_document_subcollections():
db = firestore.AsyncClient()
# [START firestore_data_get_sub_collections_async]
collections = db.collection("cities").document("SF").collections()
async for collection in collections:
async for doc in collection.stream():
print(f"{doc.id} => {doc.to_dict()}")
# [END firestore_data_get_sub_collections_async]
| City |
python | pytorch__pytorch | torch/cuda/__init__.py | {
"start": 57866,
"end": 58092
} | class ____(_CudaLegacyStorage):
@classproperty
def dtype(self):
_warn_typed_storage_removal()
return self._dtype
@classproperty
def _dtype(self):
return torch.cdouble
| ComplexDoubleStorage |
python | getsentry__sentry | src/sentry/net/http.py | {
"start": 3989,
"end": 4065
} | class ____(SafeConnectionMixin, HTTPSConnection):
pass
| SafeHTTPSConnection |
python | ApeWorX__ape | src/ape_accounts/accounts.py | {
"start": 1493,
"end": 1684
} | class ____(AccountsError):
"""
Raised when password to unlock an account is incorrect.
"""
def __init__(self):
super().__init__("Invalid password")
| InvalidPasswordError |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_internal/operations/prepare.py | {
"start": 6887,
"end": 28118
} | class ____:
"""Prepares a Requirement"""
def __init__(
self,
build_dir: str,
download_dir: Optional[str],
src_dir: str,
build_isolation: bool,
check_build_deps: bool,
build_tracker: BuildTracker,
session: PipSession,
progress_bar: str,
finder: PackageFinder,
require_hashes: bool,
use_user_site: bool,
lazy_wheel: bool,
verbosity: int,
legacy_resolver: bool,
) -> None:
super().__init__()
self.src_dir = src_dir
self.build_dir = build_dir
self.build_tracker = build_tracker
self._session = session
self._download = Downloader(session, progress_bar)
self._batch_download = BatchDownloader(session, progress_bar)
self.finder = finder
# Where still-packed archives should be written to. If None, they are
# not saved, and are deleted immediately after unpacking.
self.download_dir = download_dir
# Is build isolation allowed?
self.build_isolation = build_isolation
# Should check build dependencies?
self.check_build_deps = check_build_deps
# Should hash-checking be required?
self.require_hashes = require_hashes
# Should install in user site-packages?
self.use_user_site = use_user_site
# Should wheels be downloaded lazily?
self.use_lazy_wheel = lazy_wheel
# How verbose should underlying tooling be?
self.verbosity = verbosity
# Are we using the legacy resolver?
self.legacy_resolver = legacy_resolver
# Memoized downloaded files, as mapping of url: path.
self._downloaded: Dict[str, str] = {}
# Previous "header" printed for a link-based InstallRequirement
self._previous_requirement_header = ("", "")
def _log_preparing_link(self, req: InstallRequirement) -> None:
"""Provide context for the requirement being prepared."""
if req.link.is_file and not req.is_wheel_from_cache:
message = "Processing %s"
information = str(display_path(req.link.file_path))
else:
message = "Collecting %s"
information = redact_auth_from_requirement(req.req) if req.req else str(req)
# If we used req.req, inject requirement source if available (this
# would already be included if we used req directly)
if req.req and req.comes_from:
if isinstance(req.comes_from, str):
comes_from: Optional[str] = req.comes_from
else:
comes_from = req.comes_from.from_path()
if comes_from:
information += f" (from {comes_from})"
if (message, information) != self._previous_requirement_header:
self._previous_requirement_header = (message, information)
logger.info(message, information)
if req.is_wheel_from_cache:
with indent_log():
logger.info("Using cached %s", req.link.filename)
def _ensure_link_req_src_dir(
self, req: InstallRequirement, parallel_builds: bool
) -> None:
"""Ensure source_dir of a linked InstallRequirement."""
# Since source_dir is only set for editable requirements.
if req.link.is_wheel:
# We don't need to unpack wheels, so no need for a source
# directory.
return
assert req.source_dir is None
if req.link.is_existing_dir():
# build local directories in-tree
req.source_dir = req.link.file_path
return
# We always delete unpacked sdists after pip runs.
req.ensure_has_source_dir(
self.build_dir,
autodelete=True,
parallel_builds=parallel_builds,
)
req.ensure_pristine_source_checkout()
def _get_linked_req_hashes(self, req: InstallRequirement) -> Hashes:
# By the time this is called, the requirement's link should have
# been checked so we can tell what kind of requirements req is
# and raise some more informative errors than otherwise.
# (For example, we can raise VcsHashUnsupported for a VCS URL
# rather than HashMissing.)
if not self.require_hashes:
return req.hashes(trust_internet=True)
# We could check these first 2 conditions inside unpack_url
# and save repetition of conditions, but then we would
# report less-useful error messages for unhashable
# requirements, complaining that there's no hash provided.
if req.link.is_vcs:
raise VcsHashUnsupported()
if req.link.is_existing_dir():
raise DirectoryUrlHashUnsupported()
# Unpinned packages are asking for trouble when a new version
# is uploaded. This isn't a security check, but it saves users
# a surprising hash mismatch in the future.
# file:/// URLs aren't pinnable, so don't complain about them
# not being pinned.
if not req.is_direct and not req.is_pinned:
raise HashUnpinned()
# If known-good hashes are missing for this requirement,
# shim it with a facade object that will provoke hash
# computation and then raise a HashMissing exception
# showing the user what the hash should be.
return req.hashes(trust_internet=False) or MissingHashes()
def _fetch_metadata_only(
self,
req: InstallRequirement,
) -> Optional[BaseDistribution]:
if self.legacy_resolver:
logger.debug(
"Metadata-only fetching is not used in the legacy resolver",
)
return None
if self.require_hashes:
logger.debug(
"Metadata-only fetching is not used as hash checking is required",
)
return None
# Try PEP 658 metadata first, then fall back to lazy wheel if unavailable.
return self._fetch_metadata_using_link_data_attr(
req
) or self._fetch_metadata_using_lazy_wheel(req.link)
def _fetch_metadata_using_link_data_attr(
self,
req: InstallRequirement,
) -> Optional[BaseDistribution]:
"""Fetch metadata from the data-dist-info-metadata attribute, if possible."""
# (1) Get the link to the metadata file, if provided by the backend.
metadata_link = req.link.metadata_link()
if metadata_link is None:
return None
assert req.req is not None
logger.verbose(
"Obtaining dependency information for %s from %s",
req.req,
metadata_link,
)
# (2) Download the contents of the METADATA file, separate from the dist itself.
metadata_file = get_http_url(
metadata_link,
self._download,
hashes=metadata_link.as_hashes(),
)
with open(metadata_file.path, "rb") as f:
metadata_contents = f.read()
# (3) Generate a dist just from those file contents.
metadata_dist = get_metadata_distribution(
metadata_contents,
req.link.filename,
req.req.name,
)
# (4) Ensure the Name: field from the METADATA file matches the name from the
# install requirement.
#
# NB: raw_name will fall back to the name from the install requirement if
# the Name: field is not present, but it's noted in the raw_name docstring
# that that should NEVER happen anyway.
if canonicalize_name(metadata_dist.raw_name) != canonicalize_name(req.req.name):
raise MetadataInconsistent(
req, "Name", req.req.name, metadata_dist.raw_name
)
return metadata_dist
def _fetch_metadata_using_lazy_wheel(
self,
link: Link,
) -> Optional[BaseDistribution]:
"""Fetch metadata using lazy wheel, if possible."""
# --use-feature=fast-deps must be provided.
if not self.use_lazy_wheel:
return None
if link.is_file or not link.is_wheel:
logger.debug(
"Lazy wheel is not used as %r does not point to a remote wheel",
link,
)
return None
wheel = Wheel(link.filename)
name = canonicalize_name(wheel.name)
logger.info(
"Obtaining dependency information from %s %s",
name,
wheel.version,
)
url = link.url.split("#", 1)[0]
try:
return dist_from_wheel_url(name, url, self._session)
except HTTPRangeRequestUnsupported:
logger.debug("%s does not support range requests", url)
return None
def _complete_partial_requirements(
self,
partially_downloaded_reqs: Iterable[InstallRequirement],
parallel_builds: bool = False,
) -> None:
"""Download any requirements which were only fetched by metadata."""
# Download to a temporary directory. These will be copied over as
# needed for downstream 'download', 'wheel', and 'install' commands.
temp_dir = TempDirectory(kind="unpack", globally_managed=True).path
# Map each link to the requirement that owns it. This allows us to set
# `req.local_file_path` on the appropriate requirement after passing
# all the links at once into BatchDownloader.
links_to_fully_download: Dict[Link, InstallRequirement] = {}
for req in partially_downloaded_reqs:
assert req.link
links_to_fully_download[req.link] = req
batch_download = self._batch_download(
links_to_fully_download.keys(),
temp_dir,
)
for link, (filepath, _) in batch_download:
logger.debug("Downloading link %s to %s", link, filepath)
req = links_to_fully_download[link]
# Record the downloaded file path so wheel reqs can extract a Distribution
# in .get_dist().
req.local_file_path = filepath
# Record that the file is downloaded so we don't do it again in
# _prepare_linked_requirement().
self._downloaded[req.link.url] = filepath
# If this is an sdist, we need to unpack it after downloading, but the
# .source_dir won't be set up until we are in _prepare_linked_requirement().
# Add the downloaded archive to the install requirement to unpack after
# preparing the source dir.
if not req.is_wheel:
req.needs_unpacked_archive(Path(filepath))
# This step is necessary to ensure all lazy wheels are processed
# successfully by the 'download', 'wheel', and 'install' commands.
for req in partially_downloaded_reqs:
self._prepare_linked_requirement(req, parallel_builds)
def prepare_linked_requirement(
self, req: InstallRequirement, parallel_builds: bool = False
) -> BaseDistribution:
"""Prepare a requirement to be obtained from req.link."""
assert req.link
self._log_preparing_link(req)
with indent_log():
# Check if the relevant file is already available
# in the download directory
file_path = None
if self.download_dir is not None and req.link.is_wheel:
hashes = self._get_linked_req_hashes(req)
file_path = _check_download_dir(
req.link,
self.download_dir,
hashes,
# When a locally built wheel has been found in cache, we don't warn
# about re-downloading when the already downloaded wheel hash does
# not match. This is because the hash must be checked against the
# original link, not the cached link. It that case the already
# downloaded file will be removed and re-fetched from cache (which
# implies a hash check against the cache entry's origin.json).
warn_on_hash_mismatch=not req.is_wheel_from_cache,
)
if file_path is not None:
# The file is already available, so mark it as downloaded
self._downloaded[req.link.url] = file_path
else:
# The file is not available, attempt to fetch only metadata
metadata_dist = self._fetch_metadata_only(req)
if metadata_dist is not None:
req.needs_more_preparation = True
return metadata_dist
# None of the optimizations worked, fully prepare the requirement
return self._prepare_linked_requirement(req, parallel_builds)
def prepare_linked_requirements_more(
self, reqs: Iterable[InstallRequirement], parallel_builds: bool = False
) -> None:
"""Prepare linked requirements more, if needed."""
reqs = [req for req in reqs if req.needs_more_preparation]
for req in reqs:
# Determine if any of these requirements were already downloaded.
if self.download_dir is not None and req.link.is_wheel:
hashes = self._get_linked_req_hashes(req)
file_path = _check_download_dir(req.link, self.download_dir, hashes)
if file_path is not None:
self._downloaded[req.link.url] = file_path
req.needs_more_preparation = False
# Prepare requirements we found were already downloaded for some
# reason. The other downloads will be completed separately.
partially_downloaded_reqs: List[InstallRequirement] = []
for req in reqs:
if req.needs_more_preparation:
partially_downloaded_reqs.append(req)
else:
self._prepare_linked_requirement(req, parallel_builds)
# TODO: separate this part out from RequirementPreparer when the v1
# resolver can be removed!
self._complete_partial_requirements(
partially_downloaded_reqs,
parallel_builds=parallel_builds,
)
def _prepare_linked_requirement(
self, req: InstallRequirement, parallel_builds: bool
) -> BaseDistribution:
assert req.link
link = req.link
hashes = self._get_linked_req_hashes(req)
if hashes and req.is_wheel_from_cache:
assert req.download_info is not None
assert link.is_wheel
assert link.is_file
# We need to verify hashes, and we have found the requirement in the cache
# of locally built wheels.
if (
isinstance(req.download_info.info, ArchiveInfo)
and req.download_info.info.hashes
and hashes.has_one_of(req.download_info.info.hashes)
):
# At this point we know the requirement was built from a hashable source
# artifact, and we verified that the cache entry's hash of the original
# artifact matches one of the hashes we expect. We don't verify hashes
# against the cached wheel, because the wheel is not the original.
hashes = None
else:
logger.warning(
"The hashes of the source archive found in cache entry "
"don't match, ignoring cached built wheel "
"and re-downloading source."
)
req.link = req.cached_wheel_source_link
link = req.link
self._ensure_link_req_src_dir(req, parallel_builds)
if link.is_existing_dir():
local_file = None
elif link.url not in self._downloaded:
try:
local_file = unpack_url(
link,
req.source_dir,
self._download,
self.verbosity,
self.download_dir,
hashes,
)
except NetworkConnectionError as exc:
raise InstallationError(
f"Could not install requirement {req} because of HTTP "
f"error {exc} for URL {link}"
)
else:
file_path = self._downloaded[link.url]
if hashes:
hashes.check_against_path(file_path)
local_file = File(file_path, content_type=None)
# If download_info is set, we got it from the wheel cache.
if req.download_info is None:
# Editables don't go through this function (see
# prepare_editable_requirement).
assert not req.editable
req.download_info = direct_url_from_link(link, req.source_dir)
# Make sure we have a hash in download_info. If we got it as part of the
# URL, it will have been verified and we can rely on it. Otherwise we
# compute it from the downloaded file.
# FIXME: https://github.com/pypa/pip/issues/11943
if (
isinstance(req.download_info.info, ArchiveInfo)
and not req.download_info.info.hashes
and local_file
):
hash = hash_file(local_file.path)[0].hexdigest()
# We populate info.hash for backward compatibility.
# This will automatically populate info.hashes.
req.download_info.info.hash = f"sha256={hash}"
# For use in later processing,
# preserve the file path on the requirement.
if local_file:
req.local_file_path = local_file.path
dist = _get_prepared_distribution(
req,
self.build_tracker,
self.finder,
self.build_isolation,
self.check_build_deps,
)
return dist
def save_linked_requirement(self, req: InstallRequirement) -> None:
assert self.download_dir is not None
assert req.link is not None
link = req.link
if link.is_vcs or (link.is_existing_dir() and req.editable):
# Make a .zip of the source_dir we already created.
req.archive(self.download_dir)
return
if link.is_existing_dir():
logger.debug(
"Not copying link to destination directory "
"since it is a directory: %s",
link,
)
return
if req.local_file_path is None:
# No distribution was downloaded for this requirement.
return
download_location = os.path.join(self.download_dir, link.filename)
if not os.path.exists(download_location):
shutil.copy(req.local_file_path, download_location)
download_path = display_path(download_location)
logger.info("Saved %s", download_path)
def prepare_editable_requirement(
self,
req: InstallRequirement,
) -> BaseDistribution:
"""Prepare an editable requirement."""
assert req.editable, "cannot prepare a non-editable req as editable"
logger.info("Obtaining %s", req)
with indent_log():
if self.require_hashes:
raise InstallationError(
f"The editable requirement {req} cannot be installed when "
"requiring hashes, because there is no single file to "
"hash."
)
req.ensure_has_source_dir(self.src_dir)
req.update_editable()
assert req.source_dir
req.download_info = direct_url_for_editable(req.unpacked_source_directory)
dist = _get_prepared_distribution(
req,
self.build_tracker,
self.finder,
self.build_isolation,
self.check_build_deps,
)
req.check_if_exists(self.use_user_site)
return dist
def prepare_installed_requirement(
self,
req: InstallRequirement,
skip_reason: str,
) -> BaseDistribution:
"""Prepare an already-installed requirement."""
assert req.satisfied_by, "req should have been satisfied but isn't"
assert skip_reason is not None, (
"did not get skip reason skipped but req.satisfied_by "
f"is set to {req.satisfied_by}"
)
logger.info(
"Requirement %s: %s (%s)", skip_reason, req, req.satisfied_by.version
)
with indent_log():
if self.require_hashes:
logger.debug(
"Since it is already installed, we are trusting this "
"package without checking its hash. To ensure a "
"completely repeatable environment, install into an "
"empty virtualenv."
)
return InstalledDistribution(req).get_metadata_distribution()
| RequirementPreparer |
python | coleifer__peewee | tests/regressions.py | {
"start": 34864,
"end": 34910
} | class ____(TestModel):
url = TextField()
| Site |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/datamodels/taskinstance.py | {
"start": 12066,
"end": 12228
} | class ____(BaseModel):
"""Response for inactive assets."""
inactive_assets: Annotated[list[AssetProfile], Field(default_factory=list)]
| InactiveAssetsResponse |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI046.py | {
"start": 155,
"end": 198
} | class ____(Protocol[_T]):
x: _T
# OK
| _Baz |
python | numpy__numpy | numpy/ma/tests/test_core.py | {
"start": 208555,
"end": 212355
} | class ____:
def _do_add_test(self, add):
# sanity check
assert_(add(np.ma.masked, 1) is np.ma.masked)
# now try with a vector
vector = np.array([1, 2, 3])
result = add(np.ma.masked, vector)
# lots of things could go wrong here
assert_(result is not np.ma.masked)
assert_(not isinstance(result, np.ma.core.MaskedConstant))
assert_equal(result.shape, vector.shape)
assert_equal(np.ma.getmask(result), np.ones(vector.shape, dtype=bool))
def test_ufunc(self):
self._do_add_test(np.add)
def test_operator(self):
self._do_add_test(lambda a, b: a + b)
def test_ctor(self):
m = np.ma.array(np.ma.masked)
# most importantly, we do not want to create a new MaskedConstant
# instance
assert_(not isinstance(m, np.ma.core.MaskedConstant))
assert_(m is not np.ma.masked)
def test_repr(self):
# copies should not exist, but if they do, it should be obvious that
# something is wrong
assert_equal(repr(np.ma.masked), 'masked')
# create a new instance in a weird way
masked2 = np.ma.MaskedArray.__new__(np.ma.core.MaskedConstant)
assert_not_equal(repr(masked2), 'masked')
def test_pickle(self):
from io import BytesIO
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
with BytesIO() as f:
pickle.dump(np.ma.masked, f, protocol=proto)
f.seek(0)
res = pickle.load(f)
assert_(res is np.ma.masked)
def test_copy(self):
# gh-9328
# copy is a no-op, like it is with np.True_
assert_equal(
np.ma.masked.copy() is np.ma.masked,
np.True_.copy() is np.True_)
def test__copy(self):
import copy
assert_(
copy.copy(np.ma.masked) is np.ma.masked)
def test_deepcopy(self):
import copy
assert_(
copy.deepcopy(np.ma.masked) is np.ma.masked)
def test_immutable(self):
orig = np.ma.masked
assert_raises(np.ma.core.MaskError, operator.setitem, orig, (), 1)
assert_raises(ValueError, operator.setitem, orig.data, (), 1)
assert_raises(ValueError, operator.setitem, orig.mask, (), False)
view = np.ma.masked.view(np.ma.MaskedArray)
assert_raises(ValueError, operator.setitem, view, (), 1)
assert_raises(ValueError, operator.setitem, view.data, (), 1)
assert_raises(ValueError, operator.setitem, view.mask, (), False)
def test_coercion_int(self):
a_i = np.zeros((), int)
assert_raises(MaskError, operator.setitem, a_i, (), np.ma.masked)
assert_raises(MaskError, int, np.ma.masked)
def test_coercion_float(self):
a_f = np.zeros((), float)
pytest.warns(UserWarning, operator.setitem, a_f, (), np.ma.masked)
assert_(np.isnan(a_f[()]))
@pytest.mark.xfail(reason="See gh-9750")
def test_coercion_unicode(self):
a_u = np.zeros((), 'U10')
a_u[()] = np.ma.masked
assert_equal(a_u[()], '--')
@pytest.mark.xfail(reason="See gh-9750")
def test_coercion_bytes(self):
a_b = np.zeros((), 'S10')
a_b[()] = np.ma.masked
assert_equal(a_b[()], b'--')
def test_subclass(self):
# https://github.com/astropy/astropy/issues/6645
class Sub(type(np.ma.masked)):
pass
a = Sub()
assert_(a is Sub())
assert_(a is not np.ma.masked)
assert_not_equal(repr(a), 'masked')
def test_attributes_readonly(self):
assert_raises(AttributeError, setattr, np.ma.masked, 'shape', (1,))
assert_raises(AttributeError, setattr, np.ma.masked, 'dtype', np.int64)
| TestMaskedConstant |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/sticky_variant/package.py | {
"start": 227,
"end": 617
} | class ____(AutotoolsPackage):
"""Package with a sticky variant and a conflict"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
variant("allow-gcc", description="", default=False, sticky=True)
conflicts("%gcc", when="~allow-gcc")
depends_on("c", type="build")
| StickyVariant |
python | HIPS__autograd | autograd/builtins.py | {
"start": 592,
"end": 1082
} | class ____(Box):
__slots__ = []
__getitem__ = container_take
def __len__(self):
return len(self._value)
def __add__(self, other):
return sequence_extend_right(self, *other)
def __radd__(self, other):
return sequence_extend_left(self, *other)
def __contains__(self, elt):
return elt in self._value
def index(self, elt):
return self._value.index(elt)
SequenceBox.register(tuple_)
SequenceBox.register(list_)
| SequenceBox |
python | PrefectHQ__prefect | tests/test_task_worker.py | {
"start": 16225,
"end": 17066
} | class ____:
async def test_task_run_via_task_worker_respects_custom_task_run_name(
self, async_foo_task, prefect_client, events_pipeline
):
async_foo_task_with_custom_name = async_foo_task.with_options(
task_run_name="{x}"
)
task_worker = TaskWorker(async_foo_task_with_custom_name)
task_run_future = async_foo_task_with_custom_name.apply_async((42,))
task_run = await prefect_client.read_task_run(task_run_future.task_run_id)
await task_worker.execute_task_run(task_run)
await events_pipeline.process_events()
updated_task_run = await prefect_client.read_task_run(
task_run_future.task_run_id
)
assert updated_task_run.state.is_completed()
assert updated_task_run.name == "42"
| TestTaskWorkerCustomTaskRunName |
python | has2k1__plotnine | tests/test_doctools.py | {
"start": 636,
"end": 1055
} | class ____(stat):
"""
Stat ABC
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_AES = {"weight": None}
DEFAULT_PARAMS = {"geom": geom_abc, "position": "stack", "na_rm": False}
def test_document_stat():
doc = stat_abc.__doc__
assert "geom_abc" in doc
# assert "~plotnine.positions.position_stack" in doc
assert 'position, default="stack"' in doc
| stat_abc |
python | getsentry__sentry | fixtures/page_objects/trace_view.py | {
"start": 124,
"end": 1054
} | class ____(BasePage):
def __init__(self, browser, client):
super().__init__(browser)
self.client = client
self.global_selection = GlobalSelectionPage(browser)
def visit_trace_view(self, org, trace_id):
self.browser.get(f"/organizations/{org}/traces/trace/{trace_id}/")
self.wait_until_loaded()
def get_trace_span_row(self, op, description):
return self.browser.find_element(
by=By.XPATH,
value=(
f'//div[contains(@class, "TraceLeftColumnInner") and '
f'contains(normalize-space(), "{op} — {description}")]'
),
)
def wait_until_loaded(self):
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
def normalize_span_row_text(self, text):
# Remove newlines and extra spaces
return text.replace("\n", " ").replace("—", "-").strip()
| TraceViewWaterfallPage |
python | walkccc__LeetCode | solutions/3366. Minimum Array Sum/3366.py | {
"start": 0,
"end": 865
} | class ____:
def minArraySum(self, nums: list[int], k: int, op1: int, op2: int) -> int:
@functools.lru_cache(None)
def dp(i: int, op1: int, op2: int) -> int:
"""
Returns the minimum sum of nums[i..n - 1] with `op1` operations of op1 and
`op2` operations of op2.
"""
if i == len(nums):
return 0
res = nums[i] + dp(i + 1, op1, op2)
if op1 > 0:
res = min(res, (nums[i] + 1) // 2 + dp(i + 1, op1 - 1, op2))
if op2 > 0 and nums[i] >= k:
res = min(res, nums[i] - k + dp(i + 1, op1, op2 - 1))
if op1 > 0 and op2 > 0:
if (nums[i] + 1) // 2 >= k:
res = min(res, (nums[i] + 1) // 2 - k + dp(i + 1, op1 - 1, op2 - 1))
if nums[i] >= k:
res = min(res, (nums[i] - k + 1) // 2 + dp(i + 1, op1 - 1, op2 - 1))
return res
return dp(0, op1, op2)
| Solution |
python | wandb__wandb | wandb/errors/term.py | {
"start": 1646,
"end": 1759
} | class ____(Exception):
"""The output device is not sufficiently capable for the operation."""
| NotATerminalError |
python | google__jax | jax/experimental/array_serialization/tensorstore_impl.py | {
"start": 1579,
"end": 25274
} | class ____:
"""Limits host scratch memory usage when reading/writing checkpoints per process."""
def __init__(self, host_memory_bytes_limit: int):
self._max_bytes = host_memory_bytes_limit
self._available_bytes = host_memory_bytes_limit
self._cv = asyncio.Condition(lock=asyncio.Lock())
async def wait_for_bytes(self, requested_bytes):
if requested_bytes > self._max_bytes:
logger.debug("A single array item requests more bytes than we reserved"
" space for in the parallel pool: %d > %d. Increasing the"
" limit to %d.", requested_bytes, self._max_bytes,
requested_bytes)
bytes_currently_used = self._max_bytes - self._available_bytes
self._max_bytes = requested_bytes
self._available_bytes = self._max_bytes - bytes_currently_used
async with self._cv:
await self._cv.wait_for(lambda: self._available_bytes >= requested_bytes)
self._available_bytes -= requested_bytes
assert self._available_bytes >= 0
async def release_bytes(self, requested_bytes):
async with self._cv:
self._available_bytes += requested_bytes
assert self._available_bytes <= self._max_bytes
self._cv.notify_all()
def is_tensorstore_spec_leaf(leaf: Any):
# TODO(rdyro): think of a better way to detect which leaf is a ts config
return leaf is None or (isinstance(leaf, dict)
and ("driver" in leaf or "kvstore" in leaf))
def _prime_factors(x: int) -> list[int]:
# find prime factors of axis sizes to help efficiently find divisor chunks
factors = []
while x % 2 == 0:
factors.append(2)
x //= 2
for i in range(3, int(math.sqrt(x)) + 1, 2):
while x % i == 0:
factors.append(i)
x //= i
if x > 1:
factors.append(x)
return sorted(factors)
@functools.lru_cache(maxsize=1024)
def _compute_chunk_shape(
local_shape: Sequence[int], dtype: str | jnp.dtype,
file_size_target: int = _FILE_SIZE_TARGET) -> list[int]:
"""Compute a chunk such that it divides the local shape and is less than
target file size. This helps the tensorstore kvstore driver limit the largest
file size on disk to below the ``file_size_target``. We compute a chunk with a
byte size at most 110% of the ``file_size_target``.
"""
local_shape = list(local_shape)
if len(local_shape) == 0 or math.prod(local_shape) == 0:
# a zero size array needs a non-zero chunk passed to tensorstore for compat.
return [max(z, 1) for z in local_shape]
total_size = math.prod(local_shape) * jnp.dtype(dtype).itemsize
axis_prime_factors = [_prime_factors(z) for z in local_shape]
chunk_shape, chunk_size = list(local_shape), total_size
# while chunk_size exceeds target size, reduce chunk_shape
while chunk_size > 1.1 * file_size_target: # 10% buffer
# 1. find the smallest axis divisor across all axes
chosen_axis_idx, chosen_divisor = None, 1
for axis_idx in range(len(chunk_shape)):
if len(axis_prime_factors[axis_idx]) == 1: # ignore axes sizes == 1
continue
if (chosen_axis_idx is None
or chosen_divisor > axis_prime_factors[axis_idx][0]):
chosen_axis_idx = axis_idx
chosen_divisor = axis_prime_factors[axis_idx][0]
# 2. if no divisor found, give up, return current chunk shape
if chosen_axis_idx is None:
return chunk_shape
# 3. remove the applied divisor from prime factors
prime_factors = axis_prime_factors[chosen_axis_idx]
prime_factors.pop(0)
# 4. apply the found divisor to reduce the chunk size
chunk_shape[chosen_axis_idx] //= chosen_divisor
chunk_size //= chosen_divisor
return chunk_shape
def _get_tensorstore_metadata(arr, is_remote: bool = False,
file_size_target: int = _FILE_SIZE_TARGET,
driver: str = _TS_ARRAY_DRIVER) -> dict[str, Any]:
global_shape, dtype = arr.shape, arr.dtype
if isinstance(arr, jax.Array):
local_shape = arr.sharding.shard_shape(global_shape)
else: # np.ndarray
local_shape = global_shape
return _get_tensorstore_metadata_cached(global_shape, dtype, local_shape,
is_remote, file_size_target, driver)
@functools.lru_cache(maxsize=1024)
def _get_tensorstore_metadata_cached(
global_shape: Sequence[int], dtype: jnp.dtype, local_shape: Sequence[int],
is_remote: bool = False, file_size_target: int = _FILE_SIZE_TARGET,
driver: str = _TS_ARRAY_DRIVER) -> dict[str, Any]:
if driver == "zarr3":
codecs = ([{"name": "zstd"}] if is_remote else [])
return {
'codecs': codecs,
'shape': global_shape,
'data_type': jnp.dtype(dtype).name,
'chunk_grid': {
'name': 'regular',
'configuration': {'chunk_shape': _compute_chunk_shape(
local_shape, dtype, file_size_target=file_size_target)}
}
}
elif driver == "zarr": # in zarr dtype goes in the base spec
return {'compressor': {'id': 'zstd'}, 'shape': global_shape,
'chunks': np.array(np.maximum(1, local_shape)).tolist()}
else:
raise ValueError(f"Unsupported driver: {driver}")
_divides = lambda x, y: np.all((np.array(x) % np.array(y)) == 0)
def merge_nested_ts_specs(dict1: dict[Any, Any], dict2: dict[Any, Any] | None):
"""Merge two ts specs, dict2 takes precedence."""
if dict2 is None: # nothing to do
return dict1
# TODO(rdyro): this is an opinionated merge, we should get user feedback
# merge kvstore explicitly
kvstore = dict1.get("kvstore", {}) | dict2.get("kvstore", {})
return dict1 | dict(dict2, kvstore=kvstore) # merge with dict2 preferred
def verify_tensorstore_spec(spec: dict[str, Any], arr: jax.Array | None,
path: str | os.PathLike[str], ocdbt: bool,
check_metadata: bool = True) -> None:
"""Verify the minimum requirements for a tensorstore spec."""
if ocdbt:
if spec.get("kvstore", {}).get("driver", "") != "ocdbt":
raise ValueError(f"Expected ocdbt driver, got {spec=}")
if check_metadata:
if arr is None:
raise ValueError("Array is required for metadata verification.")
metadata = spec['metadata']
if spec.get("driver", "") == "zarr3":
if metadata['data_type'] != jnp.dtype(arr.dtype).name:
raise ValueError(f"Provided dtype ({metadata['data_type']=}) doesn't"
f" match ({arr.dtype=})")
if 'shape' in metadata:
if metadata['shape'] != arr.shape:
raise ValueError(f"Provided shape ({metadata['shape']=}) doesn't match"
f" ({arr.shape=})")
if isinstance(arr, jax.Array):
local_shape = arr.sharding.shard_shape(arr.shape)
else: # np.ndarray
local_shape = arr.shape # pytype: disable=attribute-error
if spec.get("driver", "") == "zarr3":
chunk_shape = metadata['chunk_grid']['configuration']['chunk_shape']
if not _divides(local_shape, chunk_shape):
raise ValueError(f"Provided chunk shape {chunk_shape} does not divide"
f" the local shape of the array {local_shape}")
# check path is still the same one we expect
if ocdbt:
found_path = spec["kvstore"]['base']['path']
else:
found_path = spec["kvstore"]['path']
if str(found_path) != str(path):
raise ValueError(f"Provided {path=} does not match the spec path:"
f" {spec['kvstore']}")
def _spec_has_metadata(tree):
if not isinstance(tree, dict):
return False
return 'metadata' in tree or any(
_spec_has_metadata(subtree) for _, subtree in tree.items())
def _get_kvstore_for_gcs(ckpt_path: str):
m = re.fullmatch('^gs://([^/]*)/(.*)$', ckpt_path)
if m is None:
raise ValueError('The ckpt_path should contain the bucket name and the '
f'file path inside the bucket. Got: {ckpt_path}')
bucket = m.group(1)
path_without_bucket = m.group(2)
return {'driver': 'gcs', 'bucket': bucket, 'path': path_without_bucket}
def _get_kvstore_for_s3(ckpt_path: str):
m = re.fullmatch('^s3://([^/]*)/(.*)$', ckpt_path, re.DOTALL)
if m is None:
raise ValueError('The ckpt_path should contain the bucket name and the '
f'file path inside the bucket. Got: {ckpt_path}')
bucket = m.group(1)
path_without_bucket = m.group(2)
return {'driver': 's3', 'bucket': bucket, 'path': path_without_bucket}
def get_tensorstore_spec(
ckpt_path: str | PathLike[str], ocdbt: bool = True,
process_idx: int | None = None, arr: jax.Array | None = None,
driver: str = _TS_ARRAY_DRIVER) -> dict[str, Any]:
# Normalize path to exclude trailing '/'. In GCS path case, normpath will
# replace a the double '//' with a single '/' and we need to restore the
# filesystem type:// prefix for GCS (gs://) and S3 paths (s3://)
ckpt_path = os.path.normpath(str(ckpt_path))
ckpt_path = re.sub(r"^([a-z]+):/", r"\1://", ckpt_path)
# in cases of multi-process writes, we need to write to a different location
# for each process and finally created a combined symlink to the final
# location, tensorstore can do this via ts.KvStore.experimental_copy_range_to
if process_idx is not None:
_parent, _name = os.path.split(ckpt_path)
ckpt_path = os.path.join(_parent, _PROCESS_DIR_FORMAT.format(process_idx),
_name)
is_gcs_path = ckpt_path.startswith('gs://')
is_s3_path = ckpt_path.startswith('s3://')
spec = {'driver': driver, 'kvstore': {}}
# use a combined OCDBT store, the actual path is the parent path
# the name (filename/last part of the path) is the key in the ocdbt kvstore
entry_key = None
if ocdbt:
(ckpt_path, entry_key), org_ckpt_path = os.path.split(ckpt_path), ckpt_path
if is_gcs_path:
m = re.fullmatch('^gs://([^/]*)/(.*)$', ckpt_path)
elif is_s3_path:
m = re.fullmatch('^s3://([^/]*)/(.*)$', ckpt_path)
else:
m = re.match("a", "a") # make it True
if m is None:
raise ValueError('Using OCDBT requires the bucket name, the directory'
' name and the array name, your path is: '
f'{org_ckpt_path}')
if is_gcs_path:
base_kvstore = _get_kvstore_for_gcs(ckpt_path)
elif is_s3_path:
base_kvstore = _get_kvstore_for_s3(ckpt_path)
else:
base_kvstore = {'driver': _DEFAULT_BASE_DRIVER, 'path': ckpt_path}
if ocdbt:
if not is_gcs_path and not is_s3_path and not os.path.isabs(ckpt_path):
raise ValueError(f'Checkpoint path should be absolute. Got {ckpt_path}')
spec['kvstore'] = {'driver': 'ocdbt', 'base': base_kvstore,
'path': entry_key}
else:
spec['kvstore'] = base_kvstore
# done writing tensorstore spec based on destination path
# optionally, if array is provided, we can add metadata to the spec
if arr is not None:
spec["metadata"] = _get_tensorstore_metadata(
arr, driver=str(spec["driver"]))
return spec
async def _create_async_array_from_callback(
global_shape: array.Shape,
dtype: str | jnp.dtype | None,
inp_sharding: jax.sharding.Sharding,
data_callback: Callable[[array.Index, jax.Device], Awaitable[jax.Array]],
):
device_to_index_map = inp_sharding.devices_indices_map(global_shape)
addressable_da = inp_sharding._addressable_device_assignment
future_arrays = [data_callback(device_to_index_map[d], d)
for d in addressable_da]
dbs = await asyncio.gather(*future_arrays)
return array.make_array_from_single_device_arrays(
global_shape, inp_sharding, dbs, dtype=dtype)
async def _transfer_shard_to_host(shard: array.Shard) -> np.ndarray:
data = shard.data
has_pinned_host = any(
m.kind == "pinned_host" for m in shard.device.addressable_memories())
if has_pinned_host:
# If available, transfer to pinned host memory
sharding = jax.sharding.SingleDeviceSharding(shard.device,
memory_kind="pinned_host")
data = jax.device_put(data, sharding)
else:
data.copy_to_host_async()
# Allow other transfers to be scheduled simultaneously
await asyncio.sleep(0)
# Ensure that jax.Array's internal numpy array can be zero-copied. Tensorstore
# implicitly converts the written data to a numpy array, and would otherwise
# silently copy host-to-host.
return np.array(data, copy=False)
async def combine_kvstores(combined_kvstore: dict[str, Any],
kvstores: list[dict[str, Any]],
context: ts.Context | dict[str, Any] = _TS_CONTEXT
) -> None:
"""Merge a list of kvstores into a single kvstore. NOT multi-process safe."""
combined_fut = ts.KvStore.open(combined_kvstore, context=context)
kvstores_futs = [ts.KvStore.open(kvstore, context=context)
for kvstore in kvstores]
combined, kvstores = await asyncio.gather(combined_fut,
asyncio.gather(*kvstores_futs))
tx = ts.Transaction()
await asyncio.gather(*[kvstore.experimental_copy_range_to(
combined.with_transaction(tx)) for kvstore in kvstores])
await tx.commit_async()
async def async_serialize(
arr_inp,
tensorstore_spec,
commit_future=None,
context=_TS_CONTEXT,
chunk_layout=_TS_CHUNK_LAYOUT,
primary_host: int | None = None,
replica_id: int = 0,
transaction: ts.Transaction | None = None,
):
"""Serialize an array using TensorStore.
Args:
arr_inp: The array to serialize.
tensorstore_spec: The tensorstore spec to use.
commit_future: A list of futures that will be appended to. The futures can
be awaited asynchronously. If None, the futures will be awaited
synchronously by this method.
context: ts.Context instance.
primary_host: Primary host, which indicates the host that will be treated as
the "leader". If None, all hosts are treated as the primary. DO NOT USE
unless you are sure you know what you are doing.
replica_id: Allows overriding the shard replica id that will be saved. DO
NOT USE unless you are sure you know what you are doing.
transaction: TensorStore transaction to use for opening and writing the
array. If not specified, a non-transactional write will be used.
"""
if (isinstance(arr_inp, array.ArrayImpl) and jax.process_count() > 1 and
arr_inp.is_fully_addressable):
raise ValueError(
f'Passing fully addressable arrays to a multiprocess '
f'serialization is not allowed, as this may lead to a race condition '
f'between processes. Serialization have failed for the array with '
f'the path from kvstore: "{tensorstore_spec["kvstore"]}".')
# 'metadata' may not be present at the top level (for example, if we are using
# a 'cast' driver).
if not _spec_has_metadata(tensorstore_spec):
tensorstore_spec['metadata'] = _get_tensorstore_metadata(
arr_inp, driver=tensorstore_spec['driver'])
## zarr driver requires specifying the dtype in the spec base
if tensorstore_spec['driver'] == 'zarr' and 'dtype' not in tensorstore_spec:
tensorstore_spec['dtype'] = jnp.dtype(arr_inp.dtype).name
# If primary_host is None, all hosts will checkpoint. This is used
# for checkpointing to local filesystem.
if primary_host is None or jax.process_index() == primary_host:
open_future = ts.open(
ts.Spec(tensorstore_spec),
create=True,
open=True,
context=context,
chunk_layout=chunk_layout,
transaction=transaction,
)
# Asynchronous case.
if commit_future is not None:
assert isinstance(commit_future, list)
commit_future.append(open_future)
else:
await open_future
# `ts.open` runs twice for process `primary_host` because for the first time,
# we just get the future to be awaited upon in the background thread. The
# second one runs with `assume_metadata=True` which does no I/O operation and
# returns the tensorstore object.
# For every process other than `primary_host`, we open with
# `assume_metadata=True`.
t = await ts.open(
ts.Spec(tensorstore_spec),
open=True,
assume_metadata=True,
context=context,
chunk_layout=chunk_layout,
transaction=transaction,
)
async def _write_array(shard):
if shard.replica_id == replica_id:
data = await _transfer_shard_to_host(shard)
write_future = t[shard.index].write(
data,
# Avoid additional copy of input array into the TensorStore chunk
# cache. If `arr_inp` is a jax.Array, the result of converting
# it to a NumPy array, as is done internally by TensorStore, is
# guaranteed to be immutable and therefore it is safe to retain a
# reference indefinitely.
can_reference_source_data_indefinitely=isinstance(
arr_inp, array.ArrayImpl
),
)
if commit_future is not None:
assert isinstance(commit_future, list)
commit_future.append(write_future.commit)
await write_future.copy
else:
await write_future.commit
local_shards = arr_inp.addressable_shards
future_write_state = jax.tree_util.tree_map(_write_array, local_shards)
return await asyncio.gather(*future_write_state)
# TODO(rdyro): Remove this function.
def _run_serialization(arrays, tensorstore_specs):
"""Legacy serialization of a list of arrays."""
async def _run_serializer():
future_writer = jax.tree_util.tree_map(async_serialize, arrays, tensorstore_specs)
return await asyncio.gather(*future_writer)
asyncio.run(_run_serializer())
def estimate_read_memory_footprint(t: ts.TensorStore,
domain: ts.IndexDomain) -> int:
rank = t.rank
num_bytes = t.dtype.numpy_dtype.itemsize
chunk_template = t.chunk_layout.read_chunk_template
if domain is None:
domain = t.domain
origin = domain.origin
shape = domain.shape
chunk_origin = chunk_template.origin
chunk_shape = chunk_template.shape
# Some TensorStore drivers are not chunked, e.g. the inline 'array' driver.
# For those, instead of returning a near-infinite memory footprint, estimate
# the footprint as the entire shape.
for i in range(rank):
if not chunk_template[i].finite:
return domain.size * num_bytes
# Otherwise, if we have a chunked driver, estimate based on chunk size.
for i in range(rank):
origin_value = origin[i]
chunk_origin_value = chunk_origin[i]
chunk_size = chunk_shape[i]
lower = origin_value - chunk_origin_value
upper = origin_value + shape[i] - chunk_origin_value
lower_aligned = lower // chunk_size * chunk_size
upper_aligned = -(-upper // chunk_size) * chunk_size
num_bytes *= (upper_aligned - lower_aligned)
return num_bytes
async def async_deserialize(
user_in_sharding: jax.sharding.Sharding | Format | jax.ShapeDtypeStruct,
tensorstore_spec: ts.Spec | dict[str, Any],
global_shape: Sequence[int] | None = None,
dtype=None,
byte_limiter: _LimitInFlightBytes | None = None,
context=_TS_CONTEXT,
chunk_layout=_TS_CHUNK_LAYOUT,
assume_metadata: bool = False,
):
"""Main performant deserialization routine for arrays using tensorstore."""
in_sharding = (user_in_sharding.sharding
if isinstance(user_in_sharding, Format) else user_in_sharding)
if isinstance(user_in_sharding, jax.ShapeDtypeStruct):
dtype = dtype if dtype is not None else user_in_sharding.dtype
in_sharding = user_in_sharding.sharding
if not isinstance(in_sharding, jax.sharding.Sharding):
raise ValueError(
'sharding passed to deserialization should be specified, concrete and'
f' an instance of `jax.sharding.Sharding`. Got {in_sharding}')
dll = (user_in_sharding.layout
if isinstance(user_in_sharding, Format) else None)
t = await ts.open(
tensorstore_spec,
open=True,
assume_metadata=assume_metadata,
context=context,
chunk_layout=chunk_layout,
)
shape = t.shape if global_shape is None else global_shape
dtype = dtype if dtype is not None else t.dtype.numpy_dtype
new_shard_shape = in_sharding.shard_shape(tuple(shape))
async def cb(index: array.Index, device: jax.Device):
requested_domain = ts.IndexTransform(input_shape=shape)[index].domain
restricted_domain = t.domain.intersect(requested_domain)
requested_bytes = estimate_read_memory_footprint(t, restricted_domain)
# Limit the bytes read for every shard.
if byte_limiter is not None:
await byte_limiter.wait_for_bytes(requested_bytes)
# This maybe needed because the shape the array was saved with is smaller
# than the requested shape of the array in which it will be reloaded. So
# the extra values will be filled with 0s.
out = np.zeros(new_shard_shape, dtype=t.dtype.numpy_dtype)
await ts.array(out)[ts.d[:].translate_to[requested_domain.origin]][
restricted_domain].write(t[restricted_domain])
if dtype is not None:
# Cast while reloading on process to avoid 2 copies on device if the
# casting is done on device.
out = out.astype(dtype)
# Convert to jnp array so that layouts are initialized properly for
# sub-byte dtypes.
# TODO(yashkatariya): This is a band-aid fix. Figure out a better way to
# make this work.
if out.dtype == jnp.int4:
out = jnp.asarray(out) # type: ignore
result = jax.device_put(
out, Format(dll, jax.sharding.SingleDeviceSharding(device)))
if byte_limiter is not None:
# NB: `out` actually might not be ready for garbage collection by the
# time we call release_bytes . Thus peak memory usage still might grow
# beyond what byte_limiter limit suggests it should. The simplest option
# would be to call `result.block_until_ready()`` here. However it
# also comes with ~15-20% perf penalty as we would be waiting for CPU->GPU
# transfer instead of loading data. In the future, if memory pressure
# becomes a problem, we can instead instrument bytelimiter to
# keep track of all in-flight tensors and only block_until_ready, if byte
# limiter hits the limit to get reduced memory usage, without losing
# performance in common use cases.
await byte_limiter.release_bytes(requested_bytes)
return result
# for deserialization canonicalize dtype to a dtype representable in jax
return await _create_async_array_from_callback(
tuple(shape), jax.dtypes.canonicalize_dtype(dtype), in_sharding, cb)
# TODO(rdyro): Remove this function.
def _run_deserialization(shardings: Sequence[jax.sharding.Sharding | Format],
tensorstore_specs: Sequence[dict[str, Any] | ts.Spec],
global_shapes: Sequence[array.Shape] | None = None,
dtypes: Sequence[typing.DTypeLike] | None = None,
concurrent_gb: int = 32):
"""Legacy deserialization of a list of arrays. Optionally pass global_shapes
and dtypes for type-checking.
"""
concurrent_bytes = concurrent_gb * 10**9
async def _run_deserializer():
# Object should be created once per process.
byte_limiter = _LimitInFlightBytes(concurrent_bytes)
future_arrays = jax.tree_util.tree_map(
partial(async_deserialize, byte_limiter=byte_limiter),
list(shardings), list(tensorstore_specs),
[None] * len(tensorstore_specs) if global_shapes is None else global_shapes,
[None] * len(tensorstore_specs) if dtypes is None else dtypes)
return await asyncio.gather(*future_arrays)
return asyncio.run(_run_deserializer())
| _LimitInFlightBytes |
python | Netflix__metaflow | metaflow/_vendor/v3_7/typeguard/_config.py | {
"start": 700,
"end": 1489
} | class ____(Enum):
"""
Specifies how thoroughly the contents of collections are type checked.
This has an effect on the following built-in checkers:
* ``AbstractSet``
* ``Dict``
* ``List``
* ``Mapping``
* ``Set``
* ``Tuple[<type>, ...]`` (arbitrarily sized tuples)
Members:
* ``FIRST_ITEM``: check only the first item
* ``ALL_ITEMS``: check all items
"""
FIRST_ITEM = auto()
ALL_ITEMS = auto()
def iterate_samples(self, collection: Collection[T]) -> Collection[T]:
if self is CollectionCheckStrategy.FIRST_ITEM:
if len(collection):
return [next(iter(collection))]
else:
return ()
else:
return collection
@dataclass
| CollectionCheckStrategy |
python | PrefectHQ__prefect | src/prefect/server/events/actions.py | {
"start": 61294,
"end": 61734
} | class ____(AutomationCommandAction):
"""Pauses a Work Queue"""
type: Literal["pause-automation"] = "pause-automation"
_action_description: ClassVar[str] = "Pausing automation"
async def command(
self,
events: PrefectServerEventsAPIClient,
automation_id: UUID,
triggered_action: "TriggeredAction",
) -> Response:
return await events.pause_automation(automation_id)
| PauseAutomation |
python | realpython__materials | python-namedtuple/person_dataclass.py | {
"start": 56,
"end": 295
} | class ____:
name: str
age: int
height: float
weight: float
country: str = "Canada"
def __iter__(self):
return iter(astuple(self))
jane = Person("Jane", 25, 1.75, 67)
for field in jane:
print(field)
| Person |
python | pypa__pip | src/pip/_internal/exceptions.py | {
"start": 26133,
"end": 27114
} | class ____(DiagnosticPipError):
reference = "invalid-installed-package"
def __init__(
self,
*,
dist: BaseDistribution,
invalid_exc: InvalidRequirement | InvalidVersion,
) -> None:
installed_location = dist.installed_location
if isinstance(invalid_exc, InvalidRequirement):
invalid_type = "requirement"
else:
invalid_type = "version"
super().__init__(
message=Text(
f"Cannot process installed package {dist} "
+ (f"in {installed_location!r} " if installed_location else "")
+ f"because it has an invalid {invalid_type}:\n{invalid_exc.args[0]}"
),
context=(
"Starting with pip 24.1, packages with invalid "
f"{invalid_type}s can not be processed."
),
hint_stmt="To proceed this package must be uninstalled.",
)
| InvalidInstalledPackage |
python | vyperlang__vyper | vyper/compiler/settings.py | {
"start": 848,
"end": 1403
} | class ____(Enum):
NONE = 1
GAS = 2
CODESIZE = 3
@classmethod
def from_string(cls, val):
match val:
case "none":
return cls.NONE
case "gas":
return cls.GAS
case "codesize":
return cls.CODESIZE
raise ValueError(f"unrecognized optimization level: {val}")
@classmethod
def default(cls):
return cls.GAS
def __str__(self):
return self._name_.lower()
DEFAULT_ENABLE_DECIMALS = False
@dataclass
| OptimizationLevel |
python | ray-project__ray | python/ray/air/tests/test_integration_comet.py | {
"start": 357,
"end": 1828
} | class ____(unittest.TestCase):
def setUp(self):
self.logger = CometLoggerCallback()
def test_class_variable_to_instance(self):
"""Test that class variables get properly assigned to instance
variables.
"""
logger = self.logger
self.assertEqual(logger._to_exclude, logger._exclude_results)
self.assertEqual(logger._to_system, logger._system_results)
self.assertEqual(logger._to_other, logger._other_results)
self.assertEqual(logger._to_episodes, logger._episode_results)
def test_configure_experiment_defaults(self):
"""Test CometLoggerCallback._configure_experiment_defaults."""
logger = self.logger
# Test that autologging features are properly disabled
exclude = CometLoggerCallback._exclude_autolog
for option in exclude:
self.assertFalse(logger.experiment_kwargs.get(option))
del logger
# Don't disable logging if user overwrites defaults by passing in args
for include_option in exclude:
# This unpacks to become e.g. CometLoggerCallback(log_env_cpu=True)
logger = CometLoggerCallback(**{include_option: True})
for option in exclude:
if option == include_option:
self.assertTrue(logger.experiment_kwargs.get(option))
else:
self.assertFalse(logger.experiment_kwargs.get(option))
| InitializationTests |
python | pytorch__pytorch | test/mobile/model_test/math_ops.py | {
"start": 90,
"end": 8845
} | class ____(torch.nn.Module):
def forward(self):
return self.pointwise_ops()
def pointwise_ops(self):
a = torch.randn(4)
b = torch.randn(4)
t = torch.tensor([-1, -2, 3], dtype=torch.int8)
r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
t = torch.tensor([-1, -2, 3], dtype=torch.int8)
s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
f = torch.zeros(3)
g = torch.tensor([-1, 0, 1])
w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
return len(
torch.abs(torch.tensor([-1, -2, 3])),
torch.absolute(torch.tensor([-1, -2, 3])),
torch.acos(a),
torch.arccos(a),
torch.acosh(a.uniform_(1.0, 2.0)),
torch.add(a, 20),
torch.add(a, b, out=a),
b.add(a),
b.add(a, out=b),
b.add_(a),
b.add(1),
torch.add(a, torch.randn(4, 1), alpha=10),
torch.addcdiv(
torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1
),
torch.addcmul(
torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1
),
torch.angle(a),
torch.asin(a),
torch.arcsin(a),
torch.asinh(a),
torch.arcsinh(a),
torch.atan(a),
torch.arctan(a),
torch.atanh(a.uniform_(-1.0, 1.0)),
torch.arctanh(a.uniform_(-1.0, 1.0)),
torch.atan2(a, a),
torch.bitwise_not(t),
torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
torch.ceil(a),
torch.ceil(float(torch.tensor(0.5))),
torch.ceil(torch.tensor(0.5).item()),
torch.clamp(a, min=-0.5, max=0.5),
torch.clamp(a, min=0.5),
torch.clamp(a, max=0.5),
torch.clip(a, min=-0.5, max=0.5),
torch.conj(a),
torch.copysign(a, 1),
torch.copysign(a, b),
torch.cos(a),
torch.cosh(a),
torch.deg2rad(
torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
),
torch.div(a, b),
a.div(b),
a.div(1),
a.div_(b),
torch.divide(a, b, rounding_mode="trunc"),
torch.divide(a, b, rounding_mode="floor"),
torch.digamma(torch.tensor([1.0, 0.5])),
torch.erf(torch.tensor([0.0, -1.0, 10.0])),
torch.erfc(torch.tensor([0.0, -1.0, 10.0])),
torch.erfinv(torch.tensor([0.0, 0.5, -1.0])),
torch.exp(torch.tensor([0.0, math.log(2.0)])),
torch.exp(float(torch.tensor(1))),
torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])),
torch.expm1(torch.tensor([0.0, math.log(2.0)])),
torch.fake_quantize_per_channel_affine(
torch.randn(2, 2, 2),
(torch.randn(2) + 1) * 0.05,
torch.zeros(2),
1,
0,
255,
),
torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255),
torch.float_power(torch.randint(10, (4,)), 2),
torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4, -5])),
torch.floor(a),
torch.floor(float(torch.tensor(1))),
torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])),
torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4),
torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2),
torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5),
torch.frac(torch.tensor([1.0, 2.5, -3.2])),
torch.randn(4, dtype=torch.cfloat).imag,
torch.ldexp(torch.tensor([1.0]), torch.tensor([1])),
torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])),
torch.lerp(torch.arange(1.0, 5.0), torch.empty(4).fill_(10), 0.5),
torch.lerp(
torch.arange(1.0, 5.0),
torch.empty(4).fill_(10),
torch.full_like(torch.arange(1.0, 5.0), 0.5),
),
torch.lgamma(torch.arange(0.5, 2, 0.5)),
torch.log(torch.arange(5) + 10),
torch.log10(torch.rand(5)),
torch.log1p(torch.randn(5)),
torch.log2(torch.rand(5)),
torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
torch.logaddexp(
torch.tensor([-100.0, -200.0, -300.0]), torch.tensor([-1, -2, -3])
),
torch.logaddexp(
torch.tensor([1.0, 2000.0, 30000.0]), torch.tensor([-1, -2, -3])
),
torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
torch.logaddexp2(
torch.tensor([-100.0, -200.0, -300.0]), torch.tensor([-1, -2, -3])
),
torch.logaddexp2(
torch.tensor([1.0, 2000.0, 30000.0]), torch.tensor([-1, -2, -3])
),
torch.logical_and(r, s),
torch.logical_and(r.double(), s.double()),
torch.logical_and(r.double(), s),
torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)),
torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)),
torch.logical_not(torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)),
torch.logical_not(
torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
out=torch.empty(3, dtype=torch.int16),
),
torch.logical_or(r, s),
torch.logical_or(r.double(), s.double()),
torch.logical_or(r.double(), s),
torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)),
torch.logical_xor(r, s),
torch.logical_xor(r.double(), s.double()),
torch.logical_xor(r.double(), s),
torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)),
torch.logit(torch.rand(5), eps=1e-6),
torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])),
torch.i0(torch.arange(5, dtype=torch.float32)),
torch.igamma(a, b),
torch.igammac(a, b),
torch.mul(torch.randn(3), 100),
b.mul(a),
b.mul(5),
b.mul(a, out=b),
b.mul_(a),
b.mul_(5),
torch.multiply(torch.randn(4, 1), torch.randn(1, 4)),
torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2),
torch.tensor([float("nan"), float("inf"), -float("inf"), 3.14]),
torch.nan_to_num(w),
torch.nan_to_num_(w),
torch.nan_to_num(w, nan=2.0),
torch.nan_to_num(w, nan=2.0, posinf=1.0),
torch.neg(torch.randn(5)),
# torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]),
torch.polygamma(1, torch.tensor([1.0, 0.5])),
torch.polygamma(2, torch.tensor([1.0, 0.5])),
torch.polygamma(3, torch.tensor([1.0, 0.5])),
torch.polygamma(4, torch.tensor([1.0, 0.5])),
torch.pow(a, 2),
torch.pow(2, float(torch.tensor(0.5))),
torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)),
torch.rad2deg(
torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
),
torch.randn(4, dtype=torch.cfloat).real,
torch.reciprocal(a),
torch.remainder(torch.tensor([-3.0, -2.0]), 2),
torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5),
torch.round(a),
torch.round(torch.tensor(0.5).item()),
torch.rsqrt(a),
torch.sigmoid(a),
torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])),
torch.sgn(a),
torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])),
torch.sin(a),
torch.sinc(a),
torch.sinh(a),
torch.sqrt(a),
torch.square(a),
torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2),
b.sub(a),
b.sub_(a),
b.sub(5),
torch.sum(5),
torch.tan(a),
torch.tanh(a),
torch.true_divide(a, a),
torch.trunc(a),
torch.trunc_(a),
torch.xlogy(f, g),
torch.xlogy(f, g),
torch.xlogy(f, 4),
torch.xlogy(2, g),
)
| PointwiseOpsModule |
python | sympy__sympy | sympy/integrals/tests/test_risch.py | {
"start": 32703,
"end": 39465
} | class ____(Exception):
"""Dummy Exception class for testing."""
pass
def test_DecrementLevel():
DE = DifferentialExtension(x*log(exp(x) + 1), x)
assert DE.level == -1
assert DE.t == t1
assert DE.d == Poly(t0/(t0 + 1), t1)
assert DE.case == 'primitive'
with DecrementLevel(DE):
assert DE.level == -2
assert DE.t == t0
assert DE.d == Poly(t0, t0)
assert DE.case == 'exp'
with DecrementLevel(DE):
assert DE.level == -3
assert DE.t == x
assert DE.d == Poly(1, x)
assert DE.case == 'base'
assert DE.level == -2
assert DE.t == t0
assert DE.d == Poly(t0, t0)
assert DE.case == 'exp'
assert DE.level == -1
assert DE.t == t1
assert DE.d == Poly(t0/(t0 + 1), t1)
assert DE.case == 'primitive'
# Test that __exit__ is called after an exception correctly
try:
with DecrementLevel(DE):
raise _TestingException
except _TestingException:
pass
else:
raise AssertionError("Did not raise.")
assert DE.level == -1
assert DE.t == t1
assert DE.d == Poly(t0/(t0 + 1), t1)
assert DE.case == 'primitive'
def test_risch_integrate():
assert risch_integrate(t0*exp(x), x) == t0*exp(x)
assert risch_integrate(sin(x), x, rewrite_complex=True) == -exp(I*x)/2 - exp(-I*x)/2
# From my GSoC writeup
assert risch_integrate((1 + 2*x**2 + x**4 + 2*x**3*exp(2*x**2))/
(x**4*exp(x**2) + 2*x**2*exp(x**2) + exp(x**2)), x) == \
NonElementaryIntegral(exp(-x**2), x) + exp(x**2)/(1 + x**2)
assert risch_integrate(0, x) == 0
# also tests prde_cancel()
e1 = log(x/exp(x) + 1)
ans1 = risch_integrate(e1, x)
assert ans1 == (x*log(x*exp(-x) + 1) + NonElementaryIntegral((x**2 - x)/(x + exp(x)), x))
assert cancel(diff(ans1, x) - e1) == 0
# also tests issue #10798
e2 = (log(-1/y)/2 - log(1/y)/2)/y - (log(1 - 1/y)/2 - log(1 + 1/y)/2)/y
ans2 = risch_integrate(e2, y)
assert ans2 == log(1/y)*log(1 - 1/y)/2 - log(1/y)*log(1 + 1/y)/2 + \
NonElementaryIntegral((I*pi*y**2 - 2*y*log(1/y) - I*pi)/(2*y**3 - 2*y), y)
assert expand_log(cancel(diff(ans2, y) - e2), force=True) == 0
# These are tested here in addition to in test_DifferentialExtension above
# (symlogs) to test that backsubs works correctly. The integrals should be
# written in terms of the original logarithms in the integrands.
# XXX: Unfortunately, making backsubs work on this one is a little
# trickier, because x**x is converted to exp(x*log(x)), and so log(x**x)
# is converted to x*log(x). (x**2*log(x)).subs(x*log(x), log(x**x)) is
# smart enough, the issue is that these splits happen at different places
# in the algorithm. Maybe a heuristic is in order
assert risch_integrate(log(x**x), x) == x**2*log(x)/2 - x**2/4
assert risch_integrate(log(x**y), x) == x*log(x**y) - x*y
assert risch_integrate(log(sqrt(x)), x) == x*log(sqrt(x)) - x/2
# Example 6.2.1
expr = (exp(x) - x**2 + 2*x)/((exp(x) + x)**2*x**2)*exp((x**2 - 1)/x + 1/(exp(x) + x))
assert risch_integrate(expr, x) == exp(-x)*exp(1/(x + exp(x)) + (x**2 - 1)/x)
# issue 28407
# TODO: exp(exp(x)) - exp(-exp(x)) would be a simpler return form
expr = exp(x + exp(x)) + exp(x - exp(x))
assert risch_integrate(expr, x) == \
(exp(2*x)*exp(-x + exp(x)) - exp(x - exp(x)))*exp(-x)
# Ensure the results from integrate_hyperexponential() are in a simple
# form, i.e., this doesn't return something like (1 + exp(-2*x))*exp(x)/2
# sinh(x).rewrite(exp)
expr = exp(x)/2 - exp(-x)/2
assert risch_integrate(expr, x) == exp(x)/2 + exp(-x)/2
# sin(x).rewrite(exp)
expr = -I*(exp(I*x) - exp(-I*x))/2
assert risch_integrate(expr, x) == -exp(I*x)/2 - exp(-I*x)/2
def test_risch_integrate_float():
assert risch_integrate((-60*exp(x) - 19.2*exp(4*x))*exp(4*x), x) == -2.4*exp(8*x) - 12.0*exp(5*x)
def test_NonElementaryIntegral():
assert isinstance(risch_integrate(exp(x**2), x), NonElementaryIntegral)
assert isinstance(risch_integrate(x**x*log(x), x), NonElementaryIntegral)
# Make sure methods of Integral still give back a NonElementaryIntegral
assert isinstance(NonElementaryIntegral(x**x*t0, x).subs(t0, log(x)), NonElementaryIntegral)
def test_xtothex():
a = risch_integrate(x**x, x)
assert a == NonElementaryIntegral(x**x, x)
assert isinstance(a, NonElementaryIntegral)
def test_DifferentialExtension_equality():
DE1 = DE2 = DifferentialExtension(log(x), x)
assert DE1 == DE2
def test_DifferentialExtension_printing():
DE = DifferentialExtension(exp(2*x**2) + log(exp(x**2) + 1), x)
assert repr(DE) == ("DifferentialExtension(dict([('f', exp(2*x**2) + log(exp(x**2) + 1)), "
"('x', x), ('T', [x, t0, t1]), ('D', [Poly(1, x, domain='ZZ'), Poly(2*x*t0, t0, domain='ZZ[x]'), "
"Poly(2*t0*x/(t0 + 1), t1, domain='ZZ(x,t0)')]), ('fa', Poly(t1 + t0**2, t1, domain='ZZ[t0]')), "
"('fd', Poly(1, t1, domain='ZZ')), ('Tfuncs', [Lambda(i, exp(i**2)), Lambda(i, log(t0 + 1))]), "
"('backsubs', []), ('exts', [None, 'exp', 'log']), ('extargs', [None, x**2, t0 + 1]), "
"('cases', ['base', 'exp', 'primitive']), ('case', 'primitive'), ('t', t1), "
"('d', Poly(2*t0*x/(t0 + 1), t1, domain='ZZ(x,t0)')), ('newf', t0**2 + t1), ('level', -1), "
"('dummy', False)]))")
assert str(DE) == ("DifferentialExtension({fa=Poly(t1 + t0**2, t1, domain='ZZ[t0]'), "
"fd=Poly(1, t1, domain='ZZ'), D=[Poly(1, x, domain='ZZ'), Poly(2*x*t0, t0, domain='ZZ[x]'), "
"Poly(2*t0*x/(t0 + 1), t1, domain='ZZ(x,t0)')]})")
def test_issue_23948():
f = (
( (-2*x**5 + 28*x**4 - 144*x**3 + 324*x**2 - 270*x)*log(x)**2
+(-4*x**6 + 56*x**5 - 288*x**4 + 648*x**3 - 540*x**2)*log(x)
+(2*x**5 - 28*x**4 + 144*x**3 - 324*x**2 + 270*x)*exp(x)
+(2*x**5 - 28*x**4 + 144*x**3 - 324*x**2 + 270*x)*log(5)
-2*x**7 + 26*x**6 - 116*x**5 + 180*x**4 + 54*x**3 - 270*x**2
)*log(-log(x)**2 - 2*x*log(x) + exp(x) + log(5) - x**2 - x)**2
+( (4*x**5 - 44*x**4 + 168*x**3 - 216*x**2 - 108*x + 324)*log(x)
+(-2*x**5 + 24*x**4 - 108*x**3 + 216*x**2 - 162*x)*exp(x)
+4*x**6 - 42*x**5 + 144*x**4 - 108*x**3 - 324*x**2 + 486*x
)*log(-log(x)**2 - 2*x*log(x) + exp(x) + log(5) - x**2 - x)
)/(x*exp(x)**2*log(x)**2 + 2*x**2*exp(x)**2*log(x) - x*exp(x)**3
+(-x*log(5) + x**3 + x**2)*exp(x)**2)
F = ((x**4 - 12*x**3 + 54*x**2 - 108*x + 81)*exp(-2*x)
*log(-x**2 - 2*x*log(x) - x + exp(x) - log(x)**2 + log(5))**2)
assert risch_integrate(f, x) == F
| _TestingException |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-google/llama_index/vector_stores/google/base.py | {
"start": 2910,
"end": 3065
} | class ____(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
| NoSuchCorpusException |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/distlib/util.py | {
"start": 52379,
"end": 53504
} | class ____(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
return self._connection[1]
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(
h, None, **kwargs)
return self._connection[1]
| Transport |
python | getsentry__sentry | src/sentry/integrations/api/endpoints/organization_integration_repos.py | {
"start": 745,
"end": 895
} | class ____(TypedDict):
name: str
identifier: str
isInstalled: bool
defaultBranch: str | None
@region_silo_endpoint
| IntegrationRepository |
python | sqlalchemy__sqlalchemy | test/orm/test_default_strategies.py | {
"start": 670,
"end": 5799
} | class ____(_fixtures.FixtureTest):
def _assert_fully_loaded(self, users):
# verify everything loaded, with no additional sql needed
def go():
# comparison with no additional sql
eq_(users, self.static.user_all_result)
# keywords are not part of self.static.user_all_result, so
# verify all the item keywords were loaded, with no more sql.
# 'any' verifies at least some items have keywords; we build
# a list for any([...]) instead of any(...) to prove we've
# iterated all the items with no sql.
f = util.flatten_iterator
assert any(
[
i.keywords
for i in f([o.items for o in f([u.orders for u in users])])
]
)
self.assert_sql_count(testing.db, go, 0)
def _assert_addresses_loaded(self, users):
# verify all the addresses were joined loaded with no more sql
def go():
for u, static in zip(users, self.static.user_all_result):
eq_(u.addresses, static.addresses)
self.assert_sql_count(testing.db, go, 0)
def _downgrade_fixture(self):
(
users,
Keyword,
items,
order_items,
orders,
Item,
User,
Address,
keywords,
item_keywords,
Order,
addresses,
) = (
self.tables.users,
self.classes.Keyword,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.tables.keywords,
self.tables.item_keywords,
self.classes.Order,
self.tables.addresses,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="subquery",
order_by=item_keywords.c.keyword_id,
)
),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(
Item,
secondary=order_items,
lazy="subquery",
order_by=order_items.c.item_id,
)
),
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy="joined", order_by=addresses.c.id
),
orders=relationship(
Order, lazy="joined", order_by=orders.c.id
),
),
)
return fixture_session()
def _upgrade_fixture(self):
(
users,
Keyword,
items,
order_items,
orders,
Item,
User,
Address,
keywords,
item_keywords,
Order,
addresses,
) = (
self.tables.users,
self.classes.Keyword,
self.tables.items,
self.tables.order_items,
self.tables.orders,
self.classes.Item,
self.classes.User,
self.classes.Address,
self.tables.keywords,
self.tables.item_keywords,
self.classes.Order,
self.tables.addresses,
)
self.mapper_registry.map_imperatively(Address, addresses)
self.mapper_registry.map_imperatively(Keyword, keywords)
self.mapper_registry.map_imperatively(
Item,
items,
properties=dict(
keywords=relationship(
Keyword,
secondary=item_keywords,
lazy="select",
order_by=item_keywords.c.keyword_id,
)
),
)
self.mapper_registry.map_imperatively(
Order,
orders,
properties=dict(
items=relationship(
Item,
secondary=order_items,
lazy=True,
order_by=order_items.c.item_id,
)
),
)
self.mapper_registry.map_imperatively(
User,
users,
properties=dict(
addresses=relationship(
Address, lazy=True, order_by=addresses.c.id
),
orders=relationship(Order, order_by=orders.c.id),
),
)
return fixture_session()
| DefaultStrategyOptionsTestFixtures |
python | streamlit__streamlit | lib/streamlit/elements/lib/built_in_chart_utils.py | {
"start": 43355,
"end": 43782
} | class ____(StreamlitAPIException):
def __init__(
self,
color_values: str | Color | Collection[Color] | None,
y_column_list: list[str],
) -> None:
message = (
f"The list of colors `{color_values}` must have the same "
"length as the list of columns to be colored "
f"`{y_column_list}`."
)
super().__init__(message)
| StreamlitColorLengthError |
python | lazyprogrammer__machine_learning_examples | airline/ann.py | {
"start": 536,
"end": 965
} | class ____(object):
def __init__(self, M1, M2, f, an_id):
self.id = an_id
self.M1 = M1
self.M2 = M2
self.f = f
W = init_weight(M1, M2)
b = np.zeros(M2)
self.W = theano.shared(W, 'W_%s' % self.id)
self.b = theano.shared(b, 'b_%s' % self.id)
self.params = [self.W, self.b]
def forward(self, X):
return self.f(X.dot(self.W) + self.b)
| HiddenLayer |
python | has2k1__plotnine | tests/test_doctools.py | {
"start": 161,
"end": 636
} | class ____(geom):
"""
Geom ABC
{usage}
Parameters
----------
{common_parameters}
"""
DEFAULT_AES = {"color": None}
DEFAULT_PARAMS = {
"stat": "bin",
"position": position_stack,
"na_rm": False,
}
def test_document_geom():
doc = geom_abc.__doc__
# assert "~plotnine.stats.stat_bin" in doc
assert 'stat, default="bin"' in doc
assert 'position, default="position_stack"' in doc
@document
| geom_abc |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 12000,
"end": 12203
} | class ____(models.Model):
isbn = models.CharField(max_length=15, primary_key=True)
history = HistoricalRecords(
verbose_name="dead trees", verbose_name_plural="dead trees plural"
)
| Book |
python | pyparsing__pyparsing | pyparsing/core.py | {
"start": 113909,
"end": 124587
} | class ____(Token):
"""Token for matching words composed of allowed character sets.
Parameters:
- ``init_chars`` - string of all characters that should be used to
match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.;
if ``body_chars`` is also specified, then this is the string of
initial characters
- ``body_chars`` - string of characters that
can be used for matching after a matched initial character as
given in ``init_chars``; if omitted, same as the initial characters
(default=``None``)
- ``min`` - minimum number of characters to match (default=1)
- ``max`` - maximum number of characters to match (default=0)
- ``exact`` - exact number of characters to match (default=0)
- ``as_keyword`` - match as a keyword (default=``False``)
- ``exclude_chars`` - characters that might be
found in the input ``body_chars`` string but which should not be
accepted for matching ;useful to define a word of all
printables except for one or two characters, for instance
(default=``None``)
:class:`srange` is useful for defining custom character set strings
for defining :class:`Word` expressions, using range notation from
regular expression character sets.
A common mistake is to use :class:`Word` to match a specific literal
string, as in ``Word("Address")``. Remember that :class:`Word`
uses the string argument to define *sets* of matchable characters.
This expression would match "Add", "AAA", "dAred", or any other word
made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an
exact literal string, use :class:`Literal` or :class:`Keyword`.
pyparsing includes helper strings for building Words:
- :attr:`alphas`
- :attr:`nums`
- :attr:`alphanums`
- :attr:`hexnums`
- :attr:`alphas8bit` (alphabetic characters in ASCII range 128-255
- accented, tilded, umlauted, etc.)
- :attr:`punc8bit` (non-alphabetic characters in ASCII range
128-255 - currency, symbols, superscripts, diacriticals, etc.)
- :attr:`printables` (any non-whitespace character)
``alphas``, ``nums``, and ``printables`` are also defined in several
Unicode sets - see :class:`pyparsing_unicode`.
Example:
.. testcode::
# a word composed of digits
integer = Word(nums)
# Two equivalent alternate forms:
Word("0123456789")
Word(srange("[0-9]"))
# a word with a leading capital, and zero or more lowercase
capitalized_word = Word(alphas.upper(), alphas.lower())
# hostnames are alphanumeric, with leading alpha, and '-'
hostname = Word(alphas, alphanums + '-')
# roman numeral
# (not a strict parser, accepts invalid mix of characters)
roman = Word("IVXLCDM")
# any string of non-whitespace characters, except for ','
csv_value = Word(printables, exclude_chars=",")
:raises ValueError: If ``min`` and ``max`` are both specified
and the test ``min <= max`` fails.
.. versionchanged:: 3.1.0
Raises :exc:`ValueError` if ``min`` > ``max``.
"""
def __init__(
self,
init_chars: str = "",
body_chars: typing.Optional[str] = None,
min: int = 1,
max: int = 0,
exact: int = 0,
as_keyword: bool = False,
exclude_chars: typing.Optional[str] = None,
**kwargs,
) -> None:
initChars: typing.Optional[str] = deprecate_argument(kwargs, "initChars", None)
bodyChars: typing.Optional[str] = deprecate_argument(kwargs, "bodyChars", None)
asKeyword: bool = deprecate_argument(kwargs, "asKeyword", False)
excludeChars: typing.Optional[str] = deprecate_argument(
kwargs, "excludeChars", None
)
initChars = initChars or init_chars
bodyChars = bodyChars or body_chars
asKeyword = asKeyword or as_keyword
excludeChars = excludeChars or exclude_chars
super().__init__()
if not initChars:
raise ValueError(
f"invalid {type(self).__name__}, initChars cannot be empty string"
)
initChars_set = set(initChars)
if excludeChars:
excludeChars_set = set(excludeChars)
initChars_set -= excludeChars_set
if bodyChars:
bodyChars = "".join(set(bodyChars) - excludeChars_set)
self.init_chars = initChars_set
self.initCharsOrig = "".join(sorted(initChars_set))
if bodyChars:
self.bodyChars = set(bodyChars)
self.bodyCharsOrig = "".join(sorted(bodyChars))
else:
self.bodyChars = initChars_set
self.bodyCharsOrig = self.initCharsOrig
self.maxSpecified = max > 0
if min < 1:
raise ValueError(
"cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted"
)
if self.maxSpecified and min > max:
raise ValueError(
f"invalid args, if min and max both specified min must be <= max (min={min}, max={max})"
)
self.minLen = min
if max > 0:
self.maxLen = max
else:
self.maxLen = _MAX_INT
if exact > 0:
min = max = exact
self.maxLen = exact
self.minLen = exact
self.errmsg = f"Expected {self.name}"
self.mayIndexError = False
self.asKeyword = asKeyword
if self.asKeyword:
self.errmsg += " as a keyword"
# see if we can make a regex for this Word
if " " not in (self.initChars | self.bodyChars):
if len(self.initChars) == 1:
re_leading_fragment = re.escape(self.initCharsOrig)
else:
re_leading_fragment = f"[{_collapse_string_to_ranges(self.initChars)}]"
if self.bodyChars == self.initChars:
if max == 0 and self.minLen == 1:
repeat = "+"
elif max == 1:
repeat = ""
else:
if self.minLen != self.maxLen:
repeat = f"{{{self.minLen},{'' if self.maxLen == _MAX_INT else self.maxLen}}}"
else:
repeat = f"{{{self.minLen}}}"
self.reString = f"{re_leading_fragment}{repeat}"
else:
if max == 1:
re_body_fragment = ""
repeat = ""
else:
re_body_fragment = f"[{_collapse_string_to_ranges(self.bodyChars)}]"
if max == 0 and self.minLen == 1:
repeat = "*"
elif max == 2:
repeat = "?" if min <= 1 else ""
else:
if min != max:
repeat = f"{{{min - 1 if min > 0 else ''},{max - 1 if max > 0 else ''}}}"
else:
repeat = f"{{{min - 1 if min > 0 else ''}}}"
self.reString = f"{re_leading_fragment}{re_body_fragment}{repeat}"
if self.asKeyword:
self.reString = rf"\b{self.reString}\b"
try:
self.re = re.compile(self.reString)
except re.error:
self.re = None # type: ignore[assignment]
else:
self.re_match = self.re.match
self.parseImpl = self.parseImpl_regex # type: ignore[method-assign]
@property
def initChars(self) -> set[str]:
"""
.. deprecated:: 3.3.0
use `init_chars` instead.
Property returning the initial chars to be used when matching this
Word expression. If no body chars were specified, the initial characters
will also be the body characters.
"""
return set(self.init_chars)
def copy(self) -> Word:
"""
Returns a copy of this expression.
Generally only used internally by pyparsing.
"""
ret: Word = cast(Word, super().copy())
if hasattr(self, "re_match"):
ret.re_match = self.re_match
ret.parseImpl = ret.parseImpl_regex # type: ignore[method-assign]
return ret
def _generateDefaultName(self) -> str:
def charsAsStr(s):
max_repr_len = 16
s = _collapse_string_to_ranges(s, re_escape=False)
if len(s) > max_repr_len:
return s[: max_repr_len - 3] + "..."
return s
if self.initChars != self.bodyChars:
base = f"W:({charsAsStr(self.initChars)}, {charsAsStr(self.bodyChars)})"
else:
base = f"W:({charsAsStr(self.initChars)})"
# add length specification
if self.minLen > 1 or self.maxLen != _MAX_INT:
if self.minLen == self.maxLen:
if self.minLen == 1:
return base[2:]
else:
return base + f"{{{self.minLen}}}"
elif self.maxLen == _MAX_INT:
return base + f"{{{self.minLen},...}}"
else:
return base + f"{{{self.minLen},{self.maxLen}}}"
return base
def parseImpl(self, instring, loc, do_actions=True) -> ParseImplReturnType:
if instring[loc] not in self.initChars:
raise ParseException(instring, loc, self.errmsg, self)
start = loc
loc += 1
instrlen = len(instring)
body_chars: set[str] = self.bodyChars
maxloc = start + self.maxLen
maxloc = min(maxloc, instrlen)
while loc < maxloc and instring[loc] in body_chars:
loc += 1
throw_exception = False
if loc - start < self.minLen:
throw_exception = True
elif self.maxSpecified and loc < instrlen and instring[loc] in body_chars:
throw_exception = True
elif self.asKeyword and (
(start > 0 and instring[start - 1] in body_chars)
or (loc < instrlen and instring[loc] in body_chars)
):
throw_exception = True
if throw_exception:
raise ParseException(instring, loc, self.errmsg, self)
return loc, instring[start:loc]
def parseImpl_regex(self, instring, loc, do_actions=True) -> ParseImplReturnType:
result = self.re_match(instring, loc)
if not result:
raise ParseException(instring, loc, self.errmsg, self)
loc = result.end()
return loc, result.group()
| Word |
python | pytorch__pytorch | test/dynamo/test_functions.py | {
"start": 117542,
"end": 117945
} | class ____(torch.nn.Module):
def forward(self, x, y):
combined = torch.cat((x, y), dim=1)
out = torch.nn.ReLU()(combined)
out = torch.nn.ReLU()(out)
return out
def udf_module(mod, x, y):
return mod(x, y)
def global_func_with_default_tensor_args(
x=torch.zeros((2, 2)), *, kw_x=torch.zeros((1, 2))
):
x.add_(1)
kw_x.add_(1)
return x, kw_x
| SmallNN |
python | pytorch__pytorch | test/distributed/test_aten_comm_compute_reordering.py | {
"start": 3103,
"end": 21340
} | class ____(DynamoDistributedMultiProcTestCase):
"""
Run correctness checks in multi-proc runner, mark with minimum # GPUs to run under
Note: these tests are a fork of test/distributed/test_compute_comm_reordering.py
"""
def setUp(self):
super().setUp()
torch._dynamo.reset()
torch._dynamo.utils.counters.clear()
def get_world_trs(self):
return {
"tag": "",
"ranks": list(range(self.world_size)),
"group_size": self.world_size,
}
@property
def world_size(self) -> int:
# hack: no matter whether we have 2 or 3 or 4 gpus, just run on 2
# works around issue with skipif<2 and workers with unpredictable #s gpu
return 2
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch(get_patches())
def test_sink_waits(self):
def func(a):
ar = _functional_collectives.all_reduce(a, "sum", "0")
b = torch.matmul(a, a)
return torch.matmul(ar, b)
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
inputs = torch.ones(4, 4, dtype=torch.float, device=device_type) + self.rank
out, aten_graph_str = run_and_get_aten_graph(torch.compile(func), inputs)
# Verify that the wait_tensor is sinked below the 1st matmul but
# above the 2nd matmul.
(
FileCheck()
.check("all_reduce.default")
.check("aten.mm.default")
.check("wait_tensor.default")
.check("aten.mm.default")
.run(aten_graph_str)
)
correct = func(inputs)
self.assertTrue(same(out, correct))
self.assertEqual(counters["inductor"]["overlap_scheduling_exposed"], 0)
@torch._inductor.config.patch(get_patches())
def test_raise_comms(self):
def func(a):
b = torch.matmul(a, a)
c = torch.relu(b)
d = torch.matmul(c, c)
e = _functional_collectives.all_reduce((b + 1), "sum", "0")
return torch.matmul(d, e)
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
inputs = torch.ones(4, 4, dtype=torch.float, device=device_type) + self.rank
compiled = torch.compile(func)
out, aten_graph_str = run_and_get_aten_graph(torch.compile(func), inputs)
# Verify that the all_reduce_ has been raised above the 2nd matmul
# but below the 1st matmul. Note that the all_reduce_ directly
# writes to the output buffer of the 1st matmul, which is an input
# to the first relu. Therefore, the all_reduce_ should be scheduled
# after the first relu.
(
FileCheck()
.check("aten.mm")
.check("all_reduce.default")
.check("aten.mm")
.check("wait_tensor.default")
.check("aten.mm")
.run(aten_graph_str)
)
out = compiled(inputs)
correct = func(inputs)
self.assertTrue(same(out, correct))
self.assertEqual(counters["inductor"]["overlap_scheduling_exposed"], 0)
@torch._inductor.config.patch(get_patches())
def test_sink_waits_raise_comms(self):
def func(a, *, tag, ranks, group_size):
b = torch.matmul(a, a)
c = torch.relu(b)
d = torch.matmul(c, c)
e = _functional_collectives.all_reduce(b, "sum", "0")
f = torch.relu(d)
g = torch.matmul(f, f)
return torch.mm(e, g)
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
inputs = torch.ones(
4, 4, dtype=torch.float, device=device_type
) # + self.rank
kwargs = self.get_world_trs()
func = functools.partial(func, **kwargs)
compiled = torch.compile(func)
out, aten_graph_str = run_and_get_aten_graph(compiled, inputs)
# Things to verify:
# - The all_reduce_ and its prologue should be raised above the 2nd
# matmul but below the 1st matmul.
# - The wait_tensor should be sinked below the 3rd matmul but above
# the 4th matmul.
self.assertExpectedInline(
aten_graph_str,
"""\
graph():
%arg0_1 : [num_users=1] = placeholder[target=arg0_1]
%mm : [num_users=2] = call_function[target=torch.ops.aten.mm.default](args = (%arg0_1, %arg0_1), kwargs = {})
%relu : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%mm,), kwargs = {})
%all_reduce : [num_users=1] = call_function[target=torch.ops._c10d_functional.all_reduce.default](args = (%mm, sum, 0), kwargs = {})
%mm_1 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%relu, %relu), kwargs = {})
%relu_1 : [num_users=1] = call_function[target=torch.ops.aten.relu.default](args = (%mm_1,), kwargs = {})
%mm_2 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%relu_1, %relu_1), kwargs = {})
%wait_tensor : [num_users=1] = call_function[target=torch.ops._c10d_functional.wait_tensor.default](args = (%all_reduce,), kwargs = {})
%mm_3 : [num_users=1] = call_function[target=torch.ops.aten.mm.default](args = (%wait_tensor, %mm_2), kwargs = {})
return (mm_3,)""",
)
# Note: this triggered an all_reduce_ bug
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
self.assertEqual(counters["inductor"]["overlap_scheduling_exposed"], 0)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch(get_patches())
def test_schedulable_wait(self):
"""Test that if a wait node is scheduable or not."""
from torch._inductor.fx_passes.bucketing import _schedulable_wait_node
def test_graph():
graph = fx.Graph()
inp = graph.placeholder("inp")
group_size = graph.placeholder("group_size")
group_name = graph.placeholder("group_name")
ag_0_out = graph.call_function(
torch.ops._c10d_functional.all_gather_into_tensor.default,
args=(inp, group_size, group_name),
)
ag_0_wait = graph.call_function(
torch.ops._c10d_functional.wait_tensor.default,
args=(ag_0_out,),
)
ag_1_out = graph.call_function(
torch.ops._c10d_functional.all_gather_into_tensor.default,
args=(ag_0_wait, group_size, group_name),
)
ag_1_wait = graph.call_function(
torch.ops._c10d_functional.wait_tensor.default,
args=(ag_1_out,),
)
ag_2_wait = graph.call_function(
torch.ops._c10d_functional.wait_tensor.default,
args=(ag_1_wait,),
)
graph.output(ag_2_wait)
return graph
graph = test_graph()
schedulable = {"wait_tensor_default", "wait_tensor_default_1"}
for node in list(graph.nodes):
expected = node.name in schedulable
assert _schedulable_wait_node(node) is expected
@torch._inductor.config.patch(get_patches())
def test_reorder_compute_for_overlap_mul(self):
def func(a, *, tag, ranks, group_size):
ar = _functional_collectives.all_reduce(a, "sum", ranks, tag)
g = torch.matmul(a, a)
c = torch.relu(a)
d = torch.matmul(c, c)
f = d * c * ar
fr = _functional_collectives.all_reduce(f, "sum", ranks, tag)
e = torch.matmul(d + ar + fr, g)
return (e,)
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
inputs = torch.ones(4, 4, dtype=torch.float, device=device_type) + self.rank
func_c = functools.partial(func, **self.get_world_trs())
compiled = torch.compile(func_c)
out_c, aten_graph_str = run_and_get_aten_graph(compiled, inputs)
# Note: because we have given collectives and mms equal estimation,
# we overlap each collective with a single mm.
# Same schedule as in test_reorder_compute_for_overlap_custom_runtime_estimation
# although there is an exposed collective
(
FileCheck()
.check("all_reduce.default")
.check("aten.mm")
.check("aten.mm")
.check("wait_tensor.default")
.check("aten.mul")
.check("all_reduce.default")
.check("wait_tensor.default")
.check("aten.mm")
.run(aten_graph_str)
)
correct = func(inputs, **self.get_world_trs())
self.assertEqual(counters["inductor"]["overlap_scheduling_exposed"], 1)
self.assertTrue(same(out_c, correct))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@skipIfRocm
# TODO: somehow inductor bg compile threads are causing hangs at exit with distributed work dtor
@patch.object(torch._inductor.config, "compile_threads", 1)
@unittest.skipIf(True, "Logic not yet implemented")
@torch._inductor.config.patch(get_patches())
def test_grouped_scheduler_node(self):
def func(a, *, tag, ranks, group_size):
add = a + a
div = add / a
ar = _functional_collectives.all_reduce(div, "sum", ranks, tag)
# Normally, we would fuse `add = a + a`, `div = add / a` and `mul = a * a` together into a single fused op,
# but here in this unit test, we intentionally put `add`, `div` and `ar` computation
# into a GroupedSchedulerNode, which prevents them from being fused with any other ops.
mul = a * a
mm = torch.matmul(mul, ar)
return (mm,)
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
inputs = torch.ones(4, 4, dtype=torch.float, device=device_type) + self.rank
compiled = torch.compile(func)
code = run_and_get_triton_code(compiled, inputs, **self.get_world_trs())
# Expectations:
# 1. `add = a + a` and `div = add / a` are still fused, which means fusion
# still happens among nodes within a GroupedSchedulerNode.
# 2. `mul = a * a` is not fused with `add` or `div`, because the latter two are within
# GroupedSchedulerNode and thus are prevented from being fused with any outside ops.
FileCheck().check("triton_poi_fused_add_all_reduce_div_0.").check(
"_c10d_functional.all_reduce_."
).check("triton_poi_fused_mul_1.").run(code)
out = compiled(inputs, **self.get_world_trs())
correct = func(inputs, **self.get_world_trs())
self.assertTrue(same(out, correct))
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
@torch._inductor.config.patch(get_patches())
def test_inductor_default_comms_ordering(self):
pg_info = self.get_world_trs()
tag = pg_info["tag"]
ranks = pg_info["ranks"]
group_size = pg_info["group_size"]
g1 = torch.ones(10, 10, device=device_type)
g2 = torch.ones(11, 11, device=device_type)
g3 = torch.ones(12, 12, device=device_type)
@torch.compile
def fn(g1, g2, g3):
handle1 = torch.ops.c10d_functional.all_reduce(
g1, "avg", tag, ranks, group_size
)
handle2 = torch.ops.c10d_functional.all_reduce(
g2, "avg", tag, ranks, group_size
)
handle3 = torch.ops.c10d_functional.all_reduce(
g3, "avg", tag, ranks, group_size
)
# wait on them in a different order
grad3 = torch.ops._c10d_functional.wait_tensor.default(handle3)
grad2 = torch.ops._c10d_functional.wait_tensor.default(handle2)
grad1 = torch.ops._c10d_functional.wait_tensor.default(handle1)
return grad3, grad2, grad1
with _dynamo_dist_per_rank_init(
self.rank, self.world_size, self.backend(device_type), fake_pg=True
):
# all_reduces remain in order!
# note: this isn't actually invariant of pass currently..
# but we should keep collectives stable without reordering opportunities
_, code = run_and_get_aten_graph(fn, g1, g2, g3)
FileCheck().check("all_reduce").check_same("arg0_1").check(
"all_reduce"
).check_same("arg1_1").check("all_reduce").check_same("arg2_1").run(code)
self.assertEqual(counters["inductor"]["overlap_scheduling_exposed"], 3)
# these have no overlap opportunities
self.assertEqual(counters["inductor"]["overlap_scheduling_bad_exposed"], 0)
@unittest.skipIf(not HAS_GPU, "Inductor+gpu needs triton and recent GPU arch")
def test_overlap_scheduling_via_config(self):
"""Test overlap scheduling enabled via config in post_grad pass."""
def func(a):
ar = _functional_collectives.all_reduce(a, "sum", "0")
b = torch.matmul(a, a)
return torch.matmul(ar, b)
patches = {
**get_patches(),
"aten_distributed_optimizations.enable_overlap_scheduling": True,
}
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
inputs = torch.ones(4, 4, dtype=torch.float, device=device_type) + self.rank
with torch._inductor.config.patch(patches):
compiled_func = torch.compile(func)
out, code = run_and_get_code(compiled_func, inputs)
# Verify that wait_tensor is sinked below matmul
FileCheck().check("all_reduce").check("mm").check("wait_tensor").check(
"mm"
).run(code[0])
correct = func(inputs)
self.assertTrue(same(out, correct))
self.assertEqual(counters["inductor"]["overlap_scheduling_exposed"], 0)
@torch._inductor.config.patch(get_patches())
def test_custom_estimator_for_non_compute_nodes(self):
"""Test that non-compute nodes with custom runtime estimates can trigger collective prefetching."""
def custom_estimator_with_relu(fx_node, override_size=None):
"""Custom estimator that provides runtime for relu."""
# Collective ops
if "c10" in str(fx_node.target):
return 1.0
# Non-compute ops that we want to overlap
elif fx_node.target == aten.relu.default:
return 1.0 # relu has same time as collective
else:
return None
def func(a, b):
c = torch.relu(a)
d = torch.mm(c, c)
# Collective that is independent and should be prefetched during relu
ar = _functional_collectives.all_reduce(b, "sum", "0")
# Use both results
return d * ar
patches = {
**get_patches(),
"aten_distributed_optimizations.custom_runtime_estimation": custom_estimator_with_relu,
}
with _dynamo_dist_per_rank_init(
self.rank,
self.world_size,
self.backend(device_type),
fake_pg=not at_least_x_gpu(2),
):
inputs_a = (
torch.ones(4, 4, dtype=torch.float, device=device_type) + self.rank
)
inputs_b = torch.ones(4, 4, dtype=torch.float, device=device_type) * 2
with torch._inductor.config.patch(patches):
out, aten_graph_str = run_and_get_aten_graph(
torch.compile(func), inputs_a, inputs_b
)
# Verify that all_reduce is prefetched to run concurrently with relu
# The collective should start before relu completes to enable perfect overlap
FileCheck().check("all_reduce").check("relu").check("wait_tensor").run(
aten_graph_str
)
correct = func(inputs_a, inputs_b)
self.assertTrue(same(out, correct))
self.assertEqual(counters["inductor"]["overlap_scheduling_exposed"], 0)
def get_bucket_patches(compute_multiplier=1.0):
estimate_aten_runtime_part = functools.partial(
estimate_aten_runtime, compute_multiplier=compute_multiplier
)
return {
"aten_distributed_optimizations.custom_runtime_estimation": estimate_aten_runtime_part,
"aten_distributed_optimizations.collective_bucketing": True,
"reorder_for_locality": False,
"triton.native_matmul": False,
"reorder_for_compute_comm_overlap_passes": [],
"compile_threads": 1,
"force_disable_caches": True,
# messes up test strings
"aten_distributed_optimizations.insert_overlap_deps": False,
# interferes with testing, / custom estimation
"test_configs.assume_bucketing_reduces_latency": False,
}
| TestComputeCommReorderingMultiProc |
python | tornadoweb__tornado | tornado/test/queues_test.py | {
"start": 5987,
"end": 10458
} | class ____(AsyncTestCase):
@gen_test
def test_blocking_put(self):
q = queues.Queue() # type: queues.Queue[int]
q.put(0)
self.assertEqual(0, q.get_nowait())
def test_nonblocking_put_exception(self):
q = queues.Queue(1) # type: queues.Queue[int]
q.put(0)
self.assertRaises(queues.QueueFull, q.put_nowait, 1)
@gen_test
def test_put_with_getters(self):
q = queues.Queue() # type: queues.Queue[int]
get0 = q.get()
get1 = q.get()
yield q.put(0)
self.assertEqual(0, (yield get0))
yield q.put(1)
self.assertEqual(1, (yield get1))
@gen_test
def test_nonblocking_put_with_getters(self):
q = queues.Queue() # type: queues.Queue[int]
get0 = q.get()
get1 = q.get()
q.put_nowait(0)
# put_nowait does *not* immediately unblock getters.
yield gen.moment
self.assertEqual(0, (yield get0))
q.put_nowait(1)
yield gen.moment
self.assertEqual(1, (yield get1))
@gen_test
def test_blocking_put_wait(self):
q = queues.Queue(1) # type: queues.Queue[int]
q.put_nowait(0)
def get_and_discard():
q.get()
self.io_loop.call_later(0.01, get_and_discard)
self.io_loop.call_later(0.02, get_and_discard)
futures = [q.put(0), q.put(1)]
self.assertFalse(any(f.done() for f in futures))
yield futures
@gen_test
def test_put_timeout(self):
q = queues.Queue(1) # type: queues.Queue[int]
q.put_nowait(0) # Now it's full.
put_timeout = q.put(1, timeout=timedelta(seconds=0.01))
put = q.put(2)
with self.assertRaises(TimeoutError):
yield put_timeout
self.assertEqual(0, q.get_nowait())
# 1 was never put in the queue.
self.assertEqual(2, (yield q.get()))
# Final get() unblocked this putter.
yield put
@gen_test
def test_put_timeout_preempted(self):
q = queues.Queue(1) # type: queues.Queue[int]
q.put_nowait(0)
put = q.put(1, timeout=timedelta(seconds=0.01))
q.get()
yield gen.sleep(0.02)
yield put # No TimeoutError.
@gen_test
def test_put_clears_timed_out_putters(self):
q = queues.Queue(1) # type: queues.Queue[int]
# First putter succeeds, remainder block.
putters = [q.put(i, timedelta(seconds=0.01)) for i in range(10)]
put = q.put(10)
self.assertEqual(10, len(q._putters))
yield gen.sleep(0.02)
self.assertEqual(10, len(q._putters))
self.assertFalse(put.done()) # Final waiter is still active.
q.put(11) # put() clears the waiters.
self.assertEqual(2, len(q._putters))
for putter in putters[1:]:
self.assertRaises(TimeoutError, putter.result)
@gen_test
def test_put_clears_timed_out_getters(self):
q = queues.Queue() # type: queues.Queue[int]
getters = [
asyncio.ensure_future(q.get(timedelta(seconds=0.01))) for _ in range(10)
]
get = asyncio.ensure_future(q.get())
q.get()
self.assertEqual(12, len(q._getters))
yield gen.sleep(0.02)
self.assertEqual(12, len(q._getters))
self.assertFalse(get.done()) # Final waiters still active.
q.put(0) # put() clears the waiters.
self.assertEqual(1, len(q._getters))
self.assertEqual(0, (yield get))
for getter in getters:
self.assertRaises(TimeoutError, getter.result)
@gen_test
def test_float_maxsize(self):
# If a float is passed for maxsize, a reasonable limit should
# be enforced, instead of being treated as unlimited.
# It happens to be rounded up.
# http://bugs.python.org/issue21723
q = queues.Queue(maxsize=1.3) # type: ignore
self.assertTrue(q.empty())
self.assertFalse(q.full())
q.put_nowait(0)
q.put_nowait(1)
self.assertFalse(q.empty())
self.assertTrue(q.full())
self.assertRaises(queues.QueueFull, q.put_nowait, 2)
self.assertEqual(0, q.get_nowait())
self.assertFalse(q.empty())
self.assertFalse(q.full())
yield q.put(2)
put = q.put(3)
self.assertFalse(put.done())
self.assertEqual(1, (yield q.get()))
yield put
self.assertTrue(q.full())
| QueuePutTest |
python | huggingface__transformers | tests/models/musicgen/test_processing_musicgen.py | {
"start": 1503,
"end": 6063
} | class ____(unittest.TestCase):
def setUp(self):
self.checkpoint = "facebook/musicgen-small"
self.tmpdirname = tempfile.mkdtemp()
def get_tokenizer(self, **kwargs):
return T5Tokenizer.from_pretrained(self.checkpoint, **kwargs)
def get_feature_extractor(self, **kwargs):
return EncodecFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)
def tearDown(self):
shutil.rmtree(self.tmpdirname)
def test_save_load_pretrained_default(self):
tokenizer = self.get_tokenizer()
feature_extractor = self.get_feature_extractor()
processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
processor.save_pretrained(self.tmpdirname)
processor = MusicgenProcessor.from_pretrained(self.tmpdirname)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
self.assertIsInstance(processor.tokenizer, T5TokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
self.assertIsInstance(processor.feature_extractor, EncodecFeatureExtractor)
def test_save_load_pretrained_additional_features(self):
processor = MusicgenProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
processor.save_pretrained(self.tmpdirname)
tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
processor = MusicgenProcessor.from_pretrained(
self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
)
self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
self.assertIsInstance(processor.tokenizer, T5TokenizerFast)
self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
self.assertIsInstance(processor.feature_extractor, EncodecFeatureExtractor)
def test_feature_extractor(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = floats_list((3, 1000))
input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
input_processor = processor(raw_speech, return_tensors="np")
for key in input_feat_extract:
self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
def test_tokenizer(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
input_str = "This is a test string"
encoded_processor = processor(text=input_str)
encoded_tok = tokenizer(input_str)
for key in encoded_tok:
self.assertListEqual(encoded_tok[key], encoded_processor[key])
def test_tokenizer_decode(self):
feature_extractor = self.get_feature_extractor()
tokenizer = self.get_tokenizer()
processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
decoded_processor = processor.batch_decode(sequences=predicted_ids)
decoded_tok = tokenizer.decode(predicted_ids)
self.assertListEqual(decoded_tok, decoded_processor)
def test_decode_audio(self):
feature_extractor = self.get_feature_extractor(padding_side="left")
tokenizer = self.get_tokenizer()
processor = MusicgenProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
raw_speech = [floats_list((1, x))[0] for x in range(5, 20, 5)]
padding_mask = processor(raw_speech).padding_mask
generated_speech = np.asarray(floats_list((3, 20)))[:, None, :]
decoded_audios = processor.batch_decode(generated_speech, padding_mask=padding_mask)
self.assertIsInstance(decoded_audios, list)
for audio in decoded_audios:
self.assertIsInstance(audio, np.ndarray)
self.assertTrue(decoded_audios[0].shape == (1, 10))
self.assertTrue(decoded_audios[1].shape == (1, 15))
self.assertTrue(decoded_audios[2].shape == (1, 20))
| MusicgenProcessorTest |
python | ray-project__ray | python/ray/tune/tests/test_api.py | {
"start": 50192,
"end": 51779
} | class ____(unittest.TestCase):
def testCreateScheduler(self):
kwargs = {"metric": "metric_foo", "mode": "min"}
scheduler = "async_hyperband"
shim_scheduler = tune.create_scheduler(scheduler, **kwargs)
real_scheduler = AsyncHyperBandScheduler(**kwargs)
assert type(shim_scheduler) is type(real_scheduler)
def testCreateLazyImportScheduler(self):
kwargs = {
"metric": "metric_foo",
"mode": "min",
"hyperparam_bounds": {"param1": [0, 1]},
}
shim_scheduler_pb2 = tune.create_scheduler("pb2", **kwargs)
real_scheduler_pb2 = PB2(**kwargs)
assert type(shim_scheduler_pb2) is type(real_scheduler_pb2)
def testCreateSearcher(self):
kwargs = {"metric": "metric_foo", "mode": "min"}
searcher_ax = "ax"
shim_searcher_ax = tune.create_searcher(searcher_ax, **kwargs)
real_searcher_ax = AxSearch(space=[], **kwargs)
assert type(shim_searcher_ax) is type(real_searcher_ax)
searcher_hyperopt = "hyperopt"
shim_searcher_hyperopt = tune.create_searcher(searcher_hyperopt, **kwargs)
real_searcher_hyperopt = HyperOptSearch({}, **kwargs)
assert type(shim_searcher_hyperopt) is type(real_searcher_hyperopt)
def testExtraParams(self):
kwargs = {"metric": "metric_foo", "mode": "min", "extra_param": "test"}
scheduler = "async_hyperband"
tune.create_scheduler(scheduler, **kwargs)
searcher_ax = "ax"
tune.create_searcher(searcher_ax, **kwargs)
| ShimCreationTest |
python | sphinx-doc__sphinx | sphinx/domains/cpp/_ast.py | {
"start": 49051,
"end": 50921
} | class ____(ASTExpression):
def __init__(
self,
leftExpr: ASTExpression,
op: str,
rightExpr: ASTExpression | ASTBracedInitList,
) -> None:
self.leftExpr = leftExpr
self.op = op
self.rightExpr = rightExpr
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTAssignmentExpr):
return NotImplemented
return (
self.leftExpr == other.leftExpr
and self.op == other.op
and self.rightExpr == other.rightExpr
)
def __hash__(self) -> int:
return hash((self.leftExpr, self.op, self.rightExpr))
def _stringify(self, transform: StringifyTransform) -> str:
res: list[str] = []
res.extend((
transform(self.leftExpr),
' ',
self.op,
' ',
transform(self.rightExpr),
))
return ''.join(res)
def get_id(self, version: int) -> str:
# we end up generating the ID from left to right, instead of right to left
res: list[str] = []
res.extend((
_id_operator_v2[self.op],
self.leftExpr.get_id(version),
self.rightExpr.get_id(version),
))
return ''.join(res)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
self.leftExpr.describe_signature(signode, mode, env, symbol)
signode += addnodes.desc_sig_space()
if ord(self.op[0]) >= ord('a') and ord(self.op[0]) <= ord('z'):
signode += addnodes.desc_sig_keyword(self.op, self.op)
else:
signode += addnodes.desc_sig_operator(self.op, self.op)
signode += addnodes.desc_sig_space()
self.rightExpr.describe_signature(signode, mode, env, symbol)
| ASTAssignmentExpr |
python | kamyu104__LeetCode-Solutions | Python/distribute-candies-to-people.py | {
"start": 64,
"end": 1031
} | class ____(object):
def distributeCandies(self, candies, num_people):
"""
:type candies: int
:type num_people: int
:rtype: List[int]
"""
# find max integer p s.t. sum(1 + 2 + ... + p) <= C
# => remaining : 0 <= C-(1+p)*p/2 < p+1
# => -2p-2 < p^2+p-2C <= 0
# => 2C+1/4 < (p+3/2)^2 and (p+1/2)^2 <= 2C+1/4
# => sqrt(2C+1/4)-3/2 < p <= sqrt(2C+1/4)-1/2
# => p = floor(sqrt(2C+1/4)-1/2)
p = int((2*candies + 0.25)**0.5 - 0.5)
remaining = candies - (p+1)*p//2
rows, cols = divmod(p, num_people)
result = [0]*num_people
for i in xrange(num_people):
result[i] = (i+1)*(rows+1) + (rows*(rows+1)//2)*num_people if i < cols else \
(i+1)*rows + ((rows-1)*rows//2)*num_people
result[cols] += remaining
return result
# Time: O(n + logc), c is the number of candies
# Space: O(1)
| Solution |
python | PyCQA__pylint | tests/functional/a/access/access_to_protected_members.py | {
"start": 163,
"end": 746
} | class ____:
"""Class with protected members."""
_cls_protected = 5
def __init__(self, other):
MyClass._cls_protected = 6
self._protected = 1
self.public = other
self.attr = 0
def test(self):
"""Docstring."""
self._protected += self._cls_protected
print(self.public._haha) # [protected-access]
def clsmeth(cls):
"""Docstring."""
cls._cls_protected += 1
print(cls._cls_protected)
clsmeth = classmethod(clsmeth)
def _private_method(self):
"""Doing nothing."""
| MyClass |
python | google__pytype | pytype/pytd/pytd.py | {
"start": 15877,
"end": 16822
} | class ____(Type, frozen=False, eq=False):
"""Super class for shared behavior of UnionType and IntersectionType."""
# NOTE: This class is not frozen so that we can flatten types after
# initialization. It should still be treated as a frozen type.
# NOTE: type_list is kept as a tuple, to preserve the original order
# even though in most respects it acts like a frozenset.
# It also flattens the input, such that printing without
# parentheses gives the same result.
type_list: tuple[TypeU, ...] = ()
def __post_init__(self):
self.type_list = _FlattenTypes(self.type_list)
def __eq__(self, other):
if self is other:
return True
if isinstance(other, type(self)):
# equality doesn't care about the ordering of the type_list
return frozenset(self.type_list) == frozenset(other.type_list)
return NotImplemented
def __hash__(self):
return hash(self.type_list)
| _SetOfTypes |
python | numba__numba | numba/core/typing/builtins.py | {
"start": 13220,
"end": 13287
} | class ____(OrderedCmpOp):
pass
@infer_global(operator.le)
| CmpOpLt |
python | pytorch__pytorch | test/nn/test_multihead_attention.py | {
"start": 30115,
"end": 40314
} | class ____(NNTestCase):
def test_multihead_self_attn_two_masks_fast_path(self, device):
"""
Multihead self-attention should give the same result on the fast path (BetterTransformer) as on the slow path
when both attention mask (mask type 0) and key padding mask (mask type 1) are provided
"""
with torch.no_grad():
embed_dim = 14
num_heads = 7
batch_size = 8
src_len = 5
query = value = key = torch.rand(batch_size, src_len, embed_dim).to(device)
# Create masks of two different types
attn_mask = torch.randint(0, 2, (src_len, src_len)).bool().to(device)
key_padding_mask = (
torch.randint(0, 2, (batch_size, src_len)).bool().to(device)
)
# We'll need expanded versions of the masks for masking out the outputs below
attn_mask_expanded = attn_mask.reshape(1, 1, src_len, src_len).expand(
batch_size, num_heads, src_len, src_len
)
key_padding_mask_expanded = key_padding_mask.reshape(
batch_size, 1, 1, src_len
).expand(batch_size, num_heads, src_len, src_len)
merged_mask = attn_mask_expanded.logical_or(key_padding_mask_expanded)
# Compute attention on the fast path
mta_model = torch.nn.MultiheadAttention(
embed_dim, num_heads, batch_first=True, device=device
)
mta_model.training = False
result_fast_path, _ = mta_model(
query,
key,
value,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
)
# Compute attention on the slow path
result_ref, _ = torch.nn.functional.multi_head_attention_forward(
query.transpose(0, 1),
key.transpose(0, 1),
value.transpose(0, 1),
embed_dim,
num_heads,
mta_model.in_proj_weight,
mta_model.in_proj_bias,
mta_model.bias_k,
mta_model.bias_v,
mta_model.add_zero_attn,
mta_model.dropout,
mta_model.out_proj.weight,
mta_model.out_proj.bias,
training=mta_model.training,
key_padding_mask=key_padding_mask,
need_weights=False,
attn_mask=attn_mask,
use_separate_proj_weight=False,
q_proj_weight=mta_model.q_proj_weight,
k_proj_weight=mta_model.k_proj_weight,
v_proj_weight=mta_model.v_proj_weight,
average_attn_weights=False,
)
result_ref = result_ref.transpose(0, 1) # Convert to batch-first
# Rows which are completely masked out are nan, we need to exclude them from comparison
mask_out = (
merged_mask[:, 0, :, :]
.all(-1, keepdim=True)
.expand(batch_size, src_len, embed_dim)
)
result_fast_path_masked = result_fast_path.masked_fill(mask_out, 0)
result_ref_masked = result_ref.masked_fill(mask_out, 0)
self.assertEqual(result_fast_path_masked, result_ref_masked)
@torch.no_grad()
@unittest.skipIf(
TEST_WITH_CROSSREF,
"CrossRef turns on TorchFunctionMode, and so disables fastpath.",
)
def test_multihead_self_attn_two_masks_fast_path_mock(self, device):
"""
Multihead self-attention should take fast path when both attention mask (mask type 0)
and key padding mask (mask type 1) are provided at the same time on CPU and CUDA and PrivateUse1
"""
device = device.rstrip(":0123456789")
if device not in [
"cpu",
"cuda",
"xpu",
torch._C._get_privateuse1_backend_name(),
]:
self.skipTest("Fastpath only runs on CPU and CUDA and XPU and PrivateUse1.")
with torch.autocast(device_type=device, enabled=False):
embed_dim = 16
num_heads = 8
batch_size = 8
src_len = 5
query = value = key = torch.rand(batch_size, src_len, embed_dim).to(device)
# Create masks of two different types
attn_mask = torch.randint(0, 2, (src_len, src_len)).bool().to(device)
key_padding_mask = (
torch.randint(0, 2, (batch_size, src_len)).bool().to(device)
)
with mock.patch(
"torch._native_multi_head_attention",
new=mock.MagicMock(return_value=(torch.Tensor(), torch.Tensor())),
) as fastpath_mock:
# Compute attention on the fast path
mta_model = torch.nn.MultiheadAttention(
embed_dim, num_heads, batch_first=True, device=device
).eval()
mta_model.training = False
mta_model(
query,
key,
value,
attn_mask=attn_mask,
key_padding_mask=key_padding_mask,
)
# If mock was called, fastpath was taken
self.assertTrue(fastpath_mock.called)
@onlyOn(["cuda", "xpu", torch._C._get_privateuse1_backend_name()])
@dtypes(torch.half, torch.float, torch.double)
def test_multihead_attention_dtype(self, device, dtype):
embed_dim = 128
num_heads = 8
sl = 10
bs = 8
model = nn.MultiheadAttention(embed_dim, num_heads).to(device).to(dtype)
q = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
k = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
v = torch.randn(sl, bs, embed_dim, device=device, dtype=dtype)
out = model(q, k, v)
self.assertEqual(q.size(), out[0].size())
self.assertEqual(dtype, out[0].dtype)
@onlyOn(["cuda", "xpu", torch._C._get_privateuse1_backend_name()])
@dtypes(torch.half, torch.float, torch.double)
def test_multihead_attention_dtype_batch_first(self, device, dtype):
embed_dim = 128
num_heads = 8
sl = 10
bs = 8
# With batch_first=True, we have the possibility of hitting
# the native fast path if we call .eval() and enable inference
# mode. Test both paths.
for training in (True, False):
model = (
nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)
.to(device)
.to(dtype)
)
if not training:
model = model.eval()
cm = torch.no_grad()
else:
cm = contextlib.nullcontext()
with cm:
q = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype)
k = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype)
v = torch.randn(bs, sl, embed_dim, device=device, dtype=dtype)
# fast path currently doesn't support weights
out = model(q, k, v, need_weights=False)
self.assertEqual(q.size(), out[0].size())
self.assertEqual(dtype, out[0].dtype)
@dtypes(torch.double)
@torch.no_grad()
def test_multihead_attn_fast_path_query_and_bias_have_different_dtypes(
self, device, dtype
):
mha = torch.nn.MultiheadAttention(
4, 4, batch_first=True, dtype=dtype, device=device
).eval()
mha.in_proj_bias = torch.nn.Parameter(
mha.in_proj_bias.to(torch.half).to(device)
)
query = torch.randn(4, 4, 4, dtype=dtype, device=device)
mha(query, query, query)
@dtypes(torch.double)
@torch.no_grad()
def test_multihead_attn_fast_path_small_test(self, device, dtype):
mha = torch.nn.MultiheadAttention(
4, 4, batch_first=True, dtype=dtype, device=device
).eval()
query = torch.randn(4, 4, 4, dtype=dtype, device=device)
mha(query, query, query)
@dtypes(torch.double)
def test_fast_path_check_with_mask_does_not_break_in_compile(self, device, dtype):
# Test TransformerEncoder fast path determination with src_key_padding_mask set.
# Specifically, ensure the mask left-align check doesn't fail in torch.compile.
# See https://github.com/pytorch/pytorch/issues/163640
layer = nn.TransformerEncoderLayer(
d_model=512,
nhead=8,
batch_first=True,
dropout=0.1,
device=device,
dtype=dtype,
)
encoder = nn.TransformerEncoder(layer, num_layers=2).eval()
encoder = torch.compile(encoder, fullgraph=True)
x = torch.randn(1, 41, 512, dtype=dtype, device=device)
pad_mask = torch.rand(1, 41, device=device) > 0.5
pad_mask[..., 0] = True
encoder(x, mask=None, src_key_padding_mask=pad_mask)
@dtypes(torch.double)
@torch.no_grad()
def test_multihead_attn_in_proj_bias_none(self, device, dtype):
mha = torch.nn.MultiheadAttention(2, 2, bias=False, dtype=dtype, device=device)
query = torch.rand(2, 2, 2, dtype=dtype, device=device)
mha(query, query, query)
@dtypes(torch.double)
@torch.no_grad()
def test_multihead_attn_in_proj_weight_none(self, device, dtype):
# Setting kdim == vdim == 2 means that vdim != embed_dim
# will cause the logic to use per-input project weights, thereby
# forcing self.in_proj_weight = None
mha = torch.nn.MultiheadAttention(
4, 4, vdim=2, kdim=2, dtype=dtype, device=device
)
query = torch.rand(4, 4, 4, dtype=dtype, device=device)
key = torch.rand(4, 4, 2, dtype=dtype, device=device)
mha(query, key, key)
instantiate_device_type_tests(TestMultiheadAttentionNNDeviceType, globals())
instantiate_parametrized_tests(TestMultiheadAttentionNN)
if __name__ == "__main__":
run_tests()
| TestMultiheadAttentionNNDeviceType |
python | realpython__materials | python-microservices-with-grpc/marketplace/recommendations_pb2_grpc.py | {
"start": 1644,
"end": 2465
} | class ____(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def Recommend(
request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None,
):
return grpc.experimental.unary_unary(
request,
target,
"/Recommendations/Recommend",
recommendations__pb2.RecommendationRequest.SerializeToString,
recommendations__pb2.RecommendationResponse.FromString,
options,
channel_credentials,
call_credentials,
compression,
wait_for_ready,
timeout,
metadata,
)
| Recommendations |
python | kamyu104__LeetCode-Solutions | Python/palindrome-rearrangement-queries.py | {
"start": 2355,
"end": 4358
} | class ____(object):
def canMakePalindromeQueries(self, s, queries):
"""
:type s: str
:type queries: List[List[int]]
:rtype: List[bool]
"""
def check(left1, right1, left2, right2):
def same(left, right):
return all(prefixs1[right+1][i]-prefixs1[left][i] == prefixs2[right+1][i]-prefixs2[left][i] for i in xrange(26))
min_left, max_left = min(left1, left2), max(left1, left2)
min_right, max_right = min(right1, right2), max(right1, right2)
if not (prefix[min_left]-prefix[0] == prefix[-1]-prefix[max_right+1] == 0):
return False
if min_right < max_left: # non-overlapped
return prefix[max_left]-prefix[min_right+1] == 0 and same(min_left, min_right) and same(max_left, max_right)
# overlapped
if (left1 == min_left) == (right1 == max_right): # inside another
return same(min_left, max_right)
# not inside another
p1, p2 = (prefixs1, prefixs2) if min_left == left1 else (prefixs2, prefixs1)
diff1 = [(p1[min_right+1][i]-p1[min_left][i])-(p2[max_left][i]-p2[min_left][i]) for i in xrange(26)]
diff2 = [(p2[max_right+1][i]-p2[max_left][i])-(p1[max_right+1][i]-p1[min_right+1][i]) for i in xrange(26)]
return diff1 == diff2 and all(x >= 0 for x in diff1) # test case: s = "aabbba", queries = [[0,1,3,4]]
prefix = [0]*(len(s)//2+1)
prefixs1 = [[0]*26 for _ in xrange(len(s)//2+1)]
prefixs2 = [[0]*26 for _ in xrange(len(s)//2+1)]
for i in xrange(len(s)//2):
x, y = ord(s[i])-ord('a'), ord(s[~i])-ord('a')
prefix[i+1] = prefix[i]+int(x != y)
for j in xrange(26):
prefixs1[i+1][j] = prefixs1[i][j]+int(j == x)
prefixs2[i+1][j] = prefixs2[i][j]+int(j == y)
return [check(q[0], q[1], (len(s)-1)-q[3], (len(s)-1)-q[2]) for q in queries]
| Solution2 |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/vertex_ai.py | {
"start": 8923,
"end": 9959
} | class ____(BaseVertexAIJobTrigger):
"""
Make async calls to Vertex AI to check the state of a running custom python package training job.
Return the job when it enters a completed state.
"""
job_type_verbose_name = "Custom Python Package Training Job"
job_serializer_class = types.TrainingPipeline
statuses_success = {
PipelineState.PIPELINE_STATE_PAUSED,
PipelineState.PIPELINE_STATE_SUCCEEDED,
}
@cached_property
def async_hook(self) -> CustomJobAsyncHook:
return CustomJobAsyncHook(
gcp_conn_id=self.conn_id,
impersonation_chain=self.impersonation_chain,
)
async def _wait_job(self) -> types.TrainingPipeline:
pipeline: types.TrainingPipeline = await self.async_hook.wait_for_training_pipeline(
project_id=self.project_id,
location=self.location,
pipeline_id=self.job_id,
poll_interval=self.poll_interval,
)
return pipeline
| CustomPythonPackageTrainingJobTrigger |
python | altair-viz__altair | altair/utils/schemapi.py | {
"start": 41485,
"end": 55082
} | class ____:
"""
Base class for schema wrappers.
Each derived class should set the _schema class attribute (and optionally
the _rootschema class attribute) which is used for validation.
"""
_schema: ClassVar[dict[str, Any] | Any] = None
_rootschema: ClassVar[dict[str, Any] | None] = None
_class_is_valid_at_instantiation: ClassVar[bool] = True
def __init__(self, *args: Any, **kwds: Any) -> None:
# Two valid options for initialization, which should be handled by
# derived classes:
# - a single arg with no kwds, for, e.g. {'type': 'string'}
# - zero args with zero or more kwds for {'type': 'object'}
if self._schema is None:
msg = (
f"Cannot instantiate object of type {self.__class__}: "
"_schema class attribute is not defined."
""
)
raise ValueError(msg)
if kwds:
assert len(args) == 0
else:
assert len(args) in {0, 1}
# use object.__setattr__ because we override setattr below.
object.__setattr__(self, "_args", args)
object.__setattr__(self, "_kwds", kwds)
if DEBUG_MODE and self._class_is_valid_at_instantiation:
self.to_dict(validate=True)
def copy(
self, deep: bool | Iterable[Any] = True, ignore: list[str] | None = None
) -> Self:
"""
Return a copy of the object.
Parameters
----------
deep : boolean or list, optional
If True (default) then return a deep copy of all dict, list, and
SchemaBase objects within the object structure.
If False, then only copy the top object.
If a list or iterable, then only copy the listed attributes.
ignore : list, optional
A list of keys for which the contents should not be copied, but
only stored by reference.
"""
if deep is True:
return cast("Self", _deep_copy(self, set(ignore) if ignore else set()))
with debug_mode(False):
copy = self.__class__(*self._args, **self._kwds)
if _is_iterable(deep):
for attr in deep:
copy[attr] = _shallow_copy(copy._get(attr))
return copy
def _get(self, attr, default=Undefined):
"""Get an attribute, returning default if not present."""
attr = self._kwds.get(attr, Undefined)
if attr is Undefined:
attr = default
return attr
def __getattr__(self, attr):
# reminder: getattr is called after the normal lookups
if attr == "_kwds":
raise AttributeError()
if attr in self._kwds:
return self._kwds[attr]
else:
try:
_getattr = super().__getattr__ # pyright: ignore[reportAttributeAccessIssue]
except AttributeError:
_getattr = super().__getattribute__
return _getattr(attr)
def __setattr__(self, item, val) -> None:
self._kwds[item] = val
def __getitem__(self, item):
return self._kwds[item]
def __setitem__(self, item, val) -> None:
self._kwds[item] = val
def __repr__(self) -> str:
name = type(self).__name__
if kwds := self._kwds:
it = (f"{k}: {v!r}" for k, v in sorted(kwds.items()) if v is not Undefined)
args = ",\n".join(it).replace("\n", "\n ")
LB, RB = "{", "}"
return f"{name}({LB}\n {args}\n{RB})"
else:
return f"{name}({self._args[0]!r})"
def __eq__(self, other: Any) -> bool:
return (
type(self) is type(other)
and self._args == other._args
and self._kwds == other._kwds
)
def to_dict(
self,
validate: bool = True,
*,
ignore: list[str] | None = None,
context: dict[str, Any] | None = None,
) -> dict[str, Any]:
"""
Return a dictionary representation of the object.
Parameters
----------
validate : bool, optional
If True (default), then validate the result against the schema.
ignore : list[str], optional
A list of keys to ignore.
context : dict[str, Any], optional
A context dictionary.
Raises
------
SchemaValidationError :
If ``validate`` and the result does not conform to the schema.
Notes
-----
- ``ignore``, ``context`` are usually not needed to be specified as a user.
- *Technical*: ``ignore`` will **not** be passed to child :meth:`.to_dict()`.
"""
context = context or {}
ignore = ignore or []
opts = _get_optional_modules(np_opt="numpy", pd_opt="pandas")
if self._args and not self._kwds:
kwds = self._args[0]
elif not self._args:
kwds = self._kwds.copy()
exclude = {*ignore, "shorthand"}
if parsed := context.pop("parsed_shorthand", None):
kwds = _replace_parsed_shorthand(parsed, kwds)
kwds = {k: v for k, v in kwds.items() if k not in exclude}
if (mark := kwds.get("mark")) and isinstance(mark, str):
kwds["mark"] = {"type": mark}
else:
msg = f"{type(self)} instance has both a value and properties : cannot serialize to dict"
raise ValueError(msg)
result = _todict(kwds, context=context, **opts)
if validate:
# NOTE: Don't raise `from err`, see `SchemaValidationError` doc
try:
self.validate(result)
except jsonschema.ValidationError as err:
raise SchemaValidationError(self, err) from None
return result
def to_json(
    self,
    validate: bool = True,
    indent: int | str | None = 2,
    sort_keys: bool = True,
    *,
    ignore: list[str] | None = None,
    context: dict[str, Any] | None = None,
    **kwargs,
) -> str:
    """
    Emit the JSON representation for this object as a string.

    Parameters
    ----------
    validate : bool, optional
        If True (default), then validate the result against the schema.
    indent : int, optional
        The number of spaces of indentation to use. The default is 2.
    sort_keys : bool, optional
        If True (default), sort keys in the output.
    ignore : list[str], optional
        A list of keys to ignore.
    context : dict[str, Any], optional
        A context dictionary.
    **kwargs
        Additional keyword arguments are passed to ``json.dumps()``

    Raises
    ------
    SchemaValidationError :
        If ``validate`` and the result does not conform to the schema.

    Notes
    -----
    - ``ignore``, ``context`` are usually not needed to be specified as a user.
    - *Technical*: ``ignore`` will **not** be passed to child :meth:`.to_dict()`.
    """
    # Delegate serialization to `to_dict`, normalizing the optional
    # arguments inline, then dump the resulting dict as JSON.
    spec = self.to_dict(
        validate=validate,
        ignore=[] if ignore is None else ignore,
        context={} if context is None else context,
    )
    return json.dumps(spec, indent=indent, sort_keys=sort_keys, **kwargs)
@classmethod
def _default_wrapper_classes(cls) -> Iterator[type[SchemaBase]]:
    """Yield the classes that ``cls.from_dict()`` may instantiate."""
    wrapper_classes = _subclasses(SchemaBase)
    return wrapper_classes
@classmethod
def from_dict(
    cls: type[TSchemaBase], dct: dict[str, Any], validate: bool = True
) -> TSchemaBase:
    """
    Construct class from a dictionary representation.

    Parameters
    ----------
    dct : dictionary
        The dict from which to construct the class
    validate : boolean
        If True (default), then validate the input against the schema.

    Raises
    ------
    jsonschema.ValidationError :
        If ``validate`` and ``dct`` does not conform to the schema
    """
    if validate:
        cls.validate(dct)
    # Resolve the raw dict into wrapper instances drawn from the
    # default wrapper classes for this schema.
    return _FromDict(cls._default_wrapper_classes()).from_dict(dct, cls)
@classmethod
def from_json(
    cls,
    json_string: str,
    validate: bool = True,
    **kwargs: Any,
    # Type hints for this method would get rather complicated
    # if we want to provide a more specific return type
) -> ChartType:
    """
    Instantiate the object from a valid JSON string.

    Parameters
    ----------
    json_string : string
        The string containing a valid JSON chart specification.
    validate : boolean
        If True (default), then validate the input against the schema.
    **kwargs :
        Additional keyword arguments are passed to json.loads

    Returns
    -------
    chart : Chart object
        The altair Chart object built from the specification.
    """
    # Parse the JSON text first, then reuse the dict-based constructor.
    parsed: dict[str, Any] = json.loads(json_string, **kwargs)
    return cls.from_dict(parsed, validate=validate)  # type: ignore[return-value]
@classmethod
def validate(
    cls, instance: dict[str, Any], schema: dict[str, Any] | None = None
) -> None:
    """Validate the instance against the class schema in the context of the rootschema."""
    effective_schema = cls._schema if schema is None else schema
    # For the benefit of mypy
    assert effective_schema is not None
    root = cls._rootschema or cls._schema
    validate_jsonschema(instance, effective_schema, rootschema=root)
@classmethod
def resolve_references(cls, schema: dict[str, Any] | None = None) -> dict[str, Any]:
    """Resolve references in the context of this object's schema or root schema."""
    target = schema or cls._schema
    # For the benefit of mypy
    assert target is not None
    root = cls._rootschema or cls._schema or schema
    return _resolve_references(schema=target, rootschema=root)
@classmethod
def validate_property(
    cls, name: str, value: Any, schema: dict[str, Any] | None = None
) -> None:
    """Validate a property against property schema in the context of the rootschema."""
    # Serialize the value first so numpy/pandas objects (if imported)
    # become plain JSON-compatible types.
    opts = _get_optional_modules(np_opt="numpy", pd_opt="pandas")
    serialized = _todict(value, context={}, **opts)
    properties = cls.resolve_references(schema or cls._schema).get("properties", {})
    prop_schema = properties.get(name, {})
    validate_jsonschema(
        serialized, prop_schema, rootschema=cls._rootschema or cls._schema
    )
def __dir__(self) -> list[str]:
    """List default attributes together with this schema's property names."""
    return sorted([*super().__dir__(), *self._kwds])
def _get_optional_modules(**modules: str) -> dict[str, _OptionalModule]:
"""
Returns packages only if they have already been imported - otherwise they return `None`.
This is useful for `isinstance` checks.
For example, if `pandas` has not been imported, then an object is
definitely not a `pandas.Timestamp`.
Parameters
----------
**modules
Keyword-only binding from `{alias: module_name}`.
Examples
--------
>>> import pandas as pd # doctest: +SKIP
>>> import polars as pl # doctest: +SKIP
>>> from altair.utils.schemapi import _get_optional_modules # doctest: +SKIP
>>>
>>> _get_optional_modules(pd="pandas", pl="polars", ibis="ibis") # doctest: +SKIP
{
"pd": <module 'pandas' from '...'>,
"pl": <module 'polars' from '...'>,
"ibis": None,
}
If the user later imports ``ibis``, it would appear in subsequent calls.
>>> import ibis # doctest: +SKIP
>>>
>>> _get_optional_modules(ibis="ibis") # doctest: +SKIP
{
"ibis": <module 'ibis' from '...'>,
}
"""
return {k: sys.modules.get(v) for k, v in modules.items()}
def _replace_parsed_shorthand(
    parsed_shorthand: dict[str, Any], kwds: dict[str, Any]
) -> dict[str, Any]:
    """
    `parsed_shorthand` is added by `FieldChannelMixin`.

    It's used below to replace shorthand with its long form equivalent
    `parsed_shorthand` is removed from `context` if it exists so that it is
    not passed to child `to_dict` function calls.
    """
    # Prevent that pandas categorical data is automatically sorted
    # when a non-ordinal data type is specified manually
    # or if the encoding channel does not support sorting
    drop_sort = "sort" in parsed_shorthand and (
        "sort" not in kwds or kwds["type"] not in {"ordinal", Undefined}
    )
    if drop_sort:
        parsed_shorthand.pop("sort")

    # Merge in parsed values, but never overwrite an explicitly-set kwd.
    for key, value in parsed_shorthand.items():
        if kwds.get(key, Undefined) is Undefined:
            kwds[key] = value
    return kwds
TSchemaBase = TypeVar("TSchemaBase", bound=SchemaBase)
_CopyImpl = TypeVar("_CopyImpl", SchemaBase, dict[Any, Any], list[Any])
"""
Types which have an implementation in ``SchemaBase.copy()``.
All other types are returned **by reference**.
"""
def _is_dict(obj: Any | dict[Any, Any]) -> TypeIs[dict[Any, Any]]:
return isinstance(obj, dict)
def _is_list(obj: Any | list[Any]) -> TypeIs[list[Any]]:
return isinstance(obj, list)
def _is_iterable(
obj: Any, *, exclude: type | tuple[type, ...] = (str, bytes)
) -> TypeIs[Iterable[Any]]:
return not isinstance(obj, exclude) and isinstance(obj, Iterable)
def _passthrough(*args: Any, **kwds: Any) -> Any | dict[str, Any]:
return args[0] if args else kwds
| SchemaBase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.