language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_heapq.py | {
"start": 11123,
"end": 11245
} | class ____:
"Dummy sequence class defining __len__ but not __getitem__."
def __len__(self):
return 10
| LenOnly |
python | dagster-io__dagster | helm/dagster/schema/schema/charts/utils/kubernetes.py | {
"start": 2591,
"end": 2805
} | class ____(
RootModel[dict[str, Any]],
json_schema_extra={
"$ref": create_definition_ref("io.k8s.api.core.v1.SecurityContext"),
"additionalProperties": True,
},
):
pass
| SecurityContext |
python | pypa__pipenv | pipenv/patched/pip/_internal/resolution/resolvelib/found_candidates.py | {
"start": 3960,
"end": 6030
} | class ____(Sequence[Candidate]):
"""A lazy sequence to provide candidates to the resolver.
The intended usage is to return this from `find_matches()` so the resolver
can iterate through the sequence multiple times, but only access the index
page when remote packages are actually needed. This improve performances
when suitable candidates are already installed on disk.
"""
def __init__(
self,
get_infos: Callable[[], Iterator[IndexCandidateInfo]],
installed: Optional[Candidate],
prefers_installed: bool,
incompatible_ids: Set[int],
):
self._get_infos = get_infos
self._installed = installed
self._prefers_installed = prefers_installed
self._incompatible_ids = incompatible_ids
self._bool: Optional[bool] = None
def __getitem__(self, index: Any) -> Any:
# Implemented to satisfy the ABC check. This is not needed by the
# resolver, and should not be used by the provider either (for
# performance reasons).
raise NotImplementedError("don't do this")
def __iter__(self) -> Iterator[Candidate]:
infos = self._get_infos()
if not self._installed:
iterator = _iter_built(infos)
elif self._prefers_installed:
iterator = _iter_built_with_prepended(self._installed, infos)
else:
iterator = _iter_built_with_inserted(self._installed, infos)
return (c for c in iterator if id(c) not in self._incompatible_ids)
def __len__(self) -> int:
# Implemented to satisfy the ABC check. This is not needed by the
# resolver, and should not be used by the provider either (for
# performance reasons).
raise NotImplementedError("don't do this")
def __bool__(self) -> bool:
if self._bool is not None:
return self._bool
if self._prefers_installed and self._installed:
self._bool = True
return True
self._bool = any(self)
return self._bool
| FoundCandidates |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/d.py | {
"start": 467,
"end": 631
} | class ____(d):
run_str = '${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_HDR_F:tgt.outputs[1].bldpath()} ${D_SRC_F:SRC} ${D_TGT_F:tgt.outputs[0].bldpath()}'
| d_with_header |
python | django__django | django/db/backends/postgresql/base.py | {
"start": 3002,
"end": 23670
} | class ____(BaseDatabaseWrapper):
vendor = "postgresql"
display_name = "PostgreSQL"
# This dictionary maps Field objects to their associated PostgreSQL column
# types, as strings. Column-type strings can contain format strings;
# they'll be interpolated against the values of Field.__dict__ before being
# output. If a column type is set to None, it won't be included in the
# output.
data_types = {
"AutoField": "integer",
"BigAutoField": "bigint",
"BinaryField": "bytea",
"BooleanField": "boolean",
"CharField": _get_varchar_column,
"DateField": "date",
"DateTimeField": "timestamp with time zone",
"DecimalField": _get_decimal_column,
"DurationField": "interval",
"FileField": "varchar(%(max_length)s)",
"FilePathField": "varchar(%(max_length)s)",
"FloatField": "double precision",
"IntegerField": "integer",
"BigIntegerField": "bigint",
"IPAddressField": "inet",
"GenericIPAddressField": "inet",
"JSONField": "jsonb",
"PositiveBigIntegerField": "bigint",
"PositiveIntegerField": "integer",
"PositiveSmallIntegerField": "smallint",
"SlugField": "varchar(%(max_length)s)",
"SmallAutoField": "smallint",
"SmallIntegerField": "smallint",
"TextField": "text",
"TimeField": "time",
"UUIDField": "uuid",
}
data_type_check_constraints = {
"PositiveBigIntegerField": '"%(column)s" >= 0',
"PositiveIntegerField": '"%(column)s" >= 0',
"PositiveSmallIntegerField": '"%(column)s" >= 0',
}
data_types_suffix = {
"AutoField": "GENERATED BY DEFAULT AS IDENTITY",
"BigAutoField": "GENERATED BY DEFAULT AS IDENTITY",
"SmallAutoField": "GENERATED BY DEFAULT AS IDENTITY",
}
operators = {
"exact": "= %s",
"iexact": "= UPPER(%s)",
"contains": "LIKE %s",
"icontains": "LIKE UPPER(%s)",
"regex": "~ %s",
"iregex": "~* %s",
"gt": "> %s",
"gte": ">= %s",
"lt": "< %s",
"lte": "<= %s",
"startswith": "LIKE %s",
"endswith": "LIKE %s",
"istartswith": "LIKE UPPER(%s)",
"iendswith": "LIKE UPPER(%s)",
}
# The patterns below are used to generate SQL pattern lookup clauses when
# the right-hand side of the lookup isn't a raw string (it might be an
# expression or the result of a bilateral transformation). In those cases,
# special characters for LIKE operators (e.g. \, *, _) should be escaped on
# database side.
#
# Note: we use str.format() here for readability as '%' is used as a
# wildcard for the LIKE operator.
pattern_esc = (
r"REPLACE(REPLACE(REPLACE({}, E'\\', E'\\\\'), E'%%', E'\\%%'), E'_', E'\\_')"
)
pattern_ops = {
"contains": "LIKE '%%' || {} || '%%'",
"icontains": "LIKE '%%' || UPPER({}) || '%%'",
"startswith": "LIKE {} || '%%'",
"istartswith": "LIKE UPPER({}) || '%%'",
"endswith": "LIKE '%%' || {}",
"iendswith": "LIKE '%%' || UPPER({})",
}
Database = Database
SchemaEditorClass = DatabaseSchemaEditor
# Classes instantiated in __init__().
client_class = DatabaseClient
creation_class = DatabaseCreation
features_class = DatabaseFeatures
introspection_class = DatabaseIntrospection
ops_class = DatabaseOperations
# PostgreSQL backend-specific attributes.
_named_cursor_idx = 0
_connection_pools = {}
@property
def pool(self):
pool_options = self.settings_dict["OPTIONS"].get("pool")
if self.alias == NO_DB_ALIAS or not pool_options:
return None
if self.alias not in self._connection_pools:
if self.settings_dict.get("CONN_MAX_AGE", 0) != 0:
raise ImproperlyConfigured(
"Pooling doesn't support persistent connections."
)
# Set the default options.
if pool_options is True:
pool_options = {}
try:
from psycopg_pool import ConnectionPool
except ImportError as err:
raise ImproperlyConfigured(
"Error loading psycopg_pool module.\nDid you install psycopg[pool]?"
) from err
connect_kwargs = self.get_connection_params()
# Ensure we run in autocommit, Django properly sets it later on.
connect_kwargs["autocommit"] = True
enable_checks = self.settings_dict["CONN_HEALTH_CHECKS"]
pool = ConnectionPool(
kwargs=connect_kwargs,
open=False, # Do not open the pool during startup.
configure=self._configure_connection,
check=ConnectionPool.check_connection if enable_checks else None,
**pool_options,
)
# setdefault() ensures that multiple threads don't set this in
# parallel. Since we do not open the pool during it's init above,
# this means that at worst during startup multiple threads generate
# pool objects and the first to set it wins.
self._connection_pools.setdefault(self.alias, pool)
return self._connection_pools[self.alias]
def close_pool(self):
if self.pool:
self.pool.close()
del self._connection_pools[self.alias]
def get_database_version(self):
"""
Return a tuple of the database's version.
E.g. for pg_version 120004, return (12, 4).
"""
return divmod(self.pg_version, 10000)
def get_connection_params(self):
settings_dict = self.settings_dict
# None may be used to connect to the default 'postgres' db
if settings_dict["NAME"] == "" and not settings_dict["OPTIONS"].get("service"):
raise ImproperlyConfigured(
"settings.DATABASES is improperly configured. "
"Please supply the NAME or OPTIONS['service'] value."
)
if len(settings_dict["NAME"] or "") > self.ops.max_name_length():
raise ImproperlyConfigured(
"The database name '%s' (%d characters) is longer than "
"PostgreSQL's limit of %d characters. Supply a shorter NAME "
"in settings.DATABASES."
% (
settings_dict["NAME"],
len(settings_dict["NAME"]),
self.ops.max_name_length(),
)
)
if settings_dict["NAME"]:
conn_params = {
"dbname": settings_dict["NAME"],
**settings_dict["OPTIONS"],
}
elif settings_dict["NAME"] is None:
# Connect to the default 'postgres' db.
settings_dict["OPTIONS"].pop("service", None)
conn_params = {"dbname": "postgres", **settings_dict["OPTIONS"]}
else:
conn_params = {**settings_dict["OPTIONS"]}
conn_params["client_encoding"] = "UTF8"
conn_params.pop("assume_role", None)
conn_params.pop("isolation_level", None)
pool_options = conn_params.pop("pool", None)
if pool_options and not is_psycopg3:
raise ImproperlyConfigured("Database pooling requires psycopg >= 3")
server_side_binding = conn_params.pop("server_side_binding", None)
conn_params.setdefault(
"cursor_factory",
(
ServerBindingCursor
if is_psycopg3 and server_side_binding is True
else Cursor
),
)
if settings_dict["USER"]:
conn_params["user"] = settings_dict["USER"]
if settings_dict["PASSWORD"]:
conn_params["password"] = settings_dict["PASSWORD"]
if settings_dict["HOST"]:
conn_params["host"] = settings_dict["HOST"]
if settings_dict["PORT"]:
conn_params["port"] = settings_dict["PORT"]
if is_psycopg3:
conn_params["context"] = get_adapters_template(
settings.USE_TZ, self.timezone
)
# Disable prepared statements by default to keep connection poolers
# working. Can be reenabled via OPTIONS in the settings dict.
conn_params["prepare_threshold"] = conn_params.pop(
"prepare_threshold", None
)
return conn_params
@async_unsafe
def get_new_connection(self, conn_params):
# self.isolation_level must be set:
# - after connecting to the database in order to obtain the database's
# default when no value is explicitly specified in options.
# - before calling _set_autocommit() because if autocommit is on, that
# will set connection.isolation_level to ISOLATION_LEVEL_AUTOCOMMIT.
options = self.settings_dict["OPTIONS"]
set_isolation_level = False
try:
isolation_level_value = options["isolation_level"]
except KeyError:
self.isolation_level = IsolationLevel.READ_COMMITTED
else:
# Set the isolation level to the value from OPTIONS.
try:
self.isolation_level = IsolationLevel(isolation_level_value)
set_isolation_level = True
except ValueError:
raise ImproperlyConfigured(
f"Invalid transaction isolation level {isolation_level_value} "
f"specified. Use one of the psycopg.IsolationLevel values."
)
if self.pool:
# If nothing else has opened the pool, open it now.
self.pool.open()
connection = self.pool.getconn()
else:
connection = self.Database.connect(**conn_params)
if set_isolation_level:
connection.isolation_level = self.isolation_level
if not is_psycopg3:
# Register dummy loads() to avoid a round trip from psycopg2's
# decode to json.dumps() to json.loads(), when using a custom
# decoder in JSONField.
psycopg2.extras.register_default_jsonb(
conn_or_curs=connection, loads=lambda x: x
)
return connection
def ensure_timezone(self):
# Close the pool so new connections pick up the correct timezone.
self.close_pool()
if self.connection is None:
return False
return self._configure_timezone(self.connection)
def _configure_timezone(self, connection):
conn_timezone_name = connection.info.parameter_status("TimeZone")
timezone_name = self.timezone_name
if timezone_name and conn_timezone_name != timezone_name:
with connection.cursor() as cursor:
cursor.execute(self.ops.set_time_zone_sql(), [timezone_name])
return True
return False
def _configure_role(self, connection):
if new_role := self.settings_dict["OPTIONS"].get("assume_role"):
with connection.cursor() as cursor:
sql = self.ops.compose_sql("SET ROLE %s", [new_role])
cursor.execute(sql)
return True
return False
def _configure_connection(self, connection):
# This function is called from init_connection_state and from the
# psycopg pool itself after a connection is opened.
# Commit after setting the time zone.
commit_tz = self._configure_timezone(connection)
# Set the role on the connection. This is useful if the credential used
# to login is not the same as the role that owns database resources. As
# can be the case when using temporary or ephemeral credentials.
commit_role = self._configure_role(connection)
return commit_role or commit_tz
def _close(self):
if self.connection is not None:
# `wrap_database_errors` only works for `putconn` as long as there
# is no `reset` function set in the pool because it is deferred
# into a thread and not directly executed.
with self.wrap_database_errors:
if self.pool:
# Ensure the correct pool is returned. This is a workaround
# for tests so a pool can be changed on setting changes
# (e.g. USE_TZ, TIME_ZONE).
self.connection._pool.putconn(self.connection)
# Connection can no longer be used.
self.connection = None
else:
return self.connection.close()
def init_connection_state(self):
super().init_connection_state()
if self.connection is not None and not self.pool:
commit = self._configure_connection(self.connection)
if commit and not self.get_autocommit():
self.connection.commit()
@async_unsafe
def create_cursor(self, name=None):
if name:
if is_psycopg3 and (
self.settings_dict["OPTIONS"].get("server_side_binding") is not True
):
# psycopg >= 3 forces the usage of server-side bindings for
# named cursors so a specialized class that implements
# server-side cursors while performing client-side bindings
# must be used if `server_side_binding` is disabled (default).
cursor = ServerSideCursor(
self.connection,
name=name,
scrollable=False,
withhold=self.connection.autocommit,
)
else:
# In autocommit mode, the cursor will be used outside of a
# transaction, hence use a holdable cursor.
cursor = self.connection.cursor(
name, scrollable=False, withhold=self.connection.autocommit
)
else:
cursor = self.connection.cursor()
if is_psycopg3:
# Register the cursor timezone only if the connection disagrees, to
# avoid copying the adapter map.
tzloader = self.connection.adapters.get_loader(TIMESTAMPTZ_OID, Format.TEXT)
if self.timezone != tzloader.timezone:
register_tzloader(self.timezone, cursor)
else:
cursor.tzinfo_factory = self.tzinfo_factory if settings.USE_TZ else None
return cursor
def tzinfo_factory(self, offset):
return self.timezone
@async_unsafe
def chunked_cursor(self):
self._named_cursor_idx += 1
# Get the current async task
# Note that right now this is behind @async_unsafe, so this is
# unreachable, but in future we'll start loosening this restriction.
# For now, it's here so that every use of "threading" is
# also async-compatible.
try:
current_task = asyncio.current_task()
except RuntimeError:
current_task = None
# Current task can be none even if the current_task call didn't error
if current_task:
task_ident = str(id(current_task))
else:
task_ident = "sync"
# Use that and the thread ident to get a unique name
return self._cursor(
name="_django_curs_%d_%s_%d"
% (
# Avoid reusing name in other threads / tasks
threading.current_thread().ident,
task_ident,
self._named_cursor_idx,
)
)
def _set_autocommit(self, autocommit):
with self.wrap_database_errors:
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None):
"""
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
with self.cursor() as cursor:
cursor.execute("SET CONSTRAINTS ALL IMMEDIATE")
cursor.execute("SET CONSTRAINTS ALL DEFERRED")
def is_usable(self):
if self.connection is None:
return False
try:
# Use a psycopg cursor directly, bypassing Django's utilities.
with self.connection.cursor() as cursor:
cursor.execute("SELECT 1")
except Database.Error:
return False
else:
return True
def close_if_health_check_failed(self):
if self.pool:
# The pool only returns healthy connections.
return
return super().close_if_health_check_failed()
@contextmanager
def _nodb_cursor(self):
cursor = None
try:
with super()._nodb_cursor() as cursor:
yield cursor
except (Database.DatabaseError, WrappedDatabaseError):
if cursor is not None:
raise
warnings.warn(
"Normally Django will use a connection to the 'postgres' database "
"to avoid running initialization queries against the production "
"database when it's not needed (for example, when running tests). "
"Django was unable to create a connection to the 'postgres' database "
"and will use the first PostgreSQL database instead.",
RuntimeWarning,
)
for connection in connections.all():
if (
connection.vendor == "postgresql"
and connection.settings_dict["NAME"] != "postgres"
):
conn = self.__class__(
{
**self.settings_dict,
"NAME": connection.settings_dict["NAME"],
},
alias=self.alias,
)
try:
with conn.cursor() as cursor:
yield cursor
finally:
conn.close()
break
else:
raise
@cached_property
def pg_version(self):
with self.temporary_connection():
return self.connection.info.server_version
def make_debug_cursor(self, cursor):
return CursorDebugWrapper(cursor, self)
if is_psycopg3:
class CursorMixin:
"""
A subclass of psycopg cursor implementing callproc.
"""
def callproc(self, name, args=None):
if not isinstance(name, sql.Identifier):
name = sql.Identifier(name)
qparts = [sql.SQL("SELECT * FROM "), name, sql.SQL("(")]
if args:
for item in args:
qparts.append(sql.Literal(item))
qparts.append(sql.SQL(","))
del qparts[-1]
qparts.append(sql.SQL(")"))
stmt = sql.Composed(qparts)
self.execute(stmt)
return args
class ServerBindingCursor(CursorMixin, Database.Cursor):
pass
class Cursor(CursorMixin, Database.ClientCursor):
pass
class ServerSideCursor(
CursorMixin, Database.client_cursor.ClientCursorMixin, Database.ServerCursor
):
"""
psycopg >= 3 forces the usage of server-side bindings when using named
cursors but the ORM doesn't yet support the systematic generation of
prepareable SQL (#20516).
ClientCursorMixin forces the usage of client-side bindings while
ServerCursor implements the logic required to declare and scroll
through named cursors.
Mixing ClientCursorMixin in wouldn't be necessary if Cursor allowed to
specify how parameters should be bound instead, which ServerCursor
would inherit, but that's not the case.
"""
class CursorDebugWrapper(BaseCursorDebugWrapper):
def copy(self, statement):
with self.debug_sql(statement):
return self.cursor.copy(statement)
else:
Cursor = psycopg2.extensions.cursor
class CursorDebugWrapper(BaseCursorDebugWrapper):
def copy_expert(self, sql, file, *args):
with self.debug_sql(sql):
return self.cursor.copy_expert(sql, file, *args)
def copy_to(self, file, table, *args, **kwargs):
with self.debug_sql(sql="COPY %s TO STDOUT" % table):
return self.cursor.copy_to(file, table, *args, **kwargs)
| DatabaseWrapper |
python | plotly__plotly.py | plotly/graph_objs/carpet/_baxis.py | {
"start": 233,
"end": 60421
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "carpet"
_path_str = "carpet.baxis"
_valid_props = {
"arraydtick",
"arraytick0",
"autorange",
"autotypenumbers",
"categoryarray",
"categoryarraysrc",
"categoryorder",
"cheatertype",
"color",
"dtick",
"endline",
"endlinecolor",
"endlinewidth",
"exponentformat",
"fixedrange",
"gridcolor",
"griddash",
"gridwidth",
"labelalias",
"labelpadding",
"labelprefix",
"labelsuffix",
"linecolor",
"linewidth",
"minexponent",
"minorgridcolor",
"minorgridcount",
"minorgriddash",
"minorgridwidth",
"nticks",
"range",
"rangemode",
"separatethousands",
"showexponent",
"showgrid",
"showline",
"showticklabels",
"showtickprefix",
"showticksuffix",
"smoothing",
"startline",
"startlinecolor",
"startlinewidth",
"tick0",
"tickangle",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"tickmode",
"tickprefix",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"title",
"type",
}
@property
def arraydtick(self):
"""
The stride between grid lines along the axis
The 'arraydtick' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 9223372036854775807]
Returns
-------
int
"""
return self["arraydtick"]
@arraydtick.setter
def arraydtick(self, val):
self["arraydtick"] = val
@property
def arraytick0(self):
"""
The starting index of grid lines along the axis
The 'arraytick0' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["arraytick0"]
@arraytick0.setter
def arraytick0(self, val):
self["arraytick0"] = val
@property
def autorange(self):
"""
Determines whether or not the range of this axis is computed in
relation to the input data. See `rangemode` for more info. If
`range` is provided, then `autorange` is set to False.
The 'autorange' property is an enumeration that may be specified as:
- One of the following enumeration values:
[True, False, 'reversed']
Returns
-------
Any
"""
return self["autorange"]
@autorange.setter
def autorange(self, val):
self["autorange"] = val
@property
def autotypenumbers(self):
"""
Using "strict" a numeric string in trace data is not converted
to a number. Using *convert types* a numeric string in trace
data may be treated as a number during automatic axis `type`
detection. Defaults to layout.autotypenumbers.
The 'autotypenumbers' property is an enumeration that may be specified as:
- One of the following enumeration values:
['convert types', 'strict']
Returns
-------
Any
"""
return self["autotypenumbers"]
@autotypenumbers.setter
def autotypenumbers(self, val):
self["autotypenumbers"] = val
@property
def categoryarray(self):
"""
Sets the order in which categories on this axis appear. Only
has an effect if `categoryorder` is set to "array". Used with
`categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
@property
def categoryorder(self):
"""
Specifies the ordering logic for the case of categorical
variables. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
@property
def cheatertype(self):
"""
The 'cheatertype' property is an enumeration that may be specified as:
- One of the following enumeration values:
['index', 'value']
Returns
-------
Any
"""
return self["cheatertype"]
@cheatertype.setter
def cheatertype(self, val):
self["cheatertype"] = val
@property
def color(self):
"""
Sets default for all colors associated with this axis all at
once: line, font, tick, and grid colors. Grid color is
lightened by blending this with the plot background Individual
pieces can override this.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dtick(self):
"""
The stride between grid lines along the axis
The 'dtick' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
@property
def endline(self):
"""
Determines whether or not a line is drawn at along the final
value of this axis. If True, the end line is drawn on top of
the grid lines.
The 'endline' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["endline"]
@endline.setter
def endline(self, val):
self["endline"] = val
@property
def endlinecolor(self):
"""
Sets the line color of the end line.
The 'endlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["endlinecolor"]
@endlinecolor.setter
def endlinecolor(self, val):
self["endlinecolor"] = val
@property
def endlinewidth(self):
"""
Sets the width (in px) of the end line.
The 'endlinewidth' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["endlinewidth"]
@endlinewidth.setter
def endlinewidth(self, val):
self["endlinewidth"] = val
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B', 'SI extended']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
@property
def fixedrange(self):
"""
Determines whether or not this axis is zoom-able. If true, then
zoom is disabled.
The 'fixedrange' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["fixedrange"]
@fixedrange.setter
def fixedrange(self, val):
self["fixedrange"] = val
@property
def gridcolor(self):
"""
Sets the axis line color.
The 'gridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["gridcolor"]
@gridcolor.setter
def gridcolor(self, val):
self["gridcolor"] = val
@property
def griddash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'griddash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["griddash"]
@griddash.setter
def griddash(self, val):
self["griddash"] = val
@property
def gridwidth(self):
"""
Sets the width (in px) of the axis line.
The 'gridwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["gridwidth"]
@gridwidth.setter
def gridwidth(self, val):
self["gridwidth"] = val
@property
def labelalias(self):
"""
Replacement text for specific tick or hover labels. For example
using {US: 'USA', CA: 'Canada'} changes US to USA and CA to
Canada. The labels we would have shown must match the keys
exactly, after adding any tickprefix or ticksuffix. For
negative numbers the minus sign symbol used (U+2212) is wider
than the regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis type, and
both keys (if needed) and values (if desired) can include html-
like tags or MathJax.
The 'labelalias' property accepts values of any type
Returns
-------
Any
"""
return self["labelalias"]
@labelalias.setter
def labelalias(self, val):
self["labelalias"] = val
@property
def labelpadding(self):
"""
Extra padding between label and the axis
The 'labelpadding' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
Returns
-------
int
"""
return self["labelpadding"]
@labelpadding.setter
def labelpadding(self, val):
self["labelpadding"] = val
@property
def labelprefix(self):
"""
Sets a axis label prefix.
The 'labelprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["labelprefix"]
@labelprefix.setter
def labelprefix(self, val):
self["labelprefix"] = val
@property
def labelsuffix(self):
"""
Sets a axis label suffix.
The 'labelsuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["labelsuffix"]
@labelsuffix.setter
def labelsuffix(self, val):
self["labelsuffix"] = val
@property
def linecolor(self):
"""
Sets the axis line color.
The 'linecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["linecolor"]
@linecolor.setter
def linecolor(self, val):
self["linecolor"] = val
@property
def linewidth(self):
"""
Sets the width (in px) of the axis line.
The 'linewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["linewidth"]
@linewidth.setter
def linewidth(self, val):
self["linewidth"] = val
@property
def minexponent(self):
"""
Hide SI prefix for 10^n if |n| is below this number
The 'minexponent' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["minexponent"]
@minexponent.setter
def minexponent(self, val):
self["minexponent"] = val
@property
def minorgridcolor(self):
"""
Sets the color of the grid lines.
The 'minorgridcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["minorgridcolor"]
@minorgridcolor.setter
def minorgridcolor(self, val):
self["minorgridcolor"] = val
@property
def minorgridcount(self):
"""
Sets the number of minor grid ticks per major grid tick
The 'minorgridcount' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["minorgridcount"]
@minorgridcount.setter
def minorgridcount(self, val):
self["minorgridcount"] = val
@property
def minorgriddash(self):
    """
    Sets the dash style of lines. Set to a dash type string ("solid",
    "dot", "dash", "longdash", "dashdot", or "longdashdot") or a dash
    length list in px (eg "5px,10px,2px,2px").

    Accepts one of the named dash styles above, or a string containing
    a dash length list in pixels or percentages (e.g. '5px 10px 2px
    2px', '5, 10, 2, 2', '10% 20% 40%', etc.).

    Returns
    -------
    str
    """
    dash = self["minorgriddash"]
    return dash

@minorgriddash.setter
def minorgriddash(self, val):
    self["minorgriddash"] = val
@property
def minorgridwidth(self):
    """
    Sets the width (in px) of the grid lines.

    Accepts an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    width = self["minorgridwidth"]
    return width

@minorgridwidth.setter
def minorgridwidth(self, val):
    self["minorgridwidth"] = val
@property
def nticks(self):
    """
    Specifies the maximum number of ticks for the particular axis. The
    actual number of ticks will be chosen automatically to be less than
    or equal to `nticks`. Has an effect only if `tickmode` is set to
    "auto".

    Accepts an int (or a float that will be cast to an int) in the
    interval [0, 9223372036854775807].

    Returns
    -------
    int
    """
    count = self["nticks"]
    return count

@nticks.setter
def nticks(self, val):
    self["nticks"] = val
@property
def range(self):
    """
    Sets the range of this axis. If the axis `type` is "log", then you
    must take the log of your desired range (e.g. to set the range from
    1 to 100, set the range from 0 to 2). If the axis `type` is "date",
    it should be date strings, like date data, though Date objects and
    unix milliseconds will be accepted and converted to strings. If the
    axis `type` is "category", it should be numbers, using the scale
    where each category is assigned a serial number from zero in the
    order it appears.

    Accepts an info array: a list or tuple of 2 elements, where each
    element may be a value of any type.

    Returns
    -------
    list
    """
    bounds = self["range"]
    return bounds

@range.setter
def range(self, val):
    self["range"] = val
@property
def rangemode(self):
    """
    If "normal", the range is computed in relation to the extrema of
    the input data. If "tozero", the range extends to 0, regardless of
    the input data. If "nonnegative", the range is non-negative,
    regardless of the input data.

    Accepts one of the enumeration values: 'normal', 'tozero',
    'nonnegative'.

    Returns
    -------
    Any
    """
    mode = self["rangemode"]
    return mode

@rangemode.setter
def rangemode(self, val):
    self["rangemode"] = val
@property
def separatethousands(self):
    """
    If "true", even 4-digit integers are separated.

    Accepts a bool (either True or False).

    Returns
    -------
    bool
    """
    flag = self["separatethousands"]
    return flag

@separatethousands.setter
def separatethousands(self, val):
    self["separatethousands"] = val
@property
def showexponent(self):
    """
    If "all", all exponents are shown besides their significands. If
    "first", only the exponent of the first tick is shown. If "last",
    only the exponent of the last tick is shown. If "none", no
    exponents appear.

    Accepts one of the enumeration values: 'all', 'first', 'last',
    'none'.

    Returns
    -------
    Any
    """
    mode = self["showexponent"]
    return mode

@showexponent.setter
def showexponent(self, val):
    self["showexponent"] = val
@property
def showgrid(self):
    """
    Determines whether or not grid lines are drawn. If True, the grid
    lines are drawn at every tick mark.

    Accepts a bool (either True or False).

    Returns
    -------
    bool
    """
    flag = self["showgrid"]
    return flag

@showgrid.setter
def showgrid(self, val):
    self["showgrid"] = val
@property
def showline(self):
    """
    Determines whether or not a line bounding this axis is drawn.

    Accepts a bool (either True or False).

    Returns
    -------
    bool
    """
    flag = self["showline"]
    return flag

@showline.setter
def showline(self, val):
    self["showline"] = val
@property
def showticklabels(self):
    """
    Determines whether axis labels are drawn on the low side, the high
    side, both, or neither side of the axis.

    Accepts one of the enumeration values: 'start', 'end', 'both',
    'none'.

    Returns
    -------
    Any
    """
    mode = self["showticklabels"]
    return mode

@showticklabels.setter
def showticklabels(self, val):
    self["showticklabels"] = val
@property
def showtickprefix(self):
    """
    If "all", all tick labels are displayed with a prefix. If "first",
    only the first tick is displayed with a prefix. If "last", only the
    last tick is displayed with a suffix. If "none", tick prefixes are
    hidden.

    Accepts one of the enumeration values: 'all', 'first', 'last',
    'none'.

    Returns
    -------
    Any
    """
    mode = self["showtickprefix"]
    return mode

@showtickprefix.setter
def showtickprefix(self, val):
    self["showtickprefix"] = val
@property
def showticksuffix(self):
    """
    Same as `showtickprefix` but for tick suffixes.

    Accepts one of the enumeration values: 'all', 'first', 'last',
    'none'.

    Returns
    -------
    Any
    """
    mode = self["showticksuffix"]
    return mode

@showticksuffix.setter
def showticksuffix(self, val):
    self["showticksuffix"] = val
@property
def smoothing(self):
    """
    Accepts an int or float in the interval [0, 1.3].

    Returns
    -------
    int|float
    """
    value = self["smoothing"]
    return value

@smoothing.setter
def smoothing(self, val):
    self["smoothing"] = val
@property
def startline(self):
    """
    Determines whether or not a line is drawn at along the starting
    value of this axis. If True, the start line is drawn on top of the
    grid lines.

    Accepts a bool (either True or False).

    Returns
    -------
    bool
    """
    flag = self["startline"]
    return flag

@startline.setter
def startline(self, val):
    self["startline"] = val
@property
def startlinecolor(self):
    """
    Sets the line color of the start line.

    Accepts a color: a hex string (e.g. '#ff0000'), an rgb/rgba string
    (e.g. 'rgb(255,0,0)'), an hsl/hsla string (e.g. 'hsl(0,100%,50%)'),
    an hsv/hsva string (e.g. 'hsv(0,100%,100%)'), or a named CSS color
    (see https://plotly.com/python/css-colors/ for a list).

    Returns
    -------
    str
    """
    color = self["startlinecolor"]
    return color

@startlinecolor.setter
def startlinecolor(self, val):
    self["startlinecolor"] = val
@property
def startlinewidth(self):
    """
    Sets the width (in px) of the start line.

    Accepts an int or float.

    Returns
    -------
    int|float
    """
    width = self["startlinewidth"]
    return width

@startlinewidth.setter
def startlinewidth(self, val):
    self["startlinewidth"] = val
@property
def tick0(self):
    """
    The starting index of grid lines along the axis.

    Accepts an int or float in the interval [0, inf].

    Returns
    -------
    int|float
    """
    start = self["tick0"]
    return start

@tick0.setter
def tick0(self, val):
    self["tick0"] = val
@property
def tickangle(self):
    """
    Sets the angle of the tick labels with respect to the horizontal.
    For example, a `tickangle` of -90 draws the tick labels vertically.

    Accepts an angle (in degrees) specified as a number between -180
    and 180; numeric values outside this range are converted to the
    equivalent value (e.g. 270 is converted to -90).

    Returns
    -------
    int|float
    """
    angle = self["tickangle"]
    return angle

@tickangle.setter
def tickangle(self, val):
    self["tickangle"] = val
@property
def tickfont(self):
    """
    Sets the tick font.

    Accepts an instance of
    :class:`plotly.graph_objs.carpet.baxis.Tickfont`, or a dict of
    string/value properties that will be passed to the Tickfont
    constructor.

    Returns
    -------
    plotly.graph_objs.carpet.baxis.Tickfont
    """
    font = self["tickfont"]
    return font

@tickfont.setter
def tickfont(self, val):
    self["tickfont"] = val
@property
def tickformat(self):
    """
    Sets the tick label formatting rule using d3 formatting mini-
    languages which are very similar to those in Python. For numbers,
    see: https://github.com/d3/d3-format/tree/v1.4.5#d3-format. And for
    dates see:
    https://github.com/d3/d3-time-format/tree/v2.2.3#locale_format. We
    add two items to d3's date formatter: "%h" for half of the year as
    a decimal number as well as "%{n}f" for fractional seconds with n
    digits. For example, *2016-10-13 09:15:23.456* with tickformat
    "%H~%M~%S.%2f" would display "09~15~23.46"

    Accepts a string, or a number that will be converted to a string.

    Returns
    -------
    str
    """
    fmt = self["tickformat"]
    return fmt

@tickformat.setter
def tickformat(self, val):
    self["tickformat"] = val
@property
def tickformatstops(self):
    """
    Accepts a tuple of Tickformatstop instances: a list or tuple of
    instances of plotly.graph_objs.carpet.baxis.Tickformatstop, or a
    list or tuple of dicts of string/value properties that will be
    passed to the Tickformatstop constructor.

    Returns
    -------
    tuple[plotly.graph_objs.carpet.baxis.Tickformatstop]
    """
    stops = self["tickformatstops"]
    return stops

@tickformatstops.setter
def tickformatstops(self, val):
    self["tickformatstops"] = val
@property
def tickformatstopdefaults(self):
    """
    When used in a template (as
    layout.template.data.carpet.baxis.tickformatstopdefaults), sets the
    default property values to use for elements of
    carpet.baxis.tickformatstops.

    Accepts an instance of
    :class:`plotly.graph_objs.carpet.baxis.Tickformatstop`, or a dict
    of string/value properties that will be passed to the
    Tickformatstop constructor.

    Returns
    -------
    plotly.graph_objs.carpet.baxis.Tickformatstop
    """
    defaults = self["tickformatstopdefaults"]
    return defaults

@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
    self["tickformatstopdefaults"] = val
@property
def tickmode(self):
    """
    Accepts one of the enumeration values: 'linear', 'array'.

    Returns
    -------
    Any
    """
    mode = self["tickmode"]
    return mode

@tickmode.setter
def tickmode(self, val):
    self["tickmode"] = val
@property
def tickprefix(self):
    """
    Sets a tick label prefix.

    Accepts a string, or a number that will be converted to a string.

    Returns
    -------
    str
    """
    prefix = self["tickprefix"]
    return prefix

@tickprefix.setter
def tickprefix(self, val):
    self["tickprefix"] = val
@property
def ticksuffix(self):
    """
    Sets a tick label suffix.

    Accepts a string, or a number that will be converted to a string.

    Returns
    -------
    str
    """
    suffix = self["ticksuffix"]
    return suffix

@ticksuffix.setter
def ticksuffix(self, val):
    self["ticksuffix"] = val
@property
def ticktext(self):
    """
    Sets the text displayed at the ticks position via `tickvals`. Only
    has an effect if `tickmode` is set to "array". Used with
    `tickvals`.

    Accepts an array: a tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    labels = self["ticktext"]
    return labels

@ticktext.setter
def ticktext(self, val):
    self["ticktext"] = val
@property
def ticktextsrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `ticktext`.

    Accepts a string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    src = self["ticktextsrc"]
    return src

@ticktextsrc.setter
def ticktextsrc(self, val):
    self["ticktextsrc"] = val
@property
def tickvals(self):
    """
    Sets the values at which ticks on this axis appear. Only has an
    effect if `tickmode` is set to "array". Used with `ticktext`.

    Accepts an array: a tuple, list, numpy array, or pandas Series.

    Returns
    -------
    numpy.ndarray
    """
    values = self["tickvals"]
    return values

@tickvals.setter
def tickvals(self, val):
    self["tickvals"] = val
@property
def tickvalssrc(self):
    """
    Sets the source reference on Chart Studio Cloud for `tickvals`.

    Accepts a string or a plotly.grid_objs.Column object.

    Returns
    -------
    str
    """
    src = self["tickvalssrc"]
    return src

@tickvalssrc.setter
def tickvalssrc(self, val):
    self["tickvalssrc"] = val
@property
def title(self):
    """
    Accepts an instance of
    :class:`plotly.graph_objs.carpet.baxis.Title`, or a dict of
    string/value properties that will be passed to the Title
    constructor.

    Returns
    -------
    plotly.graph_objs.carpet.baxis.Title
    """
    value = self["title"]
    return value

@title.setter
def title(self, val):
    self["title"] = val
@property
def type(self):
    """
    Sets the axis type. By default, plotly attempts to determined the
    axis type by looking into the data of the traces that referenced
    the axis in question.

    Accepts one of the enumeration values: '-', 'linear', 'date',
    'category'.

    Returns
    -------
    Any
    """
    kind = self["type"]
    return kind

@type.setter
def type(self, val):
    self["type"] = val
@property
def _prop_descriptions(self):
    # Plain-text summary of every Baxis property.
    # NOTE(review): presumably consumed by the plotly.py base class when
    # composing constructor docstrings / help text — confirm against
    # BaseTraceHierarchyType. The literal below is runtime data, so its
    # exact content must not be edited by hand.
    return """\
arraydtick
The stride between grid lines along the axis
arraytick0
The starting index of grid lines along the axis
autorange
Determines whether or not the range of this axis is
computed in relation to the input data. See `rangemode`
for more info. If `range` is provided, then `autorange`
is set to False.
autotypenumbers
Using "strict" a numeric string in trace data is not
converted to a number. Using *convert types* a numeric
string in trace data may be treated as a number during
automatic axis `type` detection. Defaults to
layout.autotypenumbers.
categoryarray
Sets the order in which categories on this axis appear.
Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the case of
categorical variables. By default, plotly uses "trace",
which specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`.
cheatertype

color
Sets default for all colors associated with this axis
all at once: line, font, tick, and grid colors. Grid
color is lightened by blending this with the plot
background Individual pieces can override this.
dtick
The stride between grid lines along the axis
endline
Determines whether or not a line is drawn at along the
final value of this axis. If True, the end line is
drawn on top of the grid lines.
endlinecolor
Sets the line color of the end line.
endlinewidth
Sets the width (in px) of the end line.
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
fixedrange
Determines whether or not this axis is zoom-able. If
true, then zoom is disabled.
gridcolor
Sets the axis line color.
griddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
gridwidth
Sets the width (in px) of the axis line.
labelalias
Replacement text for specific tick or hover labels. For
example using {US: 'USA', CA: 'Canada'} changes US to
USA and CA to Canada. The labels we would have shown
must match the keys exactly, after adding any
tickprefix or ticksuffix. For negative numbers the
minus sign symbol used (U+2212) is wider than the
regular ascii dash. That means you need to use −1
instead of -1. labelalias can be used with any axis
type, and both keys (if needed) and values (if desired)
can include html-like tags or MathJax.
labelpadding
Extra padding between label and the axis
labelprefix
Sets a axis label prefix.
labelsuffix
Sets a axis label suffix.
linecolor
Sets the axis line color.
linewidth
Sets the width (in px) of the axis line.
minexponent
Hide SI prefix for 10^n if |n| is below this number
minorgridcolor
Sets the color of the grid lines.
minorgridcount
Sets the number of minor grid ticks per major grid tick
minorgriddash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
minorgridwidth
Sets the width (in px) of the grid lines.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
range
Sets the range of this axis. If the axis `type` is
"log", then you must take the log of your desired range
(e.g. to set the range from 1 to 100, set the range
from 0 to 2). If the axis `type` is "date", it should
be date strings, like date data, though Date objects
and unix milliseconds will be accepted and converted to
strings. If the axis `type` is "category", it should be
numbers, using the scale where each category is
assigned a serial number from zero in the order it
appears.
rangemode
If "normal", the range is computed in relation to the
extrema of the input data. If "tozero", the range
extends to 0, regardless of the input data If
"nonnegative", the range is non-negative, regardless of
the input data.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showgrid
Determines whether or not grid lines are drawn. If
True, the grid lines are drawn at every tick mark.
showline
Determines whether or not a line bounding this axis is
drawn.
showticklabels
Determines whether axis labels are drawn on the low
side, the high side, both, or neither side of the axis.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
smoothing

startline
Determines whether or not a line is drawn at along the
starting value of this axis. If True, the start line is
drawn on top of the grid lines.
startlinecolor
Sets the line color of the start line.
startlinewidth
Sets the width (in px) of the start line.
tick0
The starting index of grid lines along the axis
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickfont
Sets the tick font.
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
And for dates see: https://github.com/d3/d3-time-
format/tree/v2.2.3#locale_format. We add two items to
d3's date formatter: "%h" for half of the year as a
decimal number as well as "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.carpet.baxis.Ti
ckformatstop` instances or dicts with compatible
properties
tickformatstopdefaults
When used in a template (as layout.template.data.carpet
.baxis.tickformatstopdefaults), sets the default
property values to use for elements of
carpet.baxis.tickformatstops
tickmode

tickprefix
Sets a tick label prefix.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
`tickvals`.
title
:class:`plotly.graph_objects.carpet.baxis.Title`
instance or dict with compatible properties
type
Sets the axis type. By default, plotly attempts to
determined the axis type by looking into the data of
the traces that referenced the axis in question.
"""
def __init__(
    self,
    arg=None,
    arraydtick=None,
    arraytick0=None,
    autorange=None,
    autotypenumbers=None,
    categoryarray=None,
    categoryarraysrc=None,
    categoryorder=None,
    cheatertype=None,
    color=None,
    dtick=None,
    endline=None,
    endlinecolor=None,
    endlinewidth=None,
    exponentformat=None,
    fixedrange=None,
    gridcolor=None,
    griddash=None,
    gridwidth=None,
    labelalias=None,
    labelpadding=None,
    labelprefix=None,
    labelsuffix=None,
    linecolor=None,
    linewidth=None,
    minexponent=None,
    minorgridcolor=None,
    minorgridcount=None,
    minorgriddash=None,
    minorgridwidth=None,
    nticks=None,
    range=None,
    rangemode=None,
    separatethousands=None,
    showexponent=None,
    showgrid=None,
    showline=None,
    showticklabels=None,
    showtickprefix=None,
    showticksuffix=None,
    smoothing=None,
    startline=None,
    startlinecolor=None,
    startlinewidth=None,
    tick0=None,
    tickangle=None,
    tickfont=None,
    tickformat=None,
    tickformatstops=None,
    tickformatstopdefaults=None,
    tickmode=None,
    tickprefix=None,
    ticksuffix=None,
    ticktext=None,
    ticktextsrc=None,
    tickvals=None,
    tickvalssrc=None,
    title=None,
    type=None,
    **kwargs,
):
    """
    Construct a new Baxis object

    Parameters
    ----------
    arg
        dict of properties compatible with this constructor or
        an instance of :class:`plotly.graph_objs.carpet.Baxis`
    arraydtick
        The stride between grid lines along the axis
    arraytick0
        The starting index of grid lines along the axis
    autorange
        Determines whether or not the range of this axis is
        computed in relation to the input data. See `rangemode`
        for more info. If `range` is provided, then `autorange`
        is set to False.
    autotypenumbers
        Using "strict" a numeric string in trace data is not
        converted to a number. Using *convert types* a numeric
        string in trace data may be treated as a number during
        automatic axis `type` detection. Defaults to
        layout.autotypenumbers.
    categoryarray
        Sets the order in which categories on this axis appear.
        Only has an effect if `categoryorder` is set to
        "array". Used with `categoryorder`.
    categoryarraysrc
        Sets the source reference on Chart Studio Cloud for
        `categoryarray`.
    categoryorder
        Specifies the ordering logic for the case of
        categorical variables. By default, plotly uses "trace",
        which specifies the order that is present in the data
        supplied. Set `categoryorder` to *category ascending*
        or *category descending* if order should be determined
        by the alphanumerical order of the category names. Set
        `categoryorder` to "array" to derive the ordering from
        the attribute `categoryarray`. If a category is not
        found in the `categoryarray` array, the sorting
        behavior for that attribute will be identical to the
        "trace" mode. The unspecified categories will follow
        the categories in `categoryarray`.
    cheatertype

    color
        Sets default for all colors associated with this axis
        all at once: line, font, tick, and grid colors. Grid
        color is lightened by blending this with the plot
        background Individual pieces can override this.
    dtick
        The stride between grid lines along the axis
    endline
        Determines whether or not a line is drawn at along the
        final value of this axis. If True, the end line is
        drawn on top of the grid lines.
    endlinecolor
        Sets the line color of the end line.
    endlinewidth
        Sets the width (in px) of the end line.
    exponentformat
        Determines a formatting rule for the tick exponents.
        For example, consider the number 1,000,000,000. If
        "none", it appears as 1,000,000,000. If "e", 1e+9. If
        "E", 1E+9. If "power", 1x10^9 (with 9 in a super
        script). If "SI", 1G. If "B", 1B.
    fixedrange
        Determines whether or not this axis is zoom-able. If
        true, then zoom is disabled.
    gridcolor
        Sets the axis line color.
    griddash
        Sets the dash style of lines. Set to a dash type string
        ("solid", "dot", "dash", "longdash", "dashdot", or
        "longdashdot") or a dash length list in px (eg
        "5px,10px,2px,2px").
    gridwidth
        Sets the width (in px) of the axis line.
    labelalias
        Replacement text for specific tick or hover labels. For
        example using {US: 'USA', CA: 'Canada'} changes US to
        USA and CA to Canada. The labels we would have shown
        must match the keys exactly, after adding any
        tickprefix or ticksuffix. For negative numbers the
        minus sign symbol used (U+2212) is wider than the
        regular ascii dash. That means you need to use −1
        instead of -1. labelalias can be used with any axis
        type, and both keys (if needed) and values (if desired)
        can include html-like tags or MathJax.
    labelpadding
        Extra padding between label and the axis
    labelprefix
        Sets a axis label prefix.
    labelsuffix
        Sets a axis label suffix.
    linecolor
        Sets the axis line color.
    linewidth
        Sets the width (in px) of the axis line.
    minexponent
        Hide SI prefix for 10^n if |n| is below this number
    minorgridcolor
        Sets the color of the grid lines.
    minorgridcount
        Sets the number of minor grid ticks per major grid tick
    minorgriddash
        Sets the dash style of lines. Set to a dash type string
        ("solid", "dot", "dash", "longdash", "dashdot", or
        "longdashdot") or a dash length list in px (eg
        "5px,10px,2px,2px").
    minorgridwidth
        Sets the width (in px) of the grid lines.
    nticks
        Specifies the maximum number of ticks for the
        particular axis. The actual number of ticks will be
        chosen automatically to be less than or equal to
        `nticks`. Has an effect only if `tickmode` is set to
        "auto".
    range
        Sets the range of this axis. If the axis `type` is
        "log", then you must take the log of your desired range
        (e.g. to set the range from 1 to 100, set the range
        from 0 to 2). If the axis `type` is "date", it should
        be date strings, like date data, though Date objects
        and unix milliseconds will be accepted and converted to
        strings. If the axis `type` is "category", it should be
        numbers, using the scale where each category is
        assigned a serial number from zero in the order it
        appears.
    rangemode
        If "normal", the range is computed in relation to the
        extrema of the input data. If "tozero", the range
        extends to 0, regardless of the input data If
        "nonnegative", the range is non-negative, regardless of
        the input data.
    separatethousands
        If "true", even 4-digit integers are separated
    showexponent
        If "all", all exponents are shown besides their
        significands. If "first", only the exponent of the
        first tick is shown. If "last", only the exponent of
        the last tick is shown. If "none", no exponents appear.
    showgrid
        Determines whether or not grid lines are drawn. If
        True, the grid lines are drawn at every tick mark.
    showline
        Determines whether or not a line bounding this axis is
        drawn.
    showticklabels
        Determines whether axis labels are drawn on the low
        side, the high side, both, or neither side of the axis.
    showtickprefix
        If "all", all tick labels are displayed with a prefix.
        If "first", only the first tick is displayed with a
        prefix. If "last", only the last tick is displayed with
        a suffix. If "none", tick prefixes are hidden.
    showticksuffix
        Same as `showtickprefix` but for tick suffixes.
    smoothing

    startline
        Determines whether or not a line is drawn at along the
        starting value of this axis. If True, the start line is
        drawn on top of the grid lines.
    startlinecolor
        Sets the line color of the start line.
    startlinewidth
        Sets the width (in px) of the start line.
    tick0
        The starting index of grid lines along the axis
    tickangle
        Sets the angle of the tick labels with respect to the
        horizontal. For example, a `tickangle` of -90 draws the
        tick labels vertically.
    tickfont
        Sets the tick font.
    tickformat
        Sets the tick label formatting rule using d3 formatting
        mini-languages which are very similar to those in
        Python. For numbers, see:
        https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
        And for dates see: https://github.com/d3/d3-time-
        format/tree/v2.2.3#locale_format. We add two items to
        d3's date formatter: "%h" for half of the year as a
        decimal number as well as "%{n}f" for fractional
        seconds with n digits. For example, *2016-10-13
        09:15:23.456* with tickformat "%H~%M~%S.%2f" would
        display "09~15~23.46"
    tickformatstops
        A tuple of :class:`plotly.graph_objects.carpet.baxis.Ti
        ckformatstop` instances or dicts with compatible
        properties
    tickformatstopdefaults
        When used in a template (as layout.template.data.carpet
        .baxis.tickformatstopdefaults), sets the default
        property values to use for elements of
        carpet.baxis.tickformatstops
    tickmode

    tickprefix
        Sets a tick label prefix.
    ticksuffix
        Sets a tick label suffix.
    ticktext
        Sets the text displayed at the ticks position via
        `tickvals`. Only has an effect if `tickmode` is set to
        "array". Used with `tickvals`.
    ticktextsrc
        Sets the source reference on Chart Studio Cloud for
        `ticktext`.
    tickvals
        Sets the values at which ticks on this axis appear.
        Only has an effect if `tickmode` is set to "array".
        Used with `ticktext`.
    tickvalssrc
        Sets the source reference on Chart Studio Cloud for
        `tickvals`.
    title
        :class:`plotly.graph_objects.carpet.baxis.Title`
        instance or dict with compatible properties
    type
        Sets the axis type. By default, plotly attempts to
        determined the axis type by looking into the data of
        the traces that referenced the axis in question.

    Returns
    -------
    Baxis
    """
    super().__init__("baxis")
    # Internal fast path: when constructed as a child of an existing
    # figure hierarchy, only record the parent and skip normal
    # initialization/validation.
    if "_parent" in kwargs:
        self._parent = kwargs["_parent"]
        return

    # Normalize `arg` into a plain dict that can be merged with the
    # explicit keyword arguments below.
    if arg is None:
        arg = {}
    elif isinstance(arg, self.__class__):
        arg = arg.to_plotly_json()
    elif isinstance(arg, dict):
        # Shallow copy so popping/merging below never mutates the
        # caller's dict.
        arg = _copy.copy(arg)
    else:
        raise ValueError("""\
The first argument to the plotly.graph_objs.carpet.Baxis
constructor must be a dict or
an instance of :class:`plotly.graph_objs.carpet.Baxis`""")

    self._skip_invalid = kwargs.pop("skip_invalid", False)
    self._validate = kwargs.pop("_validate", True)

    # Assign each known property: an explicit keyword argument takes
    # precedence over the corresponding entry in `arg`.
    self._set_property("arraydtick", arg, arraydtick)
    self._set_property("arraytick0", arg, arraytick0)
    self._set_property("autorange", arg, autorange)
    self._set_property("autotypenumbers", arg, autotypenumbers)
    self._set_property("categoryarray", arg, categoryarray)
    self._set_property("categoryarraysrc", arg, categoryarraysrc)
    self._set_property("categoryorder", arg, categoryorder)
    self._set_property("cheatertype", arg, cheatertype)
    self._set_property("color", arg, color)
    self._set_property("dtick", arg, dtick)
    self._set_property("endline", arg, endline)
    self._set_property("endlinecolor", arg, endlinecolor)
    self._set_property("endlinewidth", arg, endlinewidth)
    self._set_property("exponentformat", arg, exponentformat)
    self._set_property("fixedrange", arg, fixedrange)
    self._set_property("gridcolor", arg, gridcolor)
    self._set_property("griddash", arg, griddash)
    self._set_property("gridwidth", arg, gridwidth)
    self._set_property("labelalias", arg, labelalias)
    self._set_property("labelpadding", arg, labelpadding)
    self._set_property("labelprefix", arg, labelprefix)
    self._set_property("labelsuffix", arg, labelsuffix)
    self._set_property("linecolor", arg, linecolor)
    self._set_property("linewidth", arg, linewidth)
    self._set_property("minexponent", arg, minexponent)
    self._set_property("minorgridcolor", arg, minorgridcolor)
    self._set_property("minorgridcount", arg, minorgridcount)
    self._set_property("minorgriddash", arg, minorgriddash)
    self._set_property("minorgridwidth", arg, minorgridwidth)
    self._set_property("nticks", arg, nticks)
    self._set_property("range", arg, range)
    self._set_property("rangemode", arg, rangemode)
    self._set_property("separatethousands", arg, separatethousands)
    self._set_property("showexponent", arg, showexponent)
    self._set_property("showgrid", arg, showgrid)
    self._set_property("showline", arg, showline)
    self._set_property("showticklabels", arg, showticklabels)
    self._set_property("showtickprefix", arg, showtickprefix)
    self._set_property("showticksuffix", arg, showticksuffix)
    self._set_property("smoothing", arg, smoothing)
    self._set_property("startline", arg, startline)
    self._set_property("startlinecolor", arg, startlinecolor)
    self._set_property("startlinewidth", arg, startlinewidth)
    self._set_property("tick0", arg, tick0)
    self._set_property("tickangle", arg, tickangle)
    self._set_property("tickfont", arg, tickfont)
    self._set_property("tickformat", arg, tickformat)
    self._set_property("tickformatstops", arg, tickformatstops)
    self._set_property("tickformatstopdefaults", arg, tickformatstopdefaults)
    self._set_property("tickmode", arg, tickmode)
    self._set_property("tickprefix", arg, tickprefix)
    self._set_property("ticksuffix", arg, ticksuffix)
    self._set_property("ticktext", arg, ticktext)
    self._set_property("ticktextsrc", arg, ticktextsrc)
    self._set_property("tickvals", arg, tickvals)
    self._set_property("tickvalssrc", arg, tickvalssrc)
    self._set_property("title", arg, title)
    self._set_property("type", arg, type)

    # Forward any remaining (unknown) entries from `arg`/kwargs.
    # NOTE(review): presumably raises for invalid names unless
    # skip_invalid was set — confirm against the base class.
    self._process_kwargs(**dict(arg, **kwargs))

    self._skip_invalid = False
| Baxis |
python | donnemartin__system-design-primer | solutions/object_oriented_design/lru_cache/lru_cache.py | {
"start": 361,
"end": 1931
} | class ____(object):
def __init__(self, MAX_SIZE):
self.MAX_SIZE = MAX_SIZE
self.size = 0
self.lookup = {} # key: query, value: node
self.linked_list = LinkedList()
def get(self, query):
"""Get the stored query result from the cache.
Accessing a node updates its position to the front of the LRU list.
"""
node = self.lookup.get(query)
if node is None:
return None
self.linked_list.move_to_front(node)
return node.results
def set(self, results, query):
"""Set the result for the given query key in the cache.
When updating an entry, updates its position to the front of the LRU list.
If the entry is new and the cache is at capacity, removes the oldest entry
before the new entry is added.
"""
node = self.lookup.get(query)
if node is not None:
# Key exists in cache, update the value
node.results = results
self.linked_list.move_to_front(node)
else:
# Key does not exist in cache
if self.size == self.MAX_SIZE:
# Remove the oldest entry from the linked list and lookup
self.lookup.pop(self.linked_list.tail.query, None)
self.linked_list.remove_from_tail()
else:
self.size += 1
# Add the new key and value
new_node = Node(results)
self.linked_list.append_to_front(new_node)
self.lookup[query] = new_node
| Cache |
python | sympy__sympy | sympy/diffgeom/diffgeom.py | {
"start": 3990,
"end": 22544
} | class ____(Basic):
"""
A coordinate system defined on the patch.
Explanation
===========
Coordinate system is a system that uses one or more coordinates to uniquely
determine the position of the points or other geometric elements on a
manifold [1].
By passing ``Symbols`` to *symbols* parameter, user can define the name and
assumptions of coordinate symbols of the coordinate system. If not passed,
these symbols are generated automatically and are assumed to be real valued.
By passing *relations* parameter, user can define the transform relations of
coordinate systems. Inverse transformation and indirect transformation can
be found automatically. If this parameter is not passed, coordinate
transformation cannot be done.
Parameters
==========
name : str
The name of the coordinate system.
patch : Patch
The patch where the coordinate system is defined.
symbols : list of Symbols, optional
Defines the names and assumptions of coordinate symbols.
relations : dict, optional
Key is a tuple of two strings, who are the names of the systems where
the coordinates transform from and transform to.
Value is a tuple of the symbols before transformation and a tuple of
the expressions after transformation.
Examples
========
We define two-dimensional Cartesian coordinate system and polar coordinate
system.
>>> from sympy import symbols, pi, sqrt, atan2, cos, sin
>>> from sympy.diffgeom import Manifold, Patch, CoordSystem
>>> m = Manifold('M', 2)
>>> p = Patch('P', m)
>>> x, y = symbols('x y', real=True)
>>> r, theta = symbols('r theta', nonnegative=True)
>>> relation_dict = {
... ('Car2D', 'Pol'): [(x, y), (sqrt(x**2 + y**2), atan2(y, x))],
... ('Pol', 'Car2D'): [(r, theta), (r*cos(theta), r*sin(theta))]
... }
>>> Car2D = CoordSystem('Car2D', p, (x, y), relation_dict)
>>> Pol = CoordSystem('Pol', p, (r, theta), relation_dict)
``symbols`` property returns ``CoordinateSymbol`` instances. These symbols
are not same with the symbols used to construct the coordinate system.
>>> Car2D
Car2D
>>> Car2D.dim
2
>>> Car2D.symbols
(x, y)
>>> _[0].func
<class 'sympy.diffgeom.diffgeom.CoordinateSymbol'>
``transformation()`` method returns the transformation function from
one coordinate system to another. ``transform()`` method returns the
transformed coordinates.
>>> Car2D.transformation(Pol)
Lambda((x, y), Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]]))
>>> Car2D.transform(Pol)
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> Car2D.transform(Pol, [1, 2])
Matrix([
[sqrt(5)],
[atan(2)]])
``jacobian()`` method returns the Jacobian matrix of coordinate
transformation between two systems. ``jacobian_determinant()`` method
returns the Jacobian determinant of coordinate transformation between two
systems.
>>> Pol.jacobian(Car2D)
Matrix([
[cos(theta), -r*sin(theta)],
[sin(theta), r*cos(theta)]])
>>> Pol.jacobian(Car2D, [1, pi/2])
Matrix([
[0, -1],
[1, 0]])
>>> Car2D.jacobian_determinant(Pol)
1/sqrt(x**2 + y**2)
>>> Car2D.jacobian_determinant(Pol, [1,0])
1
References
==========
.. [1] https://en.wikipedia.org/wiki/Coordinate_system
"""
def __new__(cls, name, patch, symbols=None, relations={}, **kwargs):
if not isinstance(name, Str):
name = Str(name)
# canonicallize the symbols
if symbols is None:
names = kwargs.get('names', None)
if names is None:
symbols = Tuple(
*[Symbol('%s_%s' % (name.name, i), real=True)
for i in range(patch.dim)]
)
else:
sympy_deprecation_warning(
f"""
The 'names' argument to CoordSystem is deprecated. Use 'symbols' instead. That
is, replace
CoordSystem(..., names={names})
with
CoordSystem(..., symbols=[{', '.join(["Symbol(" + repr(n) + ", real=True)" for n in names])}])
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
symbols = Tuple(
*[Symbol(n, real=True) for n in names]
)
else:
syms = []
for s in symbols:
if isinstance(s, Symbol):
syms.append(Symbol(s.name, **s._assumptions.generator))
elif isinstance(s, str):
sympy_deprecation_warning(
f"""
Passing a string as the coordinate symbol name to CoordSystem is deprecated.
Pass a Symbol with the appropriate name and assumptions instead.
That is, replace {s} with Symbol({s!r}, real=True).
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
syms.append(Symbol(s, real=True))
symbols = Tuple(*syms)
# canonicallize the relations
rel_temp = {}
for k,v in relations.items():
s1, s2 = k
if not isinstance(s1, Str):
s1 = Str(s1)
if not isinstance(s2, Str):
s2 = Str(s2)
key = Tuple(s1, s2)
# Old version used Lambda as a value.
if isinstance(v, Lambda):
v = (tuple(v.signature), tuple(v.expr))
else:
v = (tuple(v[0]), tuple(v[1]))
rel_temp[key] = v
relations = Dict(rel_temp)
# construct the object
obj = super().__new__(cls, name, patch, symbols, relations)
# Add deprecated attributes
obj.transforms = _deprecated_dict(
"""
CoordSystem.transforms is deprecated. The CoordSystem class is now
immutable. Use the 'relations' keyword argument to the
CoordSystems() constructor to specify relations.
""", {})
obj._names = [str(n) for n in symbols]
obj.patch.coord_systems.append(obj) # deprecated
obj._dummies = [Dummy(str(n)) for n in symbols] # deprecated
obj._dummy = Dummy()
return obj
@property
def name(self):
return self.args[0]
@property
def patch(self):
return self.args[1]
@property
def manifold(self):
return self.patch.manifold
@property
def symbols(self):
return tuple(CoordinateSymbol(self, i, **s._assumptions.generator)
for i,s in enumerate(self.args[2]))
@property
def relations(self):
return self.args[3]
@property
def dim(self):
return self.patch.dim
##########################################################################
# Finding transformation relation
##########################################################################
def transformation(self, sys):
"""
Return coordinate transformation function from *self* to *sys*.
Parameters
==========
sys : CoordSystem
Returns
=======
sympy.Lambda
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_r.transformation(R2_p)
Lambda((x, y), Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]]))
"""
signature = self.args[2]
key = Tuple(self.name, sys.name)
if self == sys:
expr = Matrix(self.symbols)
elif key in self.relations:
expr = Matrix(self.relations[key][1])
elif key[::-1] in self.relations:
expr = Matrix(self._inverse_transformation(sys, self))
else:
expr = Matrix(self._indirect_transformation(self, sys))
return Lambda(signature, expr)
@staticmethod
def _solve_inverse(sym1, sym2, exprs, sys1_name, sys2_name):
ret = solve(
[t[0] - t[1] for t in zip(sym2, exprs)],
list(sym1), dict=True)
if len(ret) == 0:
temp = "Cannot solve inverse relation from {} to {}."
raise NotImplementedError(temp.format(sys1_name, sys2_name))
elif len(ret) > 1:
temp = "Obtained multiple inverse relation from {} to {}."
raise ValueError(temp.format(sys1_name, sys2_name))
return ret[0]
@classmethod
def _inverse_transformation(cls, sys1, sys2):
# Find the transformation relation from sys2 to sys1
forward = sys1.transform(sys2)
inv_results = cls._solve_inverse(sys1.symbols, sys2.symbols, forward,
sys1.name, sys2.name)
signature = tuple(sys1.symbols)
return [inv_results[s] for s in signature]
@classmethod
@cacheit
def _indirect_transformation(cls, sys1, sys2):
# Find the transformation relation between two indirectly connected
# coordinate systems
rel = sys1.relations
path = cls._dijkstra(sys1, sys2)
transforms = []
for s1, s2 in zip(path, path[1:]):
if (s1, s2) in rel:
transforms.append(rel[(s1, s2)])
else:
sym2, inv_exprs = rel[(s2, s1)]
sym1 = tuple(Dummy() for i in sym2)
ret = cls._solve_inverse(sym2, sym1, inv_exprs, s2, s1)
ret = tuple(ret[s] for s in sym2)
transforms.append((sym1, ret))
syms = sys1.args[2]
exprs = syms
for newsyms, newexprs in transforms:
exprs = tuple(e.subs(zip(newsyms, exprs)) for e in newexprs)
return exprs
@staticmethod
def _dijkstra(sys1, sys2):
# Use Dijkstra algorithm to find the shortest path between two indirectly-connected
# coordinate systems
# return value is the list of the names of the systems.
relations = sys1.relations
graph = {}
for s1, s2 in relations.keys():
if s1 not in graph:
graph[s1] = {s2}
else:
graph[s1].add(s2)
if s2 not in graph:
graph[s2] = {s1}
else:
graph[s2].add(s1)
path_dict = {sys:[0, [], 0] for sys in graph} # minimum distance, path, times of visited
def visit(sys):
path_dict[sys][2] = 1
for newsys in graph[sys]:
distance = path_dict[sys][0] + 1
if path_dict[newsys][0] >= distance or not path_dict[newsys][1]:
path_dict[newsys][0] = distance
path_dict[newsys][1] = list(path_dict[sys][1])
path_dict[newsys][1].append(sys)
visit(sys1.name)
while True:
min_distance = max(path_dict.values(), key=lambda x:x[0])[0]
newsys = None
for sys, lst in path_dict.items():
if 0 < lst[0] <= min_distance and not lst[2]:
min_distance = lst[0]
newsys = sys
if newsys is None:
break
visit(newsys)
result = path_dict[sys2.name][1]
result.append(sys2.name)
if result == [sys2.name]:
raise KeyError("Two coordinate systems are not connected.")
return result
def connect_to(self, to_sys, from_coords, to_exprs, inverse=True, fill_in_gaps=False):
sympy_deprecation_warning(
"""
The CoordSystem.connect_to() method is deprecated. Instead,
generate a new instance of CoordSystem with the 'relations'
keyword argument (CoordSystem classes are now immutable).
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
from_coords, to_exprs = dummyfy(from_coords, to_exprs)
self.transforms[to_sys] = Matrix(from_coords), Matrix(to_exprs)
if inverse:
to_sys.transforms[self] = self._inv_transf(from_coords, to_exprs)
if fill_in_gaps:
self._fill_gaps_in_transformations()
@staticmethod
def _inv_transf(from_coords, to_exprs):
# Will be removed when connect_to is removed
inv_from = [i.as_dummy() for i in from_coords]
inv_to = solve(
[t[0] - t[1] for t in zip(inv_from, to_exprs)],
list(from_coords), dict=True)[0]
inv_to = [inv_to[fc] for fc in from_coords]
return Matrix(inv_from), Matrix(inv_to)
@staticmethod
def _fill_gaps_in_transformations():
# Will be removed when connect_to is removed
raise NotImplementedError
##########################################################################
# Coordinate transformations
##########################################################################
def transform(self, sys, coordinates=None):
"""
Return the result of coordinate transformation from *self* to *sys*.
If coordinates are not given, coordinate symbols of *self* are used.
Parameters
==========
sys : CoordSystem
coordinates : Any iterable, optional.
Returns
=======
sympy.ImmutableDenseMatrix containing CoordinateSymbol
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_r.transform(R2_p)
Matrix([
[sqrt(x**2 + y**2)],
[ atan2(y, x)]])
>>> R2_r.transform(R2_p, [0, 1])
Matrix([
[ 1],
[pi/2]])
"""
if coordinates is None:
coordinates = self.symbols
if self != sys:
transf = self.transformation(sys)
coordinates = transf(*coordinates)
else:
coordinates = Matrix(coordinates)
return coordinates
def coord_tuple_transform_to(self, to_sys, coords):
"""Transform ``coords`` to coord system ``to_sys``."""
sympy_deprecation_warning(
"""
The CoordSystem.coord_tuple_transform_to() method is deprecated.
Use the CoordSystem.transform() method instead.
""",
deprecated_since_version="1.7",
active_deprecations_target="deprecated-diffgeom-mutable",
)
coords = Matrix(coords)
if self != to_sys:
with ignore_warnings(SymPyDeprecationWarning):
transf = self.transforms[to_sys]
coords = transf[1].subs(list(zip(transf[0], coords)))
return coords
def jacobian(self, sys, coordinates=None):
"""
Return the jacobian matrix of a transformation on given coordinates.
If coordinates are not given, coordinate symbols of *self* are used.
Parameters
==========
sys : CoordSystem
coordinates : Any iterable, optional.
Returns
=======
sympy.ImmutableDenseMatrix
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_p.jacobian(R2_r)
Matrix([
[cos(theta), -rho*sin(theta)],
[sin(theta), rho*cos(theta)]])
>>> R2_p.jacobian(R2_r, [1, 0])
Matrix([
[1, 0],
[0, 1]])
"""
result = self.transform(sys).jacobian(self.symbols)
if coordinates is not None:
result = result.subs(list(zip(self.symbols, coordinates)))
return result
jacobian_matrix = jacobian
def jacobian_determinant(self, sys, coordinates=None):
"""
Return the jacobian determinant of a transformation on given
coordinates. If coordinates are not given, coordinate symbols of *self*
are used.
Parameters
==========
sys : CoordSystem
coordinates : Any iterable, optional.
Returns
=======
sympy.Expr
Examples
========
>>> from sympy.diffgeom.rn import R2_r, R2_p
>>> R2_r.jacobian_determinant(R2_p)
1/sqrt(x**2 + y**2)
>>> R2_r.jacobian_determinant(R2_p, [1, 0])
1
"""
return self.jacobian(sys, coordinates).det()
##########################################################################
# Points
##########################################################################
def point(self, coords):
"""Create a ``Point`` with coordinates given in this coord system."""
return Point(self, coords)
def point_to_coords(self, point):
"""Calculate the coordinates of a point in this coord system."""
return point.coords(self)
##########################################################################
# Base fields.
##########################################################################
def base_scalar(self, coord_index):
"""Return ``BaseScalarField`` that takes a point and returns one of the coordinates."""
return BaseScalarField(self, coord_index)
coord_function = base_scalar
def base_scalars(self):
"""Returns a list of all coordinate functions.
For more details see the ``base_scalar`` method of this class."""
return [self.base_scalar(i) for i in range(self.dim)]
coord_functions = base_scalars
def base_vector(self, coord_index):
"""Return a basis vector field.
The basis vector field for this coordinate system. It is also an
operator on scalar fields."""
return BaseVectorField(self, coord_index)
def base_vectors(self):
"""Returns a list of all base vectors.
For more details see the ``base_vector`` method of this class."""
return [self.base_vector(i) for i in range(self.dim)]
def base_oneform(self, coord_index):
"""Return a basis 1-form field.
The basis one-form field for this coordinate system. It is also an
operator on vector fields."""
return Differential(self.coord_function(coord_index))
def base_oneforms(self):
"""Returns a list of all base oneforms.
For more details see the ``base_oneform`` method of this class."""
return [self.base_oneform(i) for i in range(self.dim)]
| CoordSystem |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/router/multi_retrieval_qa.py | {
"start": 922,
"end": 5326
} | class ____(MultiRouteChain):
"""Multi Retrieval QA Chain.
A multi-route chain that uses an LLM router chain to choose amongst retrieval
qa chains.
"""
router_chain: LLMRouterChain
"""Chain for deciding a destination chain and the input to it."""
destination_chains: Mapping[str, BaseRetrievalQA]
"""Map of name to candidate chains that inputs can be routed to."""
default_chain: Chain
"""Default chain to use when router doesn't map input to one of the destinations."""
@property
@override
def output_keys(self) -> list[str]:
return ["result"]
@classmethod
def from_retrievers(
cls,
llm: BaseLanguageModel,
retriever_infos: list[dict[str, Any]],
default_retriever: BaseRetriever | None = None,
default_prompt: PromptTemplate | None = None,
default_chain: Chain | None = None,
*,
default_chain_llm: BaseLanguageModel | None = None,
**kwargs: Any,
) -> MultiRetrievalQAChain:
"""Create a multi retrieval qa chain from an LLM and a default chain.
Args:
llm: The language model to use.
retriever_infos: Dictionaries containing retriever information.
default_retriever: Optional default retriever to use if no default chain
is provided.
default_prompt: Optional prompt template to use for the default retriever.
default_chain: Optional default chain to use when router doesn't map input
to one of the destinations.
default_chain_llm: Optional language model to use if no default chain and
no default retriever are provided.
**kwargs: Additional keyword arguments to pass to the chain.
Returns:
An instance of the multi retrieval qa chain.
"""
if default_prompt and not default_retriever:
msg = (
"`default_retriever` must be specified if `default_prompt` is "
"provided. Received only `default_prompt`."
)
raise ValueError(msg)
destinations = [f"{r['name']}: {r['description']}" for r in retriever_infos]
destinations_str = "\n".join(destinations)
router_template = MULTI_RETRIEVAL_ROUTER_TEMPLATE.format(
destinations=destinations_str,
)
router_prompt = PromptTemplate(
template=router_template,
input_variables=["input"],
output_parser=RouterOutputParser(next_inputs_inner_key="query"),
)
router_chain = LLMRouterChain.from_llm(llm, router_prompt)
destination_chains = {}
for r_info in retriever_infos:
prompt = r_info.get("prompt")
retriever = r_info["retriever"]
chain = RetrievalQA.from_llm(llm, prompt=prompt, retriever=retriever)
name = r_info["name"]
destination_chains[name] = chain
if default_chain:
_default_chain = default_chain
elif default_retriever:
_default_chain = RetrievalQA.from_llm(
llm,
prompt=default_prompt,
retriever=default_retriever,
)
else:
prompt_template = DEFAULT_TEMPLATE.replace("input", "query")
prompt = PromptTemplate(
template=prompt_template,
input_variables=["history", "query"],
)
if default_chain_llm is None:
msg = (
"conversation_llm must be provided if default_chain is not "
"specified. This API has been changed to avoid instantiating "
"default LLMs on behalf of users."
"You can provide a conversation LLM like so:\n"
"from langchain_openai import ChatOpenAI\n"
"model = ChatOpenAI()"
)
raise NotImplementedError(msg)
_default_chain = ConversationChain(
llm=default_chain_llm,
prompt=prompt,
input_key="query",
output_key="result",
)
return cls(
router_chain=router_chain,
destination_chains=destination_chains,
default_chain=_default_chain,
**kwargs,
)
| MultiRetrievalQAChain |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 1238,
"end": 1953
} | class ____(type):
"""A metaclass for nodes that handles the field and attribute
inheritance. fields and attributes from the parent class are
automatically forwarded to the child."""
def __new__(mcs, name, bases, d): # type: ignore
for attr in "fields", "attributes":
storage = []
storage.extend(getattr(bases[0] if bases else object, attr, ()))
storage.extend(d.get(attr, ()))
assert len(bases) <= 1, "multiple inheritance not allowed"
assert len(storage) == len(set(storage)), "layout conflict"
d[attr] = tuple(storage)
d.setdefault("abstract", False)
return type.__new__(mcs, name, bases, d)
| NodeType |
python | apache__airflow | airflow-core/tests/unit/dags/test_task_view_type_check.py | {
"start": 1207,
"end": 1879
} | class ____:
"""
Class that is callable.
"""
def __call__(self):
"""A __call__ method"""
def a_function(arg_x, __):
"""A function with two args"""
partial_function = functools.partial(a_function, arg_x=1)
class_instance = CallableClass()
logger.info("class_instance type: %s", type(class_instance))
dag = DAG(dag_id="test_task_view_type_check", schedule=None, default_args=default_args)
dag_task1 = PythonOperator(
task_id="test_dagrun_functool_partial",
dag=dag,
python_callable=partial_function,
)
dag_task2 = PythonOperator(
task_id="test_dagrun_instance",
dag=dag,
python_callable=class_instance,
)
| CallableClass |
python | pytorch__pytorch | torch/_guards.py | {
"start": 14859,
"end": 15050
} | class ____:
pass
"""
A class representing a pair of duplicate inputs.
input_pos_a and input_pos_b are input positions we have deduped.
"""
@dataclasses.dataclass(frozen=True)
| GuardEnvExpr |
python | eventlet__eventlet | eventlet/green/http/server.py | {
"start": 7171,
"end": 25215
} | class ____(socketserver.StreamRequestHandler):
"""HTTP request handler base class.
The following explanation of HTTP serves to guide you through the
code as well as to expose any misunderstandings I may have about
HTTP (so you don't need to read the code to figure out I'm wrong
:-).
HTTP (HyperText Transfer Protocol) is an extensible protocol on
top of a reliable stream transport (e.g. TCP/IP). The protocol
recognizes three parts to a request:
1. One line identifying the request type and path
2. An optional set of RFC-822-style headers
3. An optional data part
The headers and data are separated by a blank line.
The first line of the request has the form
<command> <path> <version>
where <command> is a (case-sensitive) keyword such as GET or POST,
<path> is a string containing path information for the request,
and <version> should be the string "HTTP/1.0" or "HTTP/1.1".
<path> is encoded using the URL encoding scheme (using %xx to signify
the ASCII character with hex code xx).
The specification specifies that lines are separated by CRLF but
for compatibility with the widest range of clients recommends
servers also handle LF. Similarly, whitespace in the request line
is treated sensibly (allowing multiple spaces between components
and allowing trailing whitespace).
Similarly, for output, lines ought to be separated by CRLF pairs
but most clients grok LF characters just fine.
If the first line of the request has the form
<command> <path>
(i.e. <version> is left out) then this is assumed to be an HTTP
0.9 request; this form has no optional headers and data part and
the reply consists of just the data.
The reply form of the HTTP 1.x protocol again has three parts:
1. One line giving the response code
2. An optional set of RFC-822-style headers
3. The data
Again, the headers and data are separated by a blank line.
The response code line has the form
<version> <responsecode> <responsestring>
where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"),
<responsecode> is a 3-digit response code indicating success or
failure of the request, and <responsestring> is an optional
human-readable string explaining what the response code means.
This server parses the request and the headers, and then calls a
function specific to the request type (<command>). Specifically,
a request SPAM will be handled by a method do_SPAM(). If no
such method exists the server sends an error response to the
client. If it exists, it is called with no arguments:
do_SPAM()
Note that the request name is case sensitive (i.e. SPAM and spam
are different requests).
The various request details are stored in instance variables:
- client_address is the client IP address in the form (host,
port);
- command, path and version are the broken-down request line;
- headers is an instance of email.message.Message (or a derived
class) containing the header information;
- rfile is a file object open for reading positioned at the
start of the optional input data part;
- wfile is a file object open for writing.
IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING!
The first thing to be written must be the response line. Then
follow 0 or more header lines, then a blank line, and then the
actual data (if any). The meaning of the header lines depends on
the command executed by the server; in most cases, when data is
returned, there should be at least one header line of the form
Content-type: <type>/<subtype>
where <type> and <subtype> should be registered MIME types,
e.g. "text/html" or "text/plain".
"""
# The Python system version, truncated to its first component.
sys_version = "Python/" + sys.version.split()[0]
# The server software version. You may want to override this.
# The format is multiple whitespace-separated strings,
# where each string is of the form name[/version].
server_version = "BaseHTTP/" + __version__
error_message_format = DEFAULT_ERROR_MESSAGE
error_content_type = DEFAULT_ERROR_CONTENT_TYPE
# The default request version. This only affects responses up until
# the point where the request line is parsed, so it mainly decides what
# the client gets back when sending a malformed request line.
# Most web servers default to HTTP 0.9, i.e. don't send a status line.
default_request_version = "HTTP/0.9"
def parse_request(self):
"""Parse a request (internal).
The request should be stored in self.raw_requestline; the results
are in self.command, self.path, self.request_version and
self.headers.
Return True for success, False for failure; on failure, an
error is sent back.
"""
self.command = None # set in case of error on the first line
self.request_version = version = self.default_request_version
self.close_connection = True
requestline = str(self.raw_requestline, 'iso-8859-1')
requestline = requestline.rstrip('\r\n')
self.requestline = requestline
words = requestline.split()
if len(words) == 3:
command, path, version = words
try:
if version[:5] != 'HTTP/':
raise ValueError
base_version_number = version.split('/', 1)[1]
version_number = base_version_number.split(".")
# RFC 2145 section 3.1 says there can be only one "." and
# - major and minor numbers MUST be treated as
# separate integers;
# - HTTP/2.4 is a lower version than HTTP/2.13, which in
# turn is lower than HTTP/12.3;
# - Leading zeros MUST be ignored by recipients.
if len(version_number) != 2:
raise ValueError
version_number = int(version_number[0]), int(version_number[1])
except (ValueError, IndexError):
self.send_error(
HTTPStatus.BAD_REQUEST,
"Bad request version (%r)" % version)
return False
if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1":
self.close_connection = False
if version_number >= (2, 0):
self.send_error(
HTTPStatus.HTTP_VERSION_NOT_SUPPORTED,
"Invalid HTTP version (%s)" % base_version_number)
return False
elif len(words) == 2:
command, path = words
self.close_connection = True
if command != 'GET':
self.send_error(
HTTPStatus.BAD_REQUEST,
"Bad HTTP/0.9 request type (%r)" % command)
return False
elif not words:
return False
else:
self.send_error(
HTTPStatus.BAD_REQUEST,
"Bad request syntax (%r)" % requestline)
return False
self.command, self.path, self.request_version = command, path, version
# Examine the headers and look for a Connection directive.
try:
self.headers = http_client.parse_headers(self.rfile,
_class=self.MessageClass)
except http_client.LineTooLong as err:
self.send_error(
HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
"Line too long",
str(err))
return False
except http_client.HTTPException as err:
self.send_error(
HTTPStatus.REQUEST_HEADER_FIELDS_TOO_LARGE,
"Too many headers",
str(err)
)
return False
conntype = self.headers.get('Connection', "")
if conntype.lower() == 'close':
self.close_connection = True
elif (conntype.lower() == 'keep-alive' and
self.protocol_version >= "HTTP/1.1"):
self.close_connection = False
# Examine the headers and look for an Expect directive
expect = self.headers.get('Expect', "")
if (expect.lower() == "100-continue" and
self.protocol_version >= "HTTP/1.1" and
self.request_version >= "HTTP/1.1"):
if not self.handle_expect_100():
return False
return True
def handle_expect_100(self):
"""Decide what to do with an "Expect: 100-continue" header.
If the client is expecting a 100 Continue response, we must
respond with either a 100 Continue or a final response before
waiting for the request body. The default is to always respond
with a 100 Continue. You can behave differently (for example,
reject unauthorized requests) by overriding this method.
This method should either return True (possibly after sending
a 100 Continue response) or send an error response and return
False.
"""
self.send_response_only(HTTPStatus.CONTINUE)
self.end_headers()
return True
def handle_one_request(self):
"""Handle a single HTTP request.
You normally don't need to override this method; see the class
__doc__ string for information on how to handle specific HTTP
commands such as GET and POST.
"""
try:
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ''
self.request_version = ''
self.command = ''
self.send_error(HTTPStatus.REQUEST_URI_TOO_LONG)
return
if not self.raw_requestline:
self.close_connection = True
return
if not self.parse_request():
# An error code has been sent, just exit
return
mname = 'do_' + self.command
if not hasattr(self, mname):
self.send_error(
HTTPStatus.NOT_IMPLEMENTED,
"Unsupported method (%r)" % self.command)
return
method = getattr(self, mname)
method()
self.wfile.flush() #actually send the response if not already done.
except socket.timeout as e:
#a read or a write timed out. Discard this connection
self.log_error("Request timed out: %r", e)
self.close_connection = True
return
def handle(self):
"""Handle multiple requests if necessary."""
self.close_connection = True
self.handle_one_request()
while not self.close_connection:
self.handle_one_request()
def send_error(self, code, message=None, explain=None):
"""Send and log an error reply.
Arguments are
* code: an HTTP error code
3 digits
* message: a simple optional 1 line reason phrase.
*( HTAB / SP / VCHAR / %x80-FF )
defaults to short entry matching the response code
* explain: a detailed message defaults to the long entry
matching the response code.
This sends an error response (so it must be called before any
output has been generated), logs the error, and finally sends
a piece of HTML explaining the error to the user.
"""
try:
shortmsg, longmsg = self.responses[code]
except KeyError:
shortmsg, longmsg = '???', '???'
if message is None:
message = shortmsg
if explain is None:
explain = longmsg
self.log_error("code %d, message %s", code, message)
self.send_response(code, message)
self.send_header('Connection', 'close')
# Message body is omitted for cases described in:
# - RFC7230: 3.3. 1xx, 204(No Content), 304(Not Modified)
# - RFC7231: 6.3.6. 205(Reset Content)
body = None
if (code >= 200 and
code not in (HTTPStatus.NO_CONTENT,
HTTPStatus.RESET_CONTENT,
HTTPStatus.NOT_MODIFIED)):
# HTML encode to prevent Cross Site Scripting attacks
# (see bug #1100201)
content = (self.error_message_format % {
'code': code,
'message': html.escape(message, quote=False),
'explain': html.escape(explain, quote=False)
})
body = content.encode('UTF-8', 'replace')
self.send_header("Content-Type", self.error_content_type)
self.send_header('Content-Length', int(len(body)))
self.end_headers()
if self.command != 'HEAD' and body:
self.wfile.write(body)
def send_response(self, code, message=None):
"""Add the response header to the headers buffer and log the
response code.
Also send two standard headers with the server software
version and the current date.
"""
self.log_request(code)
self.send_response_only(code, message)
self.send_header('Server', self.version_string())
self.send_header('Date', self.date_time_string())
def send_response_only(self, code, message=None):
"""Send the response header only."""
if self.request_version != 'HTTP/0.9':
if message is None:
if code in self.responses:
message = self.responses[code][0]
else:
message = ''
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(("%s %d %s\r\n" %
(self.protocol_version, code, message)).encode(
'latin-1', 'strict'))
def send_header(self, keyword, value):
"""Send a MIME header to the headers buffer."""
if self.request_version != 'HTTP/0.9':
if not hasattr(self, '_headers_buffer'):
self._headers_buffer = []
self._headers_buffer.append(
("%s: %s\r\n" % (keyword, value)).encode('latin-1', 'strict'))
if keyword.lower() == 'connection':
if value.lower() == 'close':
self.close_connection = True
elif value.lower() == 'keep-alive':
self.close_connection = False
def end_headers(self):
"""Send the blank line ending the MIME headers."""
if self.request_version != 'HTTP/0.9':
self._headers_buffer.append(b"\r\n")
self.flush_headers()
def flush_headers(self):
if hasattr(self, '_headers_buffer'):
self.wfile.write(b"".join(self._headers_buffer))
self._headers_buffer = []
def log_request(self, code='-', size='-'):
"""Log an accepted request.
This is called by send_response().
"""
if isinstance(code, HTTPStatus):
code = code.value
self.log_message('"%s" %s %s',
self.requestline, str(code), str(size))
def log_error(self, format, *args):
"""Log an error.
This is called when a request cannot be fulfilled. By
default it passes the message on to log_message().
Arguments are the same as for log_message().
XXX This should go to the separate error log.
"""
self.log_message(format, *args)
def log_message(self, format, *args):
"""Log an arbitrary message.
This is used by all other logging functions. Override
it if you have specific logging wishes.
The first argument, FORMAT, is a format string for the
message to be logged. If the format string contains
any % escapes requiring parameters, they should be
specified as subsequent arguments (it's just like
printf!).
The client ip and current date/time are prefixed to
every message.
"""
sys.stderr.write("%s - - [%s] %s\n" %
(self.address_string(),
self.log_date_time_string(),
format%args))
def version_string(self):
"""Return the server software version string."""
return self.server_version + ' ' + self.sys_version
def date_time_string(self, timestamp=None):
"""Return the current date and time formatted for a message header."""
if timestamp is None:
timestamp = time.time()
return email.utils.formatdate(timestamp, usegmt=True)
def log_date_time_string(self):
"""Return the current time formatted for logging."""
now = time.time()
year, month, day, hh, mm, ss, x, y, z = time.localtime(now)
s = "%02d/%3s/%04d %02d:%02d:%02d" % (
day, self.monthname[month], year, hh, mm, ss)
return s
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None,
'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
def address_string(self):
"""Return the client address."""
return self.client_address[0]
# Essentially static class variables
# The version of the HTTP protocol we support.
# Set this to HTTP/1.1 to enable automatic keepalive
protocol_version = "HTTP/1.0"
# MessageClass used to parse headers
MessageClass = http_client.HTTPMessage
# hack to maintain backwards compatibility
responses = {
v: (v.phrase, v.description)
for v in HTTPStatus.__members__.values()
}
| BaseHTTPRequestHandler |
python | getsentry__sentry | tests/sentry/integrations/test_pipeline.py | {
"start": 2202,
"end": 28380
} | class ____(IntegrationTestCase):
provider = ExampleIntegrationProvider
regions = (
Region("na", 0, "North America", RegionCategory.MULTI_TENANT),
Region("eu", 5, "Europe", RegionCategory.MULTI_TENANT),
)
external_id = "dummy_id-123"
@pytest.fixture(autouse=True)
def _register_example_plugin(self) -> Generator[None]:
plugins.register(ExamplePlugin)
yield
plugins.unregister(ExamplePlugin)
@pytest.fixture(autouse=True)
def _modify_provider(self):
with patch.multiple(
self.provider,
needs_default_identity=False,
is_region_restricted=False,
):
yield
def _setup_region_restriction(self):
self.provider.is_region_restricted = True
na_orgs = [
self.create_organization(name="na_org"),
self.create_organization(name="na_org_2"),
]
integration = self.create_provider_integration(
name="test", external_id=self.external_id, provider=self.provider.key
)
with (
receivers_raise_on_send(),
outbox_runner(),
unguarded_write(using=router.db_for_write(OrganizationMapping)),
):
for org in na_orgs:
integration.add_organization(org)
mapping = OrganizationMapping.objects.get(organization_id=org.id)
mapping.update(region_name="na")
def test_with_data(self, *args) -> None:
data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
}
self.pipeline.state.data = data
resp = self.pipeline.finish_pipeline()
assert isinstance(resp, HttpResponse)
self.assertDialogSuccess(resp)
assert b"document.origin);" in resp.content
integration = Integration.objects.get(
provider=self.provider.key, external_id=self.external_id
)
assert integration.name == data["name"]
assert integration.metadata == data["metadata"]
assert OrganizationIntegration.objects.filter(
organization_id=self.organization.id, integration_id=integration.id
).exists()
def test_with_customer_domain(self, *args) -> None:
with self.feature({"system:multi-region": True}):
data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
}
self.pipeline.state.data = data
resp = self.pipeline.finish_pipeline()
assert isinstance(resp, HttpResponse)
self.assertDialogSuccess(resp)
assert (
f', "{generate_organization_url(self.organization.slug)}");'.encode()
in resp.content
)
integration = Integration.objects.get(
provider=self.provider.key, external_id=self.external_id
)
assert integration.name == data["name"]
assert integration.metadata == data["metadata"]
assert OrganizationIntegration.objects.filter(
organization_id=self.organization.id, integration_id=integration.id
).exists()
@patch("sentry.signals.integration_added.send_robust")
def test_provider_should_check_region_violation(self, *args) -> None:
"""Ensures we validate regions if `provider.is_region_restricted` is set to True"""
self.provider.is_region_restricted = True
self.pipeline.state.data = {"external_id": self.external_id}
with patch(
"sentry.integrations.pipeline.is_violating_region_restriction"
) as mock_check_violation:
self.pipeline.finish_pipeline()
assert mock_check_violation.called
@patch("sentry.signals.integration_added.send_robust")
def test_provider_should_not_check_region_violation(self, *args) -> None:
"""Ensures we don't reject regions if `provider.is_region_restricted` is set to False"""
self.pipeline.state.data = {"external_id": self.external_id}
with patch(
"sentry.integrations.pipeline.is_violating_region_restriction"
) as mock_check_violation:
self.pipeline.finish_pipeline()
assert not mock_check_violation.called
@patch("sentry.signals.integration_added.send_robust")
def test_is_violating_region_restriction_success(self, *args) -> None:
"""Ensures pipeline can complete if all integration organizations reside in one region."""
self._setup_region_restriction()
# Installing organization is from the same region
mapping = OrganizationMapping.objects.get(organization_id=self.organization.id)
with unguarded_write(using=router.db_for_write(OrganizationMapping)):
mapping.update(region_name="na")
self.pipeline.state.data = {"external_id": self.external_id}
with (
override_regions(self.regions),
patch("sentry.integrations.pipeline.IntegrationPipeline._dialog_response") as resp,
):
self.pipeline.finish_pipeline()
_data, success = resp.call_args[0]
assert success
@patch("sentry.signals.integration_added.send_robust")
def test_is_violating_region_restriction_failure(self, *args) -> None:
"""Ensures pipeline can produces an error if all integration organizations do not reside in one region."""
self._setup_region_restriction()
# Installing organization is from a different region
mapping = OrganizationMapping.objects.get(organization_id=self.organization.id)
with unguarded_write(using=router.db_for_write(OrganizationMapping)):
mapping.update(region_name="eu")
self.pipeline.state.data = {"external_id": self.external_id}
with override_regions(self.regions):
response = self.pipeline.finish_pipeline()
assert isinstance(response, HttpResponse)
error_message = "This integration has already been installed on another Sentry organization which resides in a different region. Installation could not be completed."
assert error_message in response.content.decode()
if SiloMode.get_current_mode() == SiloMode.MONOLITH:
assert error_message not in response.content.decode()
if SiloMode.get_current_mode() == SiloMode.CONTROL:
assert error_message in response.content.decode()
def test_aliased_integration_key(self, *args) -> None:
self.provider = AliasedIntegrationProvider
self.setUp()
data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
}
self.pipeline.state.data = data
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
# Creates the Integration using ``integration_key`` instead of ``key``
assert Integration.objects.filter(
provider=self.provider.integration_key, external_id=self.external_id
).exists()
def test_with_expect_exists(self, *args) -> None:
old_integration = self.create_provider_integration(
provider=self.provider.key, external_id=self.external_id, name="Tester"
)
self.pipeline.state.data = {"expect_exists": True, "external_id": self.external_id}
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
integration = Integration.objects.get(
provider=self.provider.key, external_id=self.external_id
)
assert integration.name == old_integration.name
assert OrganizationIntegration.objects.filter(
organization_id=self.organization.id, integration_id=integration.id
).exists()
def test_expect_exists_does_not_update(self, *args) -> None:
old_integration = self.create_provider_integration(
provider=self.provider.key,
external_id=self.external_id,
name="Tester",
metadata={"url": "https://example.com"},
)
self.pipeline.state.data = {
"expect_exists": True,
"external_id": self.external_id,
"name": "Should Not Update",
"metadata": {"url": "https://wrong.com"},
}
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
integration = Integration.objects.get(
provider=self.provider.key, external_id=self.external_id
)
assert integration.name == old_integration.name
assert integration.metadata == old_integration.metadata
assert OrganizationIntegration.objects.filter(
organization_id=self.organization.id, integration_id=integration.id
).exists()
def test_with_default_id(self, *args) -> None:
self.provider.needs_default_identity = True
data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
"user_identity": {
"type": "plugin",
"external_id": "AccountId",
"scopes": [],
"data": {
"access_token": "token12345",
"expires_in": "123456789",
"refresh_token": "refresh12345",
"token_type": "typetype",
},
},
}
self.pipeline.state.data = data
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
integration = Integration.objects.get(
provider=self.provider.key, external_id=self.external_id
)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=integration.id
)
assert org_integration.default_auth_id is not None
assert Identity.objects.filter(id=org_integration.default_auth_id).exists()
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
def test_default_identity_does_update(self, mock_record, *args) -> None:
self.provider.needs_default_identity = True
old_identity_id = 234567
integration = self.create_provider_integration(
provider=self.provider.key,
external_id=self.external_id,
metadata={"url": "https://example.com"},
)
self.create_organization_integration(
organization_id=self.organization.id,
integration=integration,
default_auth_id=old_identity_id,
)
self.pipeline.state.data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
"user_identity": {
"type": "plugin",
"external_id": "AccountId",
"scopes": [],
"data": {
"access_token": "token12345",
"expires_in": "123456789",
"refresh_token": "refresh12345",
"token_type": "typetype",
},
},
}
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=integration.id
)
identity = Identity.objects.get(external_id="AccountId")
assert org_integration.default_auth_id == identity.id
# SLO assertions
assert_success_metric(mock_record)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=1
)
def test_existing_identity_becomes_default_auth_on_new_orgintegration(self, *args) -> None:
# The reinstall flow will result in an existing identity provider, identity
# and integration records. Ensure that the new organizationintegration gets
# a default_auth_id set.
self.provider.needs_default_identity = True
integration = self.create_provider_integration(
provider=self.provider.key,
external_id=self.external_id,
metadata={"url": "https://example.com"},
)
identity_provider = self.create_identity_provider(
external_id=self.external_id, type="plugin"
)
identity = Identity.objects.create(
idp_id=identity_provider.id, external_id="AccountId", user_id=self.user.id
)
self.pipeline.state.data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
"user_identity": {
"type": "plugin",
"external_id": "AccountId",
"scopes": [],
"data": {
"access_token": "token12345",
"expires_in": "123456789",
"refresh_token": "refresh12345",
"token_type": "typetype",
},
},
}
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
org_integration = OrganizationIntegration.objects.get(
organization_id=self.organization.id, integration_id=integration.id
)
assert org_integration.default_auth_id == identity.id
def test_new_external_id_same_user(self, *args) -> None:
# we need to make sure any other org_integrations have the same
# identity that we use for the new one
self.provider.needs_default_identity = True
integration = self.create_provider_integration(
provider=self.provider.key,
external_id=self.external_id,
metadata={"url": "https://example.com"},
)
identity_provider = self.create_identity_provider(
external_id=self.external_id, type="plugin"
)
identity = Identity.objects.create(
idp_id=identity_provider.id, external_id="AccountId", user_id=self.user.id
)
org2 = self.create_organization(owner=self.user)
integration.add_organization(org2, default_auth_id=identity.id)
self.pipeline.state.data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
"user_identity": {
"type": "plugin",
"external_id": "new_external_id",
"scopes": [],
"data": {
"access_token": "token12345",
"expires_in": "123456789",
"refresh_token": "refresh12345",
"token_type": "typetype",
},
},
}
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
org_integrations = OrganizationIntegration.objects.filter(integration_id=integration.id)
identity = Identity.objects.get(idp_id=identity_provider.id, external_id="new_external_id")
for org_integration in org_integrations:
assert org_integration.default_auth_id == identity.id
def test_different_user_same_external_id_no_default_needed(self, *args) -> None:
new_user = self.create_user()
integration = self.create_provider_integration(
provider=self.provider.key,
external_id=self.external_id,
metadata={"url": "https://example.com"},
)
identity_provider = self.create_identity_provider(
external_id=self.external_id, type=self.provider.key
)
Identity.objects.create(
idp_id=identity_provider.id, external_id="AccountId", user_id=new_user.id
)
self.pipeline.state.data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
"user_identity": {
"type": self.provider.key,
"external_id": "AccountId",
"scopes": [],
"data": {},
},
}
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
assert OrganizationIntegration.objects.filter(
integration_id=integration.id, organization_id=self.organization.id
).exists()
@patch("sentry.plugins.migrator.Migrator.run")
def test_disabled_plugin_when_fully_migrated(self, run, *args) -> None:
with assume_test_silo_mode(SiloMode.REGION):
Repository.objects.create(
organization_id=self.organization.id,
name="user/repo",
url="https://example.org/user/repo",
provider=self.provider.key,
external_id=self.external_id,
)
self.pipeline.state.data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
}
self.pipeline.finish_pipeline()
assert run.called
@patch("sentry.integrations.pipeline.logger")
def test_disallow_with_no_permission(self, mock_logger, *args) -> None:
member_user = self.create_user()
self.create_member(user=member_user, organization=self.organization, role="member")
self.login_as(member_user)
# partially copied from IntegrationTestCase.setUp()
# except the user is not an owner
with assume_test_silo_mode(SiloMode.REGION):
rpc_organization = serialize_rpc_organization(self.organization)
self.request = self.make_request(member_user)
self.pipeline = IntegrationPipeline(
request=self.request,
organization=rpc_organization,
provider_key=self.provider.key,
)
self.pipeline.initialize()
self.save_session()
data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
}
self.pipeline.state.data = data
# attempt to finish pipeline with no 'org:integrations' scope
resp = self.pipeline.finish_pipeline()
assert isinstance(resp, HttpResponse)
assert (
"You must be an organization owner, manager or admin to install this integration."
in resp.content.decode()
)
extra = {
"error_message": "You must be an organization owner, manager or admin to install this integration.",
"organization_id": self.organization.id,
"user_id": member_user.id,
"provider_key": "example",
}
mock_logger.info.assert_called_with("build-integration.permission_error", extra=extra)
def test_allow_with_superuser(self, *args) -> None:
member_user = self.create_user(is_superuser=True)
self.create_member(user=member_user, organization=self.organization, role="member")
self.login_as(member_user, superuser=True)
with assume_test_silo_mode(SiloMode.REGION):
rpc_organization = serialize_rpc_organization(self.organization)
self.request = self.make_request(member_user, is_superuser=True)
self.pipeline = IntegrationPipeline(
request=self.request,
organization=rpc_organization,
provider_key=self.provider.key,
)
self.pipeline.initialize()
self.save_session()
data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
}
self.pipeline.state.data = data
# should be allowed to install integration because of superuser
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
@override_options({"superuser.read-write.ga-rollout": True})
def test_allow_with_superuser_su_split(self, *args) -> None:
member_user = self.create_user(is_superuser=True)
self.create_member(user=member_user, organization=self.organization, role="member")
self.login_as(member_user, superuser=True)
with assume_test_silo_mode(SiloMode.REGION):
rpc_organization = serialize_rpc_organization(self.organization)
self.request = self.make_request(member_user, is_superuser=True)
self.pipeline = IntegrationPipeline(
request=self.request,
organization=rpc_organization,
provider_key=self.provider.key,
)
self.pipeline.initialize()
self.save_session()
data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
}
self.pipeline.state.data = data
# should be allowed to install integration because of superuser
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
@patch("sentry.integrations.utils.metrics.EventLifecycle.record_event")
@patch("sentry.integrations.pipeline.logger")
def test_disallow_with_removed_membership(self, mock_logger, mock_record, *args) -> None:
member_user = self.create_user()
om = self.create_member(user=member_user, organization=self.organization, role="manager")
self.login_as(member_user)
# partially copied from IntegrationTestCase.setUp()
# except the user is not an owner
with assume_test_silo_mode(SiloMode.REGION):
rpc_organization = serialize_rpc_organization(self.organization)
self.request = self.make_request(member_user)
self.pipeline = IntegrationPipeline(
request=self.request,
organization=rpc_organization,
provider_key=self.provider.key,
)
self.pipeline.initialize()
self.save_session()
data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
}
self.pipeline.state.data = data
with outbox_runner(), assume_test_silo_mode_of(OrganizationMember):
om.delete()
# attempt to finish pipeline without org membership
resp = self.pipeline.finish_pipeline()
assert isinstance(resp, HttpResponse)
assert (
"You must be an organization owner, manager or admin to install this integration."
in resp.content.decode()
)
extra = {
"error_message": "You must be an organization owner, manager or admin to install this integration.",
"organization_id": self.organization.id,
"user_id": member_user.id,
"provider_key": "example",
}
mock_logger.info.assert_called_with("build-integration.permission_error", extra=extra)
# SLO assertions
assert_success_metric(mock_record)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.STARTED, outcome_count=1
)
assert_count_of_metric(
mock_record=mock_record, outcome=EventLifecycleOutcome.SUCCESS, outcome_count=1
)
@with_feature("organizations:update-action-status")
def test_enable_actions_called_on_successful_install(self, *args) -> None:
"""Test that actions are enabled when integration is successfully installed."""
org = self.create_organization()
integration, _ = self.create_provider_integration_for(
org, self.user, provider="slack", name="Test Integration"
)
# Create a second integration to ensure that actions are not enabled for it
integration2, _ = self.create_provider_integration_for(
org, self.user, provider="slack", name="Test Integration 2", external_id="123456"
)
# Create a data condition group
condition_group = self.create_data_condition_group(organization=org)
# Create an action linked to this integration
action = self.create_action(
type=Action.Type.SLACK,
integration_id=integration.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration",
},
)
# Create an action linked to the second integration
action2 = self.create_action(
type=Action.Type.SLACK,
integration_id=integration2.id,
config={
"target_type": ActionTarget.SPECIFIC,
"target_identifier": "123",
"target_display": "Test Integration 2",
},
status=ObjectStatus.DISABLED,
)
# Link action to condition group
self.create_data_condition_group_action(condition_group=condition_group, action=action)
self.create_data_condition_group_action(condition_group=condition_group, action=action2)
data = {
"external_id": self.external_id,
"name": "Name",
"metadata": {"url": "https://example.com"},
}
self.pipeline.state.data = data
resp = self.pipeline.finish_pipeline()
self.assertDialogSuccess(resp)
with assume_test_silo_mode(SiloMode.REGION):
action = Action.objects.get(id=action.id)
assert action.status == ObjectStatus.ACTIVE
# Ensure that the second action is still disabled
assert action2.status == ObjectStatus.DISABLED
@control_silo_test
@patch(
"sentry.integrations.gitlab.integration.GitlabIntegrationProvider.build_integration",
side_effect=naive_build_integration,
)
| FinishPipelineTestCase |
python | doocs__leetcode | solution/0800-0899/0886.Possible Bipartition/Solution2.py | {
"start": 0,
"end": 562
} | class ____:
def possibleBipartition(self, n: int, dislikes: List[List[int]]) -> bool:
def find(x):
if p[x] != x:
p[x] = find(p[x])
return p[x]
g = defaultdict(list)
for a, b in dislikes:
a, b = a - 1, b - 1
g[a].append(b)
g[b].append(a)
p = list(range(n))
for i in range(n):
for j in g[i]:
if find(i) == find(j):
return False
p[find(j)] = find(g[i][0])
return True
| Solution |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 25313,
"end": 25462
} | class ____(RunInput):
"""Represents constant input value to a task run."""
input_type: Literal["constant"] = "constant"
type: str
| Constant |
python | coleifer__peewee | tests/sqlite.py | {
"start": 3048,
"end": 3121
} | class ____(TestModel):
key = TextField()
data = JSONBField()
| JBData |
python | getsentry__sentry-python | sentry_sdk/integrations/google_genai/__init__.py | {
"start": 889,
"end": 14232
} | class ____(Integration):
identifier = IDENTIFIER
origin = ORIGIN
def __init__(self, include_prompts=True):
# type: (GoogleGenAIIntegration, bool) -> None
self.include_prompts = include_prompts
@staticmethod
def setup_once():
# type: () -> None
# Patch sync methods
Models.generate_content = _wrap_generate_content(Models.generate_content)
Models.generate_content_stream = _wrap_generate_content_stream(
Models.generate_content_stream
)
Models.embed_content = _wrap_embed_content(Models.embed_content)
# Patch async methods
AsyncModels.generate_content = _wrap_async_generate_content(
AsyncModels.generate_content
)
AsyncModels.generate_content_stream = _wrap_async_generate_content_stream(
AsyncModels.generate_content_stream
)
AsyncModels.embed_content = _wrap_async_embed_content(AsyncModels.embed_content)
def _wrap_generate_content_stream(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
@wraps(f)
def new_generate_content_stream(self, *args, **kwargs):
# type: (Any, Any, Any) -> Any
integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
if integration is None:
return f(self, *args, **kwargs)
_model, contents, model_name = prepare_generate_content_args(args, kwargs)
span = get_start_span_function()(
op=OP.GEN_AI_INVOKE_AGENT,
name="invoke_agent",
origin=ORIGIN,
)
span.__enter__()
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
set_span_data_for_request(span, integration, model_name, contents, kwargs)
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
chat_span = sentry_sdk.start_span(
op=OP.GEN_AI_CHAT,
name=f"chat {model_name}",
origin=ORIGIN,
)
chat_span.__enter__()
chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
try:
stream = f(self, *args, **kwargs)
# Create wrapper iterator to accumulate responses
def new_iterator():
# type: () -> Iterator[Any]
chunks = [] # type: List[Any]
try:
for chunk in stream:
chunks.append(chunk)
yield chunk
except Exception as exc:
_capture_exception(exc)
chat_span.set_status(SPANSTATUS.INTERNAL_ERROR)
raise
finally:
# Accumulate all chunks and set final response data on spans
if chunks:
accumulated_response = accumulate_streaming_response(chunks)
set_span_data_for_streaming_response(
chat_span, integration, accumulated_response
)
set_span_data_for_streaming_response(
span, integration, accumulated_response
)
chat_span.__exit__(None, None, None)
span.__exit__(None, None, None)
return new_iterator()
except Exception as exc:
_capture_exception(exc)
chat_span.__exit__(None, None, None)
span.__exit__(None, None, None)
raise
return new_generate_content_stream
def _wrap_async_generate_content_stream(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
@wraps(f)
async def new_async_generate_content_stream(self, *args, **kwargs):
# type: (Any, Any, Any) -> Any
integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
if integration is None:
return await f(self, *args, **kwargs)
_model, contents, model_name = prepare_generate_content_args(args, kwargs)
span = get_start_span_function()(
op=OP.GEN_AI_INVOKE_AGENT,
name="invoke_agent",
origin=ORIGIN,
)
span.__enter__()
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
set_span_data_for_request(span, integration, model_name, contents, kwargs)
span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
chat_span = sentry_sdk.start_span(
op=OP.GEN_AI_CHAT,
name=f"chat {model_name}",
origin=ORIGIN,
)
chat_span.__enter__()
chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
set_span_data_for_request(chat_span, integration, model_name, contents, kwargs)
chat_span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True)
chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
try:
stream = await f(self, *args, **kwargs)
# Create wrapper async iterator to accumulate responses
async def new_async_iterator():
# type: () -> AsyncIterator[Any]
chunks = [] # type: List[Any]
try:
async for chunk in stream:
chunks.append(chunk)
yield chunk
except Exception as exc:
_capture_exception(exc)
chat_span.set_status(SPANSTATUS.INTERNAL_ERROR)
raise
finally:
# Accumulate all chunks and set final response data on spans
if chunks:
accumulated_response = accumulate_streaming_response(chunks)
set_span_data_for_streaming_response(
chat_span, integration, accumulated_response
)
set_span_data_for_streaming_response(
span, integration, accumulated_response
)
chat_span.__exit__(None, None, None)
span.__exit__(None, None, None)
return new_async_iterator()
except Exception as exc:
_capture_exception(exc)
chat_span.__exit__(None, None, None)
span.__exit__(None, None, None)
raise
return new_async_generate_content_stream
def _wrap_generate_content(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
@wraps(f)
def new_generate_content(self, *args, **kwargs):
# type: (Any, Any, Any) -> Any
integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
if integration is None:
return f(self, *args, **kwargs)
model, contents, model_name = prepare_generate_content_args(args, kwargs)
with get_start_span_function()(
op=OP.GEN_AI_INVOKE_AGENT,
name="invoke_agent",
origin=ORIGIN,
) as span:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
set_span_data_for_request(span, integration, model_name, contents, kwargs)
with sentry_sdk.start_span(
op=OP.GEN_AI_CHAT,
name=f"chat {model_name}",
origin=ORIGIN,
) as chat_span:
chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
chat_span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
set_span_data_for_request(
chat_span, integration, model_name, contents, kwargs
)
try:
response = f(self, *args, **kwargs)
except Exception as exc:
_capture_exception(exc)
chat_span.set_status(SPANSTATUS.INTERNAL_ERROR)
raise
set_span_data_for_response(chat_span, integration, response)
set_span_data_for_response(span, integration, response)
return response
return new_generate_content
def _wrap_async_generate_content(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
@wraps(f)
async def new_async_generate_content(self, *args, **kwargs):
# type: (Any, Any, Any) -> Any
integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
if integration is None:
return await f(self, *args, **kwargs)
model, contents, model_name = prepare_generate_content_args(args, kwargs)
with get_start_span_function()(
op=OP.GEN_AI_INVOKE_AGENT,
name="invoke_agent",
origin=ORIGIN,
) as span:
span.set_data(SPANDATA.GEN_AI_AGENT_NAME, model_name)
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent")
set_span_data_for_request(span, integration, model_name, contents, kwargs)
with sentry_sdk.start_span(
op=OP.GEN_AI_CHAT,
name=f"chat {model_name}",
origin=ORIGIN,
) as chat_span:
chat_span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat")
chat_span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
chat_span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
set_span_data_for_request(
chat_span, integration, model_name, contents, kwargs
)
try:
response = await f(self, *args, **kwargs)
except Exception as exc:
_capture_exception(exc)
chat_span.set_status(SPANSTATUS.INTERNAL_ERROR)
raise
set_span_data_for_response(chat_span, integration, response)
set_span_data_for_response(span, integration, response)
return response
return new_async_generate_content
def _wrap_embed_content(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
@wraps(f)
def new_embed_content(self, *args, **kwargs):
# type: (Any, Any, Any) -> Any
integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
if integration is None:
return f(self, *args, **kwargs)
model_name, contents = prepare_embed_content_args(args, kwargs)
with sentry_sdk.start_span(
op=OP.GEN_AI_EMBEDDINGS,
name=f"embeddings {model_name}",
origin=ORIGIN,
) as span:
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "embeddings")
span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
set_span_data_for_embed_request(span, integration, contents, kwargs)
try:
response = f(self, *args, **kwargs)
except Exception as exc:
_capture_exception(exc)
span.set_status(SPANSTATUS.INTERNAL_ERROR)
raise
set_span_data_for_embed_response(span, integration, response)
return response
return new_embed_content
def _wrap_async_embed_content(f):
# type: (Callable[..., Any]) -> Callable[..., Any]
@wraps(f)
async def new_async_embed_content(self, *args, **kwargs):
# type: (Any, Any, Any) -> Any
integration = sentry_sdk.get_client().get_integration(GoogleGenAIIntegration)
if integration is None:
return await f(self, *args, **kwargs)
model_name, contents = prepare_embed_content_args(args, kwargs)
with sentry_sdk.start_span(
op=OP.GEN_AI_EMBEDDINGS,
name=f"embeddings {model_name}",
origin=ORIGIN,
) as span:
span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "embeddings")
span.set_data(SPANDATA.GEN_AI_SYSTEM, GEN_AI_SYSTEM)
span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name)
set_span_data_for_embed_request(span, integration, contents, kwargs)
try:
response = await f(self, *args, **kwargs)
except Exception as exc:
_capture_exception(exc)
span.set_status(SPANSTATUS.INTERNAL_ERROR)
raise
set_span_data_for_embed_response(span, integration, response)
return response
return new_async_embed_content
| GoogleGenAIIntegration |
python | keon__algorithms | tests/test_backtrack.py | {
"start": 2878,
"end": 3534
} | class ____(unittest.TestCase):
def check_sum(self, nums, target):
if sum(nums) == target:
return (True, nums)
else:
return (False, nums)
def test_combination_sum(self):
candidates1 = [2, 3, 6, 7]
target1 = 7
answer1 = [
[2, 2, 3],
[7]
]
self.assertEqual(combination_sum(candidates1, target1), answer1)
candidates2 = [2, 3, 5]
target2 = 8
answer2 = [
[2, 2, 2, 2],
[2, 3, 3],
[3, 5]
]
self.assertEqual(combination_sum(candidates2, target2), answer2)
| TestCombinationSum |
python | hyperopt__hyperopt | hyperopt/tests/unit/test_rdists.py | {
"start": 6512,
"end": 7645
} | class ____(unittest.TestCase):
def test_smallq(self):
mu, sigma, q = (0, 1, 0.1)
qn = qnormal_gen(mu, sigma, q)
check_d_samples(qn, n=10000)
def test_bigq(self):
mu, sigma, q = (-20, 4, 3)
qn = qnormal_gen(mu, sigma, q)
check_d_samples(qn, n=10000)
def test_offgrid_int(self):
qn = qnormal_gen(0, 1, 2)
assert qn.pmf(0) > 0.0
assert qn.pmf(1) == 0.0
assert qn.pmf(2) > 0.0
def test_offgrid_float(self):
qn = qnormal_gen(0, 1, 0.2)
assert qn.pmf(0) > 0.0
assert qn.pmf(0.1) == 0.0
assert qn.pmf(0.2) > 0.0
assert qn.pmf(0.4) > 0.0
assert qn.pmf(-0.2) > 0.0
assert qn.pmf(-0.4) > 0.0
assert qn.pmf(0.99) == 0.0
assert qn.pmf(-0.99) == 0.0
def test_numeric(self):
qn = qnormal_gen(0, 1, 1)
assert qn.pmf(500) > -np.inf
def test_output_type_int(self):
result = qnormal_gen(0, 10, 1).rvs()
assert int == type(result)
def test_output_type_float(self):
assert float == type(qnormal_gen(0, 10, 1.0).rvs())
| TestQNormal |
python | HIPS__autograd | examples/convnet.py | {
"start": 3202,
"end": 4033
} | class ____:
def __init__(self, pool_shape):
self.pool_shape = pool_shape
def build_weights_dict(self, input_shape):
# input_shape dimensions: [color, y, x]
output_shape = list(input_shape)
for i in [0, 1]:
assert input_shape[i + 1] % self.pool_shape[i] == 0, "maxpool shape should tile input exactly"
output_shape[i + 1] = input_shape[i + 1] / self.pool_shape[i]
return 0, output_shape
def forward_pass(self, inputs, param_vector):
new_shape = inputs.shape[:2]
for i in [0, 1]:
pool_width = self.pool_shape[i]
img_width = inputs.shape[i + 2]
new_shape += (img_width // pool_width, pool_width)
result = inputs.reshape(new_shape)
return np.max(np.max(result, axis=3), axis=4)
| maxpool_layer |
python | encode__django-rest-framework | rest_framework/mixins.py | {
"start": 2613,
"end": 2936
} | class ____:
"""
Destroy a model instance.
"""
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return Response(status=status.HTTP_204_NO_CONTENT)
def perform_destroy(self, instance):
instance.delete()
| DestroyModelMixin |
python | pytorch__pytorch | test/dynamo/test_dicts.py | {
"start": 51279,
"end": 53637
} | class ____(DictMethodsTests):
thetype = OrderedDict
# Methods:
# - popitem - Inherited from DictMethodsTest
# + move_to_end
@make_dynamo_test
def test_move_to_end(self):
d = self.thetype.fromkeys("abcde")
self.assertEqual("".join(d), "abcde")
d.move_to_end("b")
self.assertEqual("".join(d), "acdeb")
# Test OrderedDict.move_to_end
self.thetype.move_to_end(d, "a")
self.assertEqual("".join(d), "cdeba")
# Test last=False
self.thetype.move_to_end(d, "a", last=False)
self.assertEqual("".join(d), "acdeb")
# Test KeyError
self.assertRaises(KeyError, d.move_to_end, "f")
def test_cmp_eq_order(self):
a = self.thetype.fromkeys("abc")
b = self.thetype.fromkeys("bca")
self.assertFalse(a == b)
@make_dynamo_test
def test_binop_or_return_type(self):
d1 = self.thetype({"a": 1, "b": 2})
d2 = self.thetype({"b": 3, "c": 4})
# Test return type
self.assertIs(type(d1 | d2), OrderedDict)
self.assertIs(type(dict(d1) | d2), OrderedDict)
self.assertIs(type(d1 | dict(d2)), OrderedDict)
@make_dynamo_test
def test_binop_ior_return_type(self):
d1 = self.thetype({"a": 1, "b": 2})
d2 = self.thetype({"b": 3, "c": 4})
# Test return type
d3, d4 = d1.copy(), d2.copy()
self.assertIs(type(d3.__ior__(d2)), OrderedDict)
self.assertIs(type(dict.__ior__(d4, d2)), OrderedDict)
self.assertIs(type(self.thetype.__ior__(d4, d2)), OrderedDict)
d3, d4 = d1.copy(), d2.copy()
self.assertIs(type(dict.__ior__(d3, dict(d2))), OrderedDict)
self.assertIs(type(dict.__ior__(dict(d3), d2)), dict)
self.assertIs(type(dict(d4).__ior__(d2)), dict)
@make_dynamo_test
def test_popitem_kwarg(self):
d = self.thetype.fromkeys("abcdf")
self.assertEqual(d.popitem(last=True), ("f", None))
self.assertEqual(list(d), list("abcd"))
self.assertEqual(d.popitem(last=False), ("a", None))
self.assertEqual(list(d), list("bcd"))
self.assertEqual(d.popitem(False), ("b", None))
self.assertEqual(list(d), list("cd"))
self.assertEqual(d.popitem(True), ("d", None))
self.assertEqual(list(d), list("c"))
| OrderedDictMethodsTests |
python | PyCQA__pylint | doc/data/messages/r/redundant-unittest-assert/bad.py | {
"start": 18,
"end": 146
} | class ____(unittest.TestCase):
def test_dummy(self):
self.assertTrue("foo") # [redundant-unittest-assert]
| DummyTestCase |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1214846,
"end": 1215774
} | class ____(Sort):
"""
SortByEncoding schema wrapper.
Parameters
----------
encoding : :class:`SortByChannel`, Literal['x', 'y', 'color', 'fill', 'stroke', 'strokeWidth', 'size', 'shape', 'fillOpacity', 'strokeOpacity', 'opacity', 'text']
The `encoding channel
<https://vega.github.io/vega-lite/docs/encoding.html#channels>`__ to sort by (e.g.,
``"x"``, ``"y"``)
order : :class:`SortOrder`, Literal['ascending', 'descending'], None
The sort order. One of ``"ascending"`` (default), ``"descending"``, or ``null`` (do
not sort).
"""
_schema = {"$ref": "#/definitions/SortByEncoding"}
def __init__(
self,
encoding: Optional[SchemaBase | SortByChannel_T] = Undefined,
order: Optional[SchemaBase | SortOrder_T | None] = Undefined,
**kwds,
):
super().__init__(encoding=encoding, order=order, **kwds)
| SortByEncoding |
python | apache__airflow | providers/cncf/kubernetes/tests/unit/cncf/kubernetes/utils/test_pod_manager.py | {
"start": 33859,
"end": 43511
} | class ____:
@pytest.fixture
def mock_log_info(self):
with mock.patch.object(self.async_pod_manager.log, "info") as mock_log_info:
yield mock_log_info
def setup_method(self):
self.mock_async_hook = mock.AsyncMock()
self.async_pod_manager = AsyncPodManager(
async_hook=self.mock_async_hook,
callbacks=[],
)
@pytest.mark.asyncio
async def test_start_pod_raises_informative_error_on_scheduled_timeout(self):
pod_response = mock.MagicMock()
pod_response.status.phase = "Pending"
self.mock_async_hook.get_pod.return_value = pod_response
expected_msg = "Pod took too long to be scheduled on the cluster, giving up. More than 0s. Check the pod events in kubernetes."
mock_pod = mock.MagicMock()
with pytest.raises(AirflowException, match=expected_msg):
await self.async_pod_manager.await_pod_start(
pod=mock_pod,
schedule_timeout=0,
startup_timeout=0,
)
self.mock_async_hook.get_pod.assert_called()
@pytest.mark.asyncio
async def test_start_pod_raises_informative_error_on_startup_timeout(self):
pod_response = mock.MagicMock()
pod_response.status.phase = "Pending"
condition = mock.MagicMock()
condition.type = "PodScheduled"
condition.status = "True"
pod_response.status.conditions = [condition]
self.mock_async_hook.get_pod.return_value = pod_response
expected_msg = "Pod took too long to start. More than 0s. Check the pod events in kubernetes."
mock_pod = mock.MagicMock()
with pytest.raises(AirflowException, match=expected_msg):
await self.async_pod_manager.await_pod_start(
pod=mock_pod,
schedule_timeout=0,
startup_timeout=0,
)
self.mock_async_hook.get_pod.assert_called()
@pytest.mark.asyncio
async def test_start_pod_raises_fast_error_on_image_error(self):
pod_response = mock.MagicMock()
pod_response.status.phase = "Pending"
container_status = mock.MagicMock()
waiting_state = mock.MagicMock()
waiting_state.reason = "ErrImagePull"
waiting_state.message = "Test error"
container_status.state.waiting = waiting_state
pod_response.status.container_statuses = [container_status]
self.mock_async_hook.get_pod.return_value = pod_response
expected_msg = f"Pod docker image cannot be pulled, unable to start: {waiting_state.reason}\n{waiting_state.message}"
mock_pod = mock.MagicMock()
with pytest.raises(AirflowException, match=expected_msg):
await self.async_pod_manager.await_pod_start(
pod=mock_pod,
schedule_timeout=60,
startup_timeout=60,
)
self.mock_async_hook.get_pod.assert_called()
@pytest.mark.asyncio
@mock.patch("asyncio.sleep", new_callable=mock.AsyncMock)
async def test_start_pod_startup_interval_seconds(self, mock_time_sleep, mock_log_info):
condition_scheduled = mock.MagicMock()
condition_scheduled.type = "PodScheduled"
condition_scheduled.status = "True"
pod_info_pending = mock.MagicMock()
pod_info_pending.status.phase = PodPhase.PENDING
pod_info_pending.status.conditions = []
pod_info_pending_scheduled = mock.MagicMock()
pod_info_pending_scheduled.status.phase = PodPhase.PENDING
pod_info_pending_scheduled.status.conditions = [condition_scheduled]
pod_info_succeeded = mock.MagicMock()
pod_info_succeeded.status.phase = PodPhase.SUCCEEDED
# Simulate sequence of pod states
self.mock_async_hook.get_pod.side_effect = [
pod_info_pending,
pod_info_pending_scheduled,
pod_info_pending_scheduled,
pod_info_succeeded,
]
startup_check_interval = 10
schedule_timeout = 30
startup_timeout = 60
mock_pod = mock.MagicMock()
await self.async_pod_manager.await_pod_start(
pod=mock_pod,
schedule_timeout=schedule_timeout,
startup_timeout=startup_timeout,
check_interval=startup_check_interval,
)
assert mock_time_sleep.call_count == 3
mock_log_info.assert_any_call(
"::group::Waiting until %ss to get the POD scheduled...", schedule_timeout
)
mock_log_info.assert_any_call("Waiting %ss to get the POD running...", startup_timeout)
assert self.async_pod_manager.stop_watching_events is True
@pytest.mark.asyncio
@mock.patch("asyncio.sleep", new_callable=mock.AsyncMock)
async def test_watch_pod_events(self, mock_time_sleep, mock_log_info):
mock_pod = mock.MagicMock()
mock_pod.metadata.name = "test-pod"
mock_pod.metadata.namespace = "default"
events = mock.MagicMock()
events.items = []
for id in ["event 1", "event 2"]:
event = mock.MagicMock()
event.message = f"test {id}"
event.involved_object.field_path = f"object {id}"
events.items.append(event)
startup_check_interval = 10
def get_pod_events_side_effect(name, namespace):
self.async_pod_manager.stop_watching_events = True
return events
self.mock_async_hook.get_pod_events.side_effect = get_pod_events_side_effect
await self.async_pod_manager.watch_pod_events(pod=mock_pod, check_interval=startup_check_interval)
mock_log_info.assert_any_call("The Pod has an Event: %s from %s", "test event 1", "object event 1")
mock_log_info.assert_any_call("The Pod has an Event: %s from %s", "test event 2", "object event 2")
mock_time_sleep.assert_called_once_with(startup_check_interval)
@pytest.mark.asyncio
@pytest.mark.parametrize(
("log_lines", "now", "expected_log_messages", "not_expected_log_messages"),
[
# Case 1: No logs
([], pendulum.now(), [], []),
# Case 2: One log line with timestamp before now
(
[f"{pendulum.now().subtract(seconds=2).to_iso8601_string()} message"],
pendulum.now(),
["message"],
[],
),
# Case 3: Log line with timestamp equal to now (should be skipped, so last_time is None)
([f"{pendulum.now().to_iso8601_string()} message"], pendulum.now(), [], ["message"]),
# Case 4: Multiple log lines, last before now
(
[
f"{pendulum.now().subtract(seconds=3).to_iso8601_string()} msg1",
f"{pendulum.now().subtract(seconds=2).to_iso8601_string()} msg2",
],
pendulum.now(),
["msg1", "msg2"],
[],
),
# Case 5: Log lines with continuation (no timestamp)
(
[
f"{pendulum.now().subtract(seconds=2).to_iso8601_string()} msg1",
"continued line",
],
pendulum.now(),
["msg1\ncontinued line"],
[],
),
# Case 6: Log lines with continuation (no timestamp)
(
[
f"{pendulum.now().subtract(seconds=2).to_iso8601_string()} msg1",
f"{pendulum.now().to_iso8601_string()} msg2",
],
pendulum.now(),
["msg1"],
["msg2"],
),
],
)
async def test_fetch_container_logs_before_current_sec_various_logs(
self, log_lines, now, expected_log_messages, not_expected_log_messages
):
pod = mock.MagicMock()
container_name = "base"
since_time = now.subtract(minutes=1)
mock_async_hook = mock.AsyncMock()
mock_async_hook.read_logs.return_value = log_lines
with mock.patch("airflow.providers.cncf.kubernetes.utils.pod_manager.pendulum.now", return_value=now):
async_pod_manager = AsyncPodManager(
async_hook=mock_async_hook,
callbacks=[],
)
with mock.patch.object(async_pod_manager.log, "info") as mock_log_info:
result = await async_pod_manager.fetch_container_logs_before_current_sec(
pod=pod, container_name=container_name, since_time=since_time
)
assert result == now
for expected in expected_log_messages:
mock_log_info.assert_any_call("[%s] %s", container_name, expected)
for not_expected in not_expected_log_messages:
unexpected_call = mock.call("[%s] %s", container_name, not_expected)
assert unexpected_call not in mock_log_info.mock_calls
@pytest.mark.asyncio
async def test_fetch_container_logs_before_current_sec_error_handling(self):
pod = mock.MagicMock()
container_name = "base"
since_time = pendulum.now().subtract(minutes=1)
async def fake_read_logs(**kwargs):
raise KubernetesApiError("error")
self.async_pod_manager._hook.read_logs = fake_read_logs
with pytest.raises(KubernetesApiError):
await self.async_pod_manager.fetch_container_logs_before_current_sec(
pod=pod, container_name=container_name, since_time=since_time
)
| TestAsyncPodManager |
python | jazzband__django-waffle | waffle/mixins.py | {
"start": 172,
"end": 462
} | class ____:
def validate_waffle(self, waffle, func):
if waffle.startswith('!'):
active = not func(waffle[1:])
else:
active = func(waffle)
return active
def invalid_waffle(self):
raise Http404('Inactive waffle')
| BaseWaffleMixin |
python | fastapi__sqlmodel | docs_src/tutorial/one/tutorial001_py310.py | {
"start": 71,
"end": 1603
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
sqlite_file_name = "database.db"
sqlite_url = f"sqlite:///{sqlite_file_name}"
engine = create_engine(sqlite_url, echo=True)
def create_db_and_tables():
SQLModel.metadata.create_all(engine)
def create_heroes():
hero_1 = Hero(name="Deadpond", secret_name="Dive Wilson")
hero_2 = Hero(name="Spider-Boy", secret_name="Pedro Parqueador")
hero_3 = Hero(name="Rusty-Man", secret_name="Tommy Sharp", age=48)
hero_4 = Hero(name="Tarantula", secret_name="Natalia Roman-on", age=32)
hero_5 = Hero(name="Black Lion", secret_name="Trevor Challa", age=35)
hero_6 = Hero(name="Dr. Weird", secret_name="Steve Weird", age=36)
hero_7 = Hero(name="Captain North America", secret_name="Esteban Rogelios", age=93)
with Session(engine) as session:
session.add(hero_1)
session.add(hero_2)
session.add(hero_3)
session.add(hero_4)
session.add(hero_5)
session.add(hero_6)
session.add(hero_7)
session.commit()
def select_heroes():
with Session(engine) as session:
statement = select(Hero).where(Hero.age <= 35)
results = session.exec(statement)
hero = results.first()
print("Hero:", hero)
def main():
create_db_and_tables()
create_heroes()
select_heroes()
if __name__ == "__main__":
main()
| Hero |
python | pyodide__pyodide | tools/check_documented_functions.py | {
"start": 274,
"end": 2223
} | class ____:
srcdir = DOCS_DIR / "_build/html/"
config = None
def check_list():
inv = fetch_inventory(App, "https://example.com", "objects.inv")
res = []
for category, entries in inv.items():
if entries is None:
continue
if not category.startswith("js"):
continue
res.append(category)
res.extend(f" {key}" for key in entries.keys())
res.append("")
return res
def update_expected_js_docs():
EXPECTED_DOCS_FILE.write_text("\n".join(check_list()))
def check_expected_js_docs():
expected_lines = EXPECTED_DOCS_FILE.read_text().splitlines()
new_lines = check_list()
new_lines.pop()
diffs = list(
difflib.unified_diff(
expected_lines,
new_lines,
fromfile="old expected_js_docs.txt",
tofile="new expected_js_docs.txt",
)
)
if not diffs:
print("No changes")
return 0
print(
"Set of documented APIs changed. If this is intended, run ./tools/check_documented_functions.py --update"
)
for l in diffs:
print(l)
return 1
def parse_args():
parser = argparse.ArgumentParser(
description="Compare the set of documented JS APIs to the expected set or update the expected set"
)
parser.add_argument(
"--check",
action="store_true",
help="Check the set of documented JS APIs",
)
parser.add_argument(
"--update",
action="store_true",
help="Update the expected set of documented JS APIs",
)
return parser.parse_args()
def main():
args = parse_args()
if not (args.update ^ args.check):
print("Expected exactly one of --check and --update")
sys.exit(1)
if args.update:
update_expected_js_docs()
sys.exit(0)
if args.check:
sys.exit(check_expected_js_docs())
if __name__ == "__main__":
main()
| App |
python | pandas-dev__pandas | pandas/tests/series/methods/test_sort_index.py | {
"start": 7766,
"end": 12647
} | class ____:
def test_sort_index_multiindex_key(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
result = s.sort_index(level="C", key=lambda x: -x)
tm.assert_series_equal(s, result)
result = s.sort_index(level="C", key=lambda x: x) # nothing happens
tm.assert_series_equal(backwards, result)
def test_sort_index_multiindex_key_multi_level(self):
mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list("ABC"))
s = Series([1, 2], mi)
backwards = s.iloc[[1, 0]]
result = s.sort_index(level=["A", "C"], key=lambda x: -x)
tm.assert_series_equal(s, result)
result = s.sort_index(level=["A", "C"], key=lambda x: x) # nothing happens
tm.assert_series_equal(backwards, result)
def test_sort_index_key(self):
series = Series(np.arange(6, dtype="int64"), index=list("aaBBca"))
result = series.sort_index()
expected = series.iloc[[2, 3, 0, 1, 5, 4]]
tm.assert_series_equal(result, expected)
result = series.sort_index(key=lambda x: x.str.lower())
expected = series.iloc[[0, 1, 5, 2, 3, 4]]
tm.assert_series_equal(result, expected)
result = series.sort_index(key=lambda x: x.str.lower(), ascending=False)
expected = series.iloc[[4, 2, 3, 0, 1, 5]]
tm.assert_series_equal(result, expected)
def test_sort_index_key_int(self):
series = Series(np.arange(6, dtype="int64"), index=np.arange(6, dtype="int64"))
result = series.sort_index()
tm.assert_series_equal(result, series)
result = series.sort_index(key=lambda x: -x)
expected = series.sort_index(ascending=False)
tm.assert_series_equal(result, expected)
result = series.sort_index(key=lambda x: 2 * x)
tm.assert_series_equal(result, series)
def test_sort_index_kind_key(self, sort_kind, sort_by_key):
# GH #14444 & #13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3], dtype=object)
expected_series = Series(index=[1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(kind=sort_kind, key=sort_by_key)
tm.assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_kind_neg_key(self, sort_kind):
# GH #14444 & #13589: Add support for sort algo choosing
series = Series(index=[3, 2, 1, 4, 3], dtype=object)
expected_series = Series(index=[4, 3, 3, 2, 1], dtype=object)
index_sorted_series = series.sort_index(kind=sort_kind, key=lambda x: -x)
tm.assert_series_equal(expected_series, index_sorted_series)
def test_sort_index_na_position_key(self, sort_by_key):
series = Series(index=[3, 2, 1, 4, 3, np.nan], dtype=object)
expected_series_first = Series(index=[np.nan, 1, 2, 3, 3, 4], dtype=object)
index_sorted_series = series.sort_index(na_position="first", key=sort_by_key)
tm.assert_series_equal(expected_series_first, index_sorted_series)
expected_series_last = Series(index=[1, 2, 3, 3, 4, np.nan], dtype=object)
index_sorted_series = series.sort_index(na_position="last", key=sort_by_key)
tm.assert_series_equal(expected_series_last, index_sorted_series)
def test_changes_length_raises(self):
s = Series([1, 2, 3])
with pytest.raises(ValueError, match="change the shape"):
s.sort_index(key=lambda x: x[:1])
def test_sort_values_key_type(self):
s = Series([1, 2, 3], DatetimeIndex(["2008-10-24", "2008-11-23", "2007-12-22"]))
result = s.sort_index(key=lambda x: x.month)
expected = s.iloc[[0, 1, 2]]
tm.assert_series_equal(result, expected)
result = s.sort_index(key=lambda x: x.day)
expected = s.iloc[[2, 1, 0]]
tm.assert_series_equal(result, expected)
result = s.sort_index(key=lambda x: x.year)
expected = s.iloc[[2, 0, 1]]
tm.assert_series_equal(result, expected)
result = s.sort_index(key=lambda x: x.month_name())
expected = s.iloc[[2, 1, 0]]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"ascending",
[
[True, False],
[False, True],
],
)
def test_sort_index_multi_already_monotonic(self, ascending):
# GH 56049
mi = MultiIndex.from_product([[1, 2], [3, 4]])
ser = Series(range(len(mi)), index=mi)
result = ser.sort_index(ascending=ascending)
if ascending == [True, False]:
expected = ser.take([1, 0, 3, 2])
elif ascending == [False, True]:
expected = ser.take([2, 3, 0, 1])
tm.assert_series_equal(result, expected)
| TestSeriesSortIndexKey |
python | walkccc__LeetCode | solutions/2532. Time to Cross a Bridge/2532.py | {
"start": 0,
"end": 2081
} | class ____:
def findCrossingTime(self, n: int, k: int, time: list[list[int]]) -> int:
ans = 0
# (leftToRight + rightToLeft, i)
leftBridgeQueue = [
(-leftToRight - rightToLeft, -i) for i,
(leftToRight, pickOld, rightToLeft, pickNew) in enumerate(time)]
rightBridgeQueue = []
# (time to be idle, i)
leftWorkers = []
rightWorkers = []
heapq.heapify(leftBridgeQueue)
while n > 0 or rightBridgeQueue or rightWorkers:
# Idle left workers get on the left bridge.
while leftWorkers and leftWorkers[0][0] <= ans:
i = heapq.heappop(leftWorkers)[1]
leftWorkers.pop()
heapq.heappush(leftBridgeQueue, (-time[i][0] - time[i][2], -i))
# Idle right workers get on the right bridge.
while rightWorkers and rightWorkers[0][0] <= ans:
i = heapq.heappop(rightWorkers)[1]
heapq.heappush(rightBridgeQueue, (-time[i][0] - time[i][2], -i))
if rightBridgeQueue:
# If the bridge is free, the worker waiting on the right side of the
# bridge gets to cross the bridge. If more than one worker is waiting
# on the right side, the one with the lowest efficiency crosses first.
i = -heapq.heappop(rightBridgeQueue)[1]
ans += time[i][2]
heapq.heappush(leftWorkers, (ans + time[i][3], i))
elif leftBridgeQueue and n > 0:
# If the bridge is free and no worker is waiting on the right side, and
# at least one box remains at the old warehouse, the worker on the left
# side of the river gets to cross the bridge. If more than one worker
# is waiting on the left side, the one with the lowest efficiency
# crosses first.
i = -heapq.heappop(leftBridgeQueue)[1]
ans += time[i][0]
heapq.heappush(rightWorkers, (ans + time[i][1], i))
n -= 1
else:
# Advance the time of the last crossing worker.
ans = min(leftWorkers[0][0] if leftWorkers and n > 0 else math.inf,
rightWorkers[0][0] if rightWorkers else math.inf)
return ans
| Solution |
python | fastai__fastai | fastai/text/models/awdlstm.py | {
"start": 4040,
"end": 8605
} | class ____(Module):
"AWD-LSTM inspired by https://arxiv.org/abs/1708.02182"
initrange=0.1
def __init__(self,
vocab_sz:int, # Size of the vocabulary
emb_sz:int, # Size of embedding vector
n_hid:int, # Number of features in hidden state
n_layers:int, # Number of LSTM layers
pad_token:int=1, # Padding token id
hidden_p:float=0.2, # Dropout probability for hidden state between layers
input_p:float=0.6, # Dropout probability for LSTM stack input
embed_p:float=0.1, # Embedding layer dropout probabillity
weight_p:float=0.5, # Hidden-to-hidden wight dropout probability for LSTM layers
bidir:bool=False # If set to `True` uses bidirectional LSTM layers
):
store_attr('emb_sz,n_hid,n_layers,pad_token')
self.bs = 1
self.n_dir = 2 if bidir else 1
self.encoder = nn.Embedding(vocab_sz, emb_sz, padding_idx=pad_token)
self.encoder_dp = EmbeddingDropout(self.encoder, embed_p)
self.rnns = nn.ModuleList([self._one_rnn(emb_sz if l == 0 else n_hid, (n_hid if l != n_layers - 1 else emb_sz)//self.n_dir,
bidir, weight_p, l) for l in range(n_layers)])
self.encoder.weight.data.uniform_(-self.initrange, self.initrange)
self.input_dp = RNNDropout(input_p)
self.hidden_dps = nn.ModuleList([RNNDropout(hidden_p) for l in range(n_layers)])
self.reset()
def forward(self, inp:Tensor, from_embeds:bool=False):
bs,sl = inp.shape[:2] if from_embeds else inp.shape
if bs!=self.bs: self._change_hidden(bs)
output = self.input_dp(inp if from_embeds else self.encoder_dp(inp))
new_hidden = []
for l, (rnn,hid_dp) in enumerate(zip(self.rnns, self.hidden_dps)):
output, new_h = rnn(output, self.hidden[l])
new_hidden.append(new_h)
if l != self.n_layers - 1: output = hid_dp(output)
self.hidden = to_detach(new_hidden, cpu=False, gather=False)
return output
def _change_hidden(self, bs):
self.hidden = [self._change_one_hidden(l, bs) for l in range(self.n_layers)]
self.bs = bs
def _one_rnn(self, n_in, n_out, bidir, weight_p, l):
"Return one of the inner rnn"
rnn = nn.LSTM(n_in, n_out, 1, batch_first=True, bidirectional=bidir)
return WeightDropout(rnn, weight_p)
def _one_hidden(self, l):
"Return one hidden state"
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return (one_param(self).new_zeros(self.n_dir, self.bs, nh), one_param(self).new_zeros(self.n_dir, self.bs, nh))
def _change_one_hidden(self, l, bs):
if self.bs < bs:
nh = (self.n_hid if l != self.n_layers - 1 else self.emb_sz) // self.n_dir
return tuple(torch.cat([h, h.new_zeros(self.n_dir, bs-self.bs, nh)], dim=1) for h in self.hidden[l])
if self.bs > bs: return (self.hidden[l][0][:,:bs].contiguous(), self.hidden[l][1][:,:bs].contiguous())
return self.hidden[l]
def reset(self):
"Reset the hidden states"
[r.reset() for r in self.rnns if hasattr(r, 'reset')]
self.hidden = [self._one_hidden(l) for l in range(self.n_layers)]
# %% ../../../nbs/32_text.models.awdlstm.ipynb 22
def awd_lstm_lm_split(model):
"Split a RNN `model` in groups for differential learning rates."
groups = [nn.Sequential(rnn, dp) for rnn, dp in zip(model[0].rnns, model[0].hidden_dps)]
groups = L(groups + [nn.Sequential(model[0].encoder, model[0].encoder_dp, model[1])])
return groups.map(params)
# %% ../../../nbs/32_text.models.awdlstm.ipynb 23
awd_lstm_lm_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.1,
hidden_p=0.15, input_p=0.25, embed_p=0.02, weight_p=0.2, tie_weights=True, out_bias=True)
# %% ../../../nbs/32_text.models.awdlstm.ipynb 24
def awd_lstm_clas_split(model):
"Split a RNN `model` in groups for differential learning rates."
groups = [nn.Sequential(model[0].module.encoder, model[0].module.encoder_dp)]
groups += [nn.Sequential(rnn, dp) for rnn, dp in zip(model[0].module.rnns, model[0].module.hidden_dps)]
groups = L(groups + [model[1]])
return groups.map(params)
# %% ../../../nbs/32_text.models.awdlstm.ipynb 25
awd_lstm_clas_config = dict(emb_sz=400, n_hid=1152, n_layers=3, pad_token=1, bidir=False, output_p=0.4,
hidden_p=0.3, input_p=0.4, embed_p=0.05, weight_p=0.5)
| AWD_LSTM |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_P.py | {
"start": 7742,
"end": 9011
} | class ____(Benchmark):
r"""
PenHolder objective function.
This class defines the PenHolder [1]_ global optimization problem. This is a
multimodal minimization problem defined as follows:
.. math::
f_{\text{PenHolder}}(x) = -e^{\left|{e^{-\left|{- \frac{\sqrt{x_{1}^{2}
+ x_{2}^{2}}}{\pi} + 1}\right|} \cos\left(x_{1}\right)
\cos\left(x_{2}\right)}\right|^{-1}}
with :math:`x_i \in [-11, 11]` for :math:`i = 1, 2`.
*Global optimum*: :math:`f(x_i) = -0.9635348327265058` for
:math:`x_i = \pm 9.646167671043401` for :math:`i = 1, 2`
.. [1] Jamil, M. & Yang, X.-S. A Literature Survey of Benchmark Functions
For Global Optimization Problems Int. Journal of Mathematical Modelling
and Numerical Optimisation, 2013, 4, 150-194.
"""
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-11.0] * self.N, [11.0] * self.N))
self.global_optimum = [[-9.646167708023526, 9.646167671043401]]
self.fglob = -0.9635348327265058
def fun(self, x, *args):
self.nfev += 1
a = abs(1. - (sqrt(x[0] ** 2 + x[1] ** 2) / pi))
b = cos(x[0]) * cos(x[1]) * exp(a)
return -exp(-abs(b) ** -1)
| PenHolder |
python | getsentry__sentry | src/sentry/tagstore/types.py | {
"start": 3919,
"end": 4562
} | class ____(Serializer):
def serialize(self, obj, attrs, user, **kwargs) -> TagKeySerializerResponse:
from sentry import tagstore
output: TagKeySerializerResponse = {
"key": tagstore.backend.get_standardized_key(obj.key),
"name": tagstore.backend.get_tag_key_label(obj.key),
}
if obj.values_seen is not None:
output["uniqueValues"] = obj.values_seen
if obj.count is not None:
output["totalValues"] = obj.count
if obj.top_values is not None:
output["topValues"] = serialize(obj.top_values, user)
return output
| TagKeySerializer |
python | falconry__falcon | tests/test_before_hooks.py | {
"start": 5158,
"end": 5486
} | class ____(TestFieldResource):
def on_get(self, req, resp, id):
# Test passing a single kwarg, but no extra args
super().on_get(req, resp, id=id)
@falcon.before(bunnies)
@falcon.before(frogs)
@falcon.before(Fish())
@falcon.before(bunnies_in_the_head)
@falcon.before(frogs_in_the_head)
| TestFieldResourceChildToo |
python | getsentry__sentry | src/sentry/models/deletedorganization.py | {
"start": 161,
"end": 864
} | class ____(DeletedEntry):
"""
This model tracks an intent to delete. If an org is marked pending_delete
through the UI, a deletedorganization is created to log this deletion.
This model does not account for aborted or failed deletions and is currently
unable to log deletions that occur implicitly (i.e. when the sole parent object
is deleted, the child is also marked for deletion as well).
"""
name = models.CharField(max_length=64, null=True)
slug = models.CharField(max_length=50, null=True)
class Meta:
app_label = "sentry"
db_table = "sentry_deletedorganization"
__repr__ = sane_repr("date_deleted", "slug", "reason")
| DeletedOrganization |
python | weaviate__weaviate-python-client | weaviate/rbac/models.py | {
"start": 8540,
"end": 8845
} | class ____(_Permission[GroupAction]):
group: str
group_type: str
def _to_weaviate(self) -> List[WeaviatePermission]:
return [
{"action": action, "groups": {"group": self.group, "groupType": self.group_type}}
for action in self.actions
]
| _GroupsPermission |
python | kamyu104__LeetCode-Solutions | Python/most-frequent-subtree-sum.py | {
"start": 50,
"end": 743
} | class ____(object):
def findFrequentTreeSum(self, root):
"""
:type root: TreeNode
:rtype: List[int]
"""
def countSubtreeSumHelper(root, counts):
if not root:
return 0
total = root.val + \
countSubtreeSumHelper(root.left, counts) + \
countSubtreeSumHelper(root.right, counts)
counts[total] += 1
return total
counts = collections.defaultdict(int)
countSubtreeSumHelper(root, counts)
max_count = max(counts.values()) if counts else 0
return [total for total, count in counts.iteritems() if count == max_count]
| Solution |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 20301,
"end": 20639
} | class ____(BaseModel):
"""
Queued Event serializer for responses..
"""
dag_id: Annotated[str, Field(title="Dag Id")]
asset_id: Annotated[int, Field(title="Asset Id")]
created_at: Annotated[datetime, Field(title="Created At")]
dag_display_name: Annotated[str, Field(title="Dag Display Name")]
| QueuedEventResponse |
python | huggingface__transformers | src/transformers/models/internvl/configuration_internvl.py | {
"start": 6766,
"end": 10622
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`InternVLForConditionalGeneration`]. It is used to instantiate a
InternVL model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of InternVL3-1B.
e.g. [OpenGVLab/InternVL3-1B-hf](https://huggingface.co/OpenGVLab/InternVL3-1B-hf)
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vision_config (`Union[AutoConfig, dict]`, *optional*, defaults to `InternVisonConfig`):
The config object or dictionary of the vision backbone.
text_config (`Union[AutoConfig, dict]`, *optional*, defaults to `Qwen2Config`):
The config object or dictionary of the text backbone.
image_token_id (`int`, *optional*, defaults to 151667):
The image token index to encode the image prompt.
image_seq_length (`int`, *optional*, defaults to 256):
Number of image tokens to use per image patch.
downsample_ratio (`float`, *optional*, defaults to 0.5):
Factor by which to downsample the image.
projector_hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the projector.
vision_feature_layer (`int`, *optional*, defaults to -1):
The index of the layer to use as the image features.
vision_feature_select_strategy (`str`, *optional*, defaults to `"default"`):
The feature selection strategy used to select the vision feature from the vision backbone.
Can be one of `"default"` or `"full"`.
```python
>>> from transformers import InternVLForConditionalGeneration, InternVLConfig
>>> # Initializing a InternVL style configuration
>>> configuration = InternVLConfig()
>>> # Initializing a model (with random weights) from the OpenGVLab/InternVL3-1B-hf configuration
>>> model = InternVLForConditionalGeneration(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "internvl"
sub_configs = {"text_config": AutoConfig, "vision_config": InternVLVisionConfig}
def __init__(
self,
vision_config=None,
text_config=None,
image_token_id=151667,
image_seq_length=256,
downsample_ratio=0.5,
projector_hidden_act="gelu",
vision_feature_layer=-1,
vision_feature_select_strategy="default",
**kwargs,
):
self.image_token_id = image_token_id
self.image_seq_length = image_seq_length
self.downsample_ratio = downsample_ratio
self.projector_hidden_act = projector_hidden_act
self.vision_feature_layer = vision_feature_layer
self.vision_feature_select_strategy = vision_feature_select_strategy
if isinstance(vision_config, dict):
self.vision_config = InternVLVisionConfig(**vision_config)
elif isinstance(vision_config, InternVLVisionConfig):
self.vision_config = vision_config
elif vision_config is None:
self.vision_config = InternVLVisionConfig()
if isinstance(text_config, dict):
text_config["model_type"] = text_config.get("model_type", "qwen2")
text_config = CONFIG_MAPPING[text_config["model_type"]](**text_config)
elif text_config is None:
text_config = CONFIG_MAPPING["qwen2"]()
self.text_config = text_config
super().__init__(**kwargs)
__all__ = ["InternVLVisionConfig", "InternVLConfig"]
| InternVLConfig |
python | astropy__astropy | astropy/table/tests/test_table.py | {
"start": 65010,
"end": 69260
} | class ____(MetaBaseTest):
test_class = table.Table
args = ()
def test_unicode_content():
# If we don't have unicode literals then return
if isinstance("", bytes):
return
# Define unicode literals
string_a = "астрономическая питона"
string_b = "миллиарды световых лет"
a = table.Table([[string_a, 2], [string_b, 3]], names=("a", "b"))
assert string_a in str(a)
# This only works because the coding of this file is utf-8, which
# matches the default encoding of Table.__str__
assert string_a.encode("utf-8") in bytes(a)
def test_unicode_policy():
t = table.Table.read(
[
" a b c d",
" 2 c 7.0 0",
" 2 b 5.0 1",
" 2 b 6.0 2",
" 2 a 4.0 3",
" 0 a 0.0 4",
" 1 b 3.0 5",
" 1 a 2.0 6",
" 1 a 1.0 7",
],
format="ascii",
)
assert_follows_unicode_guidelines(t)
@pytest.mark.parametrize("uni", ["питона", "ascii"])
def test_unicode_bytestring_conversion(table_types, uni):
"""
Test converting columns to all unicode or all bytestring. This
makes two columns, one which is unicode (str in Py3) and one which
is bytes (UTF-8 encoded). There are two code paths in the conversions,
a faster one where the data are actually ASCII and a slower one where
UTF-8 conversion is required. This tests both via the ``uni`` param.
"""
byt = uni.encode("utf-8")
t = table_types.Table([[byt], [uni], [1]], dtype=("S", "U", "i"))
assert t["col0"].dtype.kind == "S"
assert t["col1"].dtype.kind == "U"
assert t["col2"].dtype.kind == "i"
t["col0"].description = "col0"
t["col1"].description = "col1"
t["col0"].meta["val"] = "val0"
t["col1"].meta["val"] = "val1"
# Unicode to bytestring
t1 = t.copy()
t1.convert_unicode_to_bytestring()
assert t1["col0"].dtype.kind == "S"
assert t1["col1"].dtype.kind == "S"
assert t1["col2"].dtype.kind == "i"
# Meta made it through
assert t1["col0"].description == "col0"
assert t1["col1"].description == "col1"
assert t1["col0"].meta["val"] == "val0"
assert t1["col1"].meta["val"] == "val1"
# Need to de-fang the automatic unicode sandwiching of Table
assert np.array(t1["col0"])[0] == byt
assert np.array(t1["col1"])[0] == byt
assert np.array(t1["col2"])[0] == 1
# Bytestring to unicode
t1 = t.copy()
t1.convert_bytestring_to_unicode()
assert t1["col0"].dtype.kind == "U"
assert t1["col1"].dtype.kind == "U"
assert t1["col2"].dtype.kind == "i"
# Meta made it through
assert t1["col0"].description == "col0"
assert t1["col1"].description == "col1"
assert t1["col0"].meta["val"] == "val0"
assert t1["col1"].meta["val"] == "val1"
# No need to de-fang the automatic unicode sandwiching of Table here, but
# do just for consistency to prove things are working.
assert np.array(t1["col0"])[0] == uni
assert np.array(t1["col1"])[0] == uni
assert np.array(t1["col2"])[0] == 1
def test_table_deletion():
"""
Regression test for the reference cycle discussed in
https://github.com/astropy/astropy/issues/2877
"""
deleted = set()
# A special table subclass which leaves a record when it is finalized
class TestTable(table.Table):
def __del__(self):
deleted.add(id(self))
t = TestTable({"a": [1, 2, 3]})
the_id = id(t)
assert t["a"].parent_table is t
del t
# Cleanup
gc.collect()
assert the_id in deleted
def test_nested_iteration():
"""
Regression test for issue 3358 where nested iteration over a single table fails.
"""
t = table.Table([[0, 1]], names=["a"])
out = []
for r1 in t:
for r2 in t:
out.append((r1["a"], r2["a"]))
assert out == [(0, 0), (0, 1), (1, 0), (1, 1)]
def test_table_init_from_degenerate_arrays(table_types):
t = table_types.Table(np.array([]))
assert len(t.columns) == 0
with pytest.raises(ValueError):
t = table_types.Table(np.array(0))
t = table_types.Table(np.array([1, 2, 3]))
assert len(t.columns) == 3
@pytest.mark.usefixtures("table_types")
| TestMetaTable |
python | huggingface__transformers | src/transformers/hyperparameter_search.py | {
"start": 1011,
"end": 1682
} | class ____:
name: str
pip_package: Optional[str] = None
@staticmethod
def is_available():
raise NotImplementedError
def run(self, trainer, n_trials: int, direction: str, **kwargs):
raise NotImplementedError
def default_hp_space(self, trial):
raise NotImplementedError
def ensure_available(self):
if not self.is_available():
raise RuntimeError(
f"You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}."
)
@classmethod
def pip_install(cls):
return f"`pip install {cls.pip_package or cls.name}`"
| HyperParamSearchBackendBase |
python | ZoranPandovski__al-go-rithms | backtracking/m-coloring/Python/mColoring.py | {
"start": 40,
"end": 1107
} | class ____():
def __init__(self, vertices):
self.V = vertices
self.graph = [[0 for column in range(vertices)]\
for row in range(vertices)]
# A utility function to check if the current color assignment
# is safe for vertex v
def isSafe(self, v, colour, c):
for i in range(self.V):
if self.graph[v][i] == 1 and colour[i] == c:
return False
return True
# A recursive utility function to solve m
# coloring problem
def graphColourUtil(self, m, colour, v):
if v == self.V:
return True
for c in range(1, m+1):
if self.isSafe(v, colour, c) == True:
colour[v] = c
if self.graphColourUtil(m, colour, v+1) == True:
return True
colour[v] = 0
def graphColouring(self, m):
colour = [0] * self.V
if self.graphColourUtil(m, colour, 0) == False:
return False
# Print the solution
print("Solution exist and Following are the assigned colours:")
for c in colour:
print(c, end=' ')
return True
# Driver Code
g = Graph(4)
g.graph = [[0,1,1,1], [1,0,1,0], [1,1,0,1], [1,0,1,0]]
m=3
g.graphColouring(m)
| Graph |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/tests/test_browser.py | {
"start": 828,
"end": 2318
} | class ____:
def test_get_current_page_no_contexts(self):
mock_browser = MagicMock()
mock_browser.contexts = []
mock_context = MagicMock()
mock_page = MagicMock()
mock_browser.new_context.return_value = mock_context
mock_context.new_page.return_value = mock_page
result = get_current_page(mock_browser)
assert result == mock_page
mock_browser.new_context.assert_called_once()
mock_context.new_page.assert_called_once()
def test_get_current_page_with_context_no_pages(self):
mock_browser = MagicMock()
mock_context = MagicMock()
mock_context.pages = []
mock_browser.contexts = [mock_context]
mock_page = MagicMock()
mock_context.new_page.return_value = mock_page
result = get_current_page(mock_browser)
assert result == mock_page
mock_browser.new_context.assert_not_called()
mock_context.new_page.assert_called_once()
def test_get_current_page_with_context_and_pages(self):
mock_browser = MagicMock()
mock_context = MagicMock()
mock_page1 = MagicMock()
mock_page2 = MagicMock()
mock_context.pages = [mock_page1, mock_page2]
mock_browser.contexts = [mock_context]
result = get_current_page(mock_browser)
assert result == mock_page2
mock_browser.new_context.assert_not_called()
mock_context.new_page.assert_not_called()
| TestBrowserUtils |
python | ray-project__ray | python/ray/data/_internal/logical/operators/join_operator.py | {
"start": 424,
"end": 676
} | class ____(Enum):
INNER = "inner"
LEFT_OUTER = "left_outer"
RIGHT_OUTER = "right_outer"
FULL_OUTER = "full_outer"
LEFT_SEMI = "left_semi"
RIGHT_SEMI = "right_semi"
LEFT_ANTI = "left_anti"
RIGHT_ANTI = "right_anti"
| JoinType |
python | sympy__sympy | sympy/matrices/expressions/hadamard.py | {
"start": 1207,
"end": 9128
} | class ____(MatrixExpr):
"""
Elementwise product of matrix expressions
Examples
========
Hadamard product for matrix symbols:
>>> from sympy import hadamard_product, HadamardProduct, MatrixSymbol
>>> A = MatrixSymbol('A', 5, 5)
>>> B = MatrixSymbol('B', 5, 5)
>>> isinstance(hadamard_product(A, B), HadamardProduct)
True
Notes
=====
This is a symbolic object that simply stores its argument without
evaluating it. To actually compute the product, use the function
``hadamard_product()`` or ``HadamardProduct.doit``
"""
is_HadamardProduct = True
def __new__(cls, *args, evaluate=False, check=None):
args = list(map(sympify, args))
if len(args) == 0:
# We currently don't have a way to support one-matrices of generic dimensions:
raise ValueError("HadamardProduct needs at least one argument")
if not all(isinstance(arg, MatrixExpr) for arg in args):
raise TypeError("Mix of Matrix and Scalar symbols")
if check is not None:
sympy_deprecation_warning(
"Passing check to HadamardProduct is deprecated and the check argument will be removed in a future version.",
deprecated_since_version="1.11",
active_deprecations_target='remove-check-argument-from-matrix-operations')
if check is not False:
validate(*args)
obj = super().__new__(cls, *args)
if evaluate:
obj = obj.doit(deep=False)
return obj
@property
def shape(self):
return self.args[0].shape
def _entry(self, i, j, **kwargs):
return Mul(*[arg._entry(i, j, **kwargs) for arg in self.args])
def _eval_transpose(self):
from sympy.matrices.expressions.transpose import transpose
return HadamardProduct(*list(map(transpose, self.args)))
def doit(self, **hints):
expr = self.func(*(i.doit(**hints) for i in self.args))
# Check for explicit matrices:
from sympy.matrices.matrixbase import MatrixBase
from sympy.matrices.immutable import ImmutableMatrix
explicit = [i for i in expr.args if isinstance(i, MatrixBase)]
if explicit:
remainder = [i for i in expr.args if i not in explicit]
expl_mat = ImmutableMatrix([
Mul.fromiter(i) for i in zip(*explicit)
]).reshape(*self.shape)
expr = HadamardProduct(*([expl_mat] + remainder))
return canonicalize(expr)
def _eval_derivative(self, x):
terms = []
args = list(self.args)
for i in range(len(args)):
factors = args[:i] + [args[i].diff(x)] + args[i+1:]
terms.append(hadamard_product(*factors))
return Add.fromiter(terms)
def _eval_derivative_matrix_lines(self, x):
from sympy.tensor.array.expressions.array_expressions import ArrayDiagonal
from sympy.tensor.array.expressions.array_expressions import ArrayTensorProduct
from sympy.matrices.expressions.matexpr import _make_matrix
with_x_ind = [i for i, arg in enumerate(self.args) if arg.has(x)]
lines = []
for ind in with_x_ind:
left_args = self.args[:ind]
right_args = self.args[ind+1:]
d = self.args[ind]._eval_derivative_matrix_lines(x)
hadam = hadamard_product(*(right_args + left_args))
diagonal = [(0, 2), (3, 4)]
diagonal = [e for j, e in enumerate(diagonal) if self.shape[j] != 1]
for i in d:
l1 = i._lines[i._first_line_index]
l2 = i._lines[i._second_line_index]
subexpr = ExprBuilder(
ArrayDiagonal,
[
ExprBuilder(
ArrayTensorProduct,
[
ExprBuilder(_make_matrix, [l1]),
hadam,
ExprBuilder(_make_matrix, [l2]),
]
),
*diagonal],
)
i._first_pointer_parent = subexpr.args[0].args[0].args
i._first_pointer_index = 0
i._second_pointer_parent = subexpr.args[0].args[2].args
i._second_pointer_index = 0
i._lines = [subexpr]
lines.append(i)
return lines
# TODO Implement algorithm for rewriting Hadamard product as diagonal matrix
# if matmul identy matrix is multiplied.
def canonicalize(x):
"""Canonicalize the Hadamard product ``x`` with mathematical properties.
Examples
========
>>> from sympy import MatrixSymbol, HadamardProduct
>>> from sympy import OneMatrix, ZeroMatrix
>>> from sympy.matrices.expressions.hadamard import canonicalize
>>> from sympy import init_printing
>>> init_printing(use_unicode=False)
>>> A = MatrixSymbol('A', 2, 2)
>>> B = MatrixSymbol('B', 2, 2)
>>> C = MatrixSymbol('C', 2, 2)
Hadamard product associativity:
>>> X = HadamardProduct(A, HadamardProduct(B, C))
>>> X
A.*(B.*C)
>>> canonicalize(X)
A.*B.*C
Hadamard product commutativity:
>>> X = HadamardProduct(A, B)
>>> Y = HadamardProduct(B, A)
>>> X
A.*B
>>> Y
B.*A
>>> canonicalize(X)
A.*B
>>> canonicalize(Y)
A.*B
Hadamard product identity:
>>> X = HadamardProduct(A, OneMatrix(2, 2))
>>> X
A.*1
>>> canonicalize(X)
A
Absorbing element of Hadamard product:
>>> X = HadamardProduct(A, ZeroMatrix(2, 2))
>>> X
A.*0
>>> canonicalize(X)
0
Rewriting to Hadamard Power
>>> X = HadamardProduct(A, A, A)
>>> X
A.*A.*A
>>> canonicalize(X)
.3
A
Notes
=====
As the Hadamard product is associative, nested products can be flattened.
The Hadamard product is commutative so that factors can be sorted for
canonical form.
A matrix of only ones is an identity for Hadamard product,
so every matrices of only ones can be removed.
Any zero matrix will make the whole product a zero matrix.
Duplicate elements can be collected and rewritten as HadamardPower
References
==========
.. [1] https://en.wikipedia.org/wiki/Hadamard_product_(matrices)
"""
# Associativity
rule = condition(
lambda x: isinstance(x, HadamardProduct),
flatten
)
fun = exhaust(rule)
x = fun(x)
# Identity
fun = condition(
lambda x: isinstance(x, HadamardProduct),
rm_id(lambda x: isinstance(x, OneMatrix))
)
x = fun(x)
# Absorbing by Zero Matrix
def absorb(x):
if any(isinstance(c, ZeroMatrix) for c in x.args):
return ZeroMatrix(*x.shape)
else:
return x
fun = condition(
lambda x: isinstance(x, HadamardProduct),
absorb
)
x = fun(x)
# Rewriting with HadamardPower
if isinstance(x, HadamardProduct):
tally = Counter(x.args)
new_arg = []
for base, exp in tally.items():
if exp == 1:
new_arg.append(base)
else:
new_arg.append(HadamardPower(base, exp))
x = HadamardProduct(*new_arg)
# Commutativity
fun = condition(
lambda x: isinstance(x, HadamardProduct),
sort(default_sort_key)
)
x = fun(x)
# Unpacking
x = unpack(x)
return x
def hadamard_power(base, exp):
base = sympify(base)
exp = sympify(exp)
if exp == 1:
return base
if not base.is_Matrix:
return base**exp
if exp.is_Matrix:
raise ValueError("cannot raise expression to a matrix")
return HadamardPower(base, exp)
| HadamardProduct |
python | huggingface__transformers | src/transformers/models/falcon_h1/modeling_falcon_h1.py | {
"start": 68631,
"end": 75303
} | class ____(FalconH1PreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
_tp_plan = {"lm_head": "colwise_rep"}
_pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
def __init__(self, config):
super().__init__(config)
self.model = FalconH1Model(config)
self.vocab_size = config.vocab_size
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[FalconHybridMambaAttentionDynamicCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
**kwargs,
) -> Union[tuple, CausalLMOutputWithPast]:
r"""
Example:
```python
>>> from transformers import AutoTokenizer, FalconH1ForCausalLM
>>> model = FalconH1ForCausalLM.from_pretrained("...")
>>> tokenizer = AutoTokenizer.from_pretrained("...")
>>> prompt = "Hey, are you conscious? Can you talk to me?"
>>> inputs = tokenizer(prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(inputs.input_ids, max_length=30)
>>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :]) * self.model.lm_head_multiplier
loss = None
if labels is not None:
loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)
return CausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
attention_mask=None,
inputs_embeds=None,
cache_position=None,
position_ids=None,
use_cache=True,
**kwargs,
):
# Overwritten -- has a unique cache type, `FalconHybridMambaAttentionDynamicCache`
empty_past_kv = past_key_values is None
# If we have cache: let's slice `input_ids` through `cache_position`, to keep only the unprocessed tokens
# Exception 1: when passing input_embeds, input_ids may be missing entries
# Exception 2: some generation methods do special slicing of input_ids, so we don't need to do it here
# Exception 3: with synced GPUs cache_position may go out of bounds, but we only want dummy token in that case.
# (we can't check exception 3 while compiling)
if not empty_past_kv:
if (
inputs_embeds is not None # Exception 1
or (is_torchdynamo_compiling() or cache_position[-1] >= input_ids.shape[1]) # Exception 3
):
input_ids = input_ids[:, -cache_position.shape[0] :]
elif input_ids.shape[1] != cache_position.shape[0]: # Default case (the "else", a no op, is Exception 2)
input_ids = input_ids[:, cache_position]
else:
past_key_values = FalconHybridMambaAttentionDynamicCache(
self.config,
input_ids.shape[0],
self.dtype,
devices=[
self.model.layers[i].mamba.conv1d.weight.device for i in range(self.config.num_hidden_layers)
],
)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
if not empty_past_kv:
position_ids = position_ids[:, -input_ids.shape[1] :]
# if `inputs_embeds` are passed, we only want to use them in the 1st generation step
if inputs_embeds is not None and empty_past_kv:
model_inputs = {"inputs_embeds": inputs_embeds}
else:
model_inputs = {"input_ids": input_ids.contiguous()} # `contiguous()` needed for compilation use cases
model_inputs.update(
{
"position_ids": position_ids,
"past_key_values": past_key_values,
"use_cache": use_cache,
"attention_mask": attention_mask,
"logits_to_keep": self.config.num_logits_to_keep,
"cache_position": cache_position,
}
)
# Forward ALL kwargs that are uninitialized (e.g. `use_cache`).
for key, value in kwargs.items():
if key not in model_inputs:
model_inputs[key] = value
return model_inputs
__all__ = ["FalconH1Model", "FalconH1ForCausalLM", "FalconH1PreTrainedModel"]
| FalconH1ForCausalLM |
python | getsentry__sentry | src/sentry_plugins/victorops/plugin.py | {
"start": 845,
"end": 3918
} | class ____(CorePluginMixin, NotificationPlugin):
description = DESCRIPTION
slug = "victorops"
title = "VictorOps"
conf_key = slug
conf_title = title
required_field = "api_key"
feature_descriptions = [
FeatureDescription(
"""
Manage incidents and outages by sending Sentry notifications to VictorOps.
""",
IntegrationFeatures.INCIDENT_MANAGEMENT,
),
FeatureDescription(
"""
Configure Sentry rules to trigger notifications based on conditions you set.
""",
IntegrationFeatures.ALERT_RULE,
),
]
def is_configured(self, project) -> bool:
return bool(self.get_option("api_key", project))
def get_config(self, project, user=None, initial=None, add_additional_fields: bool = False):
return [
get_secret_field_config(
name="api_key",
label="API Key",
secret=self.get_option("api_key", project),
help_text="VictorOps's Sentry API Key",
include_prefix=True,
),
{
"name": "routing_key",
"label": "Routing Key",
"type": "string",
"default": "everyone",
"required": False,
},
]
def get_client(self, project):
return VictorOpsClient(
api_key=self.get_option("api_key", project),
routing_key=self.get_option("routing_key", project),
)
def build_description(self, event):
enhanced_privacy = event.organization.flags.enhanced_privacy
if enhanced_privacy:
return ENHANCED_PRIVACY_BODY
interface_list = []
for interface in event.interfaces.values():
body = interface.to_string(event)
if not body:
continue
interface_list.append((interface.get_title(), body))
return "\n\n".join((f"{k}\n-----------\n\n{v}" for k, v in interface_list))
def notify_users(self, group, event, triggering_rules) -> None:
if not self.is_configured(group.project):
return
level = event.get_tag("level")
if level in ("info", "debug"):
message_type = "INFO"
if level == "warning":
message_type = "WARNING"
else:
message_type = "CRITICAL"
client = self.get_client(group.project)
try:
response = client.trigger_incident(
message_type=message_type,
entity_id=group.id,
entity_display_name=event.title,
state_message=self.build_description(event),
timestamp=int(event.datetime.strftime("%s")),
issue_url=group.get_absolute_url(),
issue_id=group.id,
project_id=group.project.id,
)
except ApiError as e:
self.raise_error(e)
assert response["result"] == "success"
| VictorOpsPlugin |
python | lazyprogrammer__machine_learning_examples | rl/linear_rl_trader.py | {
"start": 977,
"end": 2358
} | class ____:
""" A linear regression model """
def __init__(self, input_dim, n_action):
self.W = np.random.randn(input_dim, n_action) / np.sqrt(input_dim)
self.b = np.zeros(n_action)
# momentum terms
self.vW = 0
self.vb = 0
self.losses = []
def predict(self, X):
# make sure X is N x D
assert(len(X.shape) == 2)
return X.dot(self.W) + self.b
def sgd(self, X, Y, learning_rate=0.01, momentum=0.9):
# make sure X is N x D
assert(len(X.shape) == 2)
# the loss values are 2-D
# normally we would divide by N only
# but now we divide by N x K
num_values = np.prod(Y.shape)
# do one step of gradient descent
# we multiply by 2 to get the exact gradient
# (not adjusting the learning rate)
# i.e. d/dx (x^2) --> 2x
Yhat = self.predict(X)
gW = 2 * X.T.dot(Yhat - Y) / num_values
gb = 2 * (Yhat - Y).sum(axis=0) / num_values
# update momentum terms
self.vW = momentum * self.vW - learning_rate * gW
self.vb = momentum * self.vb - learning_rate * gb
# update params
self.W += self.vW
self.b += self.vb
mse = np.mean((Yhat - Y)**2)
self.losses.append(mse)
def load_weights(self, filepath):
npz = np.load(filepath)
self.W = npz['W']
self.b = npz['b']
def save_weights(self, filepath):
np.savez(filepath, W=self.W, b=self.b)
| LinearModel |
python | django__django | tests/m2m_through_regress/models.py | {
"start": 113,
"end": 351
} | class ____(models.Model):
person = models.ForeignKey("Person", models.CASCADE)
group = models.ForeignKey("Group", models.CASCADE)
price = models.IntegerField(default=100)
# using custom id column to test ticket #11107
| Membership |
python | huggingface__transformers | tests/quantization/torchao_integration/test_torchao.py | {
"start": 23900,
"end": 29002
} | class ____(TorchAoTest):
device = torch_device
quant_scheme_kwargs = {"group_size": 32, "version": 1}
# called only once for all test in this class
@classmethod
def setUpClass(cls):
super().setUpClass()
# fmt: off
EXPECTED_OUTPUTS = Expectations(
{
("xpu", 3): "What are we having for dinner?\n\nJessica: (smiling)",
("cuda", 7): "What are we having for dinner?\n- 1. What is the temperature outside",
}
)
# fmt: on
cls.EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
def test_int4wo_offload(self):
"""
Simple test that checks if the quantized model int4 weight only is working properly with cpu/disk offload
"""
device_map_offload = {
"model.embed_tokens": 0,
"model.layers.0": 0,
"model.layers.1": 0,
"model.layers.2": 0,
"model.layers.3": 0,
"model.layers.4": 0,
"model.layers.5": 0,
"model.layers.6": 0,
"model.layers.7": 0,
"model.layers.8": 0,
"model.layers.9": 0,
"model.layers.10": 0,
"model.layers.11": 0,
"model.layers.12": 0,
"model.layers.13": 0,
"model.layers.14": 0,
"model.layers.15": 0,
"model.layers.16": 0,
"model.layers.17": 0,
"model.layers.18": 0,
"model.layers.19": "cpu",
"model.layers.20": "cpu",
"model.layers.21": "cpu",
"model.norm": 0,
"model.rotary_emb": 0,
"lm_head": 0,
}
config = Int4WeightOnlyConfig(**self.quant_scheme_kwargs)
quant_config = TorchAoConfig(config)
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
torch_dtype=torch.bfloat16,
device_map=device_map_offload,
quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained(self.model_name)
input_ids = tokenizer(self.input_text, return_tensors="pt").to(self.device)
# fmt: off
EXPECTED_OUTPUTS = Expectations(
{
("xpu", 3): "What are we having for dinner?\n\nJessica: (smiling)",
("cuda", 7): "What are we having for dinner?\n- 1. What is the temperature outside",
}
)
# fmt: on
EXPECTED_OUTPUT = EXPECTED_OUTPUTS.get_expectation()
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
self.assertEqual(generated_text, EXPECTED_OUTPUT)
@require_torch_multi_accelerator
def test_int4wo_quant_multi_accelerator(self):
"""
Simple test that checks if the quantized model int4 weight only is working properly with multiple accelerators
set CUDA_VISIBLE_DEVICES=0,1 if you have more than 2 CUDA GPUs
set ZE_AFFINITY_MASK=0,1 if you have more than 2 Intel XPUs
"""
config = Int4WeightOnlyConfig(**self.quant_scheme_kwargs)
quant_config = TorchAoConfig(config)
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
torch_dtype=torch.bfloat16,
device_map="auto",
quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained(self.model_name)
self.assertTrue(set(quantized_model.hf_device_map.values()) == {0, 1})
input_ids = tokenizer(self.input_text, return_tensors="pt").to(self.device)
output = quantized_model.generate(**input_ids, max_new_tokens=self.max_new_tokens)
self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), self.EXPECTED_OUTPUT)
def test_autoquant(self):
"""
Simple LLM model testing autoquant
"""
quant_config = TorchAoConfig("autoquant")
quantized_model = AutoModelForCausalLM.from_pretrained(
self.model_name,
torch_dtype="auto",
device_map=self.device,
quantization_config=quant_config,
)
tokenizer = AutoTokenizer.from_pretrained(self.model_name)
input_ids = tokenizer(self.input_text, return_tensors="pt").to(self.device)
output = quantized_model.generate(
**input_ids, max_new_tokens=self.max_new_tokens, cache_implementation="static"
)
quantized_model.finalize_autoquant()
check_autoquantized(self, quantized_model.model.layers[0].self_attn.v_proj)
EXPECTED_OUTPUT = "What are we having for dinner?\n\nJessica: (smiling)"
output = quantized_model.generate(
**input_ids, max_new_tokens=self.max_new_tokens, cache_implementation="static"
)
self.assertEqual(tokenizer.decode(output[0], skip_special_tokens=True), EXPECTED_OUTPUT)
@require_torchao_version_greater_or_equal("0.11.0")
@slow
| TorchAoAcceleratorTest |
python | walkccc__LeetCode | solutions/2326. Spiral Matrix IV/2326.py | {
"start": 0,
"end": 592
} | class ____:
def spiralMatrix(self, m: int, n: int, head: ListNode | None) -> list[list[int]]:
DIRS = ((0, 1), (1, 0), (0, -1), (-1, 0))
ans = [[-1] * n for _ in range(m)]
x = 0 # the current x position
y = 0 # the current y position
d = 0
curr = head
while curr:
ans[x][y] = curr.val
if (x + DIRS[d][0] < 0 or x + DIRS[d][0] == m or y + DIRS[d][1] < 0 or
y + DIRS[d][1] == n or ans[x + DIRS[d][0]][y + DIRS[d][1]] != -1):
d = (d + 1) % 4
x += DIRS[d][0]
y += DIRS[d][1]
curr = curr.next
return ans
| Solution |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 602534,
"end": 603216
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("RepositoryVulnerabilityAlertEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("RepositoryVulnerabilityAlert"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| RepositoryVulnerabilityAlertConnection |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/unit_tests/common.py | {
"start": 710,
"end": 808
} | class ____:
def search(self, search_request):
return search_request
| MockGoogleAdsService |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_stateful.py | {
"start": 10458,
"end": 10934
} | class ____(RuleBasedStateMachine):
def __init__(self, threshold):
super().__init__()
self.threshold = threshold
@rule(value=integers())
def action(self, value):
if value > self.threshold:
raise ValueError(f"{value} is too high")
def test_can_use_factory_for_tests():
with raises(ValueError):
run_state_machine_as_test(
lambda: RequiresInit(42), settings=Settings(max_examples=100)
)
| RequiresInit |
python | django__django | tests/forms_tests/widget_tests/test_checkboxinput.py | {
"start": 91,
"end": 4482
} | class ____(WidgetTest):
widget = CheckboxInput()
def test_render_empty(self):
self.check_html(
self.widget, "is_cool", "", html='<input type="checkbox" name="is_cool">'
)
def test_render_none(self):
self.check_html(
self.widget, "is_cool", None, html='<input type="checkbox" name="is_cool">'
)
def test_render_false(self):
self.check_html(
self.widget, "is_cool", False, html='<input type="checkbox" name="is_cool">'
)
def test_render_true(self):
self.check_html(
self.widget,
"is_cool",
True,
html='<input checked type="checkbox" name="is_cool">',
)
def test_render_value(self):
"""
Using any value that's not in ('', None, False, True) will check the
checkbox and set the 'value' attribute.
"""
self.check_html(
self.widget,
"is_cool",
"foo",
html='<input checked type="checkbox" name="is_cool" value="foo">',
)
def test_render_int(self):
"""
Integers are handled by value, not as booleans (#17114).
"""
self.check_html(
self.widget,
"is_cool",
0,
html='<input checked type="checkbox" name="is_cool" value="0">',
)
self.check_html(
self.widget,
"is_cool",
1,
html='<input checked type="checkbox" name="is_cool" value="1">',
)
def test_render_check_test(self):
"""
You can pass 'check_test' to the constructor. This is a callable that
takes the value and returns True if the box should be checked.
"""
widget = CheckboxInput(check_test=lambda value: value.startswith("hello"))
self.check_html(
widget, "greeting", "", html=('<input type="checkbox" name="greeting">')
)
self.check_html(
widget,
"greeting",
"hello",
html=('<input checked type="checkbox" name="greeting" value="hello">'),
)
self.check_html(
widget,
"greeting",
"hello there",
html=(
'<input checked type="checkbox" name="greeting" value="hello there">'
),
)
self.check_html(
widget,
"greeting",
"hello & goodbye",
html=(
'<input checked type="checkbox" name="greeting" '
'value="hello & goodbye">'
),
)
def test_render_check_exception(self):
"""
Calling check_test() shouldn't swallow exceptions (#17888).
"""
widget = CheckboxInput(
check_test=lambda value: value.startswith("hello"),
)
with self.assertRaises(AttributeError):
widget.render("greeting", True)
def test_value_from_datadict(self):
"""
The CheckboxInput widget will return False if the key is not found in
the data dictionary (because HTML form submission doesn't send any
result for unchecked checkboxes).
"""
self.assertFalse(self.widget.value_from_datadict({}, {}, "testing"))
def test_value_from_datadict_string_int(self):
value = self.widget.value_from_datadict({"testing": "0"}, {}, "testing")
self.assertIs(value, True)
def test_value_omitted_from_data(self):
self.assertIs(
self.widget.value_omitted_from_data({"field": "value"}, {}, "field"), False
)
self.assertIs(self.widget.value_omitted_from_data({}, {}, "field"), False)
def test_get_context_does_not_mutate_attrs(self):
attrs = {"checked": False}
self.widget.get_context("name", True, attrs)
self.assertIs(attrs["checked"], False)
def test_fieldset(self):
class TestForm(Form):
template_name = "forms_tests/use_fieldset.html"
field = BooleanField(widget=self.widget)
form = TestForm()
self.assertIs(self.widget.use_fieldset, False)
self.assertHTMLEqual(
form.render(),
'<div><label for="id_field">Field:</label>'
'<input id="id_field" name="field" required type="checkbox"></div>',
)
| CheckboxInputTest |
python | tiangolo__fastapi | fastapi/params.py | {
"start": 27906,
"end": 27974
} | class ____(Depends):
scopes: Optional[Sequence[str]] = None
| Security |
python | walkccc__LeetCode | solutions/284. Peeking Iterator/284.py | {
"start": 0,
"end": 519
} | class ____:
def __init__(self, iterator: Iterator):
self.iterator = iterator
self.buffer = self.iterator.next() if self.iterator.hasNext() else None
def peek(self) -> int:
"""
Returns the next element in the iteration without advancing the iterator.
"""
return self.buffer
def next(self) -> int:
next = self.buffer
self.buffer = self.iterator.next() if self.iterator.hasNext() else None
return next
def hasNext(self) -> bool:
return self.buffer is not None
| PeekingIterator |
python | aimacode__aima-python | search.py | {
"start": 29056,
"end": 30020
} | class ____(Problem):
"""
A problem which is solved by an agent executing
actions, rather than by just computation.
Carried in a deterministic and a fully observable environment."""
def __init__(self, initial, goal, graph):
super().__init__(initial, goal)
self.graph = graph
def actions(self, state):
return self.graph.graph_dict[state].keys()
def output(self, state, action):
return self.graph.graph_dict[state][action]
def h(self, state):
"""Returns least possible cost to reach a goal for the given state."""
return self.graph.least_costs[state]
def c(self, s, a, s1):
"""Returns a cost estimate for an agent to move from state 's' to state 's1'."""
return 1
def update_state(self, percept):
raise NotImplementedError
def goal_test(self, state):
if state == self.goal:
return True
return False
| OnlineSearchProblem |
python | django__django | tests/model_forms/models.py | {
"start": 14181,
"end": 14469
} | class ____(models.Model):
codename = models.CharField(max_length=50, blank=True, null=True, unique=True)
email = models.EmailField(blank=True, null=True)
slug = models.SlugField(blank=True, null=True)
url = models.URLField(blank=True, null=True)
| NullableUniqueCharFieldModel |
python | joke2k__faker | faker/providers/job/th_TH/__init__.py | {
"start": 169,
"end": 2095
} | class ____(BaseProvider):
jobs = (
"กวี",
"เกษตรกร",
"ข้าราชการ",
"คนขับรถแท็กซี่",
"ค้าขาย",
"โฆษก",
"จ๊อกกี้",
"จิตรกร",
"เจ้าหน้าที่พัฒนาเอกชน",
"เจ้าหน้าทีรักษาความปลอดภัย",
"เจ้าหน้าที่รัฐบาล",
"ช่างทำเครื่องดนตรี",
"ช่างทำผม",
"ตำรวจ",
"ทนายความ",
"ทหารบก",
"นักกฎหมาย",
"นักการกุศล",
"นักการทูต",
"นักการเมือง",
"นักการศึกษา",
"นักกีฬา",
"นักเขียน",
"นักคณิตศาสตร์",
"นักเคลื่อนไหว",
"นักจัดรายการวิทยุ",
"นักจิตวิทยา",
"นักชีววิทยา",
"นักดนตรี",
"นักดาราศาสตร์",
"นักแต่งเพลง",
"นักถ่ายภาพ",
"นักธุรกิจ",
"นักบวช",
"นักบิน",
"นักบินอวกาศ",
"นักโบราณคดี",
"นักประชาสัมพันธ์",
"นักประดิษฐ์",
"นักประวัติศาสตร์",
"นักปรัชญา",
"นักแปล",
"นักผจญภัย",
"นักพจนานุกรม",
"นักพากย์",
"นักภาษาศาสตร์",
"นักโภชนาการ",
"นักมายากล",
"นักวาดการ์ตูน",
"นักวิจัย",
"นักวิทยาศาสตร์",
"นักเศรษฐศาสตร์",
"นักสะสมศิลปะ",
"นักสังคมวิทยา",
"นักสังคมศาสตร์",
"นักสังคมสงเคราะห์",
"นักสัตววิทยา",
"นักสำรวจ",
"นักสิทธิมนุษยชน",
"นักสืบ",
"นักแสดง",
"นักหนังสือพิมพ์",
"นักอนุรักษ์ธรรมชาติ",
"นักออกแบบ",
"นางแบบ",
"นายแบบ",
"บรรณาธิการ",
"บรรณารักษ์",
"โปรแกรมเมอร์",
"ผู้กำกับ",
"ผู้กำกับภาพยนตร์",
"ผู้กำกับละครโทรทัศน์",
"ผู้จัดพิมพ์",
"พิธีกร",
"แพทย์",
"ภัณฑารักษ์",
"เภสัชกร",
"มัคคุเทศก์",
"วิศวกร",
"วีเจ",
"ศิลปิน",
"สถาปนิก",
"อัยการ",
"อาจารย์",
)
| Provider |
python | django__django | tests/model_forms/models.py | {
"start": 14023,
"end": 14181
} | class ____(models.Model):
name = models.CharField(max_length=30)
character = models.ForeignKey(Character, models.SET_NULL, blank=False, null=True)
| Award |
python | facelessuser__pymdown-extensions | pymdownx/superfences.py | {
"start": 33438,
"end": 35850
} | class ____(CodeBlockProcessor):
"""Process indented code blocks to see if we accidentally processed its content as a fenced block."""
def test(self, parent, block):
"""Test method that is one day to be deprecated."""
return True
def reindent(self, text, pos, level):
"""Reindent the code to where it is supposed to be."""
indented = []
for line in text.split('\n'):
index = pos - level
indented.append(line[index:])
return '\n'.join(indented)
def revert_greedy_fences(self, block):
"""Revert a prematurely converted fenced block."""
new_block = []
for line in block.split('\n'):
m = FENCED_BLOCK_RE.match(line)
if m:
key = m.group(2)
indent_level = len(m.group(1))
original = None
original, pos = self.extension.stash.get(key, (None, None))
if original is not None:
code = self.reindent(original, pos, indent_level)
new_block.append(code)
self.extension.stash.remove(key)
if original is None: # pragma: no cover
# Too much work to test this. This is just a fall back in case
# we find a placeholder, and we went to revert it and it wasn't in our stash.
# Most likely this would be caused by someone else. We just want to put it
# back in the block if we can't revert it. Maybe we can do a more directed
# unit test in the future.
new_block.append(line)
else:
new_block.append(line)
return '\n'.join(new_block)
def run(self, parent, blocks):
"""Look for and parse code block."""
handled = False
if not self.config.get("disable_indented_code_blocks", False):
handled = CodeBlockProcessor.test(self, parent, blocks[0])
if handled:
if self.config.get("nested", True):
blocks[0] = self.revert_greedy_fences(blocks[0])
handled = CodeBlockProcessor.run(self, parent, blocks) is not False
return handled
def makeExtension(*args, **kwargs):
"""Return extension."""
return SuperFencesCodeExtension(*args, **kwargs)
| SuperFencesCodeBlockProcessor |
python | pytorch__pytorch | torch/_subclasses/fake_tensor.py | {
"start": 3668,
"end": 9122
} | class ____(threading.local):
# Default to None, otherwise it'll be used to override _all_
# `FakeTensorMode.allow_non_fake_inputs` in this thread.
allow_non_fake_inputs_override: Optional[bool]
non_strict_export_fake_tensor_tracker: weakref.WeakSet
def __init__(self) -> None:
self.allow_non_fake_inputs_override = None
self.non_strict_export_fake_tensor_tracker = weakref.WeakSet()
fake_tensor_tls = FakeTensorTLS()
def ordered_set(*items: T) -> dict[T, Literal[True]]:
return dict.fromkeys(items, True)
@contextlib.contextmanager
def unset_fake_temporarily() -> Generator[Optional[TorchDispatchMode], None, None]:
old = torch._C._unset_dispatch_mode(torch._C._TorchDispatchModeKey.FAKE)
try:
yield old
finally:
if old is not None:
torch._C._set_dispatch_mode(old)
@contextlib.contextmanager
def disable_fake_tensor_cache(fake_mode: FakeTensorMode) -> Generator[None, None, None]:
old_value: bool = fake_mode.cache_enabled
try:
fake_mode.cache_enabled = False
yield
finally:
fake_mode.cache_enabled = old_value
def get_plain_tensors(
subclass: Tensor, *, out: list[Union[Tensor, int, SymInt]]
) -> list[Union[Tensor, int, SymInt]]:
# This function is used in Runtime, do not add redundant asserts
todo = [subclass]
while todo:
curr = todo.pop()
if not is_traceable_wrapper_subclass(curr):
out.append(curr)
continue
inner_keys, _ = curr.__tensor_flatten__()
todo.extend(getattr(curr, key) for key in reversed(inner_keys))
return out
def is_fake(x: object) -> TypeGuard[Tensor]:
from torch._subclasses.functional_tensor import FunctionalTensor
if isinstance(x, FakeTensor):
return True
if is_traceable_wrapper_subclass(x):
attrs, _ = type(x).__tensor_flatten__(x)
flattened_tensors = [getattr(x, attr) for attr in attrs]
all_fake = all(is_fake(x) for x in flattened_tensors)
any_fake = any(is_fake(x) for x in flattened_tensors)
assert all_fake == any_fake, "got mixed fake and real tensors!"
return all_fake
elif isinstance(x, FunctionalTensor):
return is_fake(x.elem)
elif isinstance(x, Tensor) and torch._is_functional_tensor(x):
reapply_views = torch._C._functionalization_reapply_views_tls()
unwrapped = torch._C._functorch._unwrap_functional_tensor(x, reapply_views)
return is_fake(unwrapped)
elif isinstance(x, Tensor) and is_functorch_wrapped_tensor(x):
unwrapped = torch._C._functorch.get_unwrapped(x)
return is_fake(unwrapped)
return False
def maybe_get_fake_mode(t: object) -> Optional[FakeTensorMode]:
from torch._subclasses.functional_tensor import FunctionalTensor
if isinstance(t, FakeTensor):
return t.fake_mode
if is_traceable_wrapper_subclass(t):
inner_tensor_names, _ = t.__tensor_flatten__()
modes = [
maybe_get_fake_mode(getattr(t, t_name)) for t_name in inner_tensor_names
]
m = modes[0]
assert all(m is x for x in modes)
return m
elif isinstance(t, FunctionalTensor):
return maybe_get_fake_mode(t.elem)
elif isinstance(t, Tensor) and torch._is_functional_tensor(t):
reapply_views = torch._C._functionalization_reapply_views_tls()
unwrapped = torch._C._functorch._unwrap_functional_tensor(t, reapply_views)
return maybe_get_fake_mode(unwrapped)
elif isinstance(t, Tensor) and is_functorch_wrapped_tensor(t):
unwrapped = torch._C._functorch.get_unwrapped(t)
return maybe_get_fake_mode(unwrapped)
return None
@functools.cache
def get_schema_info(func: OpOverload) -> torch._C._SchemaInfo:
return torch._C._SchemaInfo(func._schema)
# many of the decompositions registered to torch/_prims do not at the moment model
# aliasing or strides, so as an incremental step, just enable the decompositions in
# torch/_decomp/decompositions.py.
# decomps are used for aot autograd tracing so we would like to unify on their
# implementation and add additional testing to them
@functools.cache
def torch_decomp_decompositions(func: OpOverload) -> bool:
from torch._decomp import decomposition_table
decompositions = torch._decomp.decompositions
# Note that the function in the decomposition table might be
# different from the one in the module because of the difference
# in out handling in aten API and torch public API
return decomposition_table[func].__module__.startswith(
"torch._decomp"
) and decomposition_table[func].__name__ in dir(decompositions)
def tree_flatten_only(ty: type[T], tree: PyTree) -> list[T]:
flat_vals = pytree.tree_leaves(tree)
return [elem for elem in flat_vals if isinstance(elem, ty)]
def _is_plain_tensor(t: object) -> bool:
return (
type(t) is Tensor
and t.layout == torch.strided
and not (
t.is_sparse
or t.is_nested
or is_functorch_wrapped_tensor(t)
or is_legacy_batchedtensor(t)
or torch._is_functional_tensor(t)
)
)
# Similar to `MetaConverter`, this is a class for converting
# multiple tensors into fake tensors which share the same view/storage
# structure. Like `MetaConverter`, it uses `WeakIdRef` to
# hold a weak reference for all memoized tensors.
| FakeTensorTLS |
python | apache__airflow | providers/slack/tests/unit/slack/transfers/test_base_sql_to_slack.py | {
"start": 1058,
"end": 4392
} | class ____:
def setup_method(self):
self.default_op_kwargs = {
"sql": "SELECT 1",
"sql_conn_id": "test-sql-conn-id",
"sql_hook_params": None,
"parameters": None,
}
def test_execute_not_implemented(self):
"""Test that no base implementation for ``BaseSqlToSlackOperator.execute()``."""
op = BaseSqlToSlackOperator(task_id="test_base_not_implements", **self.default_op_kwargs)
with pytest.raises(NotImplementedError):
op.execute(mock.MagicMock())
@mock.patch("airflow.providers.common.sql.operators.sql.BaseHook.get_connection")
@mock.patch("airflow.models.connection.Connection.get_hook")
@pytest.mark.parametrize("conn_type", ["postgres", "snowflake"])
@pytest.mark.parametrize("sql_hook_params", [None, {"foo": "bar"}])
def test_get_hook(self, mock_get_hook, mock_get_conn, conn_type, sql_hook_params):
class SomeDummyHook:
"""Hook which implements ``get_df`` method"""
def get_df(self):
pass
expected_hook = SomeDummyHook()
mock_get_conn.return_value = Connection(conn_id=f"test_connection_{conn_type}", conn_type=conn_type)
mock_get_hook.return_value = expected_hook
op_kwargs = {
**self.default_op_kwargs,
"sql_hook_params": sql_hook_params,
}
op = BaseSqlToSlackOperator(task_id="test_get_hook", **op_kwargs)
hook = op._get_hook()
mock_get_hook.assert_called_once_with(hook_params=sql_hook_params)
assert hook == expected_hook
@mock.patch("airflow.providers.common.sql.operators.sql.BaseHook.get_connection")
@mock.patch("airflow.models.connection.Connection.get_hook")
def test_get_not_supported_hook(self, mock_get_hook, mock_get_conn):
class SomeDummyHook:
"""Hook which not implemented ``get_df`` method"""
mock_get_conn.return_value = Connection(conn_id="test_connection", conn_type="test_connection")
mock_get_hook.return_value = SomeDummyHook()
op = BaseSqlToSlackOperator(task_id="test_get_not_supported_hook", **self.default_op_kwargs)
error_message = r"This hook is not supported. The hook class must have get_df method\."
with pytest.raises(AirflowException, match=error_message):
op._get_hook()
@mock.patch("airflow.providers.slack.transfers.sql_to_slack.BaseSqlToSlackOperator._get_hook")
@pytest.mark.parametrize("sql", ["SELECT 42", "SELECT 1 FROM DUMMY WHERE col = ?"])
@pytest.mark.parametrize("parameters", [None, {"col": "spam-egg"}])
def test_get_query_results(self, mock_op_get_hook, sql, parameters):
test_df = pd.DataFrame({"a": "1", "b": "2"}, index=[0, 1])
mock_get_df = mock.MagicMock(return_value=test_df)
mock_hook = mock.MagicMock()
mock_hook.get_df = mock_get_df
mock_op_get_hook.return_value = mock_hook
op_kwargs = {
**self.default_op_kwargs,
"sql": sql,
"parameters": parameters,
}
op = BaseSqlToSlackOperator(task_id="test_get_query_results", **op_kwargs)
df = op._get_query_results()
mock_get_df.assert_called_once_with(sql, parameters=parameters)
assert df is test_df
| TestBaseSqlToSlackOperator |
python | keras-team__keras | keras/src/dtype_policies/dtype_policy_map_test.py | {
"start": 469,
"end": 14020
} | class ____(testing.TestCase):
def setUp(self):
super().setUp()
self._global_dtype_policy = dtype_policy()
def tearDown(self):
super().tearDown()
set_dtype_policy(self._global_dtype_policy)
@pytest.mark.requires_trainable_backend
def test_basic_usage(self):
# Create a subclass that might contain mixing dtype policies for
# sublayers.
# It is important to ensure that `dtype` is passed to sublayers and
# that each sublayer has a unique `name`.
@saving.register_keras_serializable()
class Subclass(layers.Layer):
def __init__(self, dtype=None, name="subclass", **kwargs):
super().__init__(dtype=dtype, name=name, **kwargs)
self.dense = layers.Dense(8, dtype=dtype, name=f"{name}_dense")
self.bn = layers.BatchNormalization(
dtype=dtype, name=f"{name}_bn"
)
self.relu = layers.ReLU(dtype=dtype, name=f"{name}_relu")
def call(self, inputs, training=None):
return self.relu(self.bn(self.dense(inputs), training=training))
def get_config(self):
# Typically, we only need to record the quantized policy for
# `DTypePolicyMap`
config = super().get_config()
dtype_policy_map = DTypePolicyMap()
for layer in self._flatten_layers():
if layer.quantization_mode is not None:
dtype_policy_map[layer.path] = layer.dtype_policy
if len(dtype_policy_map) > 0:
config.update({"dtype": dtype_policy_map})
return config
# Instantiate the model
inputs = layers.Input([4])
outputs = Subclass()(inputs)
model = models.Model(inputs, outputs)
# Quantize the model to make mixing of dtype policies in sublayers
model.quantize("int8")
for layer in model._flatten_layers():
if isinstance(layer, layers.Dense):
self.assertEqual(
layer.dtype_policy,
dtype_policies.QuantizedDTypePolicy("int8"),
)
elif isinstance(layer, layers.BatchNormalization):
self.assertEqual(
layer.dtype_policy, dtype_policies.DTypePolicy()
)
elif isinstance(layer, layers.ReLU):
self.assertEqual(
layer.dtype_policy, dtype_policies.DTypePolicy()
)
# Verify the output after saving and loading
x = np.random.uniform(size=[16, 4])
temp_dir = self.get_temp_dir()
y = model(x, training=False)
model.save(f"{temp_dir}/model.keras")
reloaded_model = saving.load_model(f"{temp_dir}/model.keras")
reloaded_y = reloaded_model(x, training=False)
self.assertAllClose(y, reloaded_y)
def test_add(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
dtype_policy_map["layer/dense_2"] = (
dtype_policies.QuantizedFloat8DTypePolicy("float8", "mixed_float16")
)
self.assertLen(dtype_policy_map, 3)
policy = dtype_policy_map["layer/dense_0"]
self.assertIsInstance(policy, dtype_policies.DTypePolicy)
self.assertEqual(policy.name, "bfloat16")
policy = dtype_policy_map["layer/dense_1"]
self.assertIsInstance(policy, dtype_policies.QuantizedDTypePolicy)
self.assertEqual(policy._source_name, "mixed_bfloat16")
self.assertEqual(policy.quantization_mode, "int8")
policy = dtype_policy_map["layer/dense_2"]
self.assertIsInstance(policy, dtype_policies.QuantizedFloat8DTypePolicy)
self.assertEqual(policy._source_name, "mixed_float16")
self.assertEqual(policy.quantization_mode, "float8")
with self.assertRaisesRegex(
ValueError, "layer/dense_0 already exist in the DTypePolicyMap"
):
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"float32"
)
with self.assertRaisesRegex(
ValueError, "Cannot interpret the assigned value."
):
dtype_policy_map["layer/dense_3"] = 123
def test_get(self):
# 1. Setup
bfloat16_policy = dtype_policies.DTypePolicy("bfloat16")
int8_policy = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
float32_policy = dtype_policies.DTypePolicy("float32")
float16_policy = dtype_policies.DTypePolicy("float16")
policy_map = DTypePolicyMap()
# Policy for an exact layer path
policy_map["model/encoder/layer_0/dense"] = bfloat16_policy
# Policy for a layer that is also a prefix of another layer's name
policy_map["model/encoder/attention/query"] = int8_policy
# Regex policies for entire scopes MUST include wildcards
policy_map["model/decoder/.*"] = float32_policy
policy_map["model/decoder/attention/.*"] = float16_policy
# 2. Test exact match
self.assertEqual(
policy_map["model/encoder/layer_0/dense"], bfloat16_policy
)
self.assertEqual(
policy_map["model/encoder/attention/query"], int8_policy
)
# 3. Test successful regex fallback (explicit wildcard)
# "model/decoder/.*" should match its children.
self.assertEqual(policy_map["model/decoder/layer_0"], float32_policy)
# 4. Test that partial matches are ignored
# The exact key "model/encoder/attention/query" should not match
# "model/encoder/attention/query_norm" without a wildcard.
self.assertEqual(
policy_map["model/encoder/attention/query_norm"],
policy_map.default_policy,
)
# A plain key "model/decoder" will not match "model/decoder/layer_0"
policy_map["model/decoder"] = bfloat16_policy # Add exact key
self.assertEqual(policy_map["model/decoder/layer_0"], float32_policy)
# Still matches the more general regex
self.assertEqual(policy_map["model/decoder"], bfloat16_policy)
# 5. Test no match
self.assertEqual(
policy_map["model/embedding"], policy_map.default_policy
)
# 6. Test multiple regex matches causing a ValueError
# "model/decoder/attention/output" matches two regex patterns:
# - "model/decoder/.*"
# - "model/decoder/attention/.*"
with self.assertRaisesRegex(
ValueError,
"Path 'model/decoder/attention/output' matches multiple "
"dtype policy",
):
_ = policy_map["model/decoder/attention/output"]
def test_delete(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
self.assertEqual(
dtype_policy_map.pop("layer/dense_0"),
dtype_policies.DTypePolicy("bfloat16"),
)
with self.assertRaises(KeyError):
dtype_policy_map.pop("layer/dense_0")
# Test `del`, causing no hit
del dtype_policy_map["layer/dense_1"]
self.assertEqual(
dtype_policy_map["layer/dense_1"], dtype_policy_map.default_policy
)
self.assertLen(dtype_policy_map, 0)
def test_len(self):
dtype_policy_map = DTypePolicyMap()
self.assertLen(dtype_policy_map, 0)
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
self.assertLen(dtype_policy_map, 2)
def test_iter(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
self.assertEqual(
list(dtype_policy_map.keys()), ["layer/dense_0", "layer/dense_1"]
)
keys = []
values = []
for k, v in dtype_policy_map.items():
keys.append(k)
values.append(v)
self.assertEqual(keys, ["layer/dense_0", "layer/dense_1"])
self.assertEqual(
values,
[
dtype_policies.DTypePolicy("bfloat16"),
dtype_policies.QuantizedDTypePolicy("int8", "mixed_bfloat16"),
],
)
def test_in(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
self.assertTrue("layer/dense_0" in dtype_policy_map)
self.assertTrue("layer/dense_1" in dtype_policy_map)
self.assertFalse("layer/dense_2" in dtype_policy_map)
def test_default_policy(self):
# Test default_policy is set to `"float32"`
dtype_policy_map = DTypePolicyMap(default_policy="mixed_bfloat16")
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"mixed_bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
config = dtype_policy_map.get_config()
dtype_policy_map = DTypePolicyMap.from_config(config)
self.assertEqual(
dtype_policy_map["layer/dense_0"],
dtype_policies.DTypePolicy("mixed_bfloat16"),
)
self.assertEqual(
dtype_policy_map["layer/dense_1"],
dtype_policies.QuantizedDTypePolicy("int8", "mixed_bfloat16"),
)
# No hit, defers to `dtype_policy_map.default_policy`
self.assertEqual(
dtype_policy_map["layer/dense_2"], dtype_policy_map.default_policy
)
# Test that default_policy defers to `keras.config.dtype_policy()`
# during loading
set_dtype_policy("bfloat16")
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"mixed_bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
config = dtype_policy_map.get_config()
dtype_policy_map = DTypePolicyMap.from_config(config)
self.assertEqual(
dtype_policy_map["layer/dense_0"],
dtype_policies.DTypePolicy("bfloat16"),
)
self.assertEqual(
dtype_policy_map["layer/dense_1"],
dtype_policies.QuantizedDTypePolicy("int8", "bfloat16"),
)
# No hit, defers to `dtype_policy_map.default_policy` which is
# `keras.config.dtype_policy()`
self.assertEqual(
dtype_policy_map["layer/dense_2"], dtype_policy_map.default_policy
)
self.assertEqual(
dtype_policy_map["layer/dense_2"], dtype_policies.get("bfloat16")
)
def test_serialization(self):
dtype_policy_map = DTypePolicyMap(default_policy="mixed_bfloat16")
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"mixed_bfloat16"
)
dtype_policy_map["layer/dense_1"] = dtype_policies.QuantizedDTypePolicy(
"int8", "mixed_bfloat16"
)
config = dtype_policies.serialize(dtype_policy_map)
reloaded_dtype_policy_map = dtype_policies.deserialize(config)
self.assertEqual(
dtype_policy_map.default_policy,
reloaded_dtype_policy_map.default_policy,
)
for k, v in dtype_policy_map.items():
self.assertEqual(reloaded_dtype_policy_map[k], v)
# Test that config remains intact during deserialization
config = dtype_policy_map.get_config()
original_config = config.copy()
DTypePolicyMap.from_config(config)
self.assertDictEqual(config, original_config)
def test_repr(self):
dtype_policy_map = DTypePolicyMap()
dtype_policy_map["layer/dense_0"] = dtype_policies.DTypePolicy(
"mixed_bfloat16"
)
repr_str = repr(dtype_policy_map)
self.assertTrue("DTypePolicyMap" in repr_str)
self.assertTrue("default_policy" in repr_str)
self.assertTrue(
"mapping=[('layer/dense_0', 'mixed_bfloat16')]" in repr_str
)
def test_invalid_policy_map(self):
with self.assertRaisesRegex(
TypeError, "If specified, `policy_map` must be a dict."
):
DTypePolicyMap(policy_map=123)
with self.assertRaisesRegex(
TypeError, "If specified, `policy_map` must be a dict."
):
DTypePolicyMap(
policy_map=dtype_policies.DTypePolicy("mixed_bfloat16")
)
| DTypePolicyMapTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/emr.py | {
"start": 13684,
"end": 15006
} | class ____(AwsBaseWaiterTrigger):
"""
Poll an Emr Serverless application and wait for it to be deleted.
:param application_id: The ID of the application being polled.
:waiter_delay: polling period in seconds to check for the status
:param waiter_max_attempts: The maximum number of attempts to be made
:param aws_conn_id: Reference to AWS connection id
"""
def __init__(
self,
application_id: str,
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
aws_conn_id: str | None = "aws_default",
) -> None:
super().__init__(
serialized_fields={"application_id": application_id},
waiter_name="serverless_app_terminated",
waiter_args={"applicationId": application_id},
failure_message="Application failed to start",
status_message="Application status is",
status_queries=["application.state", "application.stateDetails"],
return_key="application_id",
return_value=application_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
def hook(self) -> AwsGenericHook:
return EmrServerlessHook(self.aws_conn_id)
| EmrServerlessDeleteApplicationTrigger |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/isinstance4.py | {
"start": 271,
"end": 610
} | class ____(Protocol):
pass
# This should generate an error because Sized is a Protocol that
# is not runtime checkable.
isinstance(4, MyProtocol1)
# This should generate an error because Iterable is a Protocol.
issubclass(str, (str, MyProtocol1))
def func1(t: type[MyProtocol1]):
isinstance(1, t)
@runtime_checkable
| MyProtocol1 |
python | getsentry__sentry | src/sentry/integrations/opsgenie/integration.py | {
"start": 3927,
"end": 4922
} | class ____:
def record_event(self, event: IntegrationPipelineViewType):
return IntegrationPipelineViewEvent(
event, IntegrationDomain.ON_CALL_SCHEDULING, OpsgenieIntegrationProvider.key
)
def dispatch(self, request: HttpRequest, pipeline: IntegrationPipeline) -> HttpResponseBase:
with self.record_event(IntegrationPipelineViewType.INSTALLATION_CONFIGURATION).capture():
if request.method == "POST":
form = InstallationForm(request.POST)
if form.is_valid():
form_data = form.cleaned_data
pipeline.bind_state("installation_data", form_data)
return pipeline.next_step()
else:
form = InstallationForm()
return render_to_response(
template="sentry/integrations/opsgenie-config.html",
context={"form": form},
request=request,
)
| InstallationConfigView |
python | dateutil__dateutil | src/dateutil/tz/tz.py | {
"start": 33828,
"end": 38395
} | class ____(tzrange):
"""
``tzstr`` objects are time zone objects specified by a time-zone string as
it would be passed to a ``TZ`` variable on POSIX-style systems (see
the `GNU C Library: TZ Variable`_ for more details).
There is one notable exception, which is that POSIX-style time zones use an
inverted offset format, so normally ``GMT+3`` would be parsed as an offset
3 hours *behind* GMT. The ``tzstr`` time zone object will parse this as an
offset 3 hours *ahead* of GMT. If you would like to maintain the POSIX
behavior, pass a ``True`` value to ``posix_offset``.
The :class:`tzrange` object provides the same functionality, but is
specified using :class:`relativedelta.relativedelta` objects. rather than
strings.
:param s:
A time zone string in ``TZ`` variable format. This can be a
:class:`bytes` (2.x: :class:`str`), :class:`str` (2.x:
:class:`unicode`) or a stream emitting unicode characters
(e.g. :class:`StringIO`).
:param posix_offset:
Optional. If set to ``True``, interpret strings such as ``GMT+3`` or
``UTC+3`` as being 3 hours *behind* UTC rather than ahead, per the
POSIX standard.
.. caution::
Prior to version 2.7.0, this function also supported time zones
in the format:
* ``EST5EDT,4,0,6,7200,10,0,26,7200,3600``
* ``EST5EDT,4,1,0,7200,10,-1,0,7200,3600``
This format is non-standard and has been deprecated; this function
will raise a :class:`DeprecatedTZFormatWarning` until
support is removed in a future version.
.. _`GNU C Library: TZ Variable`:
https://www.gnu.org/software/libc/manual/html_node/TZ-Variable.html
"""
def __init__(self, s, posix_offset=False):
global parser
from dateutil.parser import _parser as parser
self._s = s
res = parser._parsetz(s)
if res is None or res.any_unused_tokens:
raise ValueError("unknown string format")
# Here we break the compatibility with the TZ variable handling.
# GMT-3 actually *means* the timezone -3.
if res.stdabbr in ("GMT", "UTC") and not posix_offset:
res.stdoffset *= -1
# We must initialize it first, since _delta() needs
# _std_offset and _dst_offset set. Use False in start/end
# to avoid building it two times.
tzrange.__init__(self, res.stdabbr, res.stdoffset,
res.dstabbr, res.dstoffset,
start=False, end=False)
if not res.dstabbr:
self._start_delta = None
self._end_delta = None
else:
self._start_delta = self._delta(res.start)
if self._start_delta:
self._end_delta = self._delta(res.end, isend=1)
self.hasdst = bool(self._start_delta)
def _delta(self, x, isend=0):
from dateutil import relativedelta
kwargs = {}
if x.month is not None:
kwargs["month"] = x.month
if x.weekday is not None:
kwargs["weekday"] = relativedelta.weekday(x.weekday, x.week)
if x.week > 0:
kwargs["day"] = 1
else:
kwargs["day"] = 31
elif x.day:
kwargs["day"] = x.day
elif x.yday is not None:
kwargs["yearday"] = x.yday
elif x.jyday is not None:
kwargs["nlyearday"] = x.jyday
if not kwargs:
# Default is to start on first sunday of april, and end
# on last sunday of october.
if not isend:
kwargs["month"] = 4
kwargs["day"] = 1
kwargs["weekday"] = relativedelta.SU(+1)
else:
kwargs["month"] = 10
kwargs["day"] = 31
kwargs["weekday"] = relativedelta.SU(-1)
if x.time is not None:
kwargs["seconds"] = x.time
else:
# Default is 2AM.
kwargs["seconds"] = 7200
if isend:
# Convert to standard time, to follow the documented way
# of working with the extra hour. See the documentation
# of the tzinfo class.
delta = self._dst_offset - self._std_offset
kwargs["seconds"] -= delta.seconds + delta.days * 86400
return relativedelta.relativedelta(**kwargs)
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._s))
| tzstr |
python | matplotlib__matplotlib | lib/matplotlib/spines.py | {
"start": 312,
"end": 18498
} | class ____(mpatches.Patch):
"""
An axis spine -- the line noting the data area boundaries.
Spines are the lines connecting the axis tick marks and noting the
boundaries of the data area. They can be placed at arbitrary
positions. See `~.Spine.set_position` for more information.
The default position is ``('outward', 0)``.
Spines are subclasses of `.Patch`, and inherit much of their behavior.
Spines draw a line, a circle, or an arc depending on if
`~.Spine.set_patch_line`, `~.Spine.set_patch_circle`, or
`~.Spine.set_patch_arc` has been called. Line-like is the default.
For examples see :ref:`spines_examples`.
"""
def __str__(self):
return "Spine"
@_docstring.interpd
def __init__(self, axes, spine_type, path, **kwargs):
"""
Parameters
----------
axes : `~matplotlib.axes.Axes`
The `~.axes.Axes` instance containing the spine.
spine_type : str
The spine type.
path : `~matplotlib.path.Path`
The `.Path` instance used to draw the spine.
Other Parameters
----------------
**kwargs
Valid keyword arguments are:
%(Patch:kwdoc)s
"""
super().__init__(**kwargs)
self.axes = axes
self.set_figure(self.axes.get_figure(root=False))
self.spine_type = spine_type
self.set_facecolor('none')
self.set_edgecolor(mpl.rcParams['axes.edgecolor'])
self.set_linewidth(mpl.rcParams['axes.linewidth'])
self.set_capstyle('projecting')
self.axis = None
self.set_zorder(2.5)
self.set_transform(self.axes.transData) # default transform
self._bounds = None # default bounds
# Defer initial position determination. (Not much support for
# non-rectangular axes is currently implemented, and this lets
# them pass through the spines machinery without errors.)
self._position = None
_api.check_isinstance(mpath.Path, path=path)
self._path = path
# To support drawing both linear and circular spines, this
# class implements Patch behavior three ways. If
# self._patch_type == 'line', behave like a mpatches.PathPatch
# instance. If self._patch_type == 'circle', behave like a
# mpatches.Ellipse instance. If self._patch_type == 'arc', behave like
# a mpatches.Arc instance.
self._patch_type = 'line'
# Behavior copied from mpatches.Ellipse:
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = mtransforms.IdentityTransform()
def set_patch_arc(self, center, radius, theta1, theta2):
"""Set the spine to be arc-like."""
self._patch_type = 'arc'
self._center = center
self._width = radius * 2
self._height = radius * 2
self._theta1 = theta1
self._theta2 = theta2
self._path = mpath.Path.arc(theta1, theta2)
# arc drawn on axes transform
self.set_transform(self.axes.transAxes)
self.stale = True
def set_patch_circle(self, center, radius):
"""Set the spine to be circular."""
self._patch_type = 'circle'
self._center = center
self._width = radius * 2
self._height = radius * 2
# circle drawn on axes transform
self.set_transform(self.axes.transAxes)
self.stale = True
def set_patch_line(self):
"""Set the spine to be linear."""
self._patch_type = 'line'
self.stale = True
# Behavior copied from mpatches.Ellipse:
def _recompute_transform(self):
"""
Notes
-----
This cannot be called until after this has been added to an Axes,
otherwise unit conversion will fail. This makes it very important to
call the accessor method and not directly access the transformation
member variable.
"""
assert self._patch_type in ('arc', 'circle')
center = (self.convert_xunits(self._center[0]),
self.convert_yunits(self._center[1]))
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
self._patch_transform = mtransforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.translate(*center)
def get_patch_transform(self):
if self._patch_type in ('arc', 'circle'):
self._recompute_transform()
return self._patch_transform
else:
return super().get_patch_transform()
def get_window_extent(self, renderer=None):
"""
Return the window extent of the spines in display space, including
padding for ticks (but not their labels)
See Also
--------
matplotlib.axes.Axes.get_tightbbox
matplotlib.axes.Axes.get_window_extent
"""
# make sure the location is updated so that transforms etc are correct:
self._adjust_location()
bb = super().get_window_extent(renderer=renderer)
if self.axis is None or not self.axis.get_visible():
return bb
bboxes = [bb]
drawn_ticks = self.axis._update_ticks()
major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None)
minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None)
for tick in [major_tick, minor_tick]:
if tick is None:
continue
bb0 = bb.frozen()
tickl = tick._size
tickdir = tick._tickdir
if tickdir == 'out':
padout = 1
padin = 0
elif tickdir == 'in':
padout = 0
padin = 1
else:
padout = 0.5
padin = 0.5
dpi = self.get_figure(root=True).dpi
padout = padout * tickl / 72 * dpi
padin = padin * tickl / 72 * dpi
if tick.tick1line.get_visible():
if self.spine_type == 'left':
bb0.x0 = bb0.x0 - padout
bb0.x1 = bb0.x1 + padin
elif self.spine_type == 'bottom':
bb0.y0 = bb0.y0 - padout
bb0.y1 = bb0.y1 + padin
if tick.tick2line.get_visible():
if self.spine_type == 'right':
bb0.x1 = bb0.x1 + padout
bb0.x0 = bb0.x0 - padin
elif self.spine_type == 'top':
bb0.y1 = bb0.y1 + padout
bb0.y0 = bb0.y0 - padout
bboxes.append(bb0)
return mtransforms.Bbox.union(bboxes)
def get_path(self):
return self._path
def _ensure_position_is_set(self):
if self._position is None:
# default position
self._position = ('outward', 0.0) # in points
self.set_position(self._position)
def register_axis(self, axis):
"""
Register an axis.
An axis should be registered with its corresponding spine from
the Axes instance. This allows the spine to clear any axis
properties when needed.
"""
self.axis = axis
self.stale = True
def clear(self):
"""Clear the current spine."""
self._clear()
if self.axis is not None:
self.axis.clear()
def _clear(self):
"""
Clear things directly related to the spine.
In this way it is possible to avoid clearing the Axis as well when calling
from library code where it is known that the Axis is cleared separately.
"""
self._position = None # clear position
def _get_bounds_or_viewLim(self):
"""
Get the bounds of the spine.
If self._bounds is None, return self.axes.viewLim.intervalx
or self.axes.viewLim.intervaly based on self.spine_type
"""
if self._bounds is not None:
low, high = self._bounds
elif self.spine_type in ('left', 'right'):
low, high = self.axes.viewLim.intervaly
elif self.spine_type in ('top', 'bottom'):
low, high = self.axes.viewLim.intervalx
else:
raise ValueError(f'spine_type: {self.spine_type} not supported')
return low, high
def _adjust_location(self):
"""Automatically set spine bounds to the view interval."""
if self.spine_type == 'circle':
return
low, high = self._get_bounds_or_viewLim()
if self._patch_type == 'arc':
if self.spine_type in ('bottom', 'top'):
try:
direction = self.axes.get_theta_direction()
except AttributeError:
direction = 1
try:
offset = self.axes.get_theta_offset()
except AttributeError:
offset = 0
low = low * direction + offset
high = high * direction + offset
if low > high:
low, high = high, low
self._path = mpath.Path.arc(np.rad2deg(low), np.rad2deg(high))
if self.spine_type == 'bottom':
if self.axis is None:
tr = mtransforms.IdentityTransform()
else:
tr = self.axis.get_transform()
rmin, rmax = tr.transform(self.axes.viewLim.intervaly)
try:
rorigin = self.axes.get_rorigin()
except AttributeError:
rorigin = rmin
else:
rorigin = tr.transform(rorigin)
scaled_diameter = (rmin - rorigin) / (rmax - rorigin)
self._height = scaled_diameter
self._width = scaled_diameter
else:
raise ValueError('unable to set bounds for spine "%s"' %
self.spine_type)
else:
v1 = self._path.vertices
assert v1.shape == (2, 2), 'unexpected vertices shape'
if self.spine_type in ['left', 'right']:
v1[0, 1] = low
v1[1, 1] = high
elif self.spine_type in ['bottom', 'top']:
v1[0, 0] = low
v1[1, 0] = high
else:
raise ValueError('unable to set bounds for spine "%s"' %
self.spine_type)
@allow_rasterization
def draw(self, renderer):
self._adjust_location()
ret = super().draw(renderer)
self.stale = False
return ret
def set_position(self, position):
"""
Set the position of the spine.
Spine position is specified by a 2 tuple of (position type,
amount). The position types are:
* 'outward': place the spine out from the data area by the specified
number of points. (Negative values place the spine inwards.)
* 'axes': place the spine at the specified Axes coordinate (0 to 1).
* 'data': place the spine at the specified data coordinate.
Additionally, shorthand notations define a special positions:
* 'center' -> ``('axes', 0.5)``
* 'zero' -> ``('data', 0.0)``
Examples
--------
:doc:`/gallery/spines/spine_placement_demo`
"""
if position in ('center', 'zero'): # special positions
pass
else:
if len(position) != 2:
raise ValueError("position should be 'center' or 2-tuple")
if position[0] not in ['outward', 'axes', 'data']:
raise ValueError("position[0] should be one of 'outward', "
"'axes', or 'data' ")
self._position = position
self.set_transform(self.get_spine_transform())
if self.axis is not None:
self.axis.reset_ticks()
self.stale = True
def get_position(self):
"""Return the spine position."""
self._ensure_position_is_set()
return self._position
def get_spine_transform(self):
"""Return the spine transform."""
self._ensure_position_is_set()
position = self._position
if isinstance(position, str):
if position == 'center':
position = ('axes', 0.5)
elif position == 'zero':
position = ('data', 0)
assert len(position) == 2, 'position should be 2-tuple'
position_type, amount = position
_api.check_in_list(['axes', 'outward', 'data'],
position_type=position_type)
if self.spine_type in ['left', 'right']:
base_transform = self.axes.get_yaxis_transform(which='grid')
elif self.spine_type in ['top', 'bottom']:
base_transform = self.axes.get_xaxis_transform(which='grid')
else:
raise ValueError(f'unknown spine spine_type: {self.spine_type!r}')
if position_type == 'outward':
if amount == 0: # short circuit commonest case
return base_transform
else:
offset_vec = {'left': (-1, 0), 'right': (1, 0),
'bottom': (0, -1), 'top': (0, 1),
}[self.spine_type]
# calculate x and y offset in dots
offset_dots = amount * np.array(offset_vec) / 72
return (base_transform
+ mtransforms.ScaledTranslation(
*offset_dots, self.get_figure(root=False).dpi_scale_trans))
elif position_type == 'axes':
if self.spine_type in ['left', 'right']:
# keep y unchanged, fix x at amount
return (mtransforms.Affine2D.from_values(0, 0, 0, 1, amount, 0)
+ base_transform)
elif self.spine_type in ['bottom', 'top']:
# keep x unchanged, fix y at amount
return (mtransforms.Affine2D.from_values(1, 0, 0, 0, 0, amount)
+ base_transform)
elif position_type == 'data':
if self.spine_type in ('right', 'top'):
# The right and top spines have a default position of 1 in
# axes coordinates. When specifying the position in data
# coordinates, we need to calculate the position relative to 0.
amount -= 1
if self.spine_type in ('left', 'right'):
return mtransforms.blended_transform_factory(
mtransforms.Affine2D().translate(amount, 0)
+ self.axes.transData,
self.axes.transData)
elif self.spine_type in ('bottom', 'top'):
return mtransforms.blended_transform_factory(
self.axes.transData,
mtransforms.Affine2D().translate(0, amount)
+ self.axes.transData)
def set_bounds(self, low=None, high=None):
"""
Set the spine bounds.
Parameters
----------
low : float or None, optional
The lower spine bound. Passing *None* leaves the limit unchanged.
The bounds may also be passed as the tuple (*low*, *high*) as the
first positional argument.
.. ACCEPTS: (low: float, high: float)
high : float or None, optional
The higher spine bound. Passing *None* leaves the limit unchanged.
"""
if self.spine_type == 'circle':
raise ValueError(
'set_bounds() method incompatible with circular spines')
if high is None and np.iterable(low):
low, high = low
old_low, old_high = self._get_bounds_or_viewLim()
if low is None:
low = old_low
if high is None:
high = old_high
self._bounds = (low, high)
self.stale = True
def get_bounds(self):
"""Get the bounds of the spine."""
return self._bounds
@classmethod
def linear_spine(cls, axes, spine_type, **kwargs):
"""Create and return a linear `Spine`."""
# all values of 0.999 get replaced upon call to set_bounds()
if spine_type == 'left':
path = mpath.Path([(0.0, 0.999), (0.0, 0.999)])
elif spine_type == 'right':
path = mpath.Path([(1.0, 0.999), (1.0, 0.999)])
elif spine_type == 'bottom':
path = mpath.Path([(0.999, 0.0), (0.999, 0.0)])
elif spine_type == 'top':
path = mpath.Path([(0.999, 1.0), (0.999, 1.0)])
else:
raise ValueError('unable to make path for spine "%s"' % spine_type)
result = cls(axes, spine_type, path, **kwargs)
result.set_visible(mpl.rcParams[f'axes.spines.{spine_type}'])
return result
@classmethod
def arc_spine(cls, axes, spine_type, center, radius, theta1, theta2,
**kwargs):
"""Create and return an arc `Spine`."""
path = mpath.Path.arc(theta1, theta2)
result = cls(axes, spine_type, path, **kwargs)
result.set_patch_arc(center, radius, theta1, theta2)
return result
@classmethod
def circular_spine(cls, axes, center, radius, **kwargs):
"""Create and return a circular `Spine`."""
path = mpath.Path.unit_circle()
spine_type = 'circle'
result = cls(axes, spine_type, path, **kwargs)
result.set_patch_circle(center, radius)
return result
def set_color(self, c):
"""
Set the edgecolor.
Parameters
----------
c : :mpltype:`color`
Notes
-----
This method does not modify the facecolor (which defaults to "none"),
unlike the `.Patch.set_color` method defined in the parent class. Use
`.Patch.set_facecolor` to set the facecolor.
"""
self.set_edgecolor(c)
self.stale = True
| Spine |
python | PrefectHQ__prefect | src/prefect/deployments/runner.py | {
"start": 3358,
"end": 57579
} | class ____(BaseModel):
"""
A Prefect RunnerDeployment definition, used for specifying and building deployments.
Attributes:
name: A name for the deployment (required).
version: An optional version for the deployment; defaults to the flow's version
description: An optional description of the deployment; defaults to the flow's
description
tags: An optional list of tags to associate with this deployment; note that tags
are used only for organizational purposes. For delegating work to workers,
see `work_queue_name`.
schedule: A schedule to run this deployment on, once registered
parameters: A dictionary of parameter values to pass to runs created from this
deployment
path: The path to the working directory for the workflow, relative to remote
storage or, if stored on a local filesystem, an absolute path
entrypoint: The path to the entrypoint for the workflow, always relative to the
`path`
parameter_openapi_schema: The parameter schema of the flow, including defaults.
enforce_parameter_schema: Whether or not the Prefect API should enforce the
parameter schema for this deployment.
work_pool_name: The name of the work pool to use for this deployment.
work_queue_name: The name of the work queue to use for this deployment's scheduled runs.
If not provided the default work queue for the work pool will be used.
job_variables: Settings used to override the values specified default base job template
of the chosen work pool. Refer to the base job template of the chosen work pool for
available settings.
_sla: (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
"""
model_config: ClassVar[ConfigDict] = ConfigDict(
arbitrary_types_allowed=True, validate_assignment=True
)
name: str = Field(..., description="The name of the deployment.")
flow_name: Optional[str] = Field(
None, description="The name of the underlying flow; typically inferred."
)
description: Optional[str] = Field(
default=None, description="An optional description of the deployment."
)
version: Optional[str] = Field(
default=None, description="An optional version for the deployment."
)
version_type: Optional[VersionType] = Field(
default=None,
description=(
"The type of version information to use for the deployment. The version type"
" will be inferred if not provided."
),
)
tags: ListOfNonEmptyStrings = Field(
default_factory=list,
description="One of more tags to apply to this deployment.",
)
schedules: Optional[
List[Union[DeploymentScheduleCreate, DeploymentScheduleUpdate]]
] = Field(
default=None,
description="The schedules that should cause this deployment to run.",
)
concurrency_limit: Optional[int] = Field(
default=None,
description="The maximum number of concurrent runs of this deployment.",
)
concurrency_options: Optional[ConcurrencyOptions] = Field(
default=None,
description="The concurrency limit config for the deployment.",
)
paused: Optional[bool] = Field(
default=None, description="Whether or not the deployment is paused."
)
parameters: dict[str, Any] = Field(default_factory=dict)
entrypoint: Optional[str] = Field(
default=None,
description=(
"The path to the entrypoint for the workflow, relative to the `path`."
),
)
triggers: List[Union[DeploymentTriggerTypes, TriggerTypes]] = Field(
default_factory=list,
description="The triggers that should cause this deployment to run.",
)
enforce_parameter_schema: bool = Field(
default=True,
description=(
"Whether or not the Prefect API should enforce the parameter schema for"
" this deployment."
),
)
storage: Optional[RunnerStorage] = Field(
default=None,
description=(
"The storage object used to retrieve flow code for this deployment."
),
)
work_pool_name: Optional[str] = Field(
default=None,
description=(
"The name of the work pool to use for this deployment. Only used when"
" the deployment is registered with a built runner."
),
)
work_queue_name: Optional[str] = Field(
default=None,
description=(
"The name of the work queue to use for this deployment. Only used when"
" the deployment is registered with a built runner."
),
)
job_variables: dict[str, Any] = Field(
default_factory=dict,
description=(
"Job variables used to override the default values of a work pool"
" base job template. Only used when the deployment is registered with"
" a built runner."
),
)
# (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
_sla: Optional[Union[SlaTypes, list[SlaTypes]]] = PrivateAttr(
default=None,
)
_entrypoint_type: EntrypointType = PrivateAttr(
default=EntrypointType.FILE_PATH,
)
_path: Optional[str] = PrivateAttr(
default=None,
)
_parameter_openapi_schema: ParameterSchema = PrivateAttr(
default_factory=ParameterSchema,
)
_version_from_flow: bool = PrivateAttr(
default=False,
)
@property
def entrypoint_type(self) -> EntrypointType:
return self._entrypoint_type
@property
def full_name(self) -> str:
return f"{self.flow_name}/{self.name}"
def _get_deployment_version_info(
self, version_type: Optional[VersionType] = None
) -> VersionInfo:
if inferred_version := run_coro_as_sync(
get_inferred_version_info(version_type)
):
if not self.version or self._version_from_flow:
self.version = inferred_version.version # TODO: maybe reconsider
inferred_version.version = self.version
return inferred_version
return VersionInfo(version=self.version or "", type="prefect:simple")
@field_validator("name", mode="before")
@classmethod
def validate_name(cls, value: str) -> str:
if value.endswith(".py"):
return Path(value).stem
return value
@model_validator(mode="after")
def validate_automation_names(self):
"""Ensure that each trigger has a name for its automation if none is provided."""
trigger: Union[DeploymentTriggerTypes, TriggerTypes]
for i, trigger in enumerate(self.triggers, start=1):
if trigger.name is None:
trigger.name = f"{self.name}__automation_{i}"
return self
@model_validator(mode="after")
def validate_deployment_parameters(self) -> Self:
"""Update the parameter schema to mark frozen parameters as readonly."""
if not self.parameters:
return self
for key, value in self.parameters.items():
if isinstance(value, freeze):
raw_value = value.unfreeze()
if key in self._parameter_openapi_schema.properties:
self._parameter_openapi_schema.properties[key]["readOnly"] = True
self._parameter_openapi_schema.properties[key]["enum"] = [raw_value]
self.parameters[key] = raw_value
return self
@model_validator(mode="before")
@classmethod
def reconcile_paused(cls, values: dict[str, Any]) -> dict[str, Any]:
return reconcile_paused_deployment(values)
@model_validator(mode="before")
@classmethod
def reconcile_schedules(cls, values: dict[str, Any]) -> dict[str, Any]:
return reconcile_schedules_runner(values)
async def _create(
self,
work_pool_name: Optional[str] = None,
image: Optional[str] = None,
version_info: VersionInfo | None = None,
) -> UUID:
work_pool_name = work_pool_name or self.work_pool_name
if image and not work_pool_name:
raise ValueError(
"An image can only be provided when registering a deployment with a"
" work pool."
)
if self.work_queue_name and not work_pool_name:
raise ValueError(
"A work queue can only be provided when registering a deployment with"
" a work pool."
)
if self.job_variables and not work_pool_name:
raise ValueError(
"Job variables can only be provided when registering a deployment"
" with a work pool."
)
async with get_client() as client:
flow_id = await client.create_flow_from_name(self.flow_name)
create_payload: dict[str, Any] = dict(
flow_id=flow_id,
name=self.name,
work_queue_name=self.work_queue_name,
work_pool_name=work_pool_name,
version=self.version,
version_info=version_info,
paused=self.paused,
schedules=self.schedules,
concurrency_limit=self.concurrency_limit,
concurrency_options=self.concurrency_options,
parameters=self.parameters,
description=self.description,
tags=self.tags,
path=self._path,
entrypoint=self.entrypoint,
storage_document_id=None,
infrastructure_document_id=None,
parameter_openapi_schema=self._parameter_openapi_schema.model_dump(
exclude_unset=True
),
enforce_parameter_schema=self.enforce_parameter_schema,
)
if work_pool_name:
create_payload["job_variables"] = self.job_variables
if image:
create_payload["job_variables"]["image"] = image
create_payload["path"] = None if self.storage else self._path
if self.storage:
pull_steps = self.storage.to_pull_step()
if isinstance(pull_steps, list):
create_payload["pull_steps"] = pull_steps
elif pull_steps:
create_payload["pull_steps"] = [pull_steps]
else:
create_payload["pull_steps"] = []
else:
create_payload["pull_steps"] = []
try:
deployment_id = await client.create_deployment(**create_payload)
except Exception as exc:
if isinstance(exc, PrefectHTTPStatusError):
detail = exc.response.json().get("detail")
if detail:
raise DeploymentApplyError(detail) from exc
raise DeploymentApplyError(
f"Error while applying deployment: {str(exc)}"
) from exc
await self._create_triggers(deployment_id, client)
# We plan to support SLA configuration on the Prefect Server in the future.
# For now, we only support it on Prefect Cloud.
# If we're provided with an empty list, we will call the apply endpoint
# to remove existing SLAs for the deployment. If the argument is not provided,
# we will not call the endpoint.
if self._sla or self._sla == []:
await self._create_slas(deployment_id, client)
return deployment_id
async def _update(
self,
deployment_id: UUID,
client: PrefectClient,
version_info: VersionInfo | None,
):
parameter_openapi_schema = self._parameter_openapi_schema.model_dump(
exclude_unset=True
)
update_payload = self.model_dump(
mode="json",
exclude_unset=True,
exclude={"storage", "name", "flow_name", "triggers", "version_type"},
)
if self.storage:
pull_steps = self.storage.to_pull_step()
if pull_steps and not isinstance(pull_steps, list):
pull_steps = [pull_steps]
update_payload["pull_steps"] = pull_steps
else:
update_payload["pull_steps"] = None
if self.schedules:
update_payload["schedules"] = [
schedule.model_dump(mode="json", exclude_unset=True)
for schedule in self.schedules
]
await client.update_deployment(
deployment_id,
deployment=DeploymentUpdate(
**update_payload,
version_info=version_info,
parameter_openapi_schema=parameter_openapi_schema,
),
)
await self._create_triggers(deployment_id, client)
# We plan to support SLA configuration on the Prefect Server in the future.
# For now, we only support it on Prefect Cloud.
# If we're provided with an empty list, we will call the apply endpoint
# to remove existing SLAs for the deployment. If the argument is not provided,
# we will not call the endpoint.
if self._sla or self._sla == []:
await self._create_slas(deployment_id, client)
return deployment_id
async def _create_triggers(self, deployment_id: UUID, client: PrefectClient):
try:
# The triggers defined in the deployment spec are, essentially,
# anonymous and attempting truly sync them with cloud is not
# feasible. Instead, we remove all automations that are owned
# by the deployment, meaning that they were created via this
# mechanism below, and then recreate them.
await client.delete_resource_owned_automations(
f"prefect.deployment.{deployment_id}"
)
except PrefectHTTPStatusError as e:
if e.response.status_code == 404:
# This Prefect server does not support automations, so we can safely
# ignore this 404 and move on.
return deployment_id
raise e
for trigger in self.triggers:
trigger.set_deployment_id(deployment_id)
await client.create_automation(trigger.as_automation())
@sync_compatible
async def apply(
self,
schedules: Optional[List[dict[str, Any]]] = None,
work_pool_name: Optional[str] = None,
image: Optional[str] = None,
version_info: Optional[VersionInfo] = None,
) -> UUID:
"""
Registers this deployment with the API and returns the deployment's ID.
Args:
work_pool_name: The name of the work pool to use for this
deployment.
image: The registry, name, and tag of the Docker image to
use for this deployment. Only used when the deployment is
deployed to a work pool.
version_info: The version information to use for the deployment.
Returns:
The ID of the created deployment.
"""
version_info = version_info or self._get_deployment_version_info(
self.version_type
)
async with get_client() as client:
try:
deployment = await client.read_deployment_by_name(self.full_name)
except ObjectNotFound:
if schedules:
self.schedules = [
DeploymentScheduleCreate(**schedule) for schedule in schedules
]
return await self._create(work_pool_name, image, version_info)
else:
if image:
self.job_variables["image"] = image
if work_pool_name:
self.work_pool_name = work_pool_name
if schedules:
self.schedules = [
DeploymentScheduleUpdate(**schedule) for schedule in schedules
]
return await self._update(deployment.id, client, version_info)
async def _create_slas(self, deployment_id: UUID, client: PrefectClient):
if not isinstance(self._sla, list):
self._sla = [self._sla]
if client.server_type == ServerType.CLOUD:
await client.apply_slas_for_deployment(deployment_id, self._sla)
else:
raise ValueError(
"SLA configuration is currently only supported on Prefect Cloud."
)
@staticmethod
def _construct_deployment_schedules(
interval: Optional[
Union[Iterable[Union[int, float, timedelta]], int, float, timedelta]
] = None,
anchor_date: Optional[Union[datetime, str]] = None,
cron: Optional[Union[Iterable[str], str]] = None,
rrule: Optional[Union[Iterable[str], str]] = None,
timezone: Optional[str] = None,
schedule: Union[SCHEDULE_TYPES, Schedule, None] = None,
schedules: Optional["FlexibleScheduleList"] = None,
) -> Union[List[DeploymentScheduleCreate], "FlexibleScheduleList"]:
"""
Construct a schedule or schedules from the provided arguments.
This method serves as a unified interface for creating deployment
schedules. If `schedules` is provided, it is directly returned. If
`schedule` is provided, it is encapsulated in a list and returned. If
`interval`, `cron`, or `rrule` are provided, they are used to construct
schedule objects.
Args:
interval: An interval on which to schedule runs, either as a single
value or as a list of values. Accepts numbers (interpreted as
seconds) or `timedelta` objects. Each value defines a separate
scheduling interval.
anchor_date: The anchor date from which interval schedules should
start. This applies to all intervals if a list is provided.
cron: A cron expression or a list of cron expressions defining cron
schedules. Each expression defines a separate cron schedule.
rrule: An rrule string or a list of rrule strings for scheduling.
Each string defines a separate recurrence rule.
timezone: The timezone to apply to the cron or rrule schedules.
This is a single value applied uniformly to all schedules.
schedule: A singular schedule object, used for advanced scheduling
options like specifying a timezone. This is returned as a list
containing this single schedule.
schedules: A pre-defined list of schedule objects. If provided,
this list is returned as-is, bypassing other schedule construction
logic.
"""
num_schedules = sum(
1
for entry in (interval, cron, rrule, schedule, schedules)
if entry is not None
)
if num_schedules > 1:
raise ValueError(
"Only one of interval, cron, rrule, schedule, or schedules can be provided."
)
elif num_schedules == 0:
return []
if schedules is not None:
return schedules
elif interval or cron or rrule:
# `interval`, `cron`, and `rrule` can be lists of values. This
# block figures out which one is not None and uses that to
# construct the list of schedules via `construct_schedule`.
parameters = [("interval", interval), ("cron", cron), ("rrule", rrule)]
schedule_type, value = [
param for param in parameters if param[1] is not None
][0]
if not isiterable(value):
value = [value]
return [
create_deployment_schedule_create(
construct_schedule(
**{
schedule_type: v,
"timezone": timezone,
"anchor_date": anchor_date,
}
)
)
for v in value
]
else:
return [create_deployment_schedule_create(schedule)]
def _set_defaults_from_flow(self, flow: "Flow[..., Any]"):
self._parameter_openapi_schema = parameter_schema(flow)
if not self.version:
self.version = flow.version
self._version_from_flow = True
if not self.description:
self.description = flow.description
@classmethod
def from_flow(
cls,
flow: "Flow[..., Any]",
name: str,
interval: Optional[
Union[Iterable[Union[int, float, timedelta]], int, float, timedelta]
] = None,
cron: Optional[Union[Iterable[str], str]] = None,
rrule: Optional[Union[Iterable[str], str]] = None,
paused: Optional[bool] = None,
schedule: Optional[Schedule] = None,
schedules: Optional["FlexibleScheduleList"] = None,
concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
parameters: Optional[dict[str, Any]] = None,
triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
version: Optional[str] = None,
version_type: Optional[VersionType] = None,
enforce_parameter_schema: bool = True,
work_pool_name: Optional[str] = None,
work_queue_name: Optional[str] = None,
job_variables: Optional[dict[str, Any]] = None,
entrypoint_type: EntrypointType = EntrypointType.FILE_PATH,
_sla: Optional[Union[SlaTypes, list[SlaTypes]]] = None, # experimental
) -> "RunnerDeployment":
"""
Configure a deployment for a given flow.
Args:
flow: A flow function to deploy
name: A name for the deployment
interval: An interval on which to execute the current flow. Accepts either a number
or a timedelta object. If a number is given, it will be interpreted as seconds.
cron: A cron schedule of when to execute runs of this flow.
rrule: An rrule schedule of when to execute runs of this flow.
paused: Whether or not to set this deployment as paused.
schedule: A schedule object defining when to execute runs of this deployment.
Used to provide additional scheduling options like `timezone` or `parameters`.
schedules: A list of schedule objects defining when to execute runs of this deployment.
Used to define multiple schedules or additional scheduling options like `timezone`.
concurrency_limit: The maximum number of concurrent runs this deployment will allow.
triggers: A list of triggers that should kick of a run of this flow.
parameters: A dictionary of default parameter values to pass to runs of this flow.
description: A description for the created deployment. Defaults to the flow's
description if not provided.
tags: A list of tags to associate with the created deployment for organizational
purposes.
version: A version for the created deployment. Defaults to the flow's version.
version_type: The type of version information to use for the deployment.
enforce_parameter_schema: Whether or not the Prefect API should enforce the
parameter schema for this deployment.
work_pool_name: The name of the work pool to use for this deployment.
work_queue_name: The name of the work queue to use for this deployment's scheduled runs.
If not provided the default work queue for the work pool will be used.
job_variables: Settings used to override the values specified default base job template
of the chosen work pool. Refer to the base job template of the chosen work pool for
available settings.
_sla: (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
"""
constructed_schedules = cls._construct_deployment_schedules(
interval=interval,
cron=cron,
rrule=rrule,
schedules=schedules,
schedule=schedule,
)
job_variables = job_variables or {}
if isinstance(concurrency_limit, ConcurrencyLimitConfig):
concurrency_options = {
"collision_strategy": concurrency_limit.collision_strategy
}
concurrency_limit = concurrency_limit.limit
else:
concurrency_options = None
deployment = cls(
name=name,
flow_name=flow.name,
schedules=constructed_schedules,
concurrency_limit=concurrency_limit,
concurrency_options=concurrency_options,
paused=paused,
tags=tags or [],
triggers=triggers or [],
parameters=parameters or {},
description=description,
version=version,
version_type=version_type,
enforce_parameter_schema=enforce_parameter_schema,
work_pool_name=work_pool_name,
work_queue_name=work_queue_name,
job_variables=job_variables,
)
deployment._sla = _sla
if not deployment.entrypoint:
no_file_location_error = (
"Flows defined interactively cannot be deployed. Check out the"
" quickstart guide for help getting started:"
" https://docs.prefect.io/latest/get-started/quickstart"
)
## first see if an entrypoint can be determined
flow_file = getattr(flow, "__globals__", {}).get("__file__")
mod_name = getattr(flow, "__module__", None)
if entrypoint_type == EntrypointType.MODULE_PATH:
if mod_name:
deployment.entrypoint = f"{mod_name}.{flow.__name__}"
else:
raise ValueError(
"Unable to determine module path for provided flow."
)
else:
if not flow_file:
if not mod_name:
raise ValueError(no_file_location_error)
try:
module = importlib.import_module(mod_name)
flow_file = getattr(module, "__file__", None)
except ModuleNotFoundError:
raise ValueError(no_file_location_error)
if not flow_file:
raise ValueError(no_file_location_error)
# set entrypoint
entry_path = (
Path(flow_file).absolute().relative_to(Path.cwd().absolute())
)
deployment.entrypoint = (
f"{entry_path}:{getattr(flow.fn, '__qualname__', flow.fn.__name__)}"
)
if entrypoint_type == EntrypointType.FILE_PATH and not deployment._path:
deployment._path = "."
deployment._entrypoint_type = entrypoint_type
cls._set_defaults_from_flow(deployment, flow)
return deployment
@classmethod
def from_entrypoint(
cls,
entrypoint: str,
name: str,
flow_name: Optional[str] = None,
interval: Optional[
Union[Iterable[Union[int, float, timedelta]], int, float, timedelta]
] = None,
cron: Optional[Union[Iterable[str], str]] = None,
rrule: Optional[Union[Iterable[str], str]] = None,
paused: Optional[bool] = None,
schedule: Optional[Schedule] = None,
schedules: Optional["FlexibleScheduleList"] = None,
concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
parameters: Optional[dict[str, Any]] = None,
triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
version: Optional[str] = None,
enforce_parameter_schema: bool = True,
work_pool_name: Optional[str] = None,
work_queue_name: Optional[str] = None,
job_variables: Optional[dict[str, Any]] = None,
_sla: Optional[Union[SlaTypes, list[SlaTypes]]] = None, # experimental
) -> "RunnerDeployment":
"""
Configure a deployment for a given flow located at a given entrypoint.
Args:
entrypoint: The path to a file containing a flow and the name of the flow function in
the format `./path/to/file.py:flow_func_name`.
name: A name for the deployment
flow_name: The name of the flow to deploy
interval: An interval on which to execute the current flow. Accepts either a number
or a timedelta object. If a number is given, it will be interpreted as seconds.
cron: A cron schedule of when to execute runs of this flow.
rrule: An rrule schedule of when to execute runs of this flow.
paused: Whether or not to set this deployment as paused.
schedules: A list of schedule objects defining when to execute runs of this deployment.
Used to define multiple schedules or additional scheduling options like `timezone`.
triggers: A list of triggers that should kick of a run of this flow.
parameters: A dictionary of default parameter values to pass to runs of this flow.
description: A description for the created deployment. Defaults to the flow's
description if not provided.
tags: A list of tags to associate with the created deployment for organizational
purposes.
version: A version for the created deployment. Defaults to the flow's version.
enforce_parameter_schema: Whether or not the Prefect API should enforce the
parameter schema for this deployment.
work_pool_name: The name of the work pool to use for this deployment.
work_queue_name: The name of the work queue to use for this deployment's scheduled runs.
If not provided the default work queue for the work pool will be used.
job_variables: Settings used to override the values specified default base job template
of the chosen work pool. Refer to the base job template of the chosen work pool for
available settings.
_sla: (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
"""
from prefect.flows import load_flow_from_entrypoint
job_variables = job_variables or {}
flow = load_flow_from_entrypoint(entrypoint)
constructed_schedules = cls._construct_deployment_schedules(
interval=interval,
cron=cron,
rrule=rrule,
schedules=schedules,
schedule=schedule,
)
if isinstance(concurrency_limit, ConcurrencyLimitConfig):
concurrency_options = {
"collision_strategy": concurrency_limit.collision_strategy
}
concurrency_limit = concurrency_limit.limit
else:
concurrency_options = None
deployment = cls(
name=name,
flow_name=flow_name or flow.name,
schedules=constructed_schedules,
concurrency_limit=concurrency_limit,
concurrency_options=concurrency_options,
paused=paused,
tags=tags or [],
triggers=triggers or [],
parameters=parameters or {},
description=description,
version=version,
entrypoint=entrypoint,
enforce_parameter_schema=enforce_parameter_schema,
work_pool_name=work_pool_name,
work_queue_name=work_queue_name,
job_variables=job_variables,
)
deployment._sla = _sla
deployment._path = str(Path.cwd())
cls._set_defaults_from_flow(deployment, flow)
return deployment
@classmethod
async def afrom_storage(
cls,
storage: RunnerStorage,
entrypoint: str,
name: str,
flow_name: Optional[str] = None,
interval: Optional[
Union[Iterable[Union[int, float, timedelta]], int, float, timedelta]
] = None,
cron: Optional[Union[Iterable[str], str]] = None,
rrule: Optional[Union[Iterable[str], str]] = None,
paused: Optional[bool] = None,
schedule: Optional[Schedule] = None,
schedules: Optional["FlexibleScheduleList"] = None,
concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
parameters: Optional[dict[str, Any]] = None,
triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
version: Optional[str] = None,
version_type: Optional[VersionType] = None,
enforce_parameter_schema: bool = True,
work_pool_name: Optional[str] = None,
work_queue_name: Optional[str] = None,
job_variables: Optional[dict[str, Any]] = None,
_sla: Optional[Union[SlaTypes, list[SlaTypes]]] = None, # experimental
) -> "RunnerDeployment":
"""
Create a RunnerDeployment from a flow located at a given entrypoint and stored in a
local storage location.
Args:
entrypoint: The path to a file containing a flow and the name of the flow function in
the format `./path/to/file.py:flow_func_name`.
name: A name for the deployment
flow_name: The name of the flow to deploy
storage: A storage object to use for retrieving flow code. If not provided, a
URL must be provided.
interval: An interval on which to execute the current flow. Accepts either a number
or a timedelta object. If a number is given, it will be interpreted as seconds.
cron: A cron schedule of when to execute runs of this flow.
rrule: An rrule schedule of when to execute runs of this flow.
paused: Whether or not the deployment is paused.
schedule: A schedule object defining when to execute runs of this deployment.
Used to provide additional scheduling options like `timezone` or `parameters`.
schedules: A list of schedule objects defining when to execute runs of this deployment.
Used to provide additional scheduling options like `timezone` or `parameters`.
triggers: A list of triggers that should kick of a run of this flow.
parameters: A dictionary of default parameter values to pass to runs of this flow.
description: A description for the created deployment. Defaults to the flow's
description if not provided.
tags: A list of tags to associate with the created deployment for organizational
purposes.
version: A version for the created deployment. Defaults to the flow's version.
version_type: The type of version information to use for the deployment. The version type
will be inferred if not provided.
enforce_parameter_schema: Whether or not the Prefect API should enforce the
parameter schema for this deployment.
work_pool_name: The name of the work pool to use for this deployment.
work_queue_name: The name of the work queue to use for this deployment's scheduled runs.
If not provided the default work queue for the work pool will be used.
job_variables: Settings used to override the values specified default base job template
of the chosen work pool. Refer to the base job template of the chosen work pool for
available settings.
_sla: (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
"""
from prefect.flows import load_flow_from_entrypoint
constructed_schedules = cls._construct_deployment_schedules(
interval=interval,
cron=cron,
rrule=rrule,
schedules=schedules,
schedule=schedule,
)
if isinstance(concurrency_limit, ConcurrencyLimitConfig):
concurrency_options = {
"collision_strategy": concurrency_limit.collision_strategy
}
concurrency_limit = concurrency_limit.limit
else:
concurrency_options = None
job_variables = job_variables or {}
with tempfile.TemporaryDirectory() as tmpdir:
storage.set_base_path(Path(tmpdir))
await storage.pull_code()
full_entrypoint = str(storage.destination / entrypoint)
flow = await from_async.wait_for_call_in_new_thread(
create_call(load_flow_from_entrypoint, full_entrypoint)
)
deployment = cls(
name=name,
flow_name=flow_name or flow.name,
schedules=constructed_schedules,
concurrency_limit=concurrency_limit,
concurrency_options=concurrency_options,
paused=paused,
tags=tags or [],
triggers=triggers or [],
parameters=parameters or {},
description=description,
version=version,
version_type=version_type,
entrypoint=entrypoint,
enforce_parameter_schema=enforce_parameter_schema,
storage=storage,
work_pool_name=work_pool_name,
work_queue_name=work_queue_name,
job_variables=job_variables,
)
deployment._sla = _sla
deployment._path = str(storage.destination).replace(
tmpdir, "$STORAGE_BASE_PATH"
)
cls._set_defaults_from_flow(deployment, flow)
return deployment
@classmethod
@async_dispatch(afrom_storage)
def from_storage(
cls,
storage: RunnerStorage,
entrypoint: str,
name: str,
flow_name: Optional[str] = None,
interval: Optional[
Union[Iterable[Union[int, float, timedelta]], int, float, timedelta]
] = None,
cron: Optional[Union[Iterable[str], str]] = None,
rrule: Optional[Union[Iterable[str], str]] = None,
paused: Optional[bool] = None,
schedule: Optional[Schedule] = None,
schedules: Optional["FlexibleScheduleList"] = None,
concurrency_limit: Optional[Union[int, ConcurrencyLimitConfig, None]] = None,
parameters: Optional[dict[str, Any]] = None,
triggers: Optional[List[Union[DeploymentTriggerTypes, TriggerTypes]]] = None,
description: Optional[str] = None,
tags: Optional[List[str]] = None,
version: Optional[str] = None,
version_type: Optional[VersionType] = None,
enforce_parameter_schema: bool = True,
work_pool_name: Optional[str] = None,
work_queue_name: Optional[str] = None,
job_variables: Optional[dict[str, Any]] = None,
_sla: Optional[Union[SlaTypes, list[SlaTypes]]] = None, # experimental
) -> "RunnerDeployment":
"""
Create a RunnerDeployment from a flow located at a given entrypoint and stored in a
local storage location.
Args:
entrypoint: The path to a file containing a flow and the name of the flow function in
the format `./path/to/file.py:flow_func_name`.
name: A name for the deployment
flow_name: The name of the flow to deploy
storage: A storage object to use for retrieving flow code. If not provided, a
URL must be provided.
interval: An interval on which to execute the current flow. Accepts either a number
or a timedelta object. If a number is given, it will be interpreted as seconds.
cron: A cron schedule of when to execute runs of this flow.
rrule: An rrule schedule of when to execute runs of this flow.
paused: Whether or not the deployment is paused.
schedule: A schedule object defining when to execute runs of this deployment.
Used to provide additional scheduling options like `timezone` or `parameters`.
schedules: A list of schedule objects defining when to execute runs of this deployment.
Used to provide additional scheduling options like `timezone` or `parameters`.
triggers: A list of triggers that should kick of a run of this flow.
parameters: A dictionary of default parameter values to pass to runs of this flow.
description: A description for the created deployment. Defaults to the flow's
description if not provided.
tags: A list of tags to associate with the created deployment for organizational
purposes.
version: A version for the created deployment. Defaults to the flow's version.
version_type: The type of version information to use for the deployment. The version type
will be inferred if not provided.
enforce_parameter_schema: Whether or not the Prefect API should enforce the
parameter schema for this deployment.
work_pool_name: The name of the work pool to use for this deployment.
work_queue_name: The name of the work queue to use for this deployment's scheduled runs.
If not provided the default work queue for the work pool will be used.
job_variables: Settings used to override the values specified default base job template
of the chosen work pool. Refer to the base job template of the chosen work pool for
available settings.
_sla: (Experimental) SLA configuration for the deployment. May be removed or modified at any time. Currently only supported on Prefect Cloud.
"""
from prefect.flows import load_flow_from_entrypoint
constructed_schedules = cls._construct_deployment_schedules(
interval=interval,
cron=cron,
rrule=rrule,
schedules=schedules,
schedule=schedule,
)
if isinstance(concurrency_limit, ConcurrencyLimitConfig):
concurrency_options = {
"collision_strategy": concurrency_limit.collision_strategy
}
concurrency_limit = concurrency_limit.limit
else:
concurrency_options = None
job_variables = job_variables or {}
with tempfile.TemporaryDirectory() as tmpdir:
storage.set_base_path(Path(tmpdir))
run_coro_as_sync(storage.pull_code())
full_entrypoint = str(storage.destination / entrypoint)
flow = load_flow_from_entrypoint(full_entrypoint)
deployment = cls(
name=name,
flow_name=flow_name or flow.name,
schedules=constructed_schedules,
concurrency_limit=concurrency_limit,
concurrency_options=concurrency_options,
paused=paused,
tags=tags or [],
triggers=triggers or [],
parameters=parameters or {},
description=description,
version=version,
version_type=version_type,
entrypoint=entrypoint,
enforce_parameter_schema=enforce_parameter_schema,
storage=storage,
work_pool_name=work_pool_name,
work_queue_name=work_queue_name,
job_variables=job_variables,
)
deployment._sla = _sla
deployment._path = str(storage.destination).replace(
tmpdir, "$STORAGE_BASE_PATH"
)
cls._set_defaults_from_flow(deployment, flow)
return deployment
@sync_compatible
async def deploy(
*deployments: RunnerDeployment,
work_pool_name: Optional[str] = None,
image: Optional[Union[str, DockerImage]] = None,
build: bool = True,
push: bool = True,
print_next_steps_message: bool = True,
ignore_warnings: bool = False,
) -> List[UUID]:
"""
Deploy the provided list of deployments to dynamic infrastructure via a
work pool.
By default, calling this function will build a Docker image for the deployments, push it to a
registry, and create each deployment via the Prefect API that will run the corresponding
flow on the given schedule.
If you want to use an existing image, you can pass `build=False` to skip building and pushing
an image.
Args:
*deployments: A list of deployments to deploy.
work_pool_name: The name of the work pool to use for these deployments. Defaults to
the value of `PREFECT_DEFAULT_WORK_POOL_NAME`.
image: The name of the Docker image to build, including the registry and
repository. Pass a DockerImage instance to customize the Dockerfile used
and build arguments.
build: Whether or not to build a new image for the flow. If False, the provided
image will be used as-is and pulled at runtime.
push: Whether or not to skip pushing the built image to a registry.
print_next_steps_message: Whether or not to print a message with next steps
after deploying the deployments.
Returns:
A list of deployment IDs for the created/updated deployments.
Examples:
Deploy a group of flows to a work pool:
```python
from prefect import deploy, flow
@flow(log_prints=True)
def local_flow():
print("I'm a locally defined flow!")
if __name__ == "__main__":
deploy(
local_flow.to_deployment(name="example-deploy-local-flow"),
flow.from_source(
source="https://github.com/org/repo.git",
entrypoint="flows.py:my_flow",
).to_deployment(
name="example-deploy-remote-flow",
),
work_pool_name="my-work-pool",
image="my-registry/my-image:dev",
)
```
"""
work_pool_name = work_pool_name or PREFECT_DEFAULT_WORK_POOL_NAME.value()
if not image and not all(
d.storage or d.entrypoint_type == EntrypointType.MODULE_PATH
for d in deployments
):
raise ValueError(
"Either an image or remote storage location must be provided when deploying"
" a deployment."
)
if not work_pool_name:
raise ValueError(
"A work pool name must be provided when deploying a deployment. Either"
" provide a work pool name when calling `deploy` or set"
" `PREFECT_DEFAULT_WORK_POOL_NAME` in your profile."
)
if image and isinstance(image, str):
image_name, image_tag = parse_image_tag(image)
image = DockerImage(name=image_name, tag=image_tag)
try:
async with get_client() as client:
work_pool = await client.read_work_pool(work_pool_name)
active_workers = await client.read_workers_for_work_pool(
work_pool_name,
worker_filter=WorkerFilter(status=WorkerFilterStatus(any_=["ONLINE"])),
)
except ObjectNotFound as exc:
raise ValueError(
f"Could not find work pool {work_pool_name!r}. Please create it before"
" deploying this flow."
) from exc
is_docker_based_work_pool = get_from_dict(
work_pool.base_job_template, "variables.properties.image", False
)
is_block_based_work_pool = get_from_dict(
work_pool.base_job_template, "variables.properties.block", False
)
# carve out an exception for block based work pools that only have a block in their base job template
console = Console()
if not is_docker_based_work_pool and not is_block_based_work_pool:
if image:
raise ValueError(
f"Work pool {work_pool_name!r} does not support custom Docker images."
" Please use a work pool with an `image` variable in its base job template"
" or specify a remote storage location for the flow with `.from_source`."
" If you are attempting to deploy a flow to a local process work pool,"
" consider using `flow.serve` instead. See the documentation for more"
" information: https://docs.prefect.io/latest/how-to-guides/deployments/run-flows-in-local-processes"
)
elif work_pool.type == "process" and not ignore_warnings:
console.print(
"Looks like you're deploying to a process work pool. If you're creating a"
" deployment for local development, calling `.serve` on your flow is a great"
" way to get started. See the documentation for more information:"
" https://docs.prefect.io/latest/how-to-guides/deployments/run-flows-in-local-processes "
" Set `ignore_warnings=True` to suppress this message.",
style="yellow",
)
is_managed_pool = work_pool.is_managed_pool
if is_managed_pool:
build = False
push = False
if image and build:
with Progress(
SpinnerColumn(),
TextColumn(f"Building image {image.reference}..."),
transient=True,
console=console,
) as progress:
docker_build_task = progress.add_task("docker_build", total=1)
image.build()
progress.update(docker_build_task, completed=1)
console.print(
f"Successfully built image {image.reference!r}", style="green"
)
if image and build and push:
with Progress(
SpinnerColumn(),
TextColumn("Pushing image..."),
transient=True,
console=console,
) as progress:
docker_push_task = progress.add_task("docker_push", total=1)
image.push()
progress.update(docker_push_task, completed=1)
console.print(f"Successfully pushed image {image.reference!r}", style="green")
deployment_exceptions: list[dict[str, Any]] = []
deployment_ids: list[UUID] = []
image_ref = image.reference if image else None
for deployment in track(
deployments,
description="Creating/updating deployments...",
console=console,
transient=True,
):
try:
deployment_ids.append(
await deployment.apply(image=image_ref, work_pool_name=work_pool_name)
)
except Exception as exc:
if len(deployments) == 1:
raise
deployment_exceptions.append({"deployment": deployment, "exc": exc})
if deployment_exceptions:
console.print(
"Encountered errors while creating/updating deployments:\n",
style="orange_red1",
)
else:
console.print("Successfully created/updated all deployments!\n", style="green")
complete_failure = len(deployment_exceptions) == len(deployments)
table = Table(
title="Deployments",
show_lines=True,
)
table.add_column(header="Name", style="blue", no_wrap=True)
table.add_column(header="Status", style="blue", no_wrap=True)
table.add_column(header="Details", style="blue")
for deployment in deployments:
errored_deployment = next(
(d for d in deployment_exceptions if d["deployment"] == deployment),
None,
)
if errored_deployment:
table.add_row(
f"{deployment.flow_name}/{deployment.name}",
"failed",
str(errored_deployment["exc"]),
style="red",
)
else:
table.add_row(f"{deployment.flow_name}/{deployment.name}", "applied")
console.print(table)
if print_next_steps_message and not complete_failure:
if (
not work_pool.is_push_pool
and not work_pool.is_managed_pool
and not active_workers
):
console.print(
"\nTo execute flow runs from these deployments, start a worker in a"
" separate terminal that pulls work from the"
f" {work_pool_name!r} work pool:"
f"\n\t[blue]$ prefect worker start --pool {work_pool_name!r}[/]",
)
console.print(
"\nTo trigger any of these deployments, use the"
" following command:\n[blue]\n\t$ prefect deployment run"
" [DEPLOYMENT_NAME]\n[/]"
)
if PREFECT_UI_URL:
console.print(
"\nYou can also trigger your deployments via the Prefect UI:"
f" [blue]{PREFECT_UI_URL.value()}/deployments[/]\n"
)
return deployment_ids
| RunnerDeployment |
python | wandb__wandb | tools/graphql_codegen/plugin_utils.py | {
"start": 4554,
"end": 5003
} | class ____(ParsedConstraints):
min: int | None = Field(None, serialization_alias="min_length")
max: int | None = Field(None, serialization_alias="max_length")
pattern: str | None = None
@field_validator("pattern")
def _unescape_pattern(cls, v: str | None) -> str | None:
"""The patterns in the GraphQL schema are double-escaped, so unescape them once."""
return v.replace(r"\\", "\\") if v else v
| StringConstraints |
python | tiangolo__fastapi | tests/test_security_oauth2_optional.py | {
"start": 505,
"end": 11347
} | class ____(BaseModel):
username: str
def get_current_user(oauth_header: Optional[str] = Security(reusable_oauth2)):
if oauth_header is None:
return None
user = User(username=oauth_header)
return user
@app.post("/login")
def login(form_data: OAuth2PasswordRequestFormStrict = Depends()):
return form_data
@app.get("/users/me")
def read_users_me(current_user: Optional[User] = Depends(get_current_user)):
if current_user is None:
return {"msg": "Create an account first"}
return current_user
client = TestClient(app)
def test_security_oauth2():
response = client.get("/users/me", headers={"Authorization": "Bearer footokenbar"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "Bearer footokenbar"}
def test_security_oauth2_password_other_header():
response = client.get("/users/me", headers={"Authorization": "Other footokenbar"})
assert response.status_code == 200, response.text
assert response.json() == {"username": "Other footokenbar"}
def test_security_oauth2_password_bearer_no_header():
response = client.get("/users/me")
assert response.status_code == 200, response.text
assert response.json() == {"msg": "Create an account first"}
def test_strict_login_no_data():
response = client.post("/login")
assert response.status_code == 422
assert response.json() == IsDict(
{
"detail": [
{
"type": "missing",
"loc": ["body", "grant_type"],
"msg": "Field required",
"input": None,
},
{
"type": "missing",
"loc": ["body", "username"],
"msg": "Field required",
"input": None,
},
{
"type": "missing",
"loc": ["body", "password"],
"msg": "Field required",
"input": None,
},
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", "grant_type"],
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ["body", "username"],
"msg": "field required",
"type": "value_error.missing",
},
{
"loc": ["body", "password"],
"msg": "field required",
"type": "value_error.missing",
},
]
}
)
def test_strict_login_no_grant_type():
response = client.post("/login", data={"username": "johndoe", "password": "secret"})
assert response.status_code == 422
assert response.json() == IsDict(
{
"detail": [
{
"type": "missing",
"loc": ["body", "grant_type"],
"msg": "Field required",
"input": None,
}
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", "grant_type"],
"msg": "field required",
"type": "value_error.missing",
}
]
}
)
@pytest.mark.parametrize(
argnames=["grant_type"],
argvalues=[
pytest.param("incorrect", id="incorrect value"),
pytest.param("passwordblah", id="password with suffix"),
pytest.param("blahpassword", id="password with prefix"),
],
)
def test_strict_login_incorrect_grant_type(grant_type: str):
response = client.post(
"/login",
data={"username": "johndoe", "password": "secret", "grant_type": grant_type},
)
assert response.status_code == 422
assert response.json() == IsDict(
{
"detail": [
{
"type": "string_pattern_mismatch",
"loc": ["body", "grant_type"],
"msg": "String should match pattern '^password$'",
"input": grant_type,
"ctx": {"pattern": "^password$"},
}
]
}
) | IsDict(
# TODO: remove when deprecating Pydantic v1
{
"detail": [
{
"loc": ["body", "grant_type"],
"msg": 'string does not match regex "^password$"',
"type": "value_error.str.regex",
"ctx": {"pattern": "^password$"},
}
]
}
)
def test_strict_login_correct_data():
response = client.post(
"/login",
data={"username": "johndoe", "password": "secret", "grant_type": "password"},
)
assert response.status_code == 200
assert response.json() == {
"grant_type": "password",
"username": "johndoe",
"password": "secret",
"scopes": [],
"client_id": None,
"client_secret": None,
}
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/login": {
"post": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
},
"422": {
"description": "Validation Error",
"content": {
"application/json": {
"schema": {
"$ref": "#/components/schemas/HTTPValidationError"
}
}
},
},
},
"summary": "Login",
"operationId": "login_login_post",
"requestBody": {
"content": {
"application/x-www-form-urlencoded": {
"schema": {
"$ref": "#/components/schemas/Body_login_login_post"
}
}
},
"required": True,
},
}
},
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Users Me",
"operationId": "read_users_me_users_me_get",
"security": [{"OAuth2": []}],
}
},
},
"components": {
"schemas": {
"Body_login_login_post": {
"title": "Body_login_login_post",
"required": ["grant_type", "username", "password"],
"type": "object",
"properties": {
"grant_type": {
"title": "Grant Type",
"pattern": "^password$",
"type": "string",
},
"username": {"title": "Username", "type": "string"},
"password": {"title": "Password", "type": "string"},
"scope": {"title": "Scope", "type": "string", "default": ""},
"client_id": IsDict(
{
"title": "Client Id",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Client Id", "type": "string"}
),
"client_secret": IsDict(
{
"title": "Client Secret",
"anyOf": [{"type": "string"}, {"type": "null"}],
}
)
| IsDict(
# TODO: remove when deprecating Pydantic v1
{"title": "Client Secret", "type": "string"}
),
},
},
"ValidationError": {
"title": "ValidationError",
"required": ["loc", "msg", "type"],
"type": "object",
"properties": {
"loc": {
"title": "Location",
"type": "array",
"items": {
"anyOf": [{"type": "string"}, {"type": "integer"}]
},
},
"msg": {"title": "Message", "type": "string"},
"type": {"title": "Error Type", "type": "string"},
},
},
"HTTPValidationError": {
"title": "HTTPValidationError",
"type": "object",
"properties": {
"detail": {
"title": "Detail",
"type": "array",
"items": {"$ref": "#/components/schemas/ValidationError"},
}
},
},
},
"securitySchemes": {
"OAuth2": {
"type": "oauth2",
"flows": {
"password": {
"scopes": {
"read:users": "Read the users",
"write:users": "Create users",
},
"tokenUrl": "token",
}
},
}
},
},
}
| User |
python | TheAlgorithms__Python | data_structures/stacks/stack_with_singly_linked_list.py | {
"start": 347,
"end": 3697
} | class ____[T]:
"""
Linked List Stack implementing push (to top),
pop (from top) and is_empty
>>> stack = LinkedStack()
>>> stack.is_empty()
True
>>> stack.push(5)
>>> stack.push(9)
>>> stack.push('python')
>>> stack.is_empty()
False
>>> stack.pop()
'python'
>>> stack.push('algorithms')
>>> stack.pop()
'algorithms'
>>> stack.pop()
9
>>> stack.pop()
5
>>> stack.is_empty()
True
>>> stack.pop()
Traceback (most recent call last):
...
IndexError: pop from empty stack
"""
def __init__(self) -> None:
self.top: Node[T] | None = None
def __iter__(self) -> Iterator[T]:
node = self.top
while node:
yield node.data
node = node.next
def __str__(self) -> str:
"""
>>> stack = LinkedStack()
>>> stack.push("c")
>>> stack.push("b")
>>> stack.push("a")
>>> str(stack)
'a->b->c'
"""
return "->".join([str(item) for item in self])
def __len__(self) -> int:
"""
>>> stack = LinkedStack()
>>> len(stack) == 0
True
>>> stack.push("c")
>>> stack.push("b")
>>> stack.push("a")
>>> len(stack) == 3
True
"""
return len(tuple(iter(self)))
def is_empty(self) -> bool:
"""
>>> stack = LinkedStack()
>>> stack.is_empty()
True
>>> stack.push(1)
>>> stack.is_empty()
False
"""
return self.top is None
def push(self, item: T) -> None:
"""
>>> stack = LinkedStack()
>>> stack.push("Python")
>>> stack.push("Java")
>>> stack.push("C")
>>> str(stack)
'C->Java->Python'
"""
node = Node(item)
if not self.is_empty():
node.next = self.top
self.top = node
def pop(self) -> T:
"""
>>> stack = LinkedStack()
>>> stack.pop()
Traceback (most recent call last):
...
IndexError: pop from empty stack
>>> stack.push("c")
>>> stack.push("b")
>>> stack.push("a")
>>> stack.pop() == 'a'
True
>>> stack.pop() == 'b'
True
>>> stack.pop() == 'c'
True
"""
if self.is_empty():
raise IndexError("pop from empty stack")
assert isinstance(self.top, Node)
pop_node = self.top
self.top = self.top.next
return pop_node.data
def peek(self) -> T:
"""
>>> stack = LinkedStack()
>>> stack.push("Java")
>>> stack.push("C")
>>> stack.push("Python")
>>> stack.peek()
'Python'
"""
if self.is_empty():
raise IndexError("peek from empty stack")
assert self.top is not None
return self.top.data
def clear(self) -> None:
"""
>>> stack = LinkedStack()
>>> stack.push("Java")
>>> stack.push("C")
>>> stack.push("Python")
>>> str(stack)
'Python->C->Java'
>>> stack.clear()
>>> len(stack) == 0
True
"""
self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
| LinkedStack |
python | conda__conda | tests/test_api.py | {
"start": 630,
"end": 9169
} | class ____:
pass
def inspect_arguments(f, arguments):
# FullArgSpec(args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations)
result = getargspec(f)
arg_names = result[0]
defaults = result[3] or ()
default_val_first_idx = len(arg_names) - len(defaults)
arg_values = [PositionalArgument] * default_val_first_idx + list(defaults)
for (recorded_name, recorded_value), (arg_name, arg_value) in zip(
arguments.items(), zip(arg_names, arg_values)
):
print(recorded_name, arg_name)
assert recorded_name == arg_name
assert recorded_value == arg_value
def test_DepsModifier_contract():
assert DepsModifier.NO_DEPS
assert DepsModifier.ONLY_DEPS
assert DepsModifier.NOT_SET
def test_UpdateModifier_contract():
assert UpdateModifier.SPECS_SATISFIED_SKIP_SOLVE
assert UpdateModifier.FREEZE_INSTALLED
assert UpdateModifier.UPDATE_DEPS
assert UpdateModifier.UPDATE_SPECS
assert UpdateModifier.UPDATE_ALL
def test_Solver_inputs_contract():
init_args = {
"self": PositionalArgument,
"prefix": PositionalArgument,
"channels": PositionalArgument,
"subdirs": (),
"specs_to_add": (),
"specs_to_remove": (),
}
inspect_arguments(Solver.__init__, init_args)
solve_final_state_args = {
"self": PositionalArgument,
"update_modifier": NULL,
"deps_modifier": NULL,
"prune": NULL,
"ignore_pinned": NULL,
"force_remove": NULL,
}
inspect_arguments(Solver.solve_final_state, solve_final_state_args)
solve_for_diff_args = {
"self": PositionalArgument,
"update_modifier": NULL,
"deps_modifier": NULL,
"prune": NULL,
"ignore_pinned": NULL,
"force_remove": NULL,
"force_reinstall": False,
}
inspect_arguments(Solver.solve_for_diff, solve_for_diff_args)
solve_for_transaction_args = {
"self": PositionalArgument,
"update_modifier": NULL,
"deps_modifier": NULL,
"prune": NULL,
"ignore_pinned": NULL,
"force_remove": NULL,
"force_reinstall": False,
}
inspect_arguments(Solver.solve_for_transaction, solve_for_transaction_args)
@pytest.mark.integration
def test_Solver_return_value_contract():
solver = Solver("/", (Channel("pkgs/main"),), specs_to_add=("openssl",))
solve_final_state_rv = solver.solve_final_state()
assert isiterable(solve_final_state_rv)
assert all(isinstance(pref, PackageRecord) for pref in solve_final_state_rv)
solve_for_diff_rv = solver.solve_for_diff()
assert len(solve_for_diff_rv) == 2
unlink_precs, link_precs = solve_for_diff_rv
assert isiterable(unlink_precs)
assert all(isinstance(pref, PackageRecord) for pref in unlink_precs)
assert isiterable(link_precs)
assert all(isinstance(pref, PackageRecord) for pref in link_precs)
solve_for_transaction_rv = solver.solve_for_transaction()
assert isinstance(solve_for_transaction_rv, UnlinkLinkTransaction)
def test_SubdirData_contract():
init_args = {
"self": PositionalArgument,
"channel": PositionalArgument,
}
inspect_arguments(SubdirData.__init__, init_args)
query_args = {
"self": PositionalArgument,
"package_ref_or_match_spec": PositionalArgument,
}
inspect_arguments(SubdirData.query, query_args)
query_all_args = {
"package_ref_or_match_spec": PositionalArgument,
"channels": None,
"subdirs": None,
}
inspect_arguments(SubdirData.query_all, query_all_args)
iter_records_args = {"self": PositionalArgument}
inspect_arguments(SubdirData.iter_records, iter_records_args)
reload_args = {"self": PositionalArgument}
inspect_arguments(SubdirData.reload, reload_args)
@pytest.mark.integration
def test_SubdirData_return_value_contract():
sd = SubdirData(Channel("pkgs/main/linux-64"))
query_result = sd.query("openssl")
assert isinstance(query_result, tuple)
assert all(isinstance(prec, PackageRecord) for prec in query_result)
query_all_result = sd.query_all("openssl", (Channel("pkgs/main"),), context.subdirs)
assert isinstance(query_all_result, tuple)
assert all(isinstance(prec, PackageRecord) for prec in query_all_result)
iter_records_result = sd.iter_records()
assert isiterable(iter_records_result)
assert all(isinstance(prec, PackageRecord) for prec in iter_records_result)
reload_result = sd.reload()
assert isinstance(reload_result, SubdirData)
def test_PackageCacheData_contract():
init_args = {
"self": PositionalArgument,
"pkgs_dir": PositionalArgument,
}
inspect_arguments(PackageCacheData.__init__, init_args)
get_args = {
"self": PositionalArgument,
"package_ref": PositionalArgument,
"default": NULL,
}
inspect_arguments(PackageCacheData.get, get_args)
query_args = {
"self": PositionalArgument,
"package_ref_or_match_spec": PositionalArgument,
}
inspect_arguments(PackageCacheData.query, query_args)
query_all_args = {
"package_ref_or_match_spec": PositionalArgument,
"pkgs_dirs": None,
}
inspect_arguments(PackageCacheData.query_all, query_all_args)
iter_records_args = {"self": PositionalArgument}
inspect_arguments(PackageCacheData.iter_records, iter_records_args)
isinstance(PackageCacheData.is_writable, property)
first_writable_args = {"pkgs_dirs": None}
inspect_arguments(PackageCacheData.first_writable, first_writable_args)
reload_args = {"self": PositionalArgument}
inspect_arguments(PackageCacheData.reload, reload_args)
def test_PackageCacheData_return_value_contract():
pc = PackageCacheData(context.pkgs_dirs[0])
single_pcrec = next(pc.iter_records(), None)
if single_pcrec:
get_result = pc.get(PackageRecord.from_objects(single_pcrec))
assert isinstance(get_result, PackageCacheRecord)
query_result = pc.query("openssl")
assert isinstance(query_result, tuple)
assert all(isinstance(pcrec, PackageCacheRecord) for pcrec in query_result)
query_all_result = PackageCacheData.query_all("openssl")
assert isinstance(query_all_result, tuple)
assert all(isinstance(pcrec, PackageCacheRecord) for pcrec in query_all_result)
iter_records_result = pc.iter_records()
assert isiterable(iter_records_result)
assert all(isinstance(pcrec, PackageCacheRecord) for pcrec in iter_records_result)
is_writable_result = pc.is_writable
assert is_writable_result is True or is_writable_result is False
first_writable_result = PackageCacheData.first_writable()
assert isinstance(first_writable_result, PackageCacheData)
reload_result = pc.reload()
assert isinstance(reload_result, PackageCacheData)
def test_PrefixData_contract():
init_args = {
"self": PositionalArgument,
"prefix_path": PositionalArgument,
}
inspect_arguments(PrefixData.__init__, init_args)
get_args = {
"self": PositionalArgument,
"package_ref": PositionalArgument,
"default": NULL,
}
inspect_arguments(PrefixData.get, get_args)
query_args = {
"self": PositionalArgument,
"package_ref_or_match_spec": PositionalArgument,
}
inspect_arguments(PrefixData.query, query_args)
iter_records_args = {"self": PositionalArgument}
inspect_arguments(PrefixData.iter_records, iter_records_args)
isinstance(PrefixData.is_writable, property)
reload_args = {"self": PositionalArgument}
inspect_arguments(PrefixData.reload, reload_args)
def test_PrefixData_return_value_contract():
pd = PrefixData(context.conda_prefix)
single_prefix_rec = next(pd.iter_records())
get_result = pd.get(PackageRecord.from_objects(single_prefix_rec))
assert isinstance(get_result, PrefixRecord)
query_result = pd.query("openssl")
assert isinstance(query_result, tuple)
assert all(isinstance(prefix_rec, PrefixRecord) for prefix_rec in query_result)
iter_records_result = pd.iter_records()
assert isiterable(iter_records_result)
assert all(
isinstance(prefix_rec, PrefixRecord) for prefix_rec in iter_records_result
)
is_writable_result = pd.is_writable
assert is_writable_result is True or is_writable_result is False
reload_result = pd.reload()
assert isinstance(reload_result, PrefixData)
| PositionalArgument |
python | allegroai__clearml | clearml/backend_api/services/v2_13/events.py | {
"start": 22384,
"end": 26163
} | class ____(NonStrictDataModel):
"""
A log event associated with a task.
:param timestamp: Epoch milliseconds UTC, will be set by the server if not set.
:type timestamp: float
:param task: Task ID (required)
:type task: str
:param level: Log level.
:type level: LogLevelEnum
:param worker: Name of machine running the task.
:type worker: str
:param msg: Log message.
:type msg: str
"""
_schema = {
"description": "A log event associated with a task.",
"properties": {
"level": {
"$ref": "#/definitions/log_level_enum",
"description": "Log level.",
},
"msg": {"description": "Log message.", "type": "string"},
"task": {"description": "Task ID (required)", "type": "string"},
"timestamp": {
"description": "Epoch milliseconds UTC, will be set by the server if not set.",
"type": ["number", "null"],
},
"type": {"const": "log", "description": "'log'"},
"worker": {
"description": "Name of machine running the task.",
"type": "string",
},
},
"required": ["task", "type"],
"type": "object",
}
def __init__(
self,
task: str,
timestamp: Optional[float] = None,
level: Any = None,
worker: Optional[str] = None,
msg: Optional[str] = None,
**kwargs: Any
) -> None:
super(TaskLogEvent, self).__init__(**kwargs)
self.timestamp = timestamp
self.task = task
self.level = level
self.worker = worker
self.msg = msg
@schema_property("timestamp")
def timestamp(self) -> Optional[float]:
return self._property_timestamp
@timestamp.setter
def timestamp(self, value: Optional[float]) -> None:
if value is None:
self._property_timestamp = None
return
self.assert_isinstance(value, "timestamp", six.integer_types + (float,))
self._property_timestamp = value
@schema_property("type")
def type(self) -> Any:
return "log"
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("level")
def level(self) -> Any:
return self._property_level
@level.setter
def level(self, value: Any) -> None:
if value is None:
self._property_level = None
return
if isinstance(value, six.string_types):
try:
value = LogLevelEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "level", enum.Enum)
self._property_level = value
@schema_property("worker")
def worker(self) -> Optional[str]:
return self._property_worker
@worker.setter
def worker(self, value: Optional[str]) -> None:
if value is None:
self._property_worker = None
return
self.assert_isinstance(value, "worker", six.string_types)
self._property_worker = value
@schema_property("msg")
def msg(self) -> Optional[str]:
return self._property_msg
@msg.setter
def msg(self, value: Optional[str]) -> None:
if value is None:
self._property_msg = None
return
self.assert_isinstance(value, "msg", six.string_types)
self._property_msg = value
| TaskLogEvent |
python | pallets__jinja | tests/test_core_tags.py | {
"start": 266,
"end": 10218
} | class ____:
def test_simple(self, env):
tmpl = env.from_string("{% for item in seq %}{{ item }}{% endfor %}")
assert tmpl.render(seq=list(range(10))) == "0123456789"
def test_else(self, env):
tmpl = env.from_string("{% for item in seq %}XXX{% else %}...{% endfor %}")
assert tmpl.render() == "..."
def test_else_scoping_item(self, env):
tmpl = env.from_string("{% for item in [] %}{% else %}{{ item }}{% endfor %}")
assert tmpl.render(item=42) == "42"
def test_empty_blocks(self, env):
tmpl = env.from_string("<{% for item in seq %}{% else %}{% endfor %}>")
assert tmpl.render() == "<>"
def test_context_vars(self, env):
slist = [42, 24]
for seq in [slist, iter(slist), reversed(slist), (_ for _ in slist)]:
tmpl = env.from_string(
"""{% for item in seq -%}
{{ loop.index }}|{{ loop.index0 }}|{{ loop.revindex }}|{{
loop.revindex0 }}|{{ loop.first }}|{{ loop.last }}|{{
loop.length }}###{% endfor %}"""
)
one, two, _ = tmpl.render(seq=seq).split("###")
(
one_index,
one_index0,
one_revindex,
one_revindex0,
one_first,
one_last,
one_length,
) = one.split("|")
(
two_index,
two_index0,
two_revindex,
two_revindex0,
two_first,
two_last,
two_length,
) = two.split("|")
assert int(one_index) == 1 and int(two_index) == 2
assert int(one_index0) == 0 and int(two_index0) == 1
assert int(one_revindex) == 2 and int(two_revindex) == 1
assert int(one_revindex0) == 1 and int(two_revindex0) == 0
assert one_first == "True" and two_first == "False"
assert one_last == "False" and two_last == "True"
assert one_length == two_length == "2"
def test_cycling(self, env):
tmpl = env.from_string(
"""{% for item in seq %}{{
loop.cycle('<1>', '<2>') }}{% endfor %}{%
for item in seq %}{{ loop.cycle(*through) }}{% endfor %}"""
)
output = tmpl.render(seq=list(range(4)), through=("<1>", "<2>"))
assert output == "<1><2>" * 4
def test_lookaround(self, env):
tmpl = env.from_string(
"""{% for item in seq -%}
{{ loop.previtem|default('x') }}-{{ item }}-{{
loop.nextitem|default('x') }}|
{%- endfor %}"""
)
output = tmpl.render(seq=list(range(4)))
assert output == "x-0-1|0-1-2|1-2-3|2-3-x|"
def test_changed(self, env):
tmpl = env.from_string(
"""{% for item in seq -%}
{{ loop.changed(item) }},
{%- endfor %}"""
)
output = tmpl.render(seq=[None, None, 1, 2, 2, 3, 4, 4, 4])
assert output == "True,False,True,True,False,True,True,False,False,"
def test_scope(self, env):
tmpl = env.from_string("{% for item in seq %}{% endfor %}{{ item }}")
output = tmpl.render(seq=list(range(10)))
assert not output
def test_varlen(self, env):
tmpl = env.from_string("{% for item in iter %}{{ item }}{% endfor %}")
output = tmpl.render(iter=range(5))
assert output == "01234"
def test_noniter(self, env):
tmpl = env.from_string("{% for item in none %}...{% endfor %}")
pytest.raises(TypeError, tmpl.render)
def test_recursive(self, env):
tmpl = env.from_string(
"""{% for item in seq recursive -%}
[{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[1<[1][2]>][2<[1][2]>][3<[a]>]"
)
def test_recursive_lookaround(self, env):
tmpl = env.from_string(
"""{% for item in seq recursive -%}
[{{ loop.previtem.a if loop.previtem is defined else 'x' }}.{{
item.a }}.{{ loop.nextitem.a if loop.nextitem is defined else 'x'
}}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[x.1.2<[x.1.2][1.2.x]>][1.2.3<[x.1.2][1.2.x]>][2.3.x<[x.a.x]>]"
)
def test_recursive_depth0(self, env):
tmpl = env.from_string(
"""{% for item in seq recursive -%}
[{{ loop.depth0 }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[0:1<[1:1][1:2]>][0:2<[1:1][1:2]>][0:3<[1:a]>]"
)
def test_recursive_depth(self, env):
tmpl = env.from_string(
"""{% for item in seq recursive -%}
[{{ loop.depth }}:{{ item.a }}{% if item.b %}<{{ loop(item.b) }}>{% endif %}]
{%- endfor %}"""
)
assert (
tmpl.render(
seq=[
dict(a=1, b=[dict(a=1), dict(a=2)]),
dict(a=2, b=[dict(a=1), dict(a=2)]),
dict(a=3, b=[dict(a="a")]),
]
)
== "[1:1<[2:1][2:2]>][1:2<[2:1][2:2]>][1:3<[2:a]>]"
)
def test_looploop(self, env):
tmpl = env.from_string(
"""{% for row in table %}
{%- set rowloop = loop -%}
{% for cell in row -%}
[{{ rowloop.index }}|{{ loop.index }}]
{%- endfor %}
{%- endfor %}"""
)
assert tmpl.render(table=["ab", "cd"]) == "[1|1][1|2][2|1][2|2]"
def test_reversed_bug(self, env):
tmpl = env.from_string(
"{% for i in items %}{{ i }}{% if not loop.last %},{% endif %}{% endfor %}"
)
assert tmpl.render(items=reversed([3, 2, 1])) == "1,2,3"
def test_loop_errors(self, env):
tmpl = env.from_string(
"""{% for item in [1] if loop.index
== 0 %}...{% endfor %}"""
)
pytest.raises(UndefinedError, tmpl.render)
tmpl = env.from_string(
"""{% for item in [] %}...{% else
%}{{ loop }}{% endfor %}"""
)
assert tmpl.render() == ""
def test_loop_filter(self, env):
tmpl = env.from_string(
"{% for item in range(10) if item is even %}[{{ item }}]{% endfor %}"
)
assert tmpl.render() == "[0][2][4][6][8]"
tmpl = env.from_string(
"""
{%- for item in range(10) if item is even %}[{{
loop.index }}:{{ item }}]{% endfor %}"""
)
assert tmpl.render() == "[1:0][2:2][3:4][4:6][5:8]"
def test_loop_unassignable(self, env):
pytest.raises(
TemplateSyntaxError, env.from_string, "{% for loop in seq %}...{% endfor %}"
)
def test_scoped_special_var(self, env):
t = env.from_string(
"{% for s in seq %}[{{ loop.first }}{% for c in s %}"
"|{{ loop.first }}{% endfor %}]{% endfor %}"
)
assert t.render(seq=("ab", "cd")) == "[True|True|False][False|True|False]"
def test_scoped_loop_var(self, env):
t = env.from_string(
"{% for x in seq %}{{ loop.first }}"
"{% for y in seq %}{% endfor %}{% endfor %}"
)
assert t.render(seq="ab") == "TrueFalse"
t = env.from_string(
"{% for x in seq %}{% for y in seq %}"
"{{ loop.first }}{% endfor %}{% endfor %}"
)
assert t.render(seq="ab") == "TrueFalseTrueFalse"
def test_recursive_empty_loop_iter(self, env):
t = env.from_string(
"""
{%- for item in foo recursive -%}{%- endfor -%}
"""
)
assert t.render(dict(foo=[])) == ""
def test_call_in_loop(self, env):
t = env.from_string(
"""
{%- macro do_something() -%}
[{{ caller() }}]
{%- endmacro %}
{%- for i in [1, 2, 3] %}
{%- call do_something() -%}
{{ i }}
{%- endcall %}
{%- endfor -%}
"""
)
assert t.render() == "[1][2][3]"
def test_scoping_bug(self, env):
t = env.from_string(
"""
{%- for item in foo %}...{{ item }}...{% endfor %}
{%- macro item(a) %}...{{ a }}...{% endmacro %}
{{- item(2) -}}
"""
)
assert t.render(foo=(1,)) == "...1......2..."
def test_unpacking(self, env):
tmpl = env.from_string(
"{% for a, b, c in [[1, 2, 3]] %}{{ a }}|{{ b }}|{{ c }}{% endfor %}"
)
assert tmpl.render() == "1|2|3"
def test_intended_scoping_with_set(self, env):
tmpl = env.from_string(
"{% for item in seq %}{{ x }}{% set x = item %}{{ x }}{% endfor %}"
)
assert tmpl.render(x=0, seq=[1, 2, 3]) == "010203"
tmpl = env.from_string(
"{% set x = 9 %}{% for item in seq %}{{ x }}"
"{% set x = item %}{{ x }}{% endfor %}"
)
assert tmpl.render(x=0, seq=[1, 2, 3]) == "919293"
| TestForLoop |
python | automl__auto-sklearn | autosklearn/pipeline/components/feature_preprocessing/select_percentile_regression.py | {
"start": 560,
"end": 2527
} | class ____(
SelectPercentileBase, AutoSklearnPreprocessingAlgorithm
):
def __init__(self, percentile, score_func="f_regression", random_state=None):
"""Parameters:
random state : ignored
score_func : callable, Function taking two arrays X and y, and
returning a pair of arrays (scores, pvalues).
"""
import sklearn.feature_selection
self.random_state = random_state # We don't use this
self.percentile = int(float(percentile))
if score_func == "f_regression":
self.score_func = sklearn.feature_selection.f_regression
elif score_func == "mutual_info":
self.score_func = partial(
sklearn.feature_selection.mutual_info_regression,
random_state=self.random_state,
)
else:
raise ValueError("Don't know this scoring function: %s" % score_func)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "SPR",
"name": "Select Percentile Regression",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (INPUT,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
percentile = UniformFloatHyperparameter(
"percentile", lower=1, upper=99, default_value=50
)
score_func = CategoricalHyperparameter(
name="score_func", choices=["f_regression", "mutual_info"]
)
cs = ConfigurationSpace()
cs.add_hyperparameters([percentile, score_func])
return cs
| SelectPercentileRegression |
python | django__django | tests/serializers/models/data.py | {
"start": 5185,
"end": 5267
} | class ____(models.Model):
data = models.FloatField(primary_key=True)
| FloatPKData |
python | etianen__django-reversion | tests/test_app/tests/test_admin.py | {
"start": 10541,
"end": 10636
} | class ____(GenericTabularInline):
model = TestModelGenericInline
| TestModelGenericInlineAdmin |
python | pytorch__pytorch | torch/jit/_trace.py | {
"start": 30829,
"end": 31038
} | class ____(str, Enum):
DIRECT_EXPORT = "DIRECT_EXPORT"
TRACE_AND_EXPORT = "TRACE_AND_EXPORT"
SOURCE_TO_SOURCE = "SOURCE_TO_SOURCE"
def __str__(self) -> str:
return self.value
| _ExportType |
python | optuna__optuna | optuna/distributions.py | {
"start": 8916,
"end": 10807
} | class ____(FloatDistribution):
"""A discretized uniform distribution in the linear domain.
This object is instantiated by :func:`~optuna.trial.Trial.suggest_float` with ``step``
argument, and passed to :mod:`~optuna.samplers` in general.
.. note::
If the range :math:`[\\mathsf{low}, \\mathsf{high}]` is not divisible by :math:`q`,
:math:`\\mathsf{high}` will be replaced with the maximum of :math:`k q + \\mathsf{low}
< \\mathsf{high}`, where :math:`k` is an integer.
Args:
low:
Lower endpoint of the range of the distribution. ``low`` is included in the range.
``low`` must be less than or equal to ``high``.
high:
Upper endpoint of the range of the distribution. ``high`` is included in the range.
``high`` must be greater than or equal to ``low``.
q:
A discretization step. ``q`` must be larger than 0.
Attributes:
low:
Lower endpoint of the range of the distribution. ``low`` is included in the range.
high:
Upper endpoint of the range of the distribution. ``high`` is included in the range.
"""
def __init__(self, low: float, high: float, q: float) -> None:
super().__init__(low=low, high=high, step=q)
def _asdict(self) -> dict:
d = copy.deepcopy(self.__dict__)
d.pop("log")
step = d.pop("step")
d["q"] = step
return d
@property
def q(self) -> float:
"""Discretization step.
:class:`~optuna.distributions.DiscreteUniformDistribution` is a subtype of
:class:`~optuna.distributions.FloatDistribution`.
This property is a proxy for its ``step`` attribute.
"""
return cast("float", self.step)
@q.setter
def q(self, v: float) -> None:
self.step = v
| DiscreteUniformDistribution |
python | joke2k__faker | faker/providers/passport/en_US/__init__.py | {
"start": 161,
"end": 3876
} | class ____(PassportProvider):
"""Implement passport provider for ``en_US`` locale.
Sources:
- https://travel.state.gov/content/travel/en/passports/passport-help/next-generation-passport.html
- https://www.vitalrecordsonline.com/glossary/passport-book-number
"""
passport_number_formats = (
# NGP
"?########",
# Pre-NGP
"#########",
)
def passport_dates(self, birthday: date = date.today()) -> Tuple[str, str, str]:
"""Generates a formatted date of birth, issue, and expiration dates.
issue and expiration dates are conditioned to fall within U.S. standards of 5 and 10 year expirations
The ``birthday`` argument is a datetime.date object representing a date of birth.
Sources:
-https://travel.state.gov/content/travel/en/passports/passport-help/faqs.html
"""
birth_date = f"{birthday:%d %b %Y}"
today = date.today()
age = (today - birthday).days // 365
if age < 16:
expiry_years = 5
issue_date = self.generator.date_time_between(today - timedelta(days=expiry_years * 365 - 1), today)
# Checks if age is less than 5 so issue date is not before birthdate
if age < 5:
issue_date = self.generator.date_time_between(birthday, today)
elif age >= 26:
expiry_years = 10
issue_date = self.generator.date_time_between(today - timedelta(days=expiry_years * 365 - 1), today)
else:
# In cases between age 16 and 26, the issue date is 5 years ago, but expiry may be in 10 or 5 years
expiry_years = 5
issue_date = self.generator.date_time_between(
today - timedelta(days=expiry_years * 365 - 1), birthday + timedelta(days=16 * 365 - 1)
)
# all people over 21 must have been over 16 when they recieved passport or it will be expired otherwise
if age >= 21:
issue_date = self.generator.date_time_between(today - timedelta(days=expiry_years * 365 - 1), today)
expiry_years = 10
if issue_date.day == 29 and issue_date.month == 2:
issue_date -= timedelta(days=1)
expiry_date = issue_date.replace(year=issue_date.year + expiry_years)
issue_date_format = f"{issue_date:%d %b %Y}"
expiry_date_format = f"{expiry_date:%d %b %Y}"
return birth_date, issue_date_format, expiry_date_format
def passport_gender(self, seed: int = 0) -> SexLiteral:
"""Generates a string representing the gender displayed on a passport
Sources:
- https://williamsinstitute.law.ucla.edu/publications/x-gender-markers-passports/
"""
if seed != 0:
random.seed(seed)
genders = ["M", "F", "X"]
gender: SexLiteral = random.choices(genders, weights=[0.493, 0.493, 0.014], k=1)[0] # type: ignore
return gender
def passport_full(self) -> str:
"""Generates a formatted sting with US Passport information"""
dob = self.passport_dob()
birth_date, issue_date, expiry_date = self.passport_dates(dob)
gender_g = self.passport_gender()
given_name, surname = self.passport_owner(gender=gender_g)
number = self.passport_number()
full_rep = """{first_name}\n{second_name}\n{gender}\n{dob}\n{issue}\n{expire}\n{num}\n"""
full_rep = full_rep.format(
first_name=given_name,
second_name=surname,
gender=gender_g,
dob=birth_date,
issue=issue_date,
expire=expiry_date,
num=number,
)
return full_rep
| Provider |
python | jpadilla__pyjwt | tests/test_api_jwt.py | {
"start": 815,
"end": 34345
} | class ____:
def test_jwt_with_options(self):
jwt = PyJWT(options={"verify_signature": False})
assert jwt.options["verify_signature"] is False
# assert that unrelated option is unchanged from default
assert jwt.options["strict_aud"] is False
# assert that verify_signature is respected unless verify_exp is overridden
assert jwt.options["verify_exp"] is False
def test_decodes_valid_jwt(self, jwt):
example_payload = {"hello": "world"}
example_secret = "secret"
example_jwt = (
b"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9"
b".eyJoZWxsbyI6ICJ3b3JsZCJ9"
b".tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8"
)
decoded_payload = jwt.decode(example_jwt, example_secret, algorithms=["HS256"])
assert decoded_payload == example_payload
def test_decodes_complete_valid_jwt(self, jwt):
example_payload = {"hello": "world"}
example_secret = "secret"
example_jwt = (
b"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9"
b".eyJoZWxsbyI6ICJ3b3JsZCJ9"
b".tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8"
)
decoded = jwt.decode_complete(example_jwt, example_secret, algorithms=["HS256"])
assert decoded == {
"header": {"alg": "HS256", "typ": "JWT"},
"payload": example_payload,
"signature": (
b'\xb6\xf6\xa0,2\xe8j"J\xc4\xe2\xaa\xa4\x15\xd2'
b"\x10l\xbbI\x84\xa2}\x98c\x9e\xd8&\xf5\xcbi\xca?"
),
}
def test_load_verify_valid_jwt(self, jwt):
example_payload = {"hello": "world"}
example_secret = "secret"
example_jwt = (
b"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9"
b".eyJoZWxsbyI6ICJ3b3JsZCJ9"
b".tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8"
)
decoded_payload = jwt.decode(
example_jwt, key=example_secret, algorithms=["HS256"]
)
assert decoded_payload == example_payload
def test_decode_invalid_payload_string(self, jwt):
example_jwt = (
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.aGVsb"
"G8gd29ybGQ.SIr03zM64awWRdPrAM_61QWsZchAtgDV"
"3pphfHPPWkI"
)
example_secret = "secret"
with pytest.raises(DecodeError) as exc:
jwt.decode(example_jwt, example_secret, algorithms=["HS256"])
assert "Invalid payload string" in str(exc.value)
def test_decode_with_non_mapping_payload_throws_exception(self, jwt):
secret = "secret"
example_jwt = (
"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9."
"MQ." # == 1
"AbcSR3DWum91KOgfKxUHm78rLs_DrrZ1CrDgpUFFzls"
)
with pytest.raises(DecodeError) as context:
jwt.decode(example_jwt, secret, algorithms=["HS256"])
exception = context.value
assert str(exception) == "Invalid payload string: must be a json object"
def test_decode_with_invalid_audience_param_throws_exception(self, jwt):
secret = "secret"
example_jwt = (
"eyJhbGciOiAiSFMyNTYiLCAidHlwIjogIkpXVCJ9"
".eyJoZWxsbyI6ICJ3b3JsZCJ9"
".tvagLDLoaiJKxOKqpBXSEGy7SYSifZhjntgm9ctpyj8"
)
with pytest.raises(TypeError) as context:
jwt.decode(example_jwt, secret, audience=1, algorithms=["HS256"])
exception = context.value
assert str(exception) == "audience must be a string, iterable or None"
def test_decode_with_nonlist_aud_claim_throws_exception(self, jwt):
secret = "secret"
example_jwt = (
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
".eyJoZWxsbyI6IndvcmxkIiwiYXVkIjoxfQ" # aud = 1
".Rof08LBSwbm8Z_bhA2N3DFY-utZR1Gi9rbIS5Zthnnc"
)
with pytest.raises(InvalidAudienceError) as context:
jwt.decode(
example_jwt,
secret,
audience="my_audience",
algorithms=["HS256"],
)
exception = context.value
assert str(exception) == "Invalid claim format in token"
def test_decode_with_invalid_aud_list_member_throws_exception(self, jwt):
secret = "secret"
example_jwt = (
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
".eyJoZWxsbyI6IndvcmxkIiwiYXVkIjpbMV19"
".iQgKpJ8shetwNMIosNXWBPFB057c2BHs-8t1d2CCM2A"
)
with pytest.raises(InvalidAudienceError) as context:
jwt.decode(
example_jwt,
secret,
audience="my_audience",
algorithms=["HS256"],
)
exception = context.value
assert str(exception) == "Invalid claim format in token"
def test_encode_bad_type(self, jwt):
types = ["string", tuple(), list(), 42, set()]
for t in types:
pytest.raises(
TypeError,
lambda t=t: jwt.encode(t, "secret", algorithms=["HS256"]),
)
def test_encode_with_non_str_iss(self, jwt):
"""Regression test for Issue #1039."""
with pytest.raises(TypeError):
jwt.encode(
{
"iss": 123,
},
key="secret",
)
def test_encode_with_typ(self, jwt):
payload = {
"iss": "https://scim.example.com",
"iat": 1458496404,
"jti": "4d3559ec67504aaba65d40b0363faad8",
"aud": [
"https://scim.example.com/Feeds/98d52461fa5bbc879593b7754",
"https://scim.example.com/Feeds/5d7604516b1d08641d7676ee7",
],
"events": {
"urn:ietf:params:scim:event:create": {
"ref": "https://scim.example.com/Users/44f6142df96bd6ab61e7521d9",
"attributes": ["id", "name", "userName", "password", "emails"],
}
},
}
token = jwt.encode(
payload, "secret", algorithm="HS256", headers={"typ": "secevent+jwt"}
)
header = token[0 : token.index(".")].encode()
header = base64url_decode(header)
header_obj = json.loads(header)
assert "typ" in header_obj
assert header_obj["typ"] == "secevent+jwt"
def test_decode_raises_exception_if_exp_is_not_int(self, jwt):
# >>> jwt.encode({'exp': 'not-an-int'}, 'secret')
example_jwt = (
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJleHAiOiJub3QtYW4taW50In0."
"P65iYgoHtBqB07PMtBSuKNUEIPPPfmjfJG217cEE66s"
)
with pytest.raises(DecodeError) as exc:
jwt.decode(example_jwt, "secret", algorithms=["HS256"])
assert "exp" in str(exc.value)
def test_decode_raises_exception_if_iat_is_not_int(self, jwt):
# >>> jwt.encode({'iat': 'not-an-int'}, 'secret')
example_jwt = (
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJpYXQiOiJub3QtYW4taW50In0."
"H1GmcQgSySa5LOKYbzGm--b1OmRbHFkyk8pq811FzZM"
)
with pytest.raises(InvalidIssuedAtError):
jwt.decode(example_jwt, "secret", algorithms=["HS256"])
def test_decode_raises_exception_if_iat_is_greater_than_now(self, jwt, payload):
payload["iat"] = utc_timestamp() + 10
secret = "secret"
jwt_message = jwt.encode(payload, secret)
with pytest.raises(ImmatureSignatureError):
jwt.decode(jwt_message, secret, algorithms=["HS256"])
def test_decode_works_if_iat_is_str_of_a_number(self, jwt, payload):
payload["iat"] = "1638202770"
secret = "secret"
jwt_message = jwt.encode(payload, secret)
data = jwt.decode(jwt_message, secret, algorithms=["HS256"])
assert data["iat"] == "1638202770"
def test_decode_raises_exception_if_nbf_is_not_int(self, jwt):
# >>> jwt.encode({'nbf': 'not-an-int'}, 'secret')
example_jwt = (
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
"eyJuYmYiOiJub3QtYW4taW50In0."
"c25hldC8G2ZamC8uKpax9sYMTgdZo3cxrmzFHaAAluw"
)
with pytest.raises(DecodeError):
jwt.decode(example_jwt, "secret", algorithms=["HS256"])
def test_decode_allows_aud_to_be_none(self, jwt):
# >>> jwt.encode({'aud': None}, 'secret')
example_jwt = (
"eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9."
"eyJhdWQiOm51bGx9."
"-Peqc-pTugGvrc5C8Bnl0-X1V_5fv-aVb_7y7nGBVvQ"
)
decoded = jwt.decode(example_jwt, "secret", algorithms=["HS256"])
assert decoded["aud"] is None
def test_encode_datetime(self, jwt):
secret = "secret"
current_datetime = datetime.now(tz=timezone.utc)
payload = {
"exp": current_datetime,
"iat": current_datetime,
"nbf": current_datetime,
}
jwt_message = jwt.encode(payload, secret)
decoded_payload = jwt.decode(
jwt_message, secret, leeway=1, algorithms=["HS256"]
)
assert decoded_payload["exp"] == timegm(current_datetime.utctimetuple())
assert decoded_payload["iat"] == timegm(current_datetime.utctimetuple())
assert decoded_payload["nbf"] == timegm(current_datetime.utctimetuple())
# payload is not mutated.
assert payload == {
"exp": current_datetime,
"iat": current_datetime,
"nbf": current_datetime,
}
# 'Control' Elliptic Curve JWT created by another library.
# Used to test for regressions that could affect both
# encoding / decoding operations equally (causing tests
# to still pass).
@crypto_required
def test_decodes_valid_es256_jwt(self, jwt):
example_payload = {"hello": "world"}
with open(key_path("testkey_ec.pub")) as fp:
example_pubkey = fp.read()
example_jwt = (
b"eyJhbGciOiJFUzI1NiIsInR5cCI6IkpXVCJ9."
b"eyJoZWxsbyI6IndvcmxkIn0.TORyNQab_MoXM7DvNKaTwbrJr4UY"
b"d2SsX8hhlnWelQFmPFSf_JzC2EbLnar92t-bXsDovzxp25ExazrVHkfPkQ"
)
decoded_payload = jwt.decode(example_jwt, example_pubkey, algorithms=["ES256"])
assert decoded_payload == example_payload
# 'Control' RSA JWT created by another library.
# Used to test for regressions that could affect both
# encoding / decoding operations equally (causing tests
# to still pass).
@crypto_required
def test_decodes_valid_rs384_jwt(self, jwt):
example_payload = {"hello": "world"}
with open(key_path("testkey_rsa.pub")) as fp:
example_pubkey = fp.read()
example_jwt = (
b"eyJhbGciOiJSUzM4NCIsInR5cCI6IkpXVCJ9"
b".eyJoZWxsbyI6IndvcmxkIn0"
b".yNQ3nI9vEDs7lEh-Cp81McPuiQ4ZRv6FL4evTYYAh1X"
b"lRTTR3Cz8pPA9Stgso8Ra9xGB4X3rlra1c8Jz10nTUju"
b"O06OMm7oXdrnxp1KIiAJDerWHkQ7l3dlizIk1bmMA457"
b"W2fNzNfHViuED5ISM081dgf_a71qBwJ_yShMMrSOfxDx"
b"mX9c4DjRogRJG8SM5PvpLqI_Cm9iQPGMvmYK7gzcq2cJ"
b"urHRJDJHTqIdpLWXkY7zVikeen6FhuGyn060Dz9gYq9t"
b"uwmrtSWCBUjiN8sqJ00CDgycxKqHfUndZbEAOjcCAhBr"
b"qWW3mSVivUfubsYbwUdUG3fSRPjaUPcpe8A"
)
decoded_payload = jwt.decode(example_jwt, example_pubkey, algorithms=["RS384"])
assert decoded_payload == example_payload
def test_decode_with_expiration(self, jwt, payload):
payload["exp"] = utc_timestamp() - 1
secret = "secret"
jwt_message = jwt.encode(payload, secret)
with pytest.raises(ExpiredSignatureError):
jwt.decode(jwt_message, secret, algorithms=["HS256"])
def test_decode_with_notbefore(self, jwt, payload):
payload["nbf"] = utc_timestamp() + 10
secret = "secret"
jwt_message = jwt.encode(payload, secret)
with pytest.raises(ImmatureSignatureError):
jwt.decode(jwt_message, secret, algorithms=["HS256"])
def test_decode_skip_expiration_verification(self, jwt, payload):
payload["exp"] = time.time() - 1
secret = "secret"
jwt_message = jwt.encode(payload, secret)
jwt.decode(
jwt_message,
secret,
algorithms=["HS256"],
options={"verify_exp": False},
)
def test_decode_skip_notbefore_verification(self, jwt, payload):
payload["nbf"] = time.time() + 10
secret = "secret"
jwt_message = jwt.encode(payload, secret)
jwt.decode(
jwt_message,
secret,
algorithms=["HS256"],
options={"verify_nbf": False},
)
def test_decode_with_expiration_with_leeway(self, jwt, payload):
payload["exp"] = utc_timestamp() - 2
secret = "secret"
jwt_message = jwt.encode(payload, secret)
# With 5 seconds leeway, should be ok
for leeway in (5, timedelta(seconds=5)):
decoded = jwt.decode(
jwt_message, secret, leeway=leeway, algorithms=["HS256"]
)
assert decoded == payload
# With 1 seconds, should fail
for leeway in (1, timedelta(seconds=1)):
with pytest.raises(ExpiredSignatureError):
jwt.decode(jwt_message, secret, leeway=leeway, algorithms=["HS256"])
def test_decode_with_notbefore_with_leeway(self, jwt, payload):
payload["nbf"] = utc_timestamp() + 10
secret = "secret"
jwt_message = jwt.encode(payload, secret)
# With 13 seconds leeway, should be ok
jwt.decode(jwt_message, secret, leeway=13, algorithms=["HS256"])
with pytest.raises(ImmatureSignatureError):
jwt.decode(jwt_message, secret, leeway=1, algorithms=["HS256"])
def test_check_audience_when_valid(self, jwt):
payload = {"some": "payload", "aud": "urn:me"}
token = jwt.encode(payload, "secret")
jwt.decode(token, "secret", audience="urn:me", algorithms=["HS256"])
def test_check_audience_list_when_valid(self, jwt):
payload = {"some": "payload", "aud": "urn:me"}
token = jwt.encode(payload, "secret")
jwt.decode(
token,
"secret",
audience=["urn:you", "urn:me"],
algorithms=["HS256"],
)
def test_check_audience_none_specified(self, jwt):
payload = {"some": "payload", "aud": "urn:me"}
token = jwt.encode(payload, "secret")
with pytest.raises(InvalidAudienceError):
jwt.decode(token, "secret", algorithms=["HS256"])
def test_raise_exception_invalid_audience_list(self, jwt):
payload = {"some": "payload", "aud": "urn:me"}
token = jwt.encode(payload, "secret")
with pytest.raises(InvalidAudienceError):
jwt.decode(
token,
"secret",
audience=["urn:you", "urn:him"],
algorithms=["HS256"],
)
def test_check_audience_in_array_when_valid(self, jwt):
payload = {"some": "payload", "aud": ["urn:me", "urn:someone-else"]}
token = jwt.encode(payload, "secret")
jwt.decode(token, "secret", audience="urn:me", algorithms=["HS256"])
def test_raise_exception_invalid_audience(self, jwt):
payload = {"some": "payload", "aud": "urn:someone-else"}
token = jwt.encode(payload, "secret")
with pytest.raises(InvalidAudienceError):
jwt.decode(token, "secret", audience="urn-me", algorithms=["HS256"])
def test_raise_exception_audience_as_bytes(self, jwt):
payload = {"some": "payload", "aud": ["urn:me", "urn:someone-else"]}
token = jwt.encode(payload, "secret")
with pytest.raises(InvalidAudienceError):
jwt.decode(token, "secret", audience=b"urn:me", algorithms=["HS256"])
def test_raise_exception_invalid_audience_in_array(self, jwt):
payload = {
"some": "payload",
"aud": ["urn:someone", "urn:someone-else"],
}
token = jwt.encode(payload, "secret")
with pytest.raises(InvalidAudienceError):
jwt.decode(token, "secret", audience="urn:me", algorithms=["HS256"])
def test_raise_exception_token_without_issuer(self, jwt):
issuer = "urn:wrong"
payload = {"some": "payload"}
token = jwt.encode(payload, "secret")
with pytest.raises(MissingRequiredClaimError) as exc:
jwt.decode(token, "secret", issuer=issuer, algorithms=["HS256"])
assert exc.value.claim == "iss"
def test_rasise_exception_on_partial_issuer_match(self, jwt):
issuer = "urn:expected"
payload = {"iss": "urn:"}
token = jwt.encode(payload, "secret")
with pytest.raises(InvalidIssuerError):
jwt.decode(token, "secret", issuer=issuer, algorithms=["HS256"])
def test_raise_exception_token_without_audience(self, jwt):
payload = {"some": "payload"}
token = jwt.encode(payload, "secret")
with pytest.raises(MissingRequiredClaimError) as exc:
jwt.decode(token, "secret", audience="urn:me", algorithms=["HS256"])
assert exc.value.claim == "aud"
def test_raise_exception_token_with_aud_none_and_without_audience(self, jwt):
payload = {"some": "payload", "aud": None}
token = jwt.encode(payload, "secret")
with pytest.raises(MissingRequiredClaimError) as exc:
jwt.decode(token, "secret", audience="urn:me", algorithms=["HS256"])
assert exc.value.claim == "aud"
def test_check_issuer_when_valid(self, jwt):
issuer = "urn:foo"
payload = {"some": "payload", "iss": "urn:foo"}
token = jwt.encode(payload, "secret")
jwt.decode(token, "secret", issuer=issuer, algorithms=["HS256"])
def test_check_issuer_list_when_valid(self, jwt):
issuer = ["urn:foo", "urn:bar"]
payload = {"some": "payload", "iss": "urn:foo"}
token = jwt.encode(payload, "secret")
jwt.decode(token, "secret", issuer=issuer, algorithms=["HS256"])
def test_raise_exception_invalid_issuer(self, jwt):
issuer = "urn:wrong"
payload = {"some": "payload", "iss": "urn:foo"}
token = jwt.encode(payload, "secret")
with pytest.raises(InvalidIssuerError):
jwt.decode(token, "secret", issuer=issuer, algorithms=["HS256"])
def test_raise_exception_invalid_issuer_list(self, jwt):
issuer = ["urn:wrong", "urn:bar", "urn:baz"]
payload = {"some": "payload", "iss": "urn:foo"}
token = jwt.encode(payload, "secret")
with pytest.raises(InvalidIssuerError):
jwt.decode(token, "secret", issuer=issuer, algorithms=["HS256"])
def test_skip_check_audience(self, jwt):
payload = {"some": "payload", "aud": "urn:me"}
token = jwt.encode(payload, "secret")
jwt.decode(
token,
"secret",
options={"verify_aud": False},
algorithms=["HS256"],
)
def test_skip_check_exp(self, jwt):
payload = {
"some": "payload",
"exp": datetime.now(tz=timezone.utc) - timedelta(days=1),
}
token = jwt.encode(payload, "secret")
jwt.decode(
token,
"secret",
options={"verify_exp": False},
algorithms=["HS256"],
)
def test_decode_should_raise_error_if_exp_required_but_not_present(self, jwt):
payload = {
"some": "payload",
# exp not present
}
token = jwt.encode(payload, "secret")
with pytest.raises(MissingRequiredClaimError) as exc:
jwt.decode(
token,
"secret",
options={"require": ["exp"]},
algorithms=["HS256"],
)
assert exc.value.claim == "exp"
def test_decode_should_raise_error_if_iat_required_but_not_present(self, jwt):
payload = {
"some": "payload",
# iat not present
}
token = jwt.encode(payload, "secret")
with pytest.raises(MissingRequiredClaimError) as exc:
jwt.decode(
token,
"secret",
options={"require": ["iat"]},
algorithms=["HS256"],
)
assert exc.value.claim == "iat"
def test_decode_should_raise_error_if_nbf_required_but_not_present(self, jwt):
payload = {
"some": "payload",
# nbf not present
}
token = jwt.encode(payload, "secret")
with pytest.raises(MissingRequiredClaimError) as exc:
jwt.decode(
token,
"secret",
options={"require": ["nbf"]},
algorithms=["HS256"],
)
assert exc.value.claim == "nbf"
def test_skip_check_signature(self, jwt):
token = (
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9"
".eyJzb21lIjoicGF5bG9hZCJ9"
".4twFt5NiznN84AWoo1d7KO1T_yoc0Z6XOpOVswacPZA"
)
jwt.decode(
token,
"secret",
options={"verify_signature": False},
algorithms=["HS256"],
)
def test_skip_check_iat(self, jwt):
payload = {
"some": "payload",
"iat": datetime.now(tz=timezone.utc) + timedelta(days=1),
}
token = jwt.encode(payload, "secret")
jwt.decode(
token,
"secret",
options={"verify_iat": False},
algorithms=["HS256"],
)
def test_skip_check_nbf(self, jwt):
payload = {
"some": "payload",
"nbf": datetime.now(tz=timezone.utc) + timedelta(days=1),
}
token = jwt.encode(payload, "secret")
jwt.decode(
token,
"secret",
options={"verify_nbf": False},
algorithms=["HS256"],
)
def test_custom_json_encoder(self, jwt):
class CustomJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, Decimal):
return "it worked"
return super().default(o)
data = {"some_decimal": Decimal("2.2")}
with pytest.raises(TypeError):
jwt.encode(data, "secret", algorithms=["HS256"])
token = jwt.encode(data, "secret", json_encoder=CustomJSONEncoder)
payload = jwt.decode(token, "secret", algorithms=["HS256"])
assert payload == {"some_decimal": "it worked"}
def test_decode_with_verify_exp_option(self, jwt, payload):
payload["exp"] = utc_timestamp() - 1
secret = "secret"
jwt_message = jwt.encode(payload, secret)
jwt.decode(
jwt_message,
secret,
algorithms=["HS256"],
options={"verify_exp": False},
)
with pytest.raises(ExpiredSignatureError):
jwt.decode(
jwt_message,
secret,
algorithms=["HS256"],
options={"verify_exp": True},
)
def test_decode_with_verify_exp_option_and_signature_off(self, jwt, payload):
payload["exp"] = utc_timestamp() - 1
secret = "secret"
jwt_message = jwt.encode(payload, secret)
jwt.decode(
jwt_message,
options={"verify_signature": False},
)
with pytest.raises(ExpiredSignatureError):
jwt.decode(
jwt_message,
options={"verify_signature": False, "verify_exp": True},
)
def test_decode_with_optional_algorithms(self, jwt, payload):
secret = "secret"
jwt_message = jwt.encode(payload, secret)
with pytest.raises(DecodeError) as exc:
jwt.decode(jwt_message, secret)
assert (
'It is required that you pass in a value for the "algorithms" argument when calling decode().'
in str(exc.value)
)
def test_decode_no_algorithms_verify_signature_false(self, jwt, payload):
secret = "secret"
jwt_message = jwt.encode(payload, secret)
jwt.decode(jwt_message, secret, options={"verify_signature": False})
def test_decode_legacy_verify_warning(self, jwt, payload):
secret = "secret"
jwt_message = jwt.encode(payload, secret)
with pytest.deprecated_call():
# The implicit default for options.verify_signature is True,
# but the user sets verify to False.
jwt.decode(jwt_message, secret, verify=False, algorithms=["HS256"])
with pytest.deprecated_call():
# The user explicitly sets verify=True,
# but contradicts it in verify_signature.
jwt.decode(
jwt_message, secret, verify=True, options={"verify_signature": False}
)
def test_decode_no_options_mutation(self, jwt, payload):
options = {"verify_signature": True}
orig_options = options.copy()
secret = "secret"
jwt_message = jwt.encode(payload, secret)
jwt.decode(jwt_message, secret, options=options, algorithms=["HS256"])
assert options == orig_options
def test_decode_warns_on_unsupported_kwarg(self, jwt, payload):
secret = "secret"
jwt_message = jwt.encode(payload, secret)
with pytest.warns(RemovedInPyjwt3Warning) as record:
jwt.decode(jwt_message, secret, algorithms=["HS256"], foo="bar")
assert len(record) == 1
assert "foo" in str(record[0].message)
def test_decode_complete_warns_on_unsupported_kwarg(self, jwt, payload):
secret = "secret"
jwt_message = jwt.encode(payload, secret)
with pytest.warns(RemovedInPyjwt3Warning) as record:
jwt.decode_complete(jwt_message, secret, algorithms=["HS256"], foo="bar")
assert len(record) == 1
assert "foo" in str(record[0].message)
def test_decode_strict_aud_forbids_list_audience(self, jwt, payload):
secret = "secret"
payload["aud"] = "urn:foo"
jwt_message = jwt.encode(payload, secret)
# Decodes without `strict_aud`.
jwt.decode(
jwt_message,
secret,
audience=["urn:foo", "urn:bar"],
options={"strict_aud": False},
algorithms=["HS256"],
)
# Fails with `strict_aud`.
with pytest.raises(InvalidAudienceError, match=r"Invalid audience \(strict\)"):
jwt.decode(
jwt_message,
secret,
audience=["urn:foo", "urn:bar"],
options={"strict_aud": True},
algorithms=["HS256"],
)
def test_decode_strict_aud_forbids_list_claim(self, jwt, payload):
secret = "secret"
payload["aud"] = ["urn:foo", "urn:bar"]
jwt_message = jwt.encode(payload, secret)
# Decodes without `strict_aud`.
jwt.decode(
jwt_message,
secret,
audience="urn:foo",
options={"strict_aud": False},
algorithms=["HS256"],
)
# Fails with `strict_aud`.
with pytest.raises(
InvalidAudienceError, match=r"Invalid claim format in token \(strict\)"
):
jwt.decode(
jwt_message,
secret,
audience="urn:foo",
options={"strict_aud": True},
algorithms=["HS256"],
)
def test_decode_strict_aud_does_not_match(self, jwt, payload):
secret = "secret"
payload["aud"] = "urn:foo"
jwt_message = jwt.encode(payload, secret)
with pytest.raises(
InvalidAudienceError, match=r"Audience doesn't match \(strict\)"
):
jwt.decode(
jwt_message,
secret,
audience="urn:bar",
options={"strict_aud": True},
algorithms=["HS256"],
)
def test_decode_strict_ok(self, jwt, payload):
secret = "secret"
payload["aud"] = "urn:foo"
jwt_message = jwt.encode(payload, secret)
jwt.decode(
jwt_message,
secret,
audience="urn:foo",
options={"strict_aud": True},
algorithms=["HS256"],
)
# -------------------- Sub Claim Tests --------------------
def test_encode_decode_sub_claim(self, jwt):
payload = {
"sub": "user123",
}
secret = "your-256-bit-secret"
token = jwt.encode(payload, secret, algorithm="HS256")
decoded = jwt.decode(token, secret, algorithms=["HS256"])
assert decoded["sub"] == "user123"
def test_decode_without_and_not_required_sub_claim(self, jwt):
secret = "your-256-bit-secret"
token = jwt.encode({}, secret, algorithm="HS256")
decoded = jwt.decode(token, secret, algorithms=["HS256"])
assert "sub" not in decoded
def test_decode_missing_sub_but_required_claim(self, jwt):
secret = "your-256-bit-secret"
token = jwt.encode({}, secret, algorithm="HS256")
with pytest.raises(MissingRequiredClaimError):
jwt.decode(
token, secret, algorithms=["HS256"], options={"require": ["sub"]}
)
def test_decode_invalid_int_sub_claim(self, jwt):
payload = {
"sub": 1224344,
}
secret = "your-256-bit-secret"
token = jwt.encode(payload, secret, algorithm="HS256")
with pytest.raises(InvalidSubjectError):
jwt.decode(token, secret, algorithms=["HS256"])
def test_decode_with_valid_sub_claim(self, jwt):
payload = {
"sub": "user123",
}
secret = "your-256-bit-secret"
token = jwt.encode(payload, secret, algorithm="HS256")
decoded = jwt.decode(token, secret, algorithms=["HS256"], subject="user123")
assert decoded["sub"] == "user123"
def test_decode_with_invalid_sub_claim(self, jwt):
payload = {
"sub": "user123",
}
secret = "your-256-bit-secret"
token = jwt.encode(payload, secret, algorithm="HS256")
with pytest.raises(InvalidSubjectError) as exc_info:
jwt.decode(token, secret, algorithms=["HS256"], subject="user456")
assert "Invalid subject" in str(exc_info.value)
def test_decode_with_sub_claim_and_none_subject(self, jwt):
payload = {
"sub": "user789",
}
secret = "your-256-bit-secret"
token = jwt.encode(payload, secret, algorithm="HS256")
decoded = jwt.decode(token, secret, algorithms=["HS256"], subject=None)
assert decoded["sub"] == "user789"
# -------------------- JTI Claim Tests --------------------
def test_encode_decode_with_valid_jti_claim(self, jwt):
payload = {
"jti": "unique-id-456",
}
secret = "your-256-bit-secret"
token = jwt.encode(payload, secret, algorithm="HS256")
decoded = jwt.decode(token, secret, algorithms=["HS256"])
assert decoded["jti"] == "unique-id-456"
def test_decode_missing_jti_when_required_claim(self, jwt):
payload = {"name": "Bob", "admin": False}
secret = "your-256-bit-secret"
token = jwt.encode(payload, secret, algorithm="HS256")
with pytest.raises(MissingRequiredClaimError) as exc_info:
jwt.decode(
token, secret, algorithms=["HS256"], options={"require": ["jti"]}
)
assert "jti" in str(exc_info.value)
def test_decode_missing_jti_claim(self, jwt):
secret = "your-256-bit-secret"
token = jwt.encode({}, secret, algorithm="HS256")
decoded = jwt.decode(token, secret, algorithms=["HS256"])
assert decoded.get("jti") is None
def test_jti_claim_with_invalid_int_value(self, jwt):
special_jti = 12223
payload = {
"jti": special_jti,
}
secret = "your-256-bit-secret"
token = jwt.encode(payload, secret, algorithm="HS256")
with pytest.raises(InvalidJTIError):
jwt.decode(token, secret, algorithms=["HS256"])
def test_validate_iss_with_container_of_str(self, jwt: PyJWT) -> None:
"""Check _validate_iss works with Container[str]."""
payload = {
"iss": "urn:expected",
}
# pytest.mark.parametrize triggers Untyped Decorator mypy issue,
# so trying inline for now
for issuer in (
["urn:expected", "urn:other"],
("urn:expected", "urn:other"),
{"urn:expected", "urn:other"},
):
jwt._validate_iss(payload, issuer=issuer)
def test_validate_iss_with_non_str(self, jwt):
"""Regression test for #1039"""
payload = {
"iss": 123,
}
with pytest.raises(InvalidIssuerError):
jwt._validate_iss(payload, issuer="123")
def test_validate_iss_with_non_str_issuer(self, jwt):
"""Regression test for #1039"""
payload = {
"iss": "123",
}
with pytest.raises(InvalidIssuerError):
jwt._validate_iss(payload, issuer=123)
| TestJWT |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws_tests/pipes_tests/fake_ecs.py | {
"start": 472,
"end": 8493
} | class ____:
CONTAINER_NAME = "test-container"
def __init__(self, ecs_client: boto3.client, cloudwatch_client: boto3.client): # pyright: ignore (reportGeneralTypeIssues)
self.ecs_client = ecs_client
self.cloudwatch_client = cloudwatch_client
self._task_runs: dict[
str, SimulatedTaskRun
] = {} # mapping of TaskDefinitionArn to TaskDefinition
@property
def meta(self):
return self.ecs_client.meta
def get_waiter(self, waiter_name: str):
return WaiterMock(self, waiter_name)
def register_task_definition(self, **kwargs):
return self.ecs_client.register_task_definition(**kwargs)
def describe_task_definition(self, **kwargs):
response = self.ecs_client.describe_task_definition(**kwargs)
assert len(response["taskDefinition"]["containerDefinitions"]) == 1, (
"Only 1 container is supported in tests"
)
# unlike real ECS, moto doesn't use cloudwatch logging by default
# so let's add it here
response["taskDefinition"]["containerDefinitions"][0]["logConfiguration"] = (
response["taskDefinition"]["containerDefinitions"][0].get("logConfiguration")
or {
"logDriver": "awslogs",
"options": {
"awslogs-group": f"{response['taskDefinition']['taskDefinitionArn']}", # this value doesn't really matter
"awslogs-stream-prefix": "ecs",
},
}
)
return response
def run_task(self, **kwargs):
response = self.ecs_client.run_task(**kwargs)
# inject container name in case it's missing
response["tasks"][0]["containers"] = response["tasks"][0].get("containers") or []
if len(response["tasks"][0]["containers"]) == 0:
response["tasks"][0]["containers"].append({})
response["tasks"][0]["containers"][0]["name"] = (
response["tasks"][0]["containers"][0].get("name") or self.CONTAINER_NAME
)
task_arn = response["tasks"][0]["taskArn"]
task_definition_arn = response["tasks"][0]["taskDefinitionArn"]
task_definition = self.describe_task_definition(taskDefinition=task_definition_arn)[
"taskDefinition"
]
assert len(task_definition["containerDefinitions"]) == 1, (
"Only 1 container is supported in tests"
)
# execute in a separate process
command = task_definition["containerDefinitions"][0]["command"]
assert command[0] == sys.executable, (
"Only the current Python interpreter is supported in tests"
)
created_at = datetime.now()
popen = Popen(
command,
stdout=PIPE,
stderr=PIPE,
# get env from container overrides
env={
env["name"]: env["value"]
for env in kwargs["overrides"]["containerOverrides"][0].get("environment", [])
},
)
log_group = task_definition["containerDefinitions"][0]["logConfiguration"]["options"][
"awslogs-group"
]
stream_prefix = task_definition["containerDefinitions"][0]["logConfiguration"]["options"][
"awslogs-stream-prefix"
]
container_name = task_definition["containerDefinitions"][0]["name"]
log_stream = f"{stream_prefix}/{container_name}/{task_arn.split('/')[-1]}"
self._task_runs[task_arn] = SimulatedTaskRun(
popen=popen,
cluster=kwargs.get("cluster", "default"),
task_arn=task_arn,
log_group=log_group,
log_stream=log_stream,
created_at=created_at,
runtime_id=str(uuid.uuid4()),
)
self._create_cloudwatch_streams(task_arn)
return response
def describe_tasks(self, cluster: str, tasks: list[str]):
assert len(tasks) == 1, "Only 1 task is supported in tests"
simulated_task = cast("SimulatedTaskRun", self._task_runs[tasks[0]])
response = self.ecs_client.describe_tasks(cluster=cluster, tasks=tasks)
assert len(response["tasks"]) == 1, "Only 1 task is supported in tests"
task_definition = self.describe_task_definition(
taskDefinition=response["tasks"][0]["taskDefinitionArn"]
)["taskDefinition"]
assert len(task_definition["containerDefinitions"]) == 1, (
"Only 1 container is supported in tests"
)
# need to inject container name since moto doesn't return it
response["tasks"][0]["containers"].append(
{
"name": task_definition["containerDefinitions"][0]["name"],
"runtimeId": simulated_task.runtime_id,
}
)
response["tasks"][0]["createdAt"] = simulated_task.created_at
# check if any failed
for task in response["tasks"]:
if task["taskArn"] in self._task_runs:
simulated_task = self._task_runs[task["taskArn"]]
if simulated_task.stopped:
task["lastStatus"] = "STOPPED"
task["stoppedReason"] = simulated_task.stopped_reason
task["containers"][0]["exitCode"] = 1
self._upload_logs_to_cloudwatch(task["taskArn"])
return response
if simulated_task.popen.poll() is not None:
simulated_task.popen.wait()
# check status code
if simulated_task.popen.returncode == 0:
task["lastStatus"] = "STOPPED"
task["containers"][0]["exitCode"] = 0
else:
task["lastStatus"] = "STOPPED"
# _, stderr = simulated_task.popen.communicate()
task["containers"][0]["exitCode"] = 1
self._upload_logs_to_cloudwatch(task["taskArn"])
else:
task["lastStatus"] = "RUNNING"
return response
def stop_task(self, cluster: str, task: str, reason: Optional[str] = None):
if simulated_task := self._task_runs.get(task):
simulated_task.popen.terminate()
simulated_task.stopped = True
simulated_task.stopped_reason = reason
self._upload_logs_to_cloudwatch(task)
else:
raise RuntimeError(f"Task {task} was not found")
def _create_cloudwatch_streams(self, task: str):
simulated_task = self._task_runs[task]
log_group = simulated_task.log_group
log_stream = simulated_task.log_stream
try:
self.cloudwatch_client.create_log_group(
logGroupName=f"{log_group}",
)
except self.cloudwatch_client.exceptions.ResourceAlreadyExistsException:
pass
try:
self.cloudwatch_client.create_log_stream(
logGroupName=f"{log_group}",
logStreamName=log_stream,
)
except self.cloudwatch_client.exceptions.ResourceAlreadyExistsException:
pass
def _upload_logs_to_cloudwatch(self, task: str):
simulated_task = self._task_runs[task]
if simulated_task.logs_uploaded:
return
log_group = simulated_task.log_group
log_stream = simulated_task.log_stream
stdout, stderr = self._task_runs[task].popen.communicate()
for out in [stderr, stdout]:
for line in out.decode().split("\n"):
if line:
self.cloudwatch_client.put_log_events(
logGroupName=f"{log_group}",
logStreamName=log_stream,
logEvents=[{"timestamp": int(time.time() * 1000), "message": str(line)}],
)
time.sleep(0.1)
simulated_task.logs_uploaded = True
| LocalECSMockClient |
python | tornadoweb__tornado | tornado/web.py | {
"start": 126784,
"end": 131688
} | class ____(OutputTransform):
"""Applies the gzip content encoding to the response.
See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
.. versionchanged:: 4.0
Now compresses all mime types beginning with ``text/``, instead
of just a whitelist. (the whitelist is still used for certain
non-text mime types).
"""
# Whitelist of compressible mime types (in addition to any types
# beginning with "text/").
CONTENT_TYPES = {
"application/javascript",
"application/x-javascript",
"application/xml",
"application/atom+xml",
"application/json",
"application/xhtml+xml",
"image/svg+xml",
}
# Python's GzipFile defaults to level 9, while most other gzip
# tools (including gzip itself) default to 6, which is probably a
# better CPU/size tradeoff.
GZIP_LEVEL = 6
# Responses that are too short are unlikely to benefit from gzipping
# after considering the "Content-Encoding: gzip" header and the header
# inside the gzip encoding.
# Note that responses written in multiple chunks will be compressed
# regardless of size.
MIN_LENGTH = 1024
def __init__(self, request: httputil.HTTPServerRequest) -> None:
self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
def _compressible_type(self, ctype: str) -> bool:
return ctype.startswith("text/") or ctype in self.CONTENT_TYPES
def transform_first_chunk(
self,
status_code: int,
headers: httputil.HTTPHeaders,
chunk: bytes,
finishing: bool,
) -> Tuple[int, httputil.HTTPHeaders, bytes]:
# TODO: can/should this type be inherited from the superclass?
if "Vary" in headers:
headers["Vary"] += ", Accept-Encoding"
else:
headers["Vary"] = "Accept-Encoding"
if self._gzipping:
ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
self._gzipping = (
self._compressible_type(ctype)
and (not finishing or len(chunk) >= self.MIN_LENGTH)
and ("Content-Encoding" not in headers)
)
if self._gzipping:
headers["Content-Encoding"] = "gzip"
self._gzip_value = BytesIO()
self._gzip_file = gzip.GzipFile(
mode="w", fileobj=self._gzip_value, compresslevel=self.GZIP_LEVEL
)
chunk = self.transform_chunk(chunk, finishing)
if "Content-Length" in headers:
# The original content length is no longer correct.
# If this is the last (and only) chunk, we can set the new
# content-length; otherwise we remove it and fall back to
# chunked encoding.
if finishing:
headers["Content-Length"] = str(len(chunk))
else:
del headers["Content-Length"]
return status_code, headers, chunk
def transform_chunk(self, chunk: bytes, finishing: bool) -> bytes:
if self._gzipping:
self._gzip_file.write(chunk)
if finishing:
self._gzip_file.close()
else:
self._gzip_file.flush()
chunk = self._gzip_value.getvalue()
self._gzip_value.truncate(0)
self._gzip_value.seek(0)
return chunk
def authenticated(
method: Callable[..., Optional[Awaitable[None]]],
) -> Callable[..., Optional[Awaitable[None]]]:
"""Decorate methods with this to require that the user be logged in.
If the user is not logged in, they will be redirected to the configured
`login url <RequestHandler.get_login_url>`.
If you configure a login url with a query parameter, Tornado will
assume you know what you're doing and use it as-is. If not, it
will add a `next` parameter so the login page knows where to send
you once you're logged in.
"""
@functools.wraps(method)
def wrapper( # type: ignore
self: RequestHandler, *args, **kwargs
) -> Optional[Awaitable[None]]:
if not self.current_user:
if self.request.method in ("GET", "HEAD"):
url = self.get_login_url()
if "?" not in url:
if urllib.parse.urlsplit(url).scheme:
# if login url is absolute, make next absolute too
next_url = self.request.full_url()
else:
assert self.request.uri is not None
next_url = self.request.uri
url += "?" + urlencode(dict(next=next_url))
self.redirect(url)
return None
raise HTTPError(403)
return method(self, *args, **kwargs)
return wrapper
| GZipContentEncoding |
python | pypa__pip | src/pip/_internal/index/package_finder.py | {
"start": 12717,
"end": 12962
} | class ____:
"""
Encapsulates some of the preferences for filtering and sorting
InstallationCandidate objects.
"""
prefer_binary: bool = False
allow_all_prereleases: bool = False
@dataclass(frozen=True)
| CandidatePreferences |
python | allegroai__clearml | clearml/utilities/gpu/pynvml.py | {
"start": 50038,
"end": 50335
} | class ____(_PrintableStructure):
_fields_ = [
('isGridLicenseSupported', c_int),
('licensableFeaturesCount', c_uint),
('gridLicensableFeatures', c_nvmlGridLicensableFeature_t * NVML_GRID_LICENSE_FEATURE_MAX_COUNT),
]
## Event structures
| c_nvmlGridLicensableFeatures_t |
python | pytorch__pytorch | test/dynamo/test_modules.py | {
"start": 15460,
"end": 15691
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.layer = LazyLayerWithNamedTupleInput()
def forward(self, input):
return self.layer(input)
| LazyModuleWithNamedTupleInput |
python | PyCQA__pyflakes | pyflakes/messages.py | {
"start": 5668,
"end": 5839
} | class ____(Message):
"""
Indicates a continue statement outside of a while or for loop.
"""
message = '\'continue\' not properly in loop'
| ContinueOutsideLoop |
python | google__jax | tests/hijax_test.py | {
"start": 19815,
"end": 31637
} | class ____(jtu.JaxTestCase):
@parameterized.parameters([False, True])
def test_qdd(self, jit):
val1 = 1.0
val2 = jnp.arange(3)
box1 = Box(val1)
def f(box2):
assert core.cur_qdd(box2).leaf_avals == (core.typeof(val1),)
box2.set(val2)
assert core.cur_qdd(box2).leaf_avals == (core.typeof(val2),)
box3 = new_box()
box3.set(val2)
assert core.cur_qdd(box3).leaf_avals == (core.typeof(val2),)
box3.set(val1)
assert core.cur_qdd(box3).leaf_avals == (core.typeof(val1),)
assert core.cur_qdd(box1).leaf_avals == (core.typeof(val1),)
box1.set(val2)
assert core.cur_qdd(box1).leaf_avals == (core.typeof(val2),)
return
if jit:
f = jax.jit(f)
f(Box(val1))
def test_jit_internal(self):
@jax.jit
def f(x):
box = new_box() # TODO not Box
box.set(x)
box.set(box.get() + box.get())
return box.get()
f(1)
def test_jit_internal_box_constructor(self):
@jax.jit
def f(x):
box = Box(x)
box.set(box.get() + box.get())
return box.get()
f(1)
@parameterized.parameters([False, True])
def test_isinstance(self, jit):
def f():
box = Box()
self.assertIsInstance(box, Box)
if jit:
f = jax.jit(f)
f()
def test_jit_arg(self):
@jax.jit
def f(box, x):
assert tracing_ok
box.set(box.get() + x)
tracing_ok = True
box1 = Box(1.0)
f(box1, 1.)
self.assertAllClose(box1.get(), 2.0)
tracing_ok = False
box2 = Box(2.0)
f(box2, 2.)
self.assertAllClose(box2.get(), 4.0)
def test_jit_arg2(self):
# set without get
@jax.jit
def f(box, x):
box_set(box, x)
box = Box(0.0)
f(box, 1.)
self.assertAllClose(box_get(box), 1.0, check_dtypes=False)
def test_jit_arg_in_pytree(self):
@jax.jit
def f(dct, x):
assert tracing_ok
box = dct['box']
box.set(box.get() + x)
tracing_ok = True
box1 = Box(1.0)
f({'box': box1, 'a': 1.0}, 1.)
self.assertAllClose(box1.get(), 2.0)
tracing_ok = False
box2 = Box(2.0)
f({'box': box2, 'a': 2.0}, 2.)
self.assertAllClose(box2.get(), 4.0)
tracing_ok = True
box3 = Box(3) # int, dtype changed
f({'box': box3, 'a': 2.0}, 2.)
self.assertAllClose(box3.get(), 5.0)
def test_jit_closure(self):
box = Box(1.0)
@jax.jit
def f(x):
assert tracing_ok
box.set(box.get() + x)
tracing_ok = True
f(2.0)
self.assertAllClose(box.get(), 3.0)
tracing_ok = False
f(5.0)
self.assertAllClose(box.get(), 8.0)
def test_jit_closure_nested(self):
box = Box(5.0)
@jax.jit
def f(x):
box.set(box.get() + x)
@jax.jit
def g(x):
f(x)
g(3.0)
self.assertAllClose(box.get(), 8.0)
def test_jit_closure_nested2(self):
@jax.jit
def h(x):
box = new_box()
box.set(x)
@jax.jit
def k(x):
box.set(box.get() + x)
k(1.0)
k(1.0)
return box.get()
ans = h(2.0)
self.assertAllClose(ans, 4.0)
def test_jit_closure_nested3(self):
box = new_box()
@jax.jit
def h(x):
box.set(x)
@jax.jit
def k(x):
box.set(box.get() + x)
k(1.0)
k(1.0)
return box.get()
ans = h(2.0)
self.assertAllClose(ans, 4.0)
@parameterized.parameters([False, True])
def test_jvp_closure_stop_gradient(self, jit):
box = Box(1.0)
def f(x):
y = 2 * x
box.set(box.get() + jax.lax.stop_gradient(y))
return y
if jit:
f = jax.jit(f)
y, y_dot = jax.jvp(f, (1.0,), (1.0,))
self.assertAllClose(y, 2.0)
self.assertAllClose(y_dot, 2.0)
self.assertAllClose(box.get(), 3.0)
@parameterized.parameters([False, True])
def test_jvp_arg(self, jit):
def f(box, x):
box.set(box.get() + x)
return x
if jit:
f = jax.jit(f)
box = Box(5.0)
box_dot = Box(1.0)
y, y_dot = jax.jvp(f, (box, 2.), (box_dot, 1.))
self.assertAllClose(y, 2.0)
self.assertAllClose(y_dot, 1.0)
self.assertAllClose(box.get(), 7.0)
self.assertAllClose(box_dot.get(), 2.0)
@parameterized.parameters([False, True])
def test_custom_vjp_plumbing(self, jit):
box = Box(0.0)
@jax.custom_vjp
def foo(x):
return x
def foo_fwd(x):
return foo(x), None
def foo_bwd(_, g):
box.set(g)
return g,
foo.defvjp(foo_fwd, foo_bwd)
def f(x):
x = 2 * x
x = foo(x)
x = 2 * x
return x
if jit:
f = jax.jit(f)
jax.grad(f)(1.0)
self.assertAllClose(box.get(), 2.0)
@parameterized.parameters([False, True])
def test_custom_vjp_plumbing_abstracted(self, jit):
box = Box(0.0)
@jax.custom_vjp
def foo(box, x):
return x
def foo_fwd(box, x):
return x, box
def foo_bwd(box, g):
box.set(g)
return None, g
foo.defvjp(foo_fwd, foo_bwd)
def f(box, x):
x = 2 * x
x = foo(box, x)
x = 2 * x
return x
if jit:
f = jax.jit(f)
jax.grad(partial(f, box))(1.0)
self.assertAllClose(box.get(), 2.0)
@parameterized.parameters([False, True])
def test_grad_closure_stop_gradient(self, jit):
box = Box(0.0)
def f(x):
y = x * 2
box.set(box.get() + jax.lax.stop_gradient(y))
return y
if jit:
f = jax.jit(f)
g = jax.grad(f)(1.0)
self.assertAllClose(g, 2.0)
self.assertAllClose(box.get(), 2.0)
@parameterized.parameters([False, True])
def test_scan_basic(self, jit):
box = Box(1.0)
def double_it_10():
def body(_, __):
box.set(box.get() * 2)
return None, None
_, _ = jax.lax.scan(body, None, None, length=10)
if jit:
double_it_10 = jax.jit(double_it_10)
double_it_10()
self.assertAllClose(box.get(), 1024., check_dtypes=False)
def test_cond_box_internally_pure(self):
@jax.jit
def doubleit(x):
b = new_box()
b.set(x)
b.set(b.get() + b.get())
return b.get()
def identity(x): return x
@jax.jit
def f(x):
return jax.lax.cond(x > 0, doubleit, identity, x)
self.assertAllClose(f(1.0), 2.0)
def test_cond_box_arg(self):
@jax.jit
def f(x):
b = new_box()
b.set(x)
jax.lax.cond(x > 0, lambda box: box.set(box.get() + 1), lambda _: None, b)
return b.get()
self.assertAllClose(f(1.0), 2.0)
def test_cond_closed_over_box(self):
# TODO: good error messages in the case that qdd changes differently in each branch
def f(x):
b = new_box()
b.set(1.0)
jax.lax.cond(x > 0., lambda _: b.set(b.get() + 1.0), lambda _: None, 1.0)
return b.get()
self.assertAllClose(f(1.0), 2.0)
# TODO error-checking tests from attrs_test.py
###
def test_box_autodiff(self):
if config.enable_x64.value: raise unittest.SkipTest("no x64")
class StashTangents(HiPrimitive):
def is_high(self, *_):
return True
def abstract_eval(_, box_aval, x_aval):
del box_aval
return x_aval, {box_effect}
def to_lojax(_, box, x):
return x
def jvp(_, primals, tangents):
box, x = primals
_, x_dot = tangents
box_set(box, x_dot)
return x, x_dot
def transpose(self, *args):
assert False # TODO
stash_tangents_p = StashTangents('stash_tangents')
def stash_tangents(box, x):
return stash_tangents_p.bind(box, x)
@jax.jit
def f(box, x):
x = stash_tangents(box, x)
return x
box = Box(0.0)
jax.jvp(partial(f, box), (3.,), (5.,))
self.assertAllClose(box_get(box), 5.0, check_dtypes=False)
def test_type_changing_box(self):
box = Box(jnp.arange(1))
box_set(box, jnp.arange(2))
self.assertLen(box._val, 2)
@jax.jit
def f(box, x):
box_set(box, x)
f(box, jnp.arange(3))
self.assertLen(box._val, 3)
f(box, jnp.arange(4))
self.assertLen(box._val, 4)
def test_pytree_box(self):
box = Box(None)
@jax.jit
def f(box, x):
assert tracing_ok
val = box_get(box)
if val is None:
box_set(box, x)
else:
box_set(box, [x, x])
tracing_ok = True
f(box, 1.0)
self.assertAllClose(box_get(box), 1.0, check_dtypes=False)
f(box, 2.0)
self.assertAllClose(box_get(box), [2.0, 2.0], check_dtypes=False)
f(box, 3.0)
self.assertAllClose(box_get(box), [3.0, 3.0], check_dtypes=False)
tracing_ok = False
f(box, 4.0)
self.assertAllClose(box_get(box), [4.0, 4.0], check_dtypes=False)
def test_pytree_of_hijaxtypes_box(self):
@dataclass(frozen=True)
class MyArray:
arr: jax.Array # always f32
@dataclass(frozen=True)
class MyTy(HiType):
has_qdd = False
def to_tangent_aval(self):
return MyTy()
def str_short(self, short_dtypes=False):
return 'MyTy'
def lo_ty(self):
return [core.ShapedArray((), jnp.dtype('float32'))]
def lower_val(self, hi_val: MyArray) -> list[jax.Array]:
return [hi_val.arr]
def raise_val(self, val) -> MyArray:
return MyArray(val)
def __eq__(self, other): return isinstance(other, MyTy)
core.pytype_aval_mappings[MyArray] = lambda _: MyTy()
box = Box([MyArray(jnp.float32(1)),
MyArray(jnp.float32(2))])
@jax.jit
def f(box):
a, b = box_get(box)
box_set(box, [b, a])
f(box)
val = box_get(box)
self.assertIsInstance(val, list)
self.assertLen(val, 2)
b_, a_ = val
self.assertIsInstance(a_, MyArray)
self.assertIsInstance(b_, MyArray)
self.assertAllClose(a_.arr, 1, check_dtypes=False)
self.assertAllClose(b_.arr, 2, check_dtypes=False)
def test_closed_over_type_changing_box(self):
box = Box(None)
box2 = Box(None)
@jax.jit
def f():
assert tracing_ok
x = box.get()
if x is None:
box.set(0)
elif type(x) is dict:
box.set(dict(x, a=5))
box2.set(3)
else:
box.set(x + 1)
tracing_ok = True
f() # tracing okay because first time
f() # tracing okay because first time with box as not None
tracing_ok = False
f()
self.assertEqual(box.get(), 2)
self.assertEqual(box2.get(), None)
box.set(None)
f()
f()
f()
f()
self.assertEqual(box.get(), 3)
self.assertEqual(box2.get(), None)
box.set({'b': 3})
tracing_ok = True
f()
self.assertEqual(box.get(), dict(a=5, b=3))
self.assertEqual(box2.get(), 3)
@parameterized.parameters([False, True])
def test_while_loop(self, jit):
box = Box(1.)
def f():
zero = jnp.zeros((), 'int32')
def cond_fun(i):
return i + zero < 5
def body_fun(i):
box.set(box.get() * 2.)
return i + 1
_ = jax.lax.while_loop(cond_fun, body_fun, 0)
if jit:
f = jax.jit(f)
f()
self.assertAllClose(box.get(), 32, check_dtypes=False)
def test_while_loop_typechange_error(self):
box = Box([1.])
def cond_fun(i):
return i < 5
def body_fun(i):
box.set(box.get() * 2)
return i + 1
with self.assertRaisesRegex(TypeError, "type-changing mutations not allowed"):
_ = jax.lax.while_loop(cond_fun, body_fun, 0)
def test_eval_shape(self):
qarray = QArray(jnp.ones((2, 2)), jnp.ones(2))
@jax.jit
def f():
return qarray
out_type = jax.eval_shape(f)
self.assertEqual(out_type, QArrayTy((2, 2)))
def test_stages_mutable(self):
box = Box(1.0)
@jax.jit
def f(box):
box.set(box.get() + 1.)
f.lower(box).as_text() # don't crash
compiled = f.lower(box).compile()
compiled(box)
compiled(box)
compiled(box)
self.assertAllClose(box.get(), 4.)
| BoxTest |
python | openai__openai-python | src/openai/resources/realtime/realtime.py | {
"start": 23512,
"end": 26182
} | class ____(BaseRealtimeConnectionResource):
def create(self, *, event_id: str | Omit = omit, response: RealtimeResponseCreateParamsParam | Omit = omit) -> None:
"""
This event instructs the server to create a Response, which means triggering
model inference. When in Server VAD mode, the server will create Responses
automatically.
A Response will include at least one Item, and may have two, in which case
the second will be a function call. These Items will be appended to the
conversation history by default.
The server will respond with a `response.created` event, events for Items
and content created, and finally a `response.done` event to indicate the
Response is complete.
The `response.create` event includes inference configuration like
`instructions` and `tools`. If these are set, they will override the Session's
configuration for this Response only.
Responses can be created out-of-band of the default Conversation, meaning that they can
have arbitrary input, and it's possible to disable writing the output to the Conversation.
Only one Response can write to the default Conversation at a time, but otherwise multiple
Responses can be created in parallel. The `metadata` field is a good way to disambiguate
multiple simultaneous Responses.
Clients can set `conversation` to `none` to create a Response that does not write to the default
Conversation. Arbitrary input can be provided with the `input` field, which is an array accepting
raw Items and references to existing Items.
"""
self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "response.create", "event_id": event_id, "response": response}),
)
)
def cancel(self, *, event_id: str | Omit = omit, response_id: str | Omit = omit) -> None:
"""Send this event to cancel an in-progress response.
The server will respond
with a `response.done` event with a status of `response.status=cancelled`. If
there is no response to cancel, the server will respond with an error. It's safe
to call `response.cancel` even if no response is in progress, an error will be
returned the session will remain unaffected.
"""
self._connection.send(
cast(
RealtimeClientEventParam,
strip_not_given({"type": "response.cancel", "event_id": event_id, "response_id": response_id}),
)
)
| RealtimeResponseResource |
python | bokeh__bokeh | src/bokeh/sphinxext/_internal/bokeh_sampledata_xref.py | {
"start": 2149,
"end": 2339
} | class ____(BokehDirective):
has_content = False
required_arguments = 1
def run(self):
return [gallery_xrefs('', subfolder=self.arguments[0])]
| BokehGalleryOverviewDirective |
python | eth-brownie__brownie | brownie/network/transaction.py | {
"start": 2902,
"end": 2996
} | class ____(IntEnum):
Dropped = -2
Pending = -1
Reverted = 0
Confirmed = 1
| Status |
python | TheAlgorithms__Python | neural_network/input_data.py | {
"start": 3784,
"end": 12011
} | class ____:
"""Container class for a _DataSet (deprecated).
THIS CLASS IS DEPRECATED.
"""
@deprecated(
None,
"Please use alternatives such as official/mnist/_DataSet.py"
" from tensorflow/models.",
)
def __init__(
self,
images,
labels,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
seed=None,
):
"""Construct a _DataSet.
one_hot arg is used only if fake_data is true. `dtype` can be either
`uint8` to leave the input as `[0, 255]`, or `float32` to rescale into
`[0, 1]`. Seed arg provides for convenient deterministic testing.
Args:
images: The images
labels: The labels
fake_data: Ignore inages and labels, use fake data.
one_hot: Bool, return the labels as one hot vectors (if True) or ints (if
False).
dtype: Output image dtype. One of [uint8, float32]. `uint8` output has
range [0,255]. float32 output has range [0,1].
reshape: Bool. If True returned images are returned flattened to vectors.
seed: The random seed to use.
"""
seed1, seed2 = random_seed.get_seed(seed)
# If op level seed is not set, use whatever graph level seed is returned
self._rng = np.random.default_rng(seed1 if seed is None else seed2)
dtype = dtypes.as_dtype(dtype).base_dtype
if dtype not in (dtypes.uint8, dtypes.float32):
msg = f"Invalid image dtype {dtype!r}, expected uint8 or float32"
raise TypeError(msg)
if fake_data:
self._num_examples = 10000
self.one_hot = one_hot
else:
assert images.shape[0] == labels.shape[0], (
f"images.shape: {images.shape} labels.shape: {labels.shape}"
)
self._num_examples = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
images = images.reshape(
images.shape[0], images.shape[1] * images.shape[2]
)
if dtype == dtypes.float32:
# Convert from [0, 255] -> [0.0, 1.0].
images = images.astype(np.float32)
images = np.multiply(images, 1.0 / 255.0)
self._images = images
self._labels = labels
self._epochs_completed = 0
self._index_in_epoch = 0
@property
def images(self):
return self._images
@property
def labels(self):
return self._labels
@property
def num_examples(self):
return self._num_examples
@property
def epochs_completed(self):
return self._epochs_completed
def next_batch(self, batch_size, fake_data=False, shuffle=True):
"""Return the next `batch_size` examples from this data set."""
if fake_data:
fake_image = [1] * 784
fake_label = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(batch_size)],
[fake_label for _ in range(batch_size)],
)
start = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
perm0 = np.arange(self._num_examples)
self._rng.shuffle(perm0)
self._images = self.images[perm0]
self._labels = self.labels[perm0]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
rest_num_examples = self._num_examples - start
images_rest_part = self._images[start : self._num_examples]
labels_rest_part = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
perm = np.arange(self._num_examples)
self._rng.shuffle(perm)
self._images = self.images[perm]
self._labels = self.labels[perm]
# Start next epoch
start = 0
self._index_in_epoch = batch_size - rest_num_examples
end = self._index_in_epoch
images_new_part = self._images[start:end]
labels_new_part = self._labels[start:end]
return (
np.concatenate((images_rest_part, images_new_part), axis=0),
np.concatenate((labels_rest_part, labels_new_part), axis=0),
)
else:
self._index_in_epoch += batch_size
end = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(None, "Please write your own downloading logic.")
def _maybe_download(filename, work_directory, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
work_directory: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not gfile.Exists(work_directory):
gfile.MakeDirs(work_directory)
filepath = os.path.join(work_directory, filename)
if not gfile.Exists(filepath):
urllib.request.urlretrieve(source_url, filepath) # noqa: S310
with gfile.GFile(filepath) as f:
size = f.size()
print("Successfully downloaded", filename, size, "bytes.")
return filepath
@deprecated(None, "Please use alternatives such as: tensorflow_datasets.load('mnist')")
def read_data_sets(
train_dir,
fake_data=False,
one_hot=False,
dtype=dtypes.float32,
reshape=True,
validation_size=5000,
seed=None,
source_url=DEFAULT_SOURCE_URL,
):
if fake_data:
def fake():
return _DataSet(
[], [], fake_data=True, one_hot=one_hot, dtype=dtype, seed=seed
)
train = fake()
validation = fake()
test = fake()
return _Datasets(train=train, validation=validation, test=test)
if not source_url: # empty string check
source_url = DEFAULT_SOURCE_URL
train_images_file = "train-images-idx3-ubyte.gz"
train_labels_file = "train-labels-idx1-ubyte.gz"
test_images_file = "t10k-images-idx3-ubyte.gz"
test_labels_file = "t10k-labels-idx1-ubyte.gz"
local_file = _maybe_download(
train_images_file, train_dir, source_url + train_images_file
)
with gfile.Open(local_file, "rb") as f:
train_images = _extract_images(f)
local_file = _maybe_download(
train_labels_file, train_dir, source_url + train_labels_file
)
with gfile.Open(local_file, "rb") as f:
train_labels = _extract_labels(f, one_hot=one_hot)
local_file = _maybe_download(
test_images_file, train_dir, source_url + test_images_file
)
with gfile.Open(local_file, "rb") as f:
test_images = _extract_images(f)
local_file = _maybe_download(
test_labels_file, train_dir, source_url + test_labels_file
)
with gfile.Open(local_file, "rb") as f:
test_labels = _extract_labels(f, one_hot=one_hot)
if not 0 <= validation_size <= len(train_images):
msg = (
"Validation size should be between 0 and "
f"{len(train_images)}. Received: {validation_size}."
)
raise ValueError(msg)
validation_images = train_images[:validation_size]
validation_labels = train_labels[:validation_size]
train_images = train_images[validation_size:]
train_labels = train_labels[validation_size:]
options = {"dtype": dtype, "reshape": reshape, "seed": seed}
train = _DataSet(train_images, train_labels, **options)
validation = _DataSet(validation_images, validation_labels, **options)
test = _DataSet(test_images, test_labels, **options)
return _Datasets(train=train, validation=validation, test=test)
| _DataSet |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_eks.py | {
"start": 3715,
"end": 7301
} | class ____(TestEksTrigger):
def setup_method(self):
super().setup_method()
self.delete_any_nodegroups_patcher = patch.object(EksDeleteClusterTrigger, "delete_any_nodegroups")
self.mock_delete_any_nodegroups = self.delete_any_nodegroups_patcher.start()
self.delete_any_fargate_profiles_patcher = patch.object(
EksDeleteClusterTrigger, "delete_any_fargate_profiles"
)
self.mock_delete_any_fargate_profiles = self.delete_any_fargate_profiles_patcher.start()
self.trigger = EksDeleteClusterTrigger(
cluster_name=CLUSTER_NAME,
waiter_delay=WAITER_DELAY,
waiter_max_attempts=WAITER_MAX_ATTEMPTS,
aws_conn_id=AWS_CONN_ID,
region_name=REGION_NAME,
force_delete_compute=False,
)
self.trigger.log.info = Mock()
def teardown_method(self):
super().teardown_method()
self.delete_any_nodegroups_patcher.stop()
self.delete_any_fargate_profiles_patcher.stop()
@pytest.mark.asyncio
async def test_run_deletes_nodegroups_and_fargate_profiles(self):
self.trigger.force_delete_compute = True
generator = self.trigger.run()
response = await generator.asend(None)
self.mock_delete_any_nodegroups.assert_called_once_with(client=self.mock_client)
self.mock_delete_any_fargate_profiles.assert_called_once_with(client=self.mock_client)
assert response == TriggerEvent({"status": "deleted"})
@pytest.mark.asyncio
async def test_when_resource_is_not_found_it_should_return_status_deleted(self):
delete_cluster_mock = AsyncMock(
side_effect=ClientError({"Error": {"Code": "ResourceNotFoundException"}}, "delete_eks_cluster")
)
self.mock_client.delete_cluster = delete_cluster_mock
generator = self.trigger.run()
response = await generator.asend(None)
delete_cluster_mock.assert_called_once_with(name=CLUSTER_NAME)
assert response == TriggerEvent({"status": "deleted"})
@pytest.mark.asyncio
async def test_run_raises_client_error(self):
response = {"Error": {"Code": "OtherException"}}
operation_name = "delete_eks_cluster"
delete_cluster_mock = AsyncMock(side_effect=ClientError(response, "delete_eks_cluster"))
self.mock_client.delete_cluster = delete_cluster_mock
generator = self.trigger.run()
with pytest.raises(ClientError) as exception:
await generator.asend(None)
delete_cluster_mock.assert_called_once_with(name=CLUSTER_NAME)
assert exception._excinfo[1].response == response
assert exception._excinfo[1].operation_name == operation_name
@pytest.mark.asyncio
async def test_run_parameterizes_async_wait_correctly(self):
self.mock_client.get_waiter = Mock(return_value="waiter")
generator = self.trigger.run()
await generator.asend(None)
self.mock_delete_any_fargate_profiles.assert_not_called()
self.mock_delete_any_nodegroups.assert_not_called()
self.mock_client.get_waiter.assert_called_once_with("cluster_deleted")
self.mock_async_wait.assert_called_once_with(
waiter="waiter",
waiter_delay=WAITER_DELAY,
waiter_max_attempts=WAITER_MAX_ATTEMPTS,
args={"name": CLUSTER_NAME},
failure_message="Error deleting cluster",
status_message="Status of cluster is",
status_args=["cluster.status"],
)
| TestEksDeleteClusterTriggerRun |
python | django__django | django/db/migrations/serializer.py | {
"start": 10901,
"end": 11166
} | class ____(BaseSequenceSerializer):
def _format(self):
# When len(value)==0, the empty tuple should be serialized as "()",
# not "(,)" because (,) is invalid Python syntax.
return "(%s)" if len(self.value) != 1 else "(%s,)"
| TupleSerializer |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0101_add_path_prefixes.py | {
"start": 149,
"end": 2390
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0100_project_readthedocs_yaml_path"),
]
operations = [
migrations.AddField(
model_name="historicalproject",
name="custom_prefix",
field=models.CharField(
blank=True,
default=None,
help_text="A custom path prefix used when serving documentation from this project. By default we serve documentation at the root (/) of a domain.",
max_length=255,
null=True,
verbose_name="Custom path prefix",
),
),
migrations.AddField(
model_name="historicalproject",
name="custom_subproject_prefix",
field=models.CharField(
blank=True,
default=None,
help_text="A custom path prefix used when evaluating the root of a subproject. By default we serve documentation from subprojects under the `/projects/` prefix.",
max_length=255,
null=True,
verbose_name="Custom subproject path prefix",
),
),
migrations.AddField(
model_name="project",
name="custom_prefix",
field=models.CharField(
blank=True,
default=None,
help_text="A custom path prefix used when serving documentation from this project. By default we serve documentation at the root (/) of a domain.",
max_length=255,
null=True,
verbose_name="Custom path prefix",
),
),
migrations.AddField(
model_name="project",
name="custom_subproject_prefix",
field=models.CharField(
blank=True,
default=None,
help_text="A custom path prefix used when evaluating the root of a subproject. By default we serve documentation from subprojects under the `/projects/` prefix.",
max_length=255,
null=True,
verbose_name="Custom subproject path prefix",
),
),
]
| Migration |
python | Netflix__metaflow | metaflow/pylint_wrapper.py | {
"start": 192,
"end": 268
} | class ____(MetaflowException):
headline = "Pylint is not happy"
| PyLintWarn |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.