| language (string, 1 distinct value) | repo (string, 346 distinct values) | path (string, 6-201 chars) | class_span (dict) | source (string, 21-2.38M chars) | target (string, 1-96 chars) |
|---|---|---|---|---|---|
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 73358,
"end": 73596
} | class ____(CausalLMOutputWithPast):
r"""
generation_steps (`int`, *optional*):
Current generation step of code predictor model.
"""
generation_steps: Optional[int] = None
| Qwen3OmniMoeTalkerCodePredictorOutputWithPast |
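A note on the row format: each row pairs a Python class whose name has been replaced by the `____` placeholder (the `source` column) with that name (the `target` column); `class_span` holds the character offsets of the class in the original file. A minimal sketch of turning a row back into the unmasked snippet, with an illustrative stand-in for however the rows are actually loaded:

```python
# illustrative row literal; real rows come from the dataset itself
row = {
    "source": "class ____(CausalLMOutputWithPast):\n    generation_steps: Optional[int] = None\n",
    "target": "Qwen3OmniMoeTalkerCodePredictorOutputWithPast",
}

# restore the masked class name (the first occurrence is the definition)
unmasked = row["source"].replace("____", row["target"], 1)
print(unmasked.splitlines()[0])
# class Qwen3OmniMoeTalkerCodePredictorOutputWithPast(CausalLMOutputWithPast):
```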
python | pyqtgraph__pyqtgraph | pyqtgraph/parametertree/SystemSolver.py | {
"start": 124,
"end": 17803
} | class ____(object):
"""
This abstract class is used to formalize and manage user interaction with a
complex system of equations (related to "constraint satisfaction problems").
It is often the case that devices must be controlled
through a large number of free variables, and interactions between these
variables make the system difficult to manage and conceptualize as a user
interface. This class does _not_ attempt to numerically solve the system
of equations. Rather, it provides a framework for subdividing the system
into manageable pieces and specifying closed-form solutions to these small
pieces.
For an example, see the simple Camera class below.
Theory of operation: Conceptualize the system as 1) a set of variables
whose values may be either user-specified or automatically generated, and
2) a set of functions that define *how* each variable should be generated.
When a variable is accessed (as an instance attribute), the solver first
checks to see if it already has a value (either user-supplied, or cached
from a previous calculation). If it does not, then the solver calls a
method on itself (the method must be named `_variableName`) that will
either return the calculated value (which usually involves accessing
other variables in the system), or raise RuntimeError if it is unable to
calculate the value (usually because the user has not provided sufficient
input to fully constrain the system).
Each method that calculates a variable value may include multiple
try/except blocks, so that if one method generates a RuntimeError, it may
fall back on others.
In this way, the system may be solved by recursively searching the tree of
possible relationships between variables. This allows the user flexibility
in deciding which variables are the most important to specify, while
avoiding the apparent combinatorial explosion of calculation pathways
that must be considered by the developer.
Solved values are cached for efficiency, and automatically cleared when
a state change invalidates the cache. The rules for this are simple: any
time a value is set, it invalidates the cache *unless* the previous value
was None (which indicates that no other variable has yet requested that
value). More complex cache management may be defined in subclasses.
Subclasses must define:
1) The *defaultState* class attribute: This is a dict containing a
description of the variables in the system--their default values,
data types, and the ways they can be constrained. The format is::
{ name: [value, type, constraint, allowed_constraints], ...}
Where:
* *value* is the default value. May be None if it has not been specified
yet.
* *type* may be float, int, bool, np.ndarray, ...
* *constraint* may be None, single value, or (min, max)
* None indicates that the value is not constrained--it may be
automatically generated if the value is requested.
* *allowed_constraints* is a string composed of (n)one, (f)ixed, and (r)ange.
Note: do not put mutable objects inside defaultState!
2) For each variable that may be automatically determined, a method must
be defined with the name `_variableName`. This method may either return
the calculated value, or raise RuntimeError if it is unable to determine it.
"""
defaultState = OrderedDict()
def __init__(self):
self.__dict__['_vars'] = OrderedDict()
self.__dict__['_currentGets'] = set()
self.reset()
def copy(self):
sys = type(self)()
sys.__dict__['_vars'] = copy.deepcopy(self.__dict__['_vars'])
sys.__dict__['_currentGets'] = copy.deepcopy(self.__dict__['_currentGets'])
return sys
def reset(self):
"""
Reset all variables in the solver to their default state.
"""
self._currentGets.clear()
for k in self.defaultState:
self._vars[k] = self.defaultState[k][:]
def __getattr__(self, name):
if name in self._vars:
return self.get(name)
raise AttributeError(name)
def __setattr__(self, name, value):
"""
Set the value of a state variable.
If None is given for the value, then the constraint will also be set to None.
If a tuple is given for a scalar variable, then the tuple is used as a range constraint instead of a value.
Otherwise, the constraint is set to 'fixed'.
"""
# First check this is a valid attribute
if name in self._vars:
if value is None:
self.set(name, value, None)
elif isinstance(value, tuple) and self._vars[name][1] is not np.ndarray:
self.set(name, None, value)
else:
self.set(name, value, 'fixed')
else:
# also allow setting any other pre-existing attribute
if hasattr(self, name):
object.__setattr__(self, name, value)
else:
raise AttributeError(name)
def get(self, name):
"""
Return the value for parameter *name*.
If the value has not been specified, then attempt to compute it from
other interacting parameters.
If no value can be determined, then raise RuntimeError.
"""
if name in self._currentGets:
raise RuntimeError("Cyclic dependency while calculating '%s'." % name)
self._currentGets.add(name)
try:
v = self._vars[name][0]
if v is None:
cfunc = getattr(self, '_' + name, None)
if cfunc is None:
v = None
else:
v = cfunc()
if v is None:
raise RuntimeError("Parameter '%s' is not specified." % name)
v = self.set(name, v)
finally:
self._currentGets.remove(name)
return v
def set(self, name, value=None, constraint=True):
"""
Set a variable *name* to *value*. The actual set value is returned (in
some cases, the value may be cast into another type).
If *value* is None, then the value is left to be determined in the
future. At any time, the value may be re-assigned arbitrarily unless
a constraint is given.
If *constraint* is True (the default), then supplying a value that
violates a previously specified constraint will raise an exception.
If *constraint* is 'fixed', then the value is set (if provided) and
the variable will not be updated automatically in the future.
If *constraint* is a tuple, then the value is constrained to be within the
given (min, max). Either constraint may be None to disable
it. In some cases, a constraint cannot be satisfied automatically,
and the user will be forced to resolve the constraint manually.
If *constraint* is None, then any constraints are removed for the variable.
"""
var = self._vars[name]
if constraint is None:
if 'n' not in var[3]:
raise TypeError("Empty constraints not allowed for '%s'" % name)
var[2] = constraint
elif constraint == 'fixed':
if 'f' not in var[3]:
raise TypeError("Fixed constraints not allowed for '%s'" % name)
# This is nice, but not reliable because sometimes there is 1 DOF but we set 2
# values simultaneously.
# if var[2] is None:
# try:
# self.get(name)
# # has already been computed by the system; adding a fixed constraint
# # would overspecify the system.
# raise ValueError("Cannot fix parameter '%s'; system would become overconstrained." % name)
# except RuntimeError:
# pass
var[2] = constraint
elif isinstance(constraint, tuple):
if 'r' not in var[3]:
raise TypeError("Range constraints not allowed for '%s'" % name)
assert len(constraint) == 2
var[2] = constraint
elif constraint is not True:
raise TypeError("constraint must be None, True, 'fixed', or tuple. (got %s)" % constraint)
# type checking / massaging
if var[1] is np.ndarray and value is not None:
value = np.array(value, dtype=float)
elif var[1] in (int, float, tuple) and value is not None:
value = var[1](value)
# constraint checks
if constraint is True and not self.check_constraint(name, value):
raise ValueError("Setting %s = %s violates constraint %s" % (name, value, var[2]))
# invalidate other dependent values
if var[0] is not None or value is None:
# todo: we can make this more clever..(and might need to)
# we just know that a value of None cannot have dependencies
# (because if anyone else had asked for this value, it wouldn't be
# None anymore)
self.resetUnfixed()
var[0] = value
return value
def check_constraint(self, name, value):
c = self._vars[name][2]
if c is None or value is None:
return True
if isinstance(c, tuple):
return ((c[0] is None or c[0] <= value) and
(c[1] is None or c[1] >= value))
else:
return value == c
def saveState(self):
"""
Return a serializable description of the solver's current state.
"""
state = OrderedDict()
for name, var in self._vars.items():
state[name] = (var[0], var[2])
return state
def restoreState(self, state):
"""
Restore the state of all values and constraints in the solver.
"""
self.reset()
for name, var in state.items():
self.set(name, var[0], var[1])
def resetUnfixed(self):
"""
For any variable that does not have a fixed value, reset
its value to None.
"""
for var in self._vars.values():
if var[2] != 'fixed':
var[0] = None
def solve(self):
for k in self._vars:
getattr(self, k)
def checkOverconstraint(self):
"""Check whether the system is overconstrained. If so, return the name of
the first overconstrained parameter.
Overconstraints occur when any fixed parameter can be successfully computed by the system.
(Ideally, all parameters are either fixed by the user or constrained by the
system, but never both).
"""
for k,v in self._vars.items():
if v[2] == 'fixed' and 'n' in v[3]:
oldval = v[:]
self.set(k, None, None)
try:
self.get(k)
return k
except RuntimeError:
pass
finally:
self._vars[k] = oldval
return False
def __repr__(self):
state = OrderedDict()
for name, var in self._vars.items():
if var[2] == 'fixed':
state[name] = var[0]
state = ', '.join(["%s=%s" % (n, v) for n,v in state.items()])
return "<%s %s>" % (self.__class__.__name__, state)
if __name__ == '__main__':
class Camera(SystemSolver):
"""
Consider a simple SLR camera. The variables we will consider that
affect the camera's behavior while acquiring a photo are aperture, shutter speed,
ISO, and flash (of course there are many more, but let's keep the example simple).
In rare cases, the user wants to manually specify each of these variables and
no more work needs to be done to take the photo. More often, the user wants to
specify more interesting constraints like depth of field, overall exposure,
or maximum allowed ISO value.
If we add a simple light meter measurement into this system and an 'exposure'
variable that indicates the desired exposure (0 is "perfect", -1 is one stop
darker, etc), then the system of equations governing the camera behavior would
have the following variables:
aperture, shutter, iso, flash, exposure, light meter
The first four variables are the "outputs" of the system (they directly drive
the camera), the last is a constant (the camera itself cannot affect the
reading on the light meter), and 'exposure' specifies a desired relationship
between other variables in the system.
So the question is: how can I formalize a system like this as a user interface?
Typical cameras have a fairly limited approach: provide the user with a list
of modes, each of which defines a particular set of constraints. For example:
manual: user provides aperture, shutter, iso, and flash
aperture priority: user provides aperture and exposure, camera selects
iso, shutter, and flash automatically
shutter priority: user provides shutter and exposure, camera selects
iso, aperture, and flash
program: user specifies exposure, camera selects all other variables
automatically
action: camera selects all variables while attempting to maximize
shutter speed
portrait: camera selects all variables while attempting to minimize
aperture
A more general approach might allow the user to provide more explicit
constraints on each variable (for example: I want a shutter speed of 1/30 or
slower, an ISO no greater than 400, an exposure between -1 and 1, and the
smallest aperture possible given all other constraints) and have the camera
solve the system of equations, with a warning if no solution is found. This
is exactly what we will implement in this example class.
"""
defaultState = OrderedDict([
# Field stop aperture
('aperture', [None, float, None, 'nf']),
# Duration that shutter is held open.
('shutter', [None, float, None, 'nf']),
# ISO (sensitivity) value. 100, 200, 400, 800, 1600..
('iso', [None, int, None, 'nf']),
# Flash is a value indicating the brightness of the flash. A table
# is used to decide on "balanced" settings for each flash level:
# 0: no flash
# 1: s=1/60, a=2.0, iso=100
# 2: s=1/60, a=4.0, iso=100 ..and so on..
('flash', [None, float, None, 'nf']),
# exposure is a value indicating how many stops brighter (+1) or
# darker (-1) the photographer would like the photo to appear from
# the 'balanced' settings indicated by the light meter (see below).
('exposure', [None, float, None, 'f']),
# Let's define this as an external light meter (not affected by
# aperture) with logarithmic output. We arbitrarily choose the
# following settings as "well balanced" for each light meter value:
# -1: s=1/60, a=2.0, iso=100
# 0: s=1/60, a=4.0, iso=100
# 1: s=1/120, a=4.0, iso=100 ..and so on..
# Note that the only allowed constraint mode is (f)ixed, since the
# camera never _computes_ the light meter value, it only reads it.
('lightMeter', [None, float, None, 'f']),
# Indicates the camera's final decision on how it thinks the photo will
# look, given the chosen settings. This value is _only_ determined
# automatically.
('balance', [None, float, None, 'n']),
])
def _aperture(self):
"""
Determine aperture automatically under a variety of conditions.
"""
iso = self.iso
exp = self.exposure
light = self.lightMeter
try:
# shutter-priority mode
sh = self.shutter # this raises RuntimeError if shutter has not
# been specified
ap = 4.0 * (sh / (1./60.)) * (iso / 100.) * (2 ** exp) * (2 ** light)
ap = fn.clip_scalar(ap, 2.0, 16.0)
except RuntimeError:
# program mode; we can select a suitable shutter
# value at the same time.
sh = (1./60.)
raise
return ap
def _balance(self):
iso = self.iso
light = self.lightMeter
sh = self.shutter
ap = self.aperture
bal = (4.0 / ap) * (sh / (1./60.)) * (iso / 100.) * (2 ** light)
return log2(bal)
camera = Camera()
camera.iso = 100
camera.exposure = 0
camera.lightMeter = 2
camera.shutter = 1./60.
camera.flash = 0
camera.solve()
print(camera.saveState())
| SystemSolver |
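As a complement to the Camera example embedded above, here is a deliberately tiny sketch of the same pattern (assuming `SystemSolver` is importable from `pyqtgraph.parametertree.SystemSolver`, the path given for this row): two user-settable variables and one derived one, resolved through a `_variableName` method.

```python
from collections import OrderedDict
from pyqtgraph.parametertree.SystemSolver import SystemSolver

class Rectangle(SystemSolver):
    # [default value, type, constraint, allowed constraint modes]
    defaultState = OrderedDict([
        ('width',  [None, float, None, 'nf']),
        ('height', [None, float, None, 'nf']),
        ('area',   [None, float, None, 'n']),   # only ever computed, never fixed
    ])

    def _area(self):
        # raises RuntimeError (via get) if width or height is still unknown
        return self.width * self.height

rect = Rectangle()
rect.width = 3.0     # assigning a plain value applies the 'fixed' constraint
rect.height = 2.0
print(rect.area)     # -> 6.0, computed on demand and cached
```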
python | spyder-ide__spyder | external-deps/spyder-remote-services/spyder_remote_services/services/files/handlers.py | {
"start": 521,
"end": 1629
} | class ____(
WebSocketMixin,
FileWebSocketHandler,
JupyterHandler,
):
auth_resource = "spyder-services"
def get_path_argument(self, name: str) -> str:
"""Get the path argument from the request.
Args
----
name (str): Name of the argument to get.
Returns
-------
str: The path argument.
Raises
------
HTTPError: If the argument is missing or invalid.
"""
path = self.get_argument(name)
if not path:
raise web.HTTPError(
HTTPStatus.BAD_REQUEST,
reason=f"Missing {name} argument",
)
match = re.match(_path_regex, path)
if not match:
raise web.HTTPError(
HTTPStatus.BAD_REQUEST,
reason=f"Missing {name} argument",
)
return match.group("path")
@ws_authenticated
async def get(self, *args, **kwargs):
"""Handle the initial websocket upgrade GET request."""
await super().get(*args, **kwargs)
| ReadWriteWebsocketHandler |
python | sqlalchemy__sqlalchemy | test/orm/test_session.py | {
"start": 56560,
"end": 58220
} | class ____(_fixtures.FixtureTest):
run_inserts = None
# see test/aaa_profiling/test_memusage.py for more
# WeakIdentityMap tests
def test_auto_detach_on_gc_session(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
gc_collect()
sess = Session(testing.db)
u1 = User(name="u1")
sess.add(u1)
sess.commit()
# can't add u1 to Session,
# already belongs to sess
s2 = Session(testing.db)
assert_raises_message(
exc.InvalidRequestError,
r".*is already attached to session",
s2.add,
u1,
)
# garbage collect sess
del sess
gc_collect()
# s2 lets it in now despite u1 having
# session_key
s2.add(u1)
assert u1 in s2
def test_fast_discard_race(self):
# test issue #4068
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
gc_collect()
sess = fixture_session()
u1 = User(name="u1")
sess.add(u1)
sess.commit()
u1_state = u1._sa_instance_state
ref = u1_state.obj
u1_state.obj = lambda: None
u2 = sess.query(User).first()
u1_state._cleanup(ref)
u3 = sess.query(User).first()
is_(u2, u3)
u2_state = u2._sa_instance_state
ref = u2_state.obj
u2_state.obj = lambda: None
u2_state._cleanup(ref)
assert not sess.identity_map.contains_state(u2._sa_instance_state)
| WeakIdentityMapTest |
python | apache__airflow | providers/postgres/src/airflow/providers/postgres/hooks/postgres.py | {
"start": 3333,
"end": 28800
} | class ____(DbApiHook):
"""
Interact with Postgres.
You can specify ssl parameters in the extra field of your connection
as ``{"sslmode": "require", "sslcert": "/path/to/cert.pem", etc}``.
Also, you can choose cursor as ``{"cursor": "dictcursor"}``. Refer to the
psycopg2.extras or psycopg.rows for more details.
Note: For Redshift, use keepalives_idle in the extra connection parameters
and set it to less than 300 seconds.
Note: For AWS IAM authentication, use iam in the extra connection parameters
and set it to true. Leave the password field empty. This will use the
"aws_default" connection to get the temporary token unless you override
in extras.
extras example: ``{"iam":true, "aws_conn_id":"my_aws_conn"}``
For Redshift, also use redshift in the extra connection parameters and
set it to true. The cluster-identifier is extracted from the beginning of
the host field, so is optional. It can however be overridden in the extra field.
extras example: ``{"iam":true, "redshift":true, "cluster-identifier": "my_cluster_id"}``
For Redshift Serverless, use redshift-serverless in the extra connection parameters and
set it to true. The workgroup-name is extracted from the beginning of
the host field, so is optional. It can however be overridden in the extra field.
extras example: ``{"iam":true, "redshift-serverless":true, "workgroup-name": "my_serverless_workgroup"}``
:param postgres_conn_id: The :ref:`postgres conn id <howto/connection:postgres>`
reference to a specific postgres database.
:param options: Optional. Specifies command-line options to send to the server
at connection start. For example, setting this to ``-c search_path=myschema``
sets the session's value of the ``search_path`` to ``myschema``.
:param enable_log_db_messages: Optional. If enabled logs database messages sent to the client
during the session. To avoid a memory leak psycopg2 only saves the last 50 messages.
For details, see: `PostgreSQL logging configuration parameters
<https://www.postgresql.org/docs/current/runtime-config-logging.html>`__
"""
conn_name_attr = "postgres_conn_id"
default_conn_name = "postgres_default"
default_client_log_level = "warning"
default_connector_version: int = 2
conn_type = "postgres"
hook_name = "Postgres"
supports_autocommit = True
supports_executemany = True
ignored_extra_options = {
"iam",
"redshift",
"redshift-serverless",
"cursor",
"cluster-identifier",
"workgroup-name",
"aws_conn_id",
"sqlalchemy_scheme",
"sqlalchemy_query",
"azure_conn_id",
}
default_azure_oauth_scope = "https://ossrdbms-aad.database.windows.net/.default"
def __init__(
self, *args, options: str | None = None, enable_log_db_messages: bool = False, **kwargs
) -> None:
super().__init__(*args, **kwargs)
self.conn: CompatConnection | None = None
self.database: str | None = kwargs.pop("database", None)
self.options = options
self.enable_log_db_messages = enable_log_db_messages
@staticmethod
def __cast_nullable(value, dst_type: type) -> Any:
return dst_type(value) if value is not None else None
@property
def sqlalchemy_url(self) -> URL:
conn = self.connection
query = conn.extra_dejson.get("sqlalchemy_query", {})
if not isinstance(query, dict):
raise AirflowException("The parameter 'sqlalchemy_query' must be of type dict!")
if conn.extra_dejson.get("iam", False):
conn.login, conn.password, conn.port = self.get_iam_token(conn)
return URL.create(
drivername="postgresql+psycopg" if USE_PSYCOPG3 else "postgresql",
username=self.__cast_nullable(conn.login, str),
password=self.__cast_nullable(conn.password, str),
host=self.__cast_nullable(conn.host, str),
port=self.__cast_nullable(conn.port, int),
database=self.__cast_nullable(self.database, str) or self.__cast_nullable(conn.schema, str),
query=query,
)
@property
def dialect_name(self) -> str:
return "postgresql"
@property
def dialect(self) -> Dialect:
return PostgresDialect(self)
def _notice_handler(self, notice: Diagnostic):
"""Handle notices from the database and log them."""
self.log.info(str(notice.message_primary).strip())
def _get_cursor(self, raw_cursor: str) -> CursorType:
_cursor = raw_cursor.lower()
if USE_PSYCOPG3:
if _cursor == "dictcursor":
return dict_row
if _cursor == "namedtuplecursor":
return namedtuple_row
if _cursor == "realdictcursor":
raise AirflowException(
"realdictcursor is not supported with psycopg3. Use dictcursor instead."
)
valid_cursors = "dictcursor, namedtuplecursor"
raise ValueError(f"Invalid cursor passed {_cursor}. Valid options are: {valid_cursors}")
cursor_types = {
"dictcursor": psycopg2.extras.DictCursor,
"realdictcursor": psycopg2.extras.RealDictCursor,
"namedtuplecursor": psycopg2.extras.NamedTupleCursor,
}
if _cursor in cursor_types:
return cursor_types[_cursor]
valid_cursors = ", ".join(cursor_types.keys())
raise ValueError(f"Invalid cursor passed {_cursor}. Valid options are: {valid_cursors}")
def _generate_cursor_name(self):
"""Generate a unique name for server-side cursor."""
import uuid
return f"airflow_cursor_{uuid.uuid4().hex}"
def get_conn(self) -> CompatConnection:
"""Establish a connection to a postgres database."""
conn = deepcopy(self.connection)
if conn.extra_dejson.get("iam", False):
login, password, port = self.get_iam_token(conn)
conn.login = cast("Any", login)
conn.password = cast("Any", password)
conn.port = cast("Any", port)
conn_args: dict[str, Any] = {
"host": conn.host,
"user": conn.login,
"password": conn.password,
"dbname": self.database or conn.schema,
"port": conn.port,
}
if self.options:
conn_args["options"] = self.options
# Add extra connection arguments
for arg_name, arg_val in conn.extra_dejson.items():
if arg_name not in self.ignored_extra_options:
conn_args[arg_name] = arg_val
if USE_PSYCOPG3:
from psycopg.connection import Connection as pgConnection
raw_cursor = conn.extra_dejson.get("cursor")
if raw_cursor:
conn_args["row_factory"] = self._get_cursor(raw_cursor)
# Use Any type for the connection args to avoid type conflicts
connection = pgConnection.connect(**cast("Any", conn_args))
self.conn = cast("CompatConnection", connection)
# Register JSON handlers for both json and jsonb types
# This ensures JSON data is properly decoded from bytes to Python objects
register_default_adapters(connection)
# Add the notice handler AFTER the connection is established
if self.enable_log_db_messages and hasattr(self.conn, "add_notice_handler"):
self.conn.add_notice_handler(self._notice_handler)
else: # psycopg2
raw_cursor = conn.extra_dejson.get("cursor", False)
if raw_cursor:
conn_args["cursor_factory"] = self._get_cursor(raw_cursor)
self.conn = cast("CompatConnection", psycopg2.connect(**conn_args))
return self.conn
@overload
def get_df(
self,
sql: str | list[str],
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
df_type: Literal["pandas"] = "pandas",
**kwargs: Any,
) -> PandasDataFrame: ...
@overload
def get_df(
self,
sql: str | list[str],
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
df_type: Literal["polars"] = ...,
**kwargs: Any,
) -> PolarsDataFrame: ...
def get_df(
self,
sql: str | list[str],
parameters: list | tuple | Mapping[str, Any] | None = None,
*,
df_type: Literal["pandas", "polars"] = "pandas",
**kwargs: Any,
) -> PandasDataFrame | PolarsDataFrame:
"""
Execute the sql and returns a dataframe.
:param sql: the sql statement to be executed (str) or a list of sql statements to execute
:param parameters: The parameters to render the SQL query with.
:param df_type: Type of dataframe to return, either "pandas" or "polars"
:param kwargs: (optional) passed into `pandas.io.sql.read_sql` or `polars.read_database` method
:return: A pandas or polars DataFrame containing the query results.
"""
if df_type == "pandas":
try:
from pandas.io import sql as psql
except ImportError:
raise AirflowOptionalProviderFeatureException(
"pandas library not installed, run: pip install "
"'apache-airflow-providers-common-sql[pandas]'."
)
engine = self.get_sqlalchemy_engine()
with engine.connect() as conn:
if isinstance(sql, list):
sql = "; ".join(sql) # Or handle multiple queries differently
return cast("PandasDataFrame", psql.read_sql(sql, con=conn, params=parameters, **kwargs))
elif df_type == "polars":
return self._get_polars_df(sql, parameters, **kwargs)
else:
raise ValueError(f"Unsupported df_type: {df_type}")
def copy_expert(self, sql: str, filename: str) -> None:
"""
Execute SQL using psycopg's ``copy_expert`` method.
Necessary to execute COPY command without access to a superuser.
Note: if this method is called with a "COPY FROM" statement and
the specified input file does not exist, it creates an empty
file and no data is loaded, but the operation succeeds.
So if users want to be aware when the input file does not exist,
they have to check its existence by themselves.
"""
self.log.info("Running copy expert: %s, filename: %s", sql, filename)
if USE_PSYCOPG3:
if " from stdin" in sql.lower():
# Handle COPY FROM STDIN: read from the file and write to the database.
if not os.path.isfile(filename):
with open(filename, "w"):
pass # Create an empty file to prevent errors.
with open(filename, "rb") as file, self.get_conn() as conn, conn.cursor() as cur:
with cur.copy(sql) as copy:
while data := file.read(8192):
copy.write(data)
conn.commit()
else:
# Handle COPY TO STDOUT: read from the database and write to the file.
with open(filename, "wb") as file, self.get_conn() as conn, conn.cursor() as cur:
with cur.copy(sql) as copy:
for data in copy:
file.write(data)
conn.commit()
else:
if not os.path.isfile(filename):
with open(filename, "w"):
pass
with (
open(filename, "r+") as file,
closing(self.get_conn()) as conn,
closing(conn.cursor()) as cur,
):
cur.copy_expert(sql, file)
file.truncate(file.tell())
conn.commit()
def get_uri(self) -> str:
"""
Extract the URI from the connection.
:return: the extracted URI in Sqlalchemy URI format.
"""
return self.sqlalchemy_url.render_as_string(hide_password=False)
def bulk_load(self, table: str, tmp_file: str) -> None:
"""Load a tab-delimited file into a database table."""
self.copy_expert(f"COPY {table} FROM STDIN", tmp_file)
def bulk_dump(self, table: str, tmp_file: str) -> None:
"""Dump a database table into a tab-delimited file."""
self.copy_expert(f"COPY {table} TO STDOUT", tmp_file)
@staticmethod
def _serialize_cell_ppg2(cell: object, conn: CompatConnection | None = None) -> Any:
"""
Serialize a cell using psycopg2.
Psycopg2 adapts all arguments to the ``execute()`` method internally,
hence we return the cell without any conversion.
See https://www.psycopg.org/docs/extensions.html#sql-adaptation-protocol-objects
for more information.
To perform custom type adaptation please use register_adapter function
https://www.psycopg.org/docs/extensions.html#psycopg2.extensions.register_adapter.
:param cell: The cell to insert into the table
:param conn: The database connection
:return: The cell
"""
return cell
@staticmethod
def _serialize_cell_ppg3(cell: object, conn: CompatConnection | None = None) -> Any:
"""Serialize a cell using psycopg3."""
if isinstance(cell, (dict, list)):
try:
from psycopg.types.json import Json
return Json(cell)
except ImportError:
return cell
return cell
@staticmethod
def _serialize_cell(cell: object, conn: Any | None = None) -> Any:
if USE_PSYCOPG3:
return PostgresHook._serialize_cell_ppg3(cell, conn)
return PostgresHook._serialize_cell_ppg2(cell, conn)
def get_iam_token(self, conn: Connection) -> tuple[str, str, int]:
"""Get the IAM token from different identity providers."""
if conn.extra_dejson.get("azure_conn_id"):
return self.get_azure_iam_token(conn)
return self.get_aws_iam_token(conn)
def get_aws_iam_token(self, conn: Connection) -> tuple[str, str, int]:
"""
Get the AWS IAM token.
This uses AWSHook to retrieve a temporary password to connect to
Postgres or Redshift. Port is required. If none is provided, the default
5432 is used.
"""
try:
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
except ImportError:
from airflow.exceptions import AirflowOptionalProviderFeatureException
raise AirflowOptionalProviderFeatureException(
"apache-airflow-providers-amazon not installed, run: "
"pip install 'apache-airflow-providers-postgres[amazon]'."
)
aws_conn_id = conn.extra_dejson.get("aws_conn_id", "aws_default")
login = conn.login
if conn.extra_dejson.get("redshift", False):
port = conn.port or 5439
# Pull the cluster-identifier from the beginning of the Redshift URL
# ex. my-cluster.ccdre4hpd39h.us-east-1.redshift.amazonaws.com returns my-cluster
cluster_identifier = conn.extra_dejson.get(
"cluster-identifier", cast("str", conn.host).split(".")[0]
)
redshift_client = AwsBaseHook(aws_conn_id=aws_conn_id, client_type="redshift").conn
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/redshift/client/get_cluster_credentials.html#Redshift.Client.get_cluster_credentials
cluster_creds = redshift_client.get_cluster_credentials(
DbUser=login,
DbName=self.database or conn.schema,
ClusterIdentifier=cluster_identifier,
AutoCreate=False,
)
token = cluster_creds["DbPassword"]
login = cluster_creds["DbUser"]
elif conn.extra_dejson.get("redshift-serverless", False):
port = conn.port or 5439
# Pull the workgroup-name from the query params/extras, if not there then pull it from the
# beginning of the Redshift URL
# ex. workgroup-name.ccdre4hpd39h.us-east-1.redshift.amazonaws.com returns workgroup-name
workgroup_name = conn.extra_dejson.get("workgroup-name", cast("str", conn.host).split(".")[0])
redshift_serverless_client = AwsBaseHook(
aws_conn_id=aws_conn_id, client_type="redshift-serverless"
).conn
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/redshift-serverless/client/get_credentials.html#RedshiftServerless.Client.get_credentials
cluster_creds = redshift_serverless_client.get_credentials(
dbName=self.database or conn.schema,
workgroupName=workgroup_name,
)
token = cluster_creds["dbPassword"]
login = cluster_creds["dbUser"]
else:
port = conn.port or 5432
rds_client = AwsBaseHook(aws_conn_id=aws_conn_id, client_type="rds").conn
# https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds/client/generate_db_auth_token.html#RDS.Client.generate_db_auth_token
token = rds_client.generate_db_auth_token(conn.host, port, conn.login)
return cast("str", login), cast("str", token), port
def get_azure_iam_token(self, conn: Connection) -> tuple[str, str, int]:
"""
Get the Azure IAM token.
This uses AzureBaseHook to retrieve an OAUTH token to connect to Postgres.
Scope for the OAuth token can be set in the config option ``azure_oauth_scope`` under the section ``[postgres]``.
"""
if TYPE_CHECKING:
from airflow.providers.microsoft.azure.hooks.base_azure import AzureBaseHook
azure_conn_id = conn.extra_dejson.get("azure_conn_id", "azure_default")
try:
azure_conn = Connection.get(azure_conn_id)
except AttributeError:
azure_conn = Connection.get_connection_from_secrets(azure_conn_id) # type: ignore[attr-defined]
try:
azure_base_hook: AzureBaseHook = azure_conn.get_hook()
except TypeError as e:
if "required positional argument: 'sdk_client'" in str(e):
raise AirflowOptionalProviderFeatureException(
"Getting azure token is not supported by current version of 'AzureBaseHook'. "
"Please upgrade apache-airflow-providers-microsoft-azure>=12.8.0"
) from e
raise
scope = conf.get("postgres", "azure_oauth_scope", fallback=self.default_azure_oauth_scope)
token = azure_base_hook.get_token(scope).token
return cast("str", conn.login or azure_conn.login), token, conn.port or 5432
def get_table_primary_key(self, table: str, schema: str | None = "public") -> list[str] | None:
"""
Get the table's primary key.
:param table: Name of the target table
:param schema: Name of the target schema, public by default
:return: Primary key columns list
"""
return self.dialect.get_primary_keys(table=table, schema=schema)
def get_openlineage_database_info(self, connection) -> DatabaseInfo:
"""Return Postgres/Redshift specific information for OpenLineage."""
from airflow.providers.openlineage.sqlparser import DatabaseInfo
is_redshift = connection.extra_dejson.get("redshift", False)
if is_redshift:
authority = self._get_openlineage_redshift_authority_part(connection)
else:
authority = DbApiHook.get_openlineage_authority_part(connection, default_port=5432)
return DatabaseInfo(
scheme="postgres" if not is_redshift else "redshift",
authority=authority,
database=self.database or connection.schema,
)
def _get_openlineage_redshift_authority_part(self, connection) -> str:
try:
from airflow.providers.amazon.aws.hooks.base_aws import AwsBaseHook
except ImportError:
from airflow.exceptions import AirflowOptionalProviderFeatureException
raise AirflowOptionalProviderFeatureException(
"apache-airflow-providers-amazon not installed, run: "
"pip install 'apache-airflow-providers-postgres[amazon]'."
)
aws_conn_id = connection.extra_dejson.get("aws_conn_id", "aws_default")
port = connection.port or 5439
cluster_identifier = connection.extra_dejson.get("cluster-identifier", connection.host.split(".")[0])
region_name = AwsBaseHook(aws_conn_id=aws_conn_id).region_name
return f"{cluster_identifier}.{region_name}:{port}"
def get_openlineage_database_dialect(self, connection) -> str:
"""Return postgres/redshift dialect."""
return "redshift" if connection.extra_dejson.get("redshift", False) else "postgres"
def get_openlineage_default_schema(self) -> str | None:
"""Return current schema. This is usually changed with ``SEARCH_PATH`` parameter."""
return self.get_first("SELECT CURRENT_SCHEMA;")[0]
@classmethod
def get_ui_field_behaviour(cls) -> dict[str, Any]:
return {
"hidden_fields": [],
"relabeling": {
"schema": "Database",
},
}
def get_db_log_messages(self, conn) -> None:
"""Log database messages."""
if not self.enable_log_db_messages:
return
if USE_PSYCOPG3:
self.log.debug(
"With psycopg3, database notices are logged upon creation (via self._notice_handler)."
)
return
for output in conn.notices:
self.log.info(output)
def insert_rows(
self,
table,
rows,
target_fields=None,
commit_every=1000,
replace=False,
*,
executemany=False,
fast_executemany=False,
autocommit=False,
**kwargs,
):
"""
Insert a collection of tuples into a table.
Rows are inserted in chunks, each chunk (of size ``commit_every``) is
done in a new transaction.
:param table: Name of the target table
:param rows: The rows to insert into the table
:param target_fields: The names of the columns to fill in the table
:param commit_every: The maximum number of rows to insert in one
transaction. Set to 0 to insert all rows in one transaction.
:param replace: Whether to replace instead of insert
:param executemany: If True, all rows are inserted at once in
chunks defined by the commit_every parameter. This only works if all rows
have the same number of column names, but leads to better performance.
:param fast_executemany: If True, rows will be inserted using an optimized
bulk execution strategy (``psycopg2.extras.execute_batch``). This can
significantly improve performance for large inserts. If set to False,
the method falls back to the default implementation from
``DbApiHook.insert_rows``.
:param autocommit: What to set the connection's autocommit setting to
before executing the query.
"""
# if fast_executemany is disabled, defer to default implementation of insert_rows in DbApiHook
if not fast_executemany:
return super().insert_rows(
table,
rows,
target_fields=target_fields,
commit_every=commit_every,
replace=replace,
executemany=executemany,
autocommit=autocommit,
**kwargs,
)
# if fast_executemany is enabled, use optimized execute_batch from psycopg
nb_rows = 0
with self._create_autocommit_connection(autocommit) as conn:
conn.commit()
with closing(conn.cursor()) as cur:
for chunked_rows in chunked(rows, commit_every):
values = list(
map(
lambda row: self._serialize_cells(row, conn),
chunked_rows,
)
)
sql = self._generate_insert_sql(table, values[0], target_fields, replace, **kwargs)
self.log.debug("Generated sql: %s", sql)
try:
execute_batch(cur, sql, values, page_size=commit_every)
except Exception as e:
self.log.error("Generated sql: %s", sql)
self.log.error("Parameters: %s", values)
raise e
conn.commit()
nb_rows += len(chunked_rows)
self.log.info("Loaded %s rows into %s so far", nb_rows, table)
self.log.info("Done loading. Loaded a total of %s rows into %s", nb_rows, table)
| PostgresHook |
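A short usage sketch for the hook above; the connection id, schema, and query are illustrative, and the extras shown mirror the examples already given in the docstring:

```python
from airflow.providers.postgres.hooks.postgres import PostgresHook

# The Airflow connection "my_postgres" (illustrative id) can carry extras such as
#   {"sslmode": "require", "cursor": "dictcursor"}
# or, for IAM auth against Redshift,
#   {"iam": true, "redshift": true, "cluster-identifier": "my_cluster_id"}
hook = PostgresHook(
    postgres_conn_id="my_postgres",
    options="-c search_path=myschema",   # per-session search_path, as described above
)
rows = hook.get_records("SELECT current_schema()")  # get_records is inherited from DbApiHook
```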
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 29718,
"end": 30198
} | class ____(BaseModel, extra="forbid"):
points: Optional[List["ExtendedPointId"]] = Field(
default=None, description="Deletes values from each point in this list"
)
filter: Optional["Filter"] = Field(
default=None, description="Deletes values from points that satisfy this filter condition"
)
vector: List[str] = Field(..., description="Vector names")
shard_key: Optional["ShardKeySelector"] = Field(default=None, description="")
| DeleteVectors |
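A small illustrative instance of the request model above (the point ids and vector names are made up):

```python
from qdrant_client.http.models import DeleteVectors

req = DeleteVectors(
    points=[1, 2, 3],            # remove the named vectors from these point ids
    vector=["text", "image"],    # names of the vectors to delete
)
```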
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 4488,
"end": 4567
} | class ____(PydanticTypeError):
msg_template = 'byte type expected'
| BytesError |
python | kamyu104__LeetCode-Solutions | Python/nearest-exit-from-entrance-in-maze.py | {
"start": 55,
"end": 1649
} | class ____(object):
def nearestExit(self, maze, entrance):
"""
:type maze: List[List[str]]
:type entrance: List[int]
:rtype: int
"""
directions = [(0, 1), (1, 0), (0, -1), (-1, 0)]
visited = ' '
entrance = tuple(entrance)
left = set([entrance])
right = set([(r, 0) for r in xrange(len(maze)-1) if maze[r][0] == '.' and (r, 0) != entrance] +
[(len(maze)-1, c) for c in xrange(len(maze[0])-1) if maze[len(maze)-1][c] == '.' and (len(maze)-1, c) != entrance] +
[(r, len(maze[0])-1) for r in reversed(xrange(1, len(maze))) if maze[r][len(maze[0])-1] == '.' and (r, len(maze[0])-1) != entrance] +
[(0, c) for c in reversed(xrange(1, len(maze[0]))) if maze[0][c] == '.' and (0, c) != entrance])
steps = 0
while left:
for (r, c) in left:
maze[r][c] = visited
new_left = set()
for (r, c) in left:
if (r, c) in right:
return steps
for dr, dc in directions:
nr, nc = r+dr, c+dc
if not (0 <= nr < len(maze) and
0 <= nc < len(maze[0]) and
maze[nr][nc] == '.'):
continue
new_left.add((nr, nc))
left = new_left
steps += 1
if len(left) > len(right):
left, right = right, left
return -1
# Time: O(m * n)
# Space: O(m + n)
# bfs solution
| Solution |
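A quick check of the bidirectional BFS above on the well-known example for this problem ('.' is an open cell, '+' is a wall; note the solution is written for Python 2, hence `xrange`):

```python
maze = [["+", "+", ".", "+"],
        [".", ".", ".", "+"],
        ["+", "+", "+", "."]]
print(Solution().nearestExit(maze, [1, 2]))   # -> 1: the border cell (0, 2) is one step away
```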
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/eks.py | {
"start": 4248,
"end": 6221
} | class ____(EksBaseSensor):
"""
Check the state of an Amazon EKS Cluster until it reaches the target state or another terminal state.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:EksClusterStateSensor`
:param cluster_name: The name of the Cluster to watch. (templated)
:param target_state: Target state of the Cluster. (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
"""
template_fields: Sequence[str] = aws_template_fields("cluster_name", "target_state")
ui_color = "#ff9900"
ui_fgcolor = "#232F3E"
def __init__(
self,
*,
target_state: ClusterStates = ClusterStates.ACTIVE,
region: str | None = None,
**kwargs,
):
if region is not None:
warnings.warn(
message="Parameter `region` is deprecated. Use the parameter `region_name` instead",
category=AirflowProviderDeprecationWarning,
stacklevel=2,
)
kwargs["region_name"] = region
super().__init__(target_state=target_state, target_state_type=ClusterStates, **kwargs)
def get_state(self) -> ClusterStates:
return self.hook.get_cluster_state(clusterName=self.cluster_name)
def get_terminal_states(self) -> frozenset:
return CLUSTER_TERMINAL_STATES
| EksClusterStateSensor |
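A hedged usage sketch for the sensor above inside a DAG; the task id, cluster name, and connection id are illustrative:

```python
from airflow.providers.amazon.aws.hooks.eks import ClusterStates
from airflow.providers.amazon.aws.sensors.eks import EksClusterStateSensor

wait_for_cluster = EksClusterStateSensor(
    task_id="wait_for_eks_cluster",      # illustrative task id
    cluster_name="my-eks-cluster",       # illustrative cluster name
    target_state=ClusterStates.ACTIVE,   # the default shown in __init__ above
    aws_conn_id="aws_default",
)
```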
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_django/DJ008.py | {
"start": 91,
"end": 392
} | class ____(models.Model):
new_field = models.CharField(max_length=10)
class Meta:
verbose_name = "test model"
verbose_name_plural = "test models"
@property
def my_brand_new_property(self):
return 1
def my_beautiful_method(self):
return 2
| TestModel1 |
python | scipy__scipy | scipy/fft/_pocketfft/tests/test_basic.py | {
"start": 8546,
"end": 10245
} | class ____:
def setup_method(self):
np.random.seed(1234)
def test_definition(self):
for t in [[1, 2, 3, 4, 1, 2, 3, 4], [1, 2, 3, 4, 1, 2, 3, 4, 5]]:
x = np.array(t, dtype=self.rdt)
y = rfft(x)
y1 = direct_rdft(x)
assert_array_almost_equal(y,y1)
assert_equal(y.dtype, self.cdt)
def test_djbfft(self):
for i in range(2,14):
n = 2**i
x = np.arange(n)
y1 = np.fft.rfft(x)
y = rfft(x)
assert_array_almost_equal(y,y1)
def test_invalid_sizes(self):
assert_raises(ValueError, rfft, [])
assert_raises(ValueError, rfft, [[1,1],[2,2]], -5)
def test_complex_input(self):
x = np.zeros(10, dtype=self.cdt)
with assert_raises(TypeError, match="x must be a real sequence"):
rfft(x)
# See gh-5790
class MockSeries:
def __init__(self, data):
self.data = np.asarray(data)
def __getattr__(self, item):
try:
return getattr(self.data, item)
except AttributeError as e:
raise AttributeError("'MockSeries' object "
f"has no attribute '{item}'") from e
def test_non_ndarray_with_dtype(self):
x = np.array([1., 2., 3., 4., 5.])
xs = _TestRFFTBase.MockSeries(x)
expected = [1, 2, 3, 4, 5]
rfft(xs)
# Data should not have been overwritten
assert_equal(x, expected)
assert_equal(xs.data, expected)
@pytest.mark.skipif(np.longdouble is np.float64,
reason="Long double is aliased to double")
| _TestRFFTBase |
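The equivalence that `test_djbfft` asserts, shown standalone for a single size (this assumes `scipy.fft.rfft`, which is what the module under test exposes):

```python
import numpy as np
from scipy.fft import rfft

x = np.arange(8, dtype=float)
np.testing.assert_allclose(rfft(x), np.fft.rfft(x))   # real-input FFTs agree
```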
python | viewflow__viewflow | tests/_cases/test_workflow_undo_gates.py | {
"start": 5550,
"end": 6197
} | class ____(flow.Flow):
start = flow.StartHandle().Next(this.split_gate)
split_gate = flow.Split().Next(this.if_gate).Next(this.switch_gate)
if_gate = flow.If(lambda act: True).Then(this.handle_a).Else(this.handle_b)
handle_a = flow.Handle().Next(this.join_gate)
handle_b = flow.Handle().Next(this.join_gate)
switch_gate = (
flow.Switch()
.Case(this.handle_c, lambda act: False)
.Case(this.handle_d, lambda act: True)
)
handle_c = flow.Handle().Next(this.join_gate)
handle_d = flow.Handle().Next(this.join_gate)
join_gate = flow.Join().Next(this.end)
end = flow.End()
| TestUndoFlow |
python | joke2k__faker | faker/providers/person/vi_VN/__init__.py | {
"start": 44,
"end": 2444
} | class ____(PersonProvider):
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{first_name_unisex}} {{last_name}}",
"{{prefix_female}} {{first_name_unisex}} {{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
)
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{first_name_male}} {{middle_name}} {{last_name}}",
"{{first_name_unisex}} {{middle_name}} {{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
)
formats = formats_female + formats_male
# Name from : https://en.wikipedia.org/wiki/Vietnamese_name
# and https://vinpearl.com/en/vietnamese-names-top-200-popular-names-for-boys-and-girls
first_names_female = (
"Ngọc",
"Hương",
"Lan",
"Mai",
"Thảo",
"Linh",
"Hồng",
"Chi",
"Vân",
"Duyên",
"Dương",
"Yến",
"Vi",
"Ánh",
"Xuân",
)
first_names_unisex = (
"An",
"Hà",
"Bảo",
"Lâm",
"Hạnh",
"Thành",
"Kim",
"Nhật",
"Phương",
"Khoa",
"Hải",
"Nhật",
)
first_names_male = (
"Nam",
"Hưng",
"Vũ",
"Tú",
"Hoàng",
"Phúc",
"Trung",
"Quang",
"Anh",
"Khoa",
"Dũng",
"Quang",
"Thành",
"Huy",
"Bảo",
"Châu",
"Minh",
"Tùng",
"Nhiên",
"Trọng",
)
middle_names = (
"Văn",
"Thị",
"Quang",
"Đức",
"Trí",
"Xuân",
"Hoàng",
"Hải",
"Đức",
"Thế",
"Tấn",
"Phú",
"Hữu",
"Bảo",
"Mai",
"Mai Bảo",
)
last_names = ("Nguyễn", "Trần", "Lê", "Phạm", "Vũ", "Đặng", "Bùi", "Dương", "Mai", "Hoàng")
# Typically, Vietnamese will be addressed with their given name and a prefix
# https://en.wikipedia.org/wiki/Vietnamese_name#Given_name
prefixes_female = ("Cô", "Chị", "Bà", "Quý cô")
prefixes_male = ("Ông", "Anh", "Bác", "Quý ông")
def first_name_unisex(self) -> str:
return self.random_element(self.first_names_unisex)
def middle_name(self) -> str:
return self.random_element(self.middle_names)
| Provider |
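A brief sketch of the provider above in use via the public Faker API; the printed names are examples of what the formats can produce, not fixed outputs:

```python
from faker import Faker

fake = Faker("vi_VN")
print(fake.name())              # e.g. "Quý cô An Nguyễn" (prefix + unisex first name + last name)
print(fake.first_name_male())   # e.g. "Hoàng"
```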
python | openai__openai-python | src/openai/lib/streaming/responses/_responses.py | {
"start": 4303,
"end": 6566
} | class ____(Generic[TextFormatT]):
def __init__(
self,
*,
raw_stream: AsyncStream[RawResponseStreamEvent],
text_format: type[TextFormatT] | Omit,
input_tools: Iterable[ToolParam] | Omit,
starting_after: int | None,
) -> None:
self._raw_stream = raw_stream
self._response = raw_stream.response
self._iterator = self.__stream__()
self._state = ResponseStreamState(text_format=text_format, input_tools=input_tools)
self._starting_after = starting_after
async def __anext__(self) -> ResponseStreamEvent[TextFormatT]:
return await self._iterator.__anext__()
async def __aiter__(self) -> AsyncIterator[ResponseStreamEvent[TextFormatT]]:
async for item in self._iterator:
yield item
async def __stream__(self) -> AsyncIterator[ResponseStreamEvent[TextFormatT]]:
async for sse_event in self._raw_stream:
events_to_fire = self._state.handle_event(sse_event)
for event in events_to_fire:
if self._starting_after is None or event.sequence_number > self._starting_after:
yield event
async def __aenter__(self) -> Self:
return self
async def __aexit__(
self,
exc_type: type[BaseException] | None,
exc: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
await self.close()
async def close(self) -> None:
"""
Close the response and release the connection.
Automatically called if the response body is read to completion.
"""
await self._response.aclose()
async def get_final_response(self) -> ParsedResponse[TextFormatT]:
"""Waits until the stream has been read to completion and returns
the accumulated `ParsedResponse` object.
"""
await self.until_done()
response = self._state._completed_response
if not response:
raise RuntimeError("Didn't receive a `response.completed` event.")
return response
async def until_done(self) -> Self:
"""Blocks until the stream has been consumed."""
await consume_async_iterator(self)
return self
| AsyncResponseStream |
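A sketch of how a stream like this is typically consumed. The construction through `client.responses.stream(...)` is an assumption about the surrounding client API; the iteration, `get_final_response()`, and context-manager behaviour come directly from the methods defined above:

```python
import asyncio
from openai import AsyncOpenAI

async def main() -> None:
    client = AsyncOpenAI()
    # assumed entry point yielding an AsyncResponseStream-style context manager
    async with client.responses.stream(model="gpt-4o", input="Say hello") as stream:
        async for event in stream:                    # __aiter__ defined above
            print(event.sequence_number, event.type)
        final = await stream.get_final_response()     # accumulated ParsedResponse
        print(final.output_text)

asyncio.run(main())
```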
python | astropy__astropy | astropy/modeling/polynomial.py | {
"start": 13301,
"end": 16606
} | class ____(_PolyDomainWindow1D):
r"""
Univariate Chebyshev series.
It is defined as:
.. math::
P(x) = \sum_{i=0}^{i=n}C_{i} * T_{i}(x)
where ``T_i(x)`` is the corresponding Chebyshev polynomial of the 1st kind.
For explanation of ```domain``, and ``window`` see
:ref:`Notes regarding usage of domain and window <domain-window-note>`.
Parameters
----------
degree : int
degree of the series
domain : tuple or None, optional
window : tuple or None, optional
If None, it is set to (-1, 1)
Fitters will remap the domain to this window.
**params : dict
keyword : value pairs, representing parameter_name: value
Notes
-----
This model does not support the use of units/quantities, because each term
in the sum of Chebyshev polynomials is a polynomial in x - since the
coefficients within each Chebyshev polynomial are fixed, we can't use
quantities for x since the units would not be compatible. For example, the
third Chebyshev polynomial (T2) is 2x^2-1, but if x was specified with
units, 2x^2 and -1 would have incompatible units.
"""
n_inputs = 1
n_outputs = 1
_separable = True
def __init__(
self,
degree,
domain=None,
window=None,
n_models=None,
model_set_axis=None,
name=None,
meta=None,
**params,
):
super().__init__(
degree,
domain=domain,
window=window,
n_models=n_models,
model_set_axis=model_set_axis,
name=name,
meta=meta,
**params,
)
def fit_deriv(self, x, *params):
"""
Computes the Vandermonde matrix.
Parameters
----------
x : ndarray
input
*params
throw-away parameter list returned by non-linear fitters
Returns
-------
result : ndarray
The Vandermonde matrix
"""
x = np.array(x, dtype=float, copy=COPY_IF_NEEDED, ndmin=1)
v = np.empty((self.degree + 1,) + x.shape, dtype=x.dtype)
v[0] = 1
if self.degree > 0:
x2 = 2 * x
v[1] = x
for i in range(2, self.degree + 1):
v[i] = v[i - 1] * x2 - v[i - 2]
return np.rollaxis(v, 0, v.ndim)
def prepare_inputs(self, x, **kwargs):
inputs, broadcasted_shapes = super().prepare_inputs(x, **kwargs)
x = inputs[0]
return (x,), broadcasted_shapes
def evaluate(self, x, *coeffs):
if self.domain is not None:
x = poly_map_domain(x, self.domain, self.window)
return self.clenshaw(x, coeffs)
@staticmethod
def clenshaw(x, coeffs):
"""Evaluates the polynomial using Clenshaw's algorithm."""
if len(coeffs) == 1:
c0 = coeffs[0]
c1 = 0
elif len(coeffs) == 2:
c0 = coeffs[0]
c1 = coeffs[1]
else:
x2 = 2 * x
c0 = coeffs[-2]
c1 = coeffs[-1]
for i in range(3, len(coeffs) + 1):
tmp = c0
c0 = coeffs[-i] - c1
c1 = tmp + c1 * x2
return c0 + c1 * x
| Chebyshev1D |
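The `clenshaw` staticmethod above is the standard Clenshaw recurrence for a Chebyshev series, so it should agree with NumPy's reference evaluator; a small check (the coefficient values are arbitrary):

```python
import numpy as np
from numpy.polynomial.chebyshev import chebval
from astropy.modeling.models import Chebyshev1D

coeffs = [1.0, -0.5, 0.25, 0.125]          # c0..c3, arbitrary
x = np.linspace(-1.0, 1.0, 9)

# the bare recurrence, as implemented by the staticmethod above
np.testing.assert_allclose(Chebyshev1D.clenshaw(x, coeffs), chebval(x, coeffs))

# the full model with the same coefficients evaluates the same series
model = Chebyshev1D(degree=3, c0=1.0, c1=-0.5, c2=0.25, c3=0.125)
np.testing.assert_allclose(model(x), chebval(x, coeffs))
```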
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 171506,
"end": 174132
} | class ____(CType, EnumMixin):
# name string
# doc string or None
# cname string
is_cpp_enum = True
def __init__(self, name, cname, underlying_type, namespace=None, doc=None):
self.name = name
self.doc = doc
self.cname = cname
self.values = []
self.underlying_type = underlying_type
self.namespace = namespace
def __str__(self):
return self.name
def declaration_code(self, entity_code,
for_display=0, dll_linkage=None, pyrex=0):
if pyrex or for_display:
type_name = self.name
else:
if self.namespace:
type_name = "%s::%s" % (
self.namespace.empty_declaration_code(),
self.cname
)
else:
type_name = "__PYX_ENUM_CLASS_DECL %s" % self.cname
type_name = public_decl(type_name, dll_linkage)
return self.base_declaration_code(type_name, entity_code)
def create_from_py_utility_code(self, env):
if self.from_py_function:
return True
if self.underlying_type.create_from_py_utility_code(env):
self.from_py_function = '(%s)%s' % (
self.cname, self.underlying_type.from_py_function
)
return True
def create_to_py_utility_code(self, env):
if self.to_py_function is not None:
return True
if self.entry.create_wrapper:
self.create_enum_to_py_utility_code(env)
return True
if self.underlying_type.create_to_py_utility_code(env):
# Using a C++11 lambda here, which is fine since
# scoped enums are a C++11 feature
self.to_py_function = '[](const %s& x){return %s((%s)x);}' % (
self.cname,
self.underlying_type.to_py_function,
self.underlying_type.empty_declaration_code()
)
return True
def create_type_wrapper(self, env):
from .UtilityCode import CythonUtilityCode
rst = CythonUtilityCode.load(
"CppScopedEnumType", "CpdefEnums.pyx",
context={
"name": self.name,
"cname": self.cname.split("::")[-1],
"items": tuple(self.values),
"underlying_type": self.underlying_type.empty_declaration_code(),
"enum_doc": self.doc,
"static_modname": env.qualified_name,
},
outer_module_scope=env.global_scope())
env.use_utility_code(rst)
| CppScopedEnumType |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 19726,
"end": 20058
} | class ____(GroupType):
type_id = 2003
slug = "profile_json_decode_main_thread"
description = "JSON Decoding on Main Thread"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.MOBILE.value
default_priority = PriorityLevel.LOW
released = True
@dataclass(frozen=True)
| ProfileJSONDecodeType |
python | huggingface__transformers | src/transformers/image_processing_utils_fast.py | {
"start": 5092,
"end": 37790
} | class ____(BaseImageProcessor):
r"""
Base class for fast image processors using PyTorch and TorchVision for image transformations.
This class provides a complete implementation for standard image preprocessing operations (resize, crop, rescale,
normalize) with GPU support and batch processing optimizations. Most image processors can be implemented by simply
setting class attributes; only processors requiring custom logic need to override methods.
Basic Implementation
--------------------
For processors that only need standard operations (resize, center crop, rescale, normalize), define class
attributes:
class MyImageProcessorFast(BaseImageProcessorFast):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"height": 224, "width": 224}
do_resize = True
do_rescale = True
do_normalize = True
Custom Processing
-----------------
Override `_preprocess` (most common):
For custom image processing logic, override `_preprocess`. This method receives a list of torch tensors with
channel dimension first and should return a BatchFeature. Use `group_images_by_shape` and `reorder_images` for
efficient batch processing:
def _preprocess(
self,
images: list[torch.Tensor],
do_resize: bool,
size: SizeDict,
# ... other parameters
**kwargs,
) -> BatchFeature:
# Group images by shape for batched operations
grouped_images, indices = group_images_by_shape(images)
processed_groups = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(stacked_images, size)
# Custom processing here
processed_groups[shape] = stacked_images
processed_images = reorder_images(processed_groups, indices)
return BatchFeature(data={"pixel_values": torch.stack(processed_images)})
Override `_preprocess_image_like_inputs` (for additional inputs):
For processors handling multiple input types (e.g., images + segmentation maps), override this method:
def _preprocess_image_like_inputs(
self,
images: ImageInput,
segmentation_maps: Optional[ImageInput] = None,
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[torch.device] = None,
**kwargs,
) -> BatchFeature:
images = self._prepare_image_like_inputs(images, do_convert_rgb, input_data_format, device)
batch_feature = self._preprocess(images, **kwargs)
if segmentation_maps is not None:
# Process segmentation maps separately
maps = self._prepare_image_like_inputs(segmentation_maps, ...)
batch_feature["labels"] = self._preprocess(maps, ...)
return batch_feature
Override `_further_process_kwargs` (for custom kwargs formatting):
To format custom kwargs before validation:
def _further_process_kwargs(self, custom_param=None, **kwargs):
kwargs = super()._further_process_kwargs(**kwargs)
if custom_param is not None:
kwargs["custom_param"] = self._format_custom_param(custom_param)
return kwargs
Override `_validate_preprocess_kwargs` (for custom validation):
To add custom validation logic:
def _validate_preprocess_kwargs(self, custom_param=None, **kwargs):
super()._validate_preprocess_kwargs(**kwargs)
if custom_param is not None and custom_param < 0:
raise ValueError("custom_param must be non-negative")
Override `_prepare_images_structure` (for nested inputs):
By default, nested image lists are flattened. Override to preserve structure:
def _prepare_images_structure(self, images, expected_ndims=3):
# Custom logic to handle nested structure
return images # Return as-is or with custom processing
Custom Parameters
-----------------
To add parameters beyond `ImagesKwargs`, create a custom kwargs class and set it as `valid_kwargs`:
class MyImageProcessorKwargs(ImagesKwargs):
custom_param: Optional[int] = None
another_param: Optional[bool] = None
class MyImageProcessorFast(BaseImageProcessorFast):
valid_kwargs = MyImageProcessorKwargs
custom_param = 10 # default value
def _preprocess(self, images, custom_param, **kwargs):
# Use custom_param in processing
...
Key Notes
---------
- Images in `_preprocess` are always torch tensors with channel dimension first, regardless of input format
- Arguments not provided by users default to class attribute values
- Use batch processing utilities (`group_images_by_shape`, `reorder_images`) for GPU efficiency
- Image loading, format conversion, and argument handling are automatic - focus only on processing logic
"""
resample = None
image_mean = None
image_std = None
size = None
default_to_square = True
crop_size = None
do_resize = None
do_center_crop = None
do_pad = None
pad_size = None
do_rescale = None
rescale_factor = 1 / 255
do_normalize = None
do_convert_rgb = None
return_tensors = None
data_format = ChannelDimension.FIRST
input_data_format = None
device = None
model_input_names = ["pixel_values"]
image_seq_length = None
valid_kwargs = ImagesKwargs
unused_kwargs = None
def __init__(self, **kwargs: Unpack[ImagesKwargs]):
super().__init__(**kwargs)
kwargs = self.filter_out_unused_kwargs(kwargs)
size = kwargs.pop("size", self.size)
self.size = (
get_size_dict(size=size, default_to_square=kwargs.pop("default_to_square", self.default_to_square))
if size is not None
else None
)
crop_size = kwargs.pop("crop_size", self.crop_size)
self.crop_size = get_size_dict(crop_size, param_name="crop_size") if crop_size is not None else None
pad_size = kwargs.pop("pad_size", self.pad_size)
self.pad_size = get_size_dict(size=pad_size, param_name="pad_size") if pad_size is not None else None
for key in self.valid_kwargs.__annotations__:
kwarg = kwargs.pop(key, None)
if kwarg is not None:
setattr(self, key, kwarg)
else:
setattr(self, key, deepcopy(getattr(self, key, None)))
# get valid kwargs names
self._valid_kwargs_names = list(self.valid_kwargs.__annotations__.keys())
@property
def is_fast(self) -> bool:
"""
`bool`: Whether or not this image processor is a fast processor (backed by PyTorch and TorchVision).
"""
return True
def pad(
self,
images: list["torch.Tensor"],
pad_size: SizeDict = None,
fill_value: Optional[int] = 0,
padding_mode: Optional[str] = "constant",
return_mask: bool = False,
disable_grouping: Optional[bool] = False,
is_nested: Optional[bool] = False,
**kwargs,
) -> Union[tuple["torch.Tensor", "torch.Tensor"], "torch.Tensor"]:
"""
Pads images to `(pad_size["height"], pad_size["width"])` or to the largest size in the batch.
Args:
images (`list[torch.Tensor]`):
Images to pad.
pad_size (`SizeDict`, *optional*):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
fill_value (`int`, *optional*, defaults to `0`):
The constant value used to fill the padded area.
padding_mode (`str`, *optional*, defaults to "constant"):
The padding mode to use. Can be any of the modes supported by
`torch.nn.functional.pad` (e.g. constant, reflection, replication).
return_mask (`bool`, *optional*, defaults to `False`):
Whether to return a pixel mask to denote padded regions.
disable_grouping (`bool`, *optional*, defaults to `False`):
Whether to disable grouping of images by size.
Returns:
`Union[tuple[torch.Tensor, torch.Tensor], torch.Tensor]`: The padded images and pixel masks if `return_mask` is `True`.
"""
if pad_size is not None:
if not (pad_size.height and pad_size.width):
raise ValueError(f"Pad size must contain 'height' and 'width' keys only. Got pad_size={pad_size}.")
pad_size = (pad_size.height, pad_size.width)
else:
pad_size = get_max_height_width(images)
grouped_images, grouped_images_index = group_images_by_shape(
images, disable_grouping=disable_grouping, is_nested=is_nested
)
processed_images_grouped = {}
processed_masks_grouped = {}
for shape, stacked_images in grouped_images.items():
image_size = stacked_images.shape[-2:]
padding_height = pad_size[0] - image_size[0]
padding_width = pad_size[1] - image_size[1]
if padding_height < 0 or padding_width < 0:
raise ValueError(
f"Padding dimensions are negative. Please make sure that the `pad_size` is larger than the "
f"image size. Got pad_size={pad_size}, image_size={image_size}."
)
if image_size != pad_size:
padding = (0, 0, padding_width, padding_height)
stacked_images = F.pad(stacked_images, padding, fill=fill_value, padding_mode=padding_mode)
processed_images_grouped[shape] = stacked_images
if return_mask:
# keep only one from the channel dimension in pixel mask
stacked_masks = torch.zeros_like(stacked_images, dtype=torch.int64)[..., 0, :, :]
stacked_masks[..., : image_size[0], : image_size[1]] = 1
processed_masks_grouped[shape] = stacked_masks
processed_images = reorder_images(processed_images_grouped, grouped_images_index, is_nested=is_nested)
if return_mask:
processed_masks = reorder_images(processed_masks_grouped, grouped_images_index, is_nested=is_nested)
return processed_images, processed_masks
return processed_images
def resize(
self,
image: "torch.Tensor",
size: SizeDict,
interpolation: Optional["F.InterpolationMode"] = None,
antialias: bool = True,
**kwargs,
) -> "torch.Tensor":
"""
Resize an image to `(size["height"], size["width"])`.
Args:
image (`torch.Tensor`):
Image to resize.
size (`SizeDict`):
Dictionary in the format `{"height": int, "width": int}` specifying the size of the output image.
interpolation (`InterpolationMode`, *optional*, defaults to `InterpolationMode.BILINEAR`):
`InterpolationMode` filter to use when resizing the image e.g. `InterpolationMode.BICUBIC`.
antialias (`bool`, *optional*, defaults to `True`):
Whether to use antialiasing.
Returns:
`torch.Tensor`: The resized image.
"""
interpolation = interpolation if interpolation is not None else F.InterpolationMode.BILINEAR
if size.shortest_edge and size.longest_edge:
# Resize the image so that the shortest edge or the longest edge is of the given size
# while maintaining the aspect ratio of the original image.
new_size = get_size_with_aspect_ratio(
image.size()[-2:],
size.shortest_edge,
size.longest_edge,
)
elif size.shortest_edge:
new_size = get_resize_output_image_size(
image,
size=size.shortest_edge,
default_to_square=False,
input_data_format=ChannelDimension.FIRST,
)
elif size.max_height and size.max_width:
new_size = get_image_size_for_max_height_width(image.size()[-2:], size.max_height, size.max_width)
elif size.height and size.width:
new_size = (size.height, size.width)
else:
raise ValueError(
"Size must contain 'height' and 'width' keys, or 'max_height' and 'max_width', or 'shortest_edge' key. Got"
f" {size}."
)
# This is a workaround to avoid a bug in torch.compile when dealing with uint8 on AMD MI3XX GPUs
# Tracked in PyTorch issue: https://github.com/pytorch/pytorch/issues/155209
# TODO: remove this once the bug is fixed (detected with torch==2.7.0+git1fee196, torchvision==0.22.0+9eb57cd)
if is_torchdynamo_compiling() and is_rocm_platform():
return self.compile_friendly_resize(image, new_size, interpolation, antialias)
return F.resize(image, new_size, interpolation=interpolation, antialias=antialias)
@staticmethod
def compile_friendly_resize(
image: "torch.Tensor",
new_size: tuple[int, int],
interpolation: Optional["F.InterpolationMode"] = None,
antialias: bool = True,
) -> "torch.Tensor":
"""
A wrapper around `F.resize` so that it is compatible with torch.compile when the image is a uint8 tensor.
"""
if image.dtype == torch.uint8:
# 256 is used on purpose instead of 255 to avoid numerical differences
# see https://github.com/huggingface/transformers/pull/38540#discussion_r2127165652
image = image.float() / 256
image = F.resize(image, new_size, interpolation=interpolation, antialias=antialias)
image = image * 256
# torch.where is used on purpose instead of torch.clamp to avoid bug in torch.compile
# see https://github.com/huggingface/transformers/pull/38540#discussion_r2126888471
image = torch.where(image > 255, 255, image)
image = torch.where(image < 0, 0, image)
image = image.round().to(torch.uint8)
else:
image = F.resize(image, new_size, interpolation=interpolation, antialias=antialias)
return image
def rescale(
self,
image: "torch.Tensor",
scale: float,
**kwargs,
) -> "torch.Tensor":
"""
Rescale an image by a scale factor. image = image * scale.
Args:
image (`torch.Tensor`):
Image to rescale.
scale (`float`):
The scaling factor to rescale pixel values by.
Returns:
`torch.Tensor`: The rescaled image.
"""
return image * scale
def normalize(
self,
image: "torch.Tensor",
mean: Union[float, Iterable[float]],
std: Union[float, Iterable[float]],
**kwargs,
) -> "torch.Tensor":
"""
Normalize an image. image = (image - image_mean) / image_std.
Args:
image (`torch.Tensor`):
Image to normalize.
mean (`torch.Tensor`, `float` or `Iterable[float]`):
Image mean to use for normalization.
std (`torch.Tensor`, `float` or `Iterable[float]`):
Image standard deviation to use for normalization.
Returns:
`torch.Tensor`: The normalized image.
"""
return F.normalize(image, mean, std)
@lru_cache(maxsize=10)
def _fuse_mean_std_and_rescale_factor(
self,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
device: Optional["torch.device"] = None,
) -> tuple:
if do_rescale and do_normalize:
# Fused rescale and normalize
image_mean = torch.tensor(image_mean, device=device) * (1.0 / rescale_factor)
image_std = torch.tensor(image_std, device=device) * (1.0 / rescale_factor)
do_rescale = False
return image_mean, image_std, do_rescale
def rescale_and_normalize(
self,
images: "torch.Tensor",
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Union[float, list[float]],
image_std: Union[float, list[float]],
) -> "torch.Tensor":
"""
Rescale and normalize images.
"""
image_mean, image_std, do_rescale = self._fuse_mean_std_and_rescale_factor(
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
device=images.device,
)
# if/elif as we use fused rescale and normalize if both are set to True
if do_normalize:
images = self.normalize(images.to(dtype=torch.float32), image_mean, image_std)
elif do_rescale:
images = self.rescale(images, rescale_factor)
return images
def center_crop(
self,
image: "torch.Tensor",
size: SizeDict,
**kwargs,
) -> "torch.Tensor":
"""
Note: override torchvision's center_crop to have the same behavior as the slow processor.
Center crop an image to `(size["height"], size["width"])`. If the input size is smaller than `crop_size` along
any edge, the image is padded with 0's and then center cropped.
Args:
image (`"torch.Tensor"`):
Image to center crop.
size (`dict[str, int]`):
Size of the output image.
Returns:
`torch.Tensor`: The center cropped image.
"""
if size.height is None or size.width is None:
raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
image_height, image_width = image.shape[-2:]
crop_height, crop_width = size.height, size.width
if crop_width > image_width or crop_height > image_height:
padding_ltrb = [
(crop_width - image_width) // 2 if crop_width > image_width else 0,
(crop_height - image_height) // 2 if crop_height > image_height else 0,
(crop_width - image_width + 1) // 2 if crop_width > image_width else 0,
(crop_height - image_height + 1) // 2 if crop_height > image_height else 0,
]
image = F.pad(image, padding_ltrb, fill=0) # PIL uses fill value 0
image_height, image_width = image.shape[-2:]
if crop_width == image_width and crop_height == image_height:
return image
crop_top = int((image_height - crop_height) / 2.0)
crop_left = int((image_width - crop_width) / 2.0)
return F.crop(image, crop_top, crop_left, crop_height, crop_width)
def convert_to_rgb(
self,
image: ImageInput,
) -> ImageInput:
"""
Converts an image to RGB format. Only converts if the image is of type PIL.Image.Image, otherwise returns the image
as is.
Args:
image (ImageInput):
The image to convert.
Returns:
ImageInput: The converted image.
"""
return convert_to_rgb(image)
def filter_out_unused_kwargs(self, kwargs: dict):
"""
Filter out the unused kwargs from the kwargs dictionary.
"""
if self.unused_kwargs is None:
return kwargs
for kwarg_name in self.unused_kwargs:
if kwarg_name in kwargs:
logger.warning_once(f"This processor does not use the `{kwarg_name}` parameter. It will be ignored.")
kwargs.pop(kwarg_name)
return kwargs
def _prepare_images_structure(
self,
images: ImageInput,
expected_ndims: int = 3,
) -> ImageInput:
"""
Prepare the images structure for processing.
Args:
images (`ImageInput`):
The input images to process.
Returns:
`ImageInput`: The images with a valid nesting.
"""
# Checks for `str` in case of URL/local path and optionally loads images
images = self.fetch_images(images)
return make_flat_list_of_images(images, expected_ndims=expected_ndims)
def _process_image(
self,
image: ImageInput,
do_convert_rgb: Optional[bool] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
device: Optional["torch.device"] = None,
) -> "torch.Tensor":
image_type = get_image_type(image)
if image_type not in [ImageType.PIL, ImageType.TORCH, ImageType.NUMPY]:
raise ValueError(f"Unsupported input image type {image_type}")
if do_convert_rgb:
image = self.convert_to_rgb(image)
if image_type == ImageType.PIL:
image = F.pil_to_tensor(image)
elif image_type == ImageType.NUMPY:
# not using F.to_tensor as it doesn't handle (C, H, W) numpy arrays
image = torch.from_numpy(image).contiguous()
# If the image is 2D, we need to unsqueeze it to add a channel dimension for processing
if image.ndim == 2:
image = image.unsqueeze(0)
# Infer the channel dimension format if not provided
if input_data_format is None:
input_data_format = infer_channel_dimension_format(image)
if input_data_format == ChannelDimension.LAST:
# We force the channel dimension to be first for torch tensors as this is what torchvision expects.
image = image.permute(2, 0, 1).contiguous()
# Now that we have torch tensors, we can move them to the right device
if device is not None:
image = image.to(device)
return image
def _prepare_image_like_inputs(
self,
images: ImageInput,
do_convert_rgb: Optional[bool] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
device: Optional["torch.device"] = None,
expected_ndims: int = 3,
) -> list["torch.Tensor"]:
"""
Prepare image-like inputs for processing.
Args:
images (`ImageInput`):
The image-like inputs to process.
do_convert_rgb (`bool`, *optional*):
Whether to convert the images to RGB.
input_data_format (`str` or `ChannelDimension`, *optional*):
The input data format of the images.
device (`torch.device`, *optional*):
The device to put the processed images on.
expected_ndims (`int`, *optional*):
The expected number of dimensions for the images. (can be 2 for segmentation maps etc.)
Returns:
List[`torch.Tensor`]: The processed images.
"""
# Get structured images (potentially nested)
images = self._prepare_images_structure(images, expected_ndims=expected_ndims)
process_image_partial = partial(
self._process_image, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
# Check if we have nested structure, assuming the nesting is consistent
has_nested_structure = len(images) > 0 and isinstance(images[0], (list, tuple))
if has_nested_structure:
processed_images = [[process_image_partial(img) for img in nested_list] for nested_list in images]
else:
processed_images = [process_image_partial(img) for img in images]
return processed_images
def _further_process_kwargs(
self,
size: Optional[SizeDict] = None,
crop_size: Optional[SizeDict] = None,
pad_size: Optional[SizeDict] = None,
default_to_square: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[ChannelDimension] = None,
**kwargs,
) -> dict:
"""
Update kwargs that need further processing before being validated
Can be overridden by subclasses to customize the processing of kwargs.
"""
if kwargs is None:
kwargs = {}
if size is not None:
size = SizeDict(**get_size_dict(size=size, default_to_square=default_to_square))
if crop_size is not None:
crop_size = SizeDict(**get_size_dict(crop_size, param_name="crop_size"))
if pad_size is not None:
pad_size = SizeDict(**get_size_dict(size=pad_size, param_name="pad_size"))
if isinstance(image_mean, list):
image_mean = tuple(image_mean)
if isinstance(image_std, list):
image_std = tuple(image_std)
if data_format is None:
data_format = ChannelDimension.FIRST
kwargs["size"] = size
kwargs["crop_size"] = crop_size
kwargs["pad_size"] = pad_size
kwargs["image_mean"] = image_mean
kwargs["image_std"] = image_std
kwargs["data_format"] = data_format
# torch resize uses interpolation instead of resample
# Check if resample is an int before checking if it's an instance of PILImageResampling
# because if pillow < 9.1.0, resample is an int and PILImageResampling is a module.
# Checking PILImageResampling will fail with error `TypeError: isinstance() arg 2 must be a type or tuple of types`.
resample = kwargs.pop("resample")
kwargs["interpolation"] = (
pil_torch_interpolation_mapping[resample] if isinstance(resample, (PILImageResampling, int)) else resample
)
return kwargs
def _validate_preprocess_kwargs(
self,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, tuple[float]]] = None,
image_std: Optional[Union[float, tuple[float]]] = None,
do_resize: Optional[bool] = None,
size: Optional[SizeDict] = None,
do_center_crop: Optional[bool] = None,
crop_size: Optional[SizeDict] = None,
interpolation: Optional["F.InterpolationMode"] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Optional[ChannelDimension] = None,
**kwargs,
):
"""
validate the kwargs for the preprocess method.
"""
validate_fast_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
do_center_crop=do_center_crop,
crop_size=crop_size,
interpolation=interpolation,
return_tensors=return_tensors,
data_format=data_format,
)
@auto_docstring
def preprocess(self, images: ImageInput, *args, **kwargs: Unpack[ImagesKwargs]) -> BatchFeature:
# args are not validated, but their order in the `preprocess` and `_preprocess` signatures must be the same
validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_kwargs_names)
# Perform type validation on received kwargs
validate_typed_dict(self.valid_kwargs, kwargs)
# Set default kwargs from self. This ensures that if a kwarg is not provided
# by the user, it gets its default value from the instance, or is set to None.
for kwarg_name in self._valid_kwargs_names:
kwargs.setdefault(kwarg_name, getattr(self, kwarg_name, None))
# Extract parameters that are only used for preparing the input images
do_convert_rgb = kwargs.pop("do_convert_rgb")
input_data_format = kwargs.pop("input_data_format")
device = kwargs.pop("device")
# Update kwargs that need further processing before being validated
kwargs = self._further_process_kwargs(**kwargs)
# Validate kwargs
self._validate_preprocess_kwargs(**kwargs)
# Pop kwargs that are not needed in _preprocess
kwargs.pop("data_format")
return self._preprocess_image_like_inputs(
images, *args, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device, **kwargs
)
def _preprocess_image_like_inputs(
self,
images: ImageInput,
*args,
do_convert_rgb: bool,
input_data_format: ChannelDimension,
device: Optional[Union[str, "torch.device"]] = None,
**kwargs: Unpack[ImagesKwargs],
) -> BatchFeature:
"""
Preprocess image-like inputs.
To be overridden by subclasses when image-like inputs other than images should be processed.
It can be used for segmentation maps, depth maps, etc.
"""
# Prepare input images
images = self._prepare_image_like_inputs(
images=images, do_convert_rgb=do_convert_rgb, input_data_format=input_data_format, device=device
)
return self._preprocess(images, *args, **kwargs)
def _preprocess(
self,
images: list["torch.Tensor"],
do_resize: bool,
size: SizeDict,
interpolation: Optional["F.InterpolationMode"],
do_center_crop: bool,
crop_size: SizeDict,
do_rescale: bool,
rescale_factor: float,
do_normalize: bool,
image_mean: Optional[Union[float, list[float]]],
image_std: Optional[Union[float, list[float]]],
do_pad: Optional[bool],
pad_size: Optional[SizeDict],
disable_grouping: Optional[bool],
return_tensors: Optional[Union[str, TensorType]],
**kwargs,
) -> BatchFeature:
# Group images by size for batched resizing
grouped_images, grouped_images_index = group_images_by_shape(images, disable_grouping=disable_grouping)
resized_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_resize:
stacked_images = self.resize(image=stacked_images, size=size, interpolation=interpolation)
resized_images_grouped[shape] = stacked_images
resized_images = reorder_images(resized_images_grouped, grouped_images_index)
# Group images by size for further processing
# Needed in case do_resize is False, or resize returns images with different sizes
grouped_images, grouped_images_index = group_images_by_shape(resized_images, disable_grouping=disable_grouping)
processed_images_grouped = {}
for shape, stacked_images in grouped_images.items():
if do_center_crop:
stacked_images = self.center_crop(stacked_images, crop_size)
# Fused rescale and normalize
stacked_images = self.rescale_and_normalize(
stacked_images, do_rescale, rescale_factor, do_normalize, image_mean, image_std
)
processed_images_grouped[shape] = stacked_images
processed_images = reorder_images(processed_images_grouped, grouped_images_index)
if do_pad:
processed_images = self.pad(processed_images, pad_size=pad_size, disable_grouping=disable_grouping)
processed_images = torch.stack(processed_images, dim=0) if return_tensors else processed_images
return BatchFeature(data={"pixel_values": processed_images}, tensor_type=return_tensors)
def to_dict(self):
encoder_dict = super().to_dict()
encoder_dict.pop("_valid_processor_keys", None)
encoder_dict.pop("_valid_kwargs_names", None)
return encoder_dict
| BaseImageProcessorFast |
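The fused rescale-and-normalize path in the row above folds the rescale factor into the image mean and std so a single normalize call covers both steps. A minimal self-contained check of that identity in plain PyTorch (the values are arbitrary and only for illustration, not taken from the row):

import torch

# Pixel values in [0, 255]; the usual pipeline rescales by 1/255, then normalizes.
x = torch.tensor([0.0, 127.5, 255.0])
mean, std, scale = 0.5, 0.5, 1 / 255

separate = ((x * scale) - mean) / std        # rescale, then normalize
fused = (x - mean / scale) / (std / scale)   # one normalize with mean/std scaled by 1/scale
assert torch.allclose(separate, fused)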
python | django__django | tests/flatpages_tests/test_templatetags.py | {
"start": 259,
"end": 6996
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain="example.com", name="example.com")
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url="/flatpage/",
title="A Flatpage",
content="Isn't it flat!",
enable_comments=False,
template_name="",
registration_required=False,
)
cls.fp2 = FlatPage.objects.create(
url="/location/flatpage/",
title="A Nested Flatpage",
content="Isn't it flat and deep!",
enable_comments=False,
template_name="",
registration_required=False,
)
cls.fp3 = FlatPage.objects.create(
url="/sekrit/",
title="Sekrit Flatpage",
content="Isn't it sekrit!",
enable_comments=False,
template_name="",
registration_required=True,
)
cls.fp4 = FlatPage.objects.create(
url="/location/sekrit/",
title="Sekrit Nested Flatpage",
content="Isn't it sekrit and deep!",
enable_comments=False,
template_name="",
registration_required=True,
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
def test_get_flatpages_tag(self):
"""
The flatpage template tag retrieves unregistered flatpages by default.
"""
out = Template(
"{% load flatpages %}"
"{% get_flatpages as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
def test_get_flatpages_tag_for_anon_user(self):
"""
The flatpage template tag retrieves unregistered flatpages for an
anonymous user.
"""
out = Template(
"{% load flatpages %}"
"{% get_flatpages for anonuser as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({"anonuser": AnonymousUser()}))
self.assertEqual(out, "A Flatpage,A Nested Flatpage,")
def test_get_flatpages_tag_for_user(self):
"""
The flatpage template tag retrieves all flatpages for an authenticated
user
"""
me = User.objects.create_user("testuser", "test@example.com", "s3krit")
out = Template(
"{% load flatpages %}"
"{% get_flatpages for me as flatpages %}"
"{% for page in flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({"me": me}))
self.assertEqual(
out, "A Flatpage,A Nested Flatpage,Sekrit Nested Flatpage,Sekrit Flatpage,"
)
def test_get_flatpages_with_prefix(self):
"""
The flatpage template tag retrieves unregistered prefixed flatpages by
default
"""
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context())
self.assertEqual(out, "A Nested Flatpage,")
def test_get_flatpages_with_prefix_for_anon_user(self):
"""
The flatpage template tag retrieves unregistered prefixed flatpages for
an anonymous user.
"""
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for anonuser as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({"anonuser": AnonymousUser()}))
self.assertEqual(out, "A Nested Flatpage,")
def test_get_flatpages_with_prefix_for_user(self):
"""
The flatpage template tag retrieves prefixed flatpages for an
authenticated user.
"""
me = User.objects.create_user("testuser", "test@example.com", "s3krit")
out = Template(
"{% load flatpages %}"
"{% get_flatpages '/location/' for me as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({"me": me}))
self.assertEqual(out, "A Nested Flatpage,Sekrit Nested Flatpage,")
def test_get_flatpages_with_variable_prefix(self):
"The prefix for the flatpage template tag can be a template variable"
out = Template(
"{% load flatpages %}"
"{% get_flatpages location_prefix as location_flatpages %}"
"{% for page in location_flatpages %}"
"{{ page.title }},"
"{% endfor %}"
).render(Context({"location_prefix": "/location/"}))
self.assertEqual(out, "A Nested Flatpage,")
def test_parsing_errors(self):
"There are various ways that the flatpages template tag won't parse"
def render(t):
return Template(t).render(Context())
msg = (
"get_flatpages expects a syntax of get_flatpages "
"['url_starts_with'] [for user] as context_name"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
render("{% load flatpages %}{% get_flatpages %}")
with self.assertRaisesMessage(TemplateSyntaxError, msg):
render("{% load flatpages %}{% get_flatpages as %}")
with self.assertRaisesMessage(TemplateSyntaxError, msg):
render("{% load flatpages %}{% get_flatpages cheesecake flatpages %}")
with self.assertRaisesMessage(TemplateSyntaxError, msg):
render("{% load flatpages %}{% get_flatpages as flatpages asdf %}")
with self.assertRaisesMessage(TemplateSyntaxError, msg):
render(
"{% load flatpages %}{% get_flatpages cheesecake user as flatpages %}"
)
with self.assertRaisesMessage(TemplateSyntaxError, msg):
render("{% load flatpages %}{% get_flatpages for user as flatpages asdf %}")
with self.assertRaisesMessage(TemplateSyntaxError, msg):
render(
"{% load flatpages %}"
"{% get_flatpages prefix for user as flatpages asdf %}"
)
| FlatpageTemplateTagTests |
python | django__django | tests/migrate_signals/tests.py | {
"start": 751,
"end": 1986
} | class ____:
"""
Special receiver to handle the fact that the test runner calls migrate for
several databases, and several times for some of them.
"""
def __init__(self, signal):
self.signal = signal
self.call_counter = 0
self.call_args = None
self.signal.connect(self, sender=APP_CONFIG)
def __call__(self, signal, sender, **kwargs):
# Although test runner calls migrate for several databases,
# testing for only one of them is quite sufficient.
if kwargs["using"] == MIGRATE_DATABASE:
self.call_counter += 1
self.call_args = kwargs
# we need to test only one call of migrate
self.signal.disconnect(self, sender=APP_CONFIG)
# We connect the receivers here rather than in the unit test code because they
# must be connected before the test runner creates the database. That is, the
# sequence of actions is:
#
# 1. Test runner imports this module.
# 2. We connect the receivers.
# 3. Test runner calls migrate to create the default database.
# 4. Test runner executes our unit test code.
pre_migrate_receiver = OneTimeReceiver(signals.pre_migrate)
post_migrate_receiver = OneTimeReceiver(signals.post_migrate)
| OneTimeReceiver |
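A hedged sketch of how the module-level receivers above are typically consumed; the test class name and the exact assertions are assumptions for illustration, not taken from the row:

from django.test import TestCase

class MigrateSignalTests(TestCase):
    # Assumes pre_migrate_receiver, post_migrate_receiver and MIGRATE_DATABASE
    # from the module above are importable here, and that migrate already ran.
    def test_receivers_called_once(self):
        self.assertEqual(pre_migrate_receiver.call_counter, 1)
        self.assertEqual(post_migrate_receiver.call_counter, 1)
        self.assertEqual(pre_migrate_receiver.call_args["using"], MIGRATE_DATABASE)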
python | tiangolo__fastapi | tests/test_security_api_key_query.py | {
"start": 215,
"end": 1923
} | class ____(BaseModel):
username: str
def get_current_user(oauth_header: str = Security(api_key)):
user = User(username=oauth_header)
return user
@app.get("/users/me")
def read_current_user(current_user: User = Depends(get_current_user)):
return current_user
client = TestClient(app)
def test_security_api_key():
response = client.get("/users/me?key=secret")
assert response.status_code == 200, response.text
assert response.json() == {"username": "secret"}
def test_security_api_key_no_key():
response = client.get("/users/me")
assert response.status_code == 401, response.text
assert response.json() == {"detail": "Not authenticated"}
assert response.headers["WWW-Authenticate"] == "APIKey"
def test_openapi_schema():
response = client.get("/openapi.json")
assert response.status_code == 200, response.text
assert response.json() == {
"openapi": "3.1.0",
"info": {"title": "FastAPI", "version": "0.1.0"},
"paths": {
"/users/me": {
"get": {
"responses": {
"200": {
"description": "Successful Response",
"content": {"application/json": {"schema": {}}},
}
},
"summary": "Read Current User",
"operationId": "read_current_user_users_me_get",
"security": [{"APIKeyQuery": []}],
}
}
},
"components": {
"securitySchemes": {
"APIKeyQuery": {"type": "apiKey", "name": "key", "in": "query"}
}
},
}
| User |
python | rapidsai__cudf | python/cudf/cudf/core/column/decimal.py | {
"start": 2106,
"end": 14559
} | class ____(NumericalBaseColumn):
"""Base column for decimal32, decimal64 or decimal128 columns"""
_VALID_BINARY_OPERATIONS = BinaryOperand._SUPPORTED_BINARY_OPERATIONS
def __init__(
self,
plc_column: plc.Column,
size: int,
dtype: DecimalDtype,
offset: int,
null_count: int,
exposed: bool,
) -> None:
if not isinstance(dtype, DecimalDtype):
raise ValueError(f"{dtype=} must be a DecimalDtype instance")
super().__init__(
plc_column=plc_column,
size=size,
dtype=dtype,
offset=offset,
null_count=null_count,
exposed=exposed,
)
@property
def __cuda_array_interface__(self) -> Mapping[str, Any]:
raise NotImplementedError(
"Decimals are not yet supported via `__cuda_array_interface__`"
)
@classmethod
def _from_32_64_arrow(
cls,
data: pa.Array | pa.ChunkedArray,
*,
view_type: Literal["int32", "int64"],
plc_type: plc.TypeId,
step: int,
) -> Self:
# Can remove when pyarrow 19 is the minimum version
# Handle ChunkedArray by combining chunks first
if isinstance(data, pa.ChunkedArray):
data = data.combine_chunks()
mask_buf, data_buf = data.buffers()
if data_buf is None:
# If data_buf is None, create an empty column
plc_column = plc.Column(
data_type=plc.DataType(plc_type, -data.type.scale),
size=0,
data=None,
mask=None,
null_count=0,
offset=0,
children=[],
)
else:
rmm_data_buffer = rmm.DeviceBuffer.to_device(
np.frombuffer(data_buf)
.view(view_type)[::step]
.copy()
.view("uint8")
)
plc_column = plc.Column.from_rmm_buffer(
rmm_data_buffer,
plc.DataType(plc_type, -data.type.scale),
len(data),
[],
)
if mask_buf is not None and data_buf is not None:
mask_size = plc.null_mask.bitmask_allocation_size_bytes(len(data))
if mask_buf.size < mask_size:
rmm_mask_buffer = rmm.DeviceBuffer(size=mask_size)
rmm_mask_buffer.copy_from_host(
np.asarray(mask_buf).view("uint8")
)
else:
rmm_mask_buffer = rmm.DeviceBuffer.to_device(
np.frombuffer(mask_buf).view("uint8")
)
plc_column = plc_column.with_mask(
plc.gpumemoryview(rmm_mask_buffer), data.null_count
)
column = cls.from_pylibcudf(plc_column)
column.dtype.precision = data.type.precision # type: ignore[union-attr]
return column
def element_indexing(self, index: int) -> Decimal | None:
result = super().element_indexing(index)
if isinstance(result, pa.Scalar):
return result.as_py()
return result
def as_decimal_column(
self,
dtype: DecimalDtype,
) -> DecimalBaseColumn:
if isinstance(dtype, DecimalDtype) and dtype.scale < self.dtype.scale: # type: ignore[union-attr]
warnings.warn(
"cuDF truncates when downcasting decimals to a lower scale. "
"To round, use Series.round() or DataFrame.round()."
)
if dtype == self.dtype:
return self
return self.cast(dtype=dtype) # type: ignore[return-value]
def as_string_column(self, dtype: DtypeObj) -> StringColumn:
if cudf.get_option("mode.pandas_compatible"):
if isinstance(dtype, np.dtype) and dtype.kind == "O":
raise TypeError(
f"Cannot cast a decimal from {self.dtype} to {dtype}"
)
if len(self) > 0:
with acquire_spill_lock():
plc_column = (
plc.strings.convert.convert_fixed_point.from_fixed_point(
self.to_pylibcudf(mode="read"),
)
)
return type(self).from_pylibcudf(plc_column) # type: ignore[return-value]
else:
return cast(
cudf.core.column.StringColumn,
cudf.core.column.column_empty(0, dtype=CUDF_STRING_DTYPE),
)
def __pow__(self, other: ColumnBinaryOperand) -> ColumnBase:
if isinstance(other, int):
if other == 0:
res = cudf.core.column.as_column(
1, dtype=self.dtype, length=len(self)
)
if self.nullable:
res = res.set_mask(self.mask)
return res
elif other < 0:
raise TypeError("Power of negative integers not supported.")
res = self
for _ in range(other - 1):
res = self * res
return res
else:
raise NotImplementedError(
f"__pow__ of types {self.dtype} and {type(other)} is "
"not yet implemented."
)
# Decimals in libcudf don't support truediv, see
# https://github.com/rapidsai/cudf/pull/7435 for explanation.
def __truediv__(self, other: ColumnBinaryOperand) -> ColumnBase:
return self._binaryop(other, "__div__")
def __rtruediv__(self, other: ColumnBinaryOperand) -> ColumnBase:
return self._binaryop(other, "__rdiv__")
def _binaryop(self, other: ColumnBinaryOperand, op: str) -> ColumnBase:
reflect, op = self._check_reflected_op(op)
# Inline _normalize_binop_operand functionality
if isinstance(other, ColumnBase):
if not isinstance(other, NumericalBaseColumn):
return NotImplemented
elif other.dtype.kind == "f":
return self.astype(other.dtype)._binaryop(other, op)
elif other.dtype.kind == "b":
raise TypeError(
"Decimal columns only support binary operations with "
"integer numerical columns."
)
elif other.dtype.kind in {"i", "u"}:
other = other.astype(
type(self.dtype)(self.dtype.MAX_PRECISION, 0) # type: ignore[call-overload, union-attr]
)
elif not isinstance(self.dtype, other.dtype.__class__):
# This branch occurs if we have a DecimalBaseColumn of a
# different size (e.g. 64 instead of 32).
if _same_precision_and_scale(self.dtype, other.dtype): # type: ignore[arg-type]
other = other.astype(self.dtype)
other_cudf_dtype = other.dtype
elif isinstance(other, (int, Decimal)):
if cudf.get_option("mode.pandas_compatible") and not isinstance(
self.dtype, DecimalDtype
):
raise NotImplementedError(
"binary operations with arbitrary decimal types are not supported in pandas compatibility mode"
)
other_cudf_dtype = self.dtype._from_decimal(Decimal(other)) # type: ignore[union-attr]
elif isinstance(other, float):
return self._binaryop(as_column(other, length=len(self)), op)
elif is_na_like(other):
other = pa.scalar(None, type=cudf_dtype_to_pa_type(self.dtype))
other_cudf_dtype = self.dtype
else:
return NotImplemented
if reflect:
lhs_dtype = other_cudf_dtype
rhs_dtype = self.dtype
lhs = other
rhs = self
else:
lhs_dtype = self.dtype
rhs_dtype = other_cudf_dtype
lhs = self
rhs = other # type: ignore[assignment]
# Binary Arithmetics between decimal columns. `Scale` and `precision`
# are computed outside of libcudf
if op in {"__add__", "__sub__", "__mul__", "__div__"}:
output_type = _get_decimal_type(lhs_dtype, rhs_dtype, op) # type: ignore[arg-type]
new_lhs_dtype = type(output_type)(
lhs_dtype.precision, # type: ignore[union-attr]
lhs_dtype.scale, # type: ignore[union-attr]
)
new_rhs_dtype = type(output_type)(
rhs_dtype.precision, # type: ignore[union-attr]
rhs_dtype.scale, # type: ignore[union-attr]
)
lhs_binop: plc.Scalar | ColumnBase
rhs_binop: plc.Scalar | ColumnBase
if isinstance(lhs, (int, Decimal)):
lhs_binop = _to_plc_scalar(lhs, new_lhs_dtype)
else:
lhs_binop = lhs.astype(new_lhs_dtype)
if isinstance(rhs, (int, Decimal)):
rhs_binop = _to_plc_scalar(rhs, new_rhs_dtype)
else:
rhs_binop = rhs.astype(new_rhs_dtype)
result = binaryop.binaryop(lhs_binop, rhs_binop, op, output_type)
# libcudf doesn't support precision, so result.dtype doesn't
# maintain output_type.precision
result.dtype.precision = output_type.precision # type: ignore[union-attr]
return result
elif op in {
"__eq__",
"__ne__",
"__lt__",
"__gt__",
"__le__",
"__ge__",
}:
lhs_comp: plc.Scalar | ColumnBase = lhs # type: ignore[assignment]
rhs_comp: plc.Scalar | ColumnBase = (
_to_plc_scalar(rhs, self.dtype) # type: ignore[arg-type]
if isinstance(rhs, (int, Decimal))
else rhs
)
result = binaryop.binaryop(
lhs_comp,
rhs_comp,
op,
get_dtype_of_same_kind(self.dtype, np.dtype(np.bool_)),
)
if cudf.get_option("mode.pandas_compatible"):
result = result.fillna(op == "__ne__")
return result
else:
raise TypeError(
f"{op} not supported for the following dtypes: "
f"{self.dtype}, {other_cudf_dtype}"
)
def _cast_setitem_value(self, value: Any) -> plc.Scalar | ColumnBase:
if isinstance(value, np.integer):
value = value.item()
if is_scalar(value):
return self._scalar_to_plc_scalar(value)
return super()._cast_setitem_value(value)
def _scalar_to_plc_scalar(self, scalar: ScalarLike) -> plc.Scalar:
"""Return a pylibcudf.Scalar that matches the type of self.dtype"""
if not isinstance(scalar, pa.Scalar):
# e.g. casting an int to a decimal type isn't allowed, but is OK in the constructor
pa_scalar = pa.scalar(
scalar, type=cudf_dtype_to_pa_type(self.dtype)
)
else:
pa_scalar = scalar.cast(cudf_dtype_to_pa_type(self.dtype))
plc_scalar = pa_scalar_to_plc_scalar(pa_scalar)
if isinstance(self.dtype, (Decimal32Dtype, Decimal64Dtype)):
# pyarrow.Scalar only supports Decimal128 so conversion
# from pyarrow would only return a pylibcudf.Scalar with Decimal128
col = ColumnBase.from_pylibcudf(
plc.Column.from_scalar(plc_scalar, 1)
).astype(self.dtype)
return plc.copying.get_element(col.to_pylibcudf(mode="read"), 0)
return plc_scalar
def _validate_fillna_value(
self, fill_value: ScalarLike | ColumnLike
) -> plc.Scalar | ColumnBase:
"""Align fill_value for .fillna based on column type."""
if isinstance(fill_value, (int, Decimal)):
return super()._validate_fillna_value(fill_value)
elif isinstance(fill_value, ColumnBase) and (
isinstance(self.dtype, DecimalDtype) or self.dtype.kind in "iu"
):
return super()._validate_fillna_value(fill_value)
raise TypeError(
"Decimal columns only support using fillna with decimal and "
"integer values"
)
def as_numerical_column(self, dtype: np.dtype) -> NumericalColumn:
return self.cast(dtype=dtype) # type: ignore[return-value]
| DecimalBaseColumn |
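The decimal binary ops above compute the output precision and scale outside libcudf (via _get_decimal_type, which is not shown in the row). As a loose analogy for why repeated multiplication, as used in __pow__ above, keeps widening the scale, the same effect shows up with Python's stdlib Decimal, where each product's scale is the sum of the operands' scales:

from decimal import Decimal

x = Decimal("1.50")   # scale 2
print(x * x)          # 2.2500    (scale 4)
print(x * x * x)      # 3.375000  (scale 6)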
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 19387,
"end": 19421
} | class ____(Cov):
std = True
| Corr |
python | pandas-dev__pandas | pandas/tests/plotting/frame/test_frame_groupby.py | {
"start": 174,
"end": 2545
} | class ____:
def _assert_ytickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
_check_visible(ax.get_yticklabels(), visible=exp)
def _assert_xtickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
_check_visible(ax.get_xticklabels(), visible=exp)
@pytest.mark.parametrize(
"kwargs, expected",
[
# behavior without keyword
({}, [True, False, True, False]),
# set sharey=True should be identical
({"sharey": True}, [True, False, True, False]),
# sharey=False, all yticklabels should be visible
({"sharey": False}, [True, True, True, True]),
],
)
def test_groupby_boxplot_sharey(self, kwargs, expected):
# https://github.com/pandas-dev/pandas/issues/20968
# sharey can now be switched; check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
axes = df.groupby("c").boxplot(**kwargs)
self._assert_ytickslabels_visibility(axes, expected)
@pytest.mark.parametrize(
"kwargs, expected",
[
# behavior without keyword
({}, [True, True, True, True]),
# set sharex=False should be identical
({"sharex": False}, [True, True, True, True]),
# sharex=True, xticklabels should be visible
# only for bottom plots
({"sharex": True}, [False, False, True, True]),
],
)
def test_groupby_boxplot_sharex(self, kwargs, expected):
# https://github.com/pandas-dev/pandas/issues/20968
# sharex can now be switched; check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
axes = df.groupby("c").boxplot(**kwargs)
self._assert_xtickslabels_visibility(axes, expected)
| TestDataFramePlotsGroupby |
python | langchain-ai__langchain | libs/partners/anthropic/tests/integration_tests/test_standard.py | {
"start": 438,
"end": 4289
} | class ____(ChatModelIntegrationTests):
"""Use standard chat model integration tests against the `ChatAnthropic` class."""
@property
def chat_model_class(self) -> type[BaseChatModel]:
return ChatAnthropic
@property
def chat_model_params(self) -> dict:
return {"model": MODEL}
@property
def supports_image_inputs(self) -> bool:
return True
@property
def supports_image_urls(self) -> bool:
return True
@property
def supports_pdf_inputs(self) -> bool:
return True
@property
def supports_image_tool_message(self) -> bool:
return True
@property
def supports_pdf_tool_message(self) -> bool:
return True
@property
def supports_anthropic_inputs(self) -> bool:
return True
@property
def enable_vcr_tests(self) -> bool:
return True
@property
def supported_usage_metadata_details(
self,
) -> dict[
Literal["invoke", "stream"],
list[
Literal[
"audio_input",
"audio_output",
"reasoning_output",
"cache_read_input",
"cache_creation_input",
]
],
]:
return {
"invoke": ["cache_read_input", "cache_creation_input"],
"stream": ["cache_read_input", "cache_creation_input"],
}
def invoke_with_cache_creation_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model=MODEL, # type: ignore[call-arg]
)
with Path.open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
},
],
},
],
stream,
)
def invoke_with_cache_read_input(self, *, stream: bool = False) -> AIMessage:
llm = ChatAnthropic(
model=MODEL, # type: ignore[call-arg]
)
with Path.open(REPO_ROOT_DIR / "README.md") as f:
readme = f.read()
input_ = f"""What's langchain? Here's the langchain README:
{readme}
"""
# invoke twice so first invocation is cached
_invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
},
],
},
],
stream,
)
return _invoke(
llm,
[
{
"role": "user",
"content": [
{
"type": "text",
"text": input_,
"cache_control": {"type": "ephemeral"},
},
],
},
],
stream,
)
def _invoke(llm: ChatAnthropic, input_: list, stream: bool) -> AIMessage: # noqa: FBT001
if stream:
full = None
for chunk in llm.stream(input_):
full = cast("BaseMessageChunk", chunk) if full is None else full + chunk
return cast("AIMessage", full)
return cast("AIMessage", llm.invoke(input_))
| TestAnthropicStandard |
python | pytorch__pytorch | test/package/test_load_bc_packages.py | {
"start": 446,
"end": 1733
} | class ____(PackageTestCase):
"""Tests for checking loading has backwards compatibility"""
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_load_bc_packages_nn_module(self):
"""Tests for backwards compatible nn module"""
importer1 = PackageImporter(f"{packaging_directory}/test_nn_module.pt")
importer1.load_pickle("nn_module", "nn_module.pkl")
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_load_bc_packages_torchscript_module(self):
"""Tests for backwards compatible torchscript module"""
importer2 = PackageImporter(f"{packaging_directory}/test_torchscript_module.pt")
importer2.load_pickle("torchscript_module", "torchscript_module.pkl")
@skipIf(
IS_FBCODE or IS_SANDCASTLE,
"Tests that use temporary files are disabled in fbcode",
)
def test_load_bc_packages_fx_module(self):
"""Tests for backwards compatible fx module"""
importer3 = PackageImporter(f"{packaging_directory}/test_fx_module.pt")
importer3.load_pickle("fx_module", "fx_module.pkl")
if __name__ == "__main__":
run_tests()
| TestLoadBCPackages |
python | tensorflow__tensorflow | tensorflow/python/data/kernel_tests/flat_map_test.py | {
"start": 17690,
"end": 19707
} | class ____(
test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
repetitions=[1, 2],
seed=[None, 42],
reshuffle_each_iteration=[True, False])))
def test(
self,
repetitions: int,
seed: Optional[int],
reshuffle_each_iteration: bool):
dataset = dataset_ops.Dataset.from_tensor_slices(
[[1, 2, 3], [4, 5, 6], [7, 8, 9]])
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)
if repetitions > 1:
dataset = dataset.repeat(repetitions)
dataset = dataset.prefetch(buffer_size=dataset_ops.AUTOTUNE)
dataset = global_shuffle_op._global_shuffle(
dataset, seed=seed, reshuffle_each_iteration=reshuffle_each_iteration)
expected = list(range(1, 10)) * repetitions
dataset_output = self.getDatasetOutput(
dataset, requires_initialization=True)
self.assertCountEqual(dataset_output, expected)
self.assertNotEqual(dataset_output, expected)
self.assertLen(dataset_output, self.evaluate(dataset.cardinality()))
@combinations.generate(test_base.default_test_combinations())
def testInputCardinalityTooLarge(self):
dataset = dataset_ops.Dataset.from_tensor_slices([[i] for i in range(101)])
dataset = dataset.flat_map(dataset_ops.Dataset.from_tensor_slices)
with self.assertRaisesRegex(
errors.FailedPreconditionError,
"The cardinality of the input to FlatMapDataset is too large to support"
" global shuffling",
):
dataset = global_shuffle_op._global_shuffle(dataset, seed=42)
self.getDatasetOutput(dataset, requires_initialization=True)
@unittest.skip(
"TODO: b/355241367 - `flat_map_dataset_op.cc` still needs to be fixed."
" Please use concatenate dataset op plus global shuffling instead."
)
| FlatMapGlobalShuffleTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-azure-blob-storage/source_azure_blob_storage/stream_reader.py | {
"start": 2352,
"end": 2834
} | class ____(Oauth2Authenticator, TokenCredential):
"""
Authenticator for Azure Blob Storage SDK to align with azure.core.credentials.TokenCredential protocol
"""
def get_token(self, *args, **kwargs) -> AccessToken:
"""Parent class handles Oauth Refresh token logic.
`expires_on` is ignored and set to year 2222 to align with protocol.
"""
return AccessToken(token=self.get_access_token(), expires_on=7952342400)
| AzureOauth2Authenticator |
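A quick standard-library check of the hard-coded expires_on value above, confirming the docstring's year-2222 claim:

from datetime import datetime, timezone

print(datetime.fromtimestamp(7952342400, tz=timezone.utc))
# 2222-01-01 00:00:00+00:00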
python | coleifer__peewee | peewee.py | {
"start": 55569,
"end": 55786
} | class ____(Node):
__slots__ = ('_name',)
def __init__(self, name):
self._name = name
def __getattr__(self, attr):
return NamespaceAttribute(self, attr)
__getitem__ = __getattr__
| _Namespace |
python | huggingface__transformers | src/transformers/models/mask2former/modeling_mask2former.py | {
"start": 44842,
"end": 49367
} | class ____(nn.Module):
"""
Multiscale deformable attention as proposed in Deformable DETR.
"""
def __init__(self, embed_dim: int, num_heads: int, n_levels: int, n_points: int):
super().__init__()
if embed_dim % num_heads != 0:
raise ValueError(
f"embed_dim (d_model) must be divisible by num_heads, but got {embed_dim} and {num_heads}"
)
dim_per_head = embed_dim // num_heads
# check if dim_per_head is power of 2
if not ((dim_per_head & (dim_per_head - 1) == 0) and dim_per_head != 0):
warnings.warn(
"You'd better set embed_dim (d_model) in DeformableDetrMultiscaleDeformableAttention to make the"
" dimension of each attention head a power of 2 which is more efficient in the authors' CUDA"
" implementation."
)
self.im2col_step = 128
self.d_model = embed_dim
self.n_levels = n_levels
self.n_heads = num_heads
self.n_points = n_points
self.sampling_offsets = nn.Linear(embed_dim, num_heads * n_levels * n_points * 2)
self.attention_weights = nn.Linear(embed_dim, num_heads * n_levels * n_points)
self.value_proj = nn.Linear(embed_dim, embed_dim)
self.output_proj = nn.Linear(embed_dim, embed_dim)
def with_pos_embed(self, tensor: torch.Tensor, position_embeddings: Optional[Tensor]):
return tensor if position_embeddings is None else tensor + position_embeddings
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
encoder_hidden_states=None,
encoder_attention_mask=None,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes_list=None,
level_start_index=None,
output_attentions: bool = False,
):
# add position embeddings to the hidden states before projecting to queries and keys
if position_embeddings is not None:
hidden_states = self.with_pos_embed(hidden_states, position_embeddings)
batch_size, num_queries, _ = hidden_states.shape
batch_size, sequence_length, _ = encoder_hidden_states.shape
total_elements = sum(height * width for height, width in spatial_shapes_list)
if total_elements != sequence_length:
raise ValueError(
"Make sure to align the spatial shapes with the sequence length of the encoder hidden states"
)
value = self.value_proj(encoder_hidden_states)
if attention_mask is not None:
# we invert the attention_mask
value = value.masked_fill(attention_mask[..., None], float(0))
value = value.view(batch_size, sequence_length, self.n_heads, self.d_model // self.n_heads)
sampling_offsets = self.sampling_offsets(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points, 2
)
attention_weights = self.attention_weights(hidden_states).view(
batch_size, num_queries, self.n_heads, self.n_levels * self.n_points
)
attention_weights = nn.functional.softmax(attention_weights, -1).view(
batch_size, num_queries, self.n_heads, self.n_levels, self.n_points
)
# batch_size, num_queries, n_heads, n_levels, n_points, 2
if reference_points.shape[-1] == 2:
offset_normalizer = torch.tensor(
[[shape[1], shape[0]] for shape in spatial_shapes_list],
dtype=torch.long,
device=reference_points.device,
)
sampling_locations = (
reference_points[:, :, None, :, None, :]
+ sampling_offsets / offset_normalizer[None, None, None, :, None, :]
)
elif reference_points.shape[-1] == 4:
sampling_locations = (
reference_points[:, :, None, :, None, :2]
+ sampling_offsets / self.n_points * reference_points[:, :, None, :, None, 2:] * 0.5
)
else:
raise ValueError(f"Last dim of reference_points must be 2 or 4, but got {reference_points.shape[-1]}")
output = multi_scale_deformable_attention(value, spatial_shapes_list, sampling_locations, attention_weights)
output = self.output_proj(output)
return output, attention_weights
| Mask2FormerPixelDecoderEncoderMultiscaleDeformableAttention |
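For the 2-coordinate reference-point branch above, a minimal numeric sketch of how a predicted pixel-space offset is folded back into normalized coordinates (single level and single sampling point; the 32x32 spatial shape is an arbitrary example, not taken from the row):

import torch

spatial_shapes_list = [(32, 32)]   # (height, width) of one feature level
offset_normalizer = torch.tensor([[w, h] for h, w in spatial_shapes_list], dtype=torch.float)

reference_point = torch.tensor([0.5, 0.5])    # normalized (x, y) location of a query
sampling_offset = torch.tensor([4.0, -2.0])   # predicted offset, in feature-map cells

sampling_location = reference_point + sampling_offset / offset_normalizer[0]
print(sampling_location)                      # tensor([0.6250, 0.4375])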
python | readthedocs__readthedocs.org | readthedocs/api/v3/tests/mixins.py | {
"start": 1413,
"end": 8980
} | class ____(TestCase):
fixtures = []
maxDiff = None # So we get an actual diff when it fails
def setUp(self):
self.created = make_aware(datetime.datetime(2019, 4, 29, 10, 0, 0))
self.modified = make_aware(datetime.datetime(2019, 4, 29, 12, 0, 0))
self.me = fixture.get(
User,
date_joined=self.created,
username="testuser",
projects=[],
)
self.token = fixture.get(Token, key="me", user=self.me)
# Defining all the defaults helps to avoid creating ghost / unwanted
# objects (like a Project for translations/subprojects)
self.project = fixture.get(
Project,
id=1,
pub_date=self.created,
modified_date=self.modified,
description="Project description",
repo="https://github.com/rtfd/project",
project_url="http://project.com",
name="project",
slug="project",
related_projects=[],
main_language_project=None,
users=[self.me],
versions=[],
external_builds_enabled=False,
external_builds_privacy_level=PUBLIC,
privacy_level=PUBLIC,
)
for tag in ("tag", "project", "test"):
self.project.tags.add(tag)
self.redirect = fixture.get(
Redirect,
create_dt=self.created,
update_dt=self.modified,
from_url="/docs/",
to_url="/documentation/",
redirect_type="page",
project=self.project,
)
self.version = fixture.get(
Version,
slug="v1.0",
verbose_name="v1.0",
identifier="a1b2c3",
project=self.project,
hidden=False,
active=True,
built=True,
type=TAG,
has_pdf=True,
has_epub=True,
has_htmlzip=True,
privacy_level=PUBLIC,
)
self.build = fixture.get(
Build,
id=1,
date=self.created,
type="html",
state="finished",
error="",
success=True,
_config={"property": "test value"},
version=self.version,
project=self.project,
builder="builder01",
commit="a1b2c3",
length=60,
)
self.other = fixture.get(User, projects=[])
self.others_token = fixture.get(Token, key="other", user=self.other)
self.others_project = fixture.get(
Project,
id=2,
slug="others-project",
name="others-project",
related_projects=[],
main_language_project=None,
users=[self.other],
versions=[],
external_builds_privacy_level=PUBLIC,
privacy_level=PUBLIC,
)
self.others_version = self.others_project.versions.get(slug=LATEST)
self.others_build = fixture.get(
Build,
date=self.created,
type="html",
state="finished",
error="",
success=True,
_config={"property": "test value"},
version=self.others_version,
project=self.others_project,
builder="builder01",
commit="a1b2c3",
length=60,
)
# Make all non-html true so responses are complete
self.project.versions.update(
has_pdf=True,
has_epub=True,
has_htmlzip=True,
privacy_level=PUBLIC,
)
self.organization = fixture.get(
Organization,
id=1,
pub_date=self.created,
modified_date=self.modified,
name="organization",
slug="organization",
owners=[self.me],
)
self.organization.projects.add(self.project)
self.notification_organization = fixture.get(
Notification,
attached_to_content_type=ContentType.objects.get_for_model(
self.organization
),
attached_to_id=self.organization.pk,
message_id=MESSAGE_ORGANIZATION_DISABLED,
)
self.notification_project = fixture.get(
Notification,
attached_to_content_type=ContentType.objects.get_for_model(self.project),
attached_to_id=self.project.pk,
message_id=MESSAGE_PROJECT_SKIP_BUILDS,
)
self.notification_build = fixture.get(
Notification,
attached_to_content_type=ContentType.objects.get_for_model(self.build),
attached_to_id=self.build.pk,
message_id=BuildCancelled.CANCELLED_BY_USER,
)
self.notification_user = fixture.get(
Notification,
attached_to_content_type=ContentType.objects.get_for_model(self.me),
attached_to_id=self.me.pk,
message_id=MESSAGE_EMAIL_VALIDATION_PENDING,
)
self.notification_others_build = fixture.get(
Notification,
attached_to_content_type=ContentType.objects.get_for_model(Build),
attached_to_id=self.others_build.pk,
message_id=BuildCancelled.CANCELLED_BY_USER,
)
self.client = APIClient()
def tearDown(self):
# Cleanup cache to avoid throttling on tests
cache.clear()
def _create_new_project(self):
"""Helper to create a project with all the fields set."""
return fixture.get(
Project,
pub_date=self.created,
modified_date=self.modified,
description="Project description",
repo="https://github.com/rtfd/project",
project_url="http://project.com",
name="new-project",
slug="new-project",
related_projects=[],
main_language_project=None,
users=[self.me],
versions=[],
external_builds_privacy_level=PUBLIC,
privacy_level=PUBLIC,
)
def _create_subproject(self):
"""Helper to create a sub-project with all the fields set."""
self.subproject = fixture.get(
Project,
pub_date=self.created,
modified_date=self.modified,
description="SubProject description",
repo="https://github.com/rtfd/subproject",
project_url="http://subproject.com",
name="subproject",
slug="subproject",
related_projects=[],
main_language_project=None,
users=[self.me],
versions=[],
external_builds_privacy_level=PUBLIC,
privacy_level=PUBLIC,
)
self.project_relationship = self.project.add_subproject(self.subproject)
def _get_response_dict(self, view_name, filepath=None):
filepath = filepath or __file__
filename = Path(filepath).absolute().parent / "responses" / f"{view_name}.json"
return json.load(open(filename))
def assertDictEqual(self, d1, d2):
"""
Show the differences between the dicts in a human readable way.
It's just a helper for debugging API responses.
"""
message = ""
try:
import datadiff
message = datadiff.diff(d1, d2)
except ImportError:
pass
return super().assertDictEqual(d1, d2, message)
| APIEndpointMixin |
python | mlflow__mlflow | mlflow/genai/datasets/databricks_evaluation_dataset_source.py | {
"start": 2407,
"end": 2570
} | class ____(DatabricksEvaluationDatasetSource):
@staticmethod
def _get_source_type() -> str:
return "databricks-uc-table"
| DatabricksUCTableDatasetSource |
python | getsentry__sentry | tests/sentry/seer/explorer/test_tools.py | {
"start": 29245,
"end": 44210
} | class ____(APITransactionTestCase, SnubaTestCase, OccurrenceTestMixin):
def _validate_event_timeseries(self, timeseries: dict):
assert isinstance(timeseries, dict)
assert "count()" in timeseries
assert "data" in timeseries["count()"]
assert isinstance(timeseries["count()"]["data"], list)
for item in timeseries["count()"]["data"]:
assert len(item) == 2
assert isinstance(item[0], int)
assert isinstance(item[1], list)
assert len(item[1]) == 1
assert isinstance(item[1][0], dict)
assert "count" in item[1][0]
assert isinstance(item[1][0]["count"], int)
@patch("sentry.models.group.get_recommended_event")
@patch("sentry.seer.explorer.tools.get_all_tags_overview")
def _test_get_ie_details_basic(
self,
mock_get_tags,
mock_get_recommended_event,
issue_id_type: Literal["int_id", "short_id", "none"],
):
"""Test the queries and response format for a group of error events, and multiple event types."""
mock_get_tags.return_value = {"tags_overview": [{"key": "test_tag", "top_values": []}]}
# Create events with shared stacktrace (should have same group)
events = []
event0_trace_id = uuid.uuid4().hex
for i in range(3):
data = load_data("python", timestamp=before_now(minutes=5 - i))
data["exception"] = {"values": [{"type": "Exception", "value": "Test exception"}]}
if i == 0:
data["contexts"] = data.get("contexts", {})
data["contexts"]["trace"] = {
"trace_id": event0_trace_id,
"span_id": "1" + uuid.uuid4().hex[:15],
}
event = self.store_event(data=data, project_id=self.project.id)
events.append(event)
mock_get_recommended_event.return_value = events[1]
group = events[0].group
assert isinstance(group, Group)
assert events[1].group_id == group.id
assert events[2].group_id == group.id
issue_id_param = (
group.qualified_short_id
if issue_id_type == "short_id"
else str(group.id) if issue_id_type == "int_id" else None
)
if issue_id_param is None:
valid_selected_events = [
uuid.UUID(events[1].event_id).hex, # no dashes
str(uuid.UUID(events[1].event_id)), # with dashes
]
invalid_selected_events = [
"oldest",
"latest",
"recommended",
events[1].event_id[:8],
"potato",
]
else:
valid_selected_events = [
"oldest",
"latest",
"recommended",
uuid.UUID(events[1].event_id).hex, # no dashes
str(uuid.UUID(events[1].event_id)), # with dashes
]
invalid_selected_events = [
events[1].event_id[:8],
"potato",
]
for selected_event in valid_selected_events:
result = get_issue_and_event_details(
issue_id=issue_id_param,
organization_id=self.organization.id,
selected_event=selected_event,
)
assert result is not None
assert result["project_id"] == self.project.id
assert result["project_slug"] == self.project.slug
assert result["tags_overview"] == mock_get_tags.return_value
# Validate fields of the main issue payload.
assert isinstance(result["issue"], dict)
_IssueMetadata.parse_obj(result["issue"])
# Validate fields of the selected event.
event_dict = result["event"]
assert isinstance(event_dict, dict)
_SentryEventData.parse_obj(event_dict)
assert result["event_id"] == event_dict["id"]
# Check correct event is returned based on selected_event_type.
if selected_event == "oldest":
assert event_dict["id"] == events[0].event_id, selected_event
elif selected_event == "latest":
assert event_dict["id"] == events[-1].event_id, selected_event
elif selected_event == "recommended":
assert (
event_dict["id"] == mock_get_recommended_event.return_value.event_id
), selected_event
else:
assert (
uuid.UUID(event_dict["id"]).hex == uuid.UUID(selected_event).hex
), selected_event
# Check event_trace_id matches mocked trace context.
if event_dict["id"] == events[0].event_id:
assert events[0].trace_id == event0_trace_id
assert result["event_trace_id"] == event0_trace_id
else:
assert result["event_trace_id"] is None
# Validate timeseries dict structure.
self._validate_event_timeseries(result["event_timeseries"])
for selected_event in invalid_selected_events:
with pytest.raises(ValueError, match="badly formed hexadecimal UUID string"):
get_issue_and_event_details(
issue_id=issue_id_param,
organization_id=self.organization.id,
selected_event=selected_event,
)
def test_get_ie_details_basic_int_id(self):
self._test_get_ie_details_basic(issue_id_type="int_id")
def test_get_ie_details_basic_short_id(self):
self._test_get_ie_details_basic(issue_id_type="short_id")
def test_get_ie_details_basic_null_issue_id(self):
self._test_get_ie_details_basic(issue_id_type="none")
def test_get_ie_details_nonexistent_organization(self):
"""Test returns None when organization doesn't exist."""
# Create a valid group.
data = load_data("python", timestamp=before_now(minutes=5))
data["exception"] = {"values": [{"type": "Exception", "value": "Test exception"}]}
event = self.store_event(data=data, project_id=self.project.id)
group = event.group
assert isinstance(group, Group)
# Call with nonexistent organization ID.
result = get_issue_and_event_details(
issue_id=str(group.id),
organization_id=99999,
selected_event="latest",
)
assert result is None
def test_get_ie_details_nonexistent_issue(self):
"""Test returns None when the requested issue doesn't exist."""
# Call with nonexistent issue ID.
result = get_issue_and_event_details(
issue_id="99999",
organization_id=self.organization.id,
selected_event="latest",
)
assert result is None
@patch("sentry.models.group.get_oldest_or_latest_event")
@patch("sentry.models.group.get_recommended_event")
@patch("sentry.seer.explorer.tools.get_all_tags_overview")
def test_get_ie_details_no_event_found(
self, mock_get_tags, mock_get_recommended_event, mock_get_oldest_or_latest_event
):
"""Test returns None when issue is found but selected_event is not."""
mock_get_tags.return_value = {"tags_overview": [{"key": "test_tag", "top_values": []}]}
mock_get_recommended_event.return_value = None
mock_get_oldest_or_latest_event.return_value = None
# Create events with shared stacktrace (should have same group)
for i in range(2):
data = load_data("python", timestamp=before_now(minutes=5 - i))
data["exception"] = {"values": [{"type": "Exception", "value": "Test exception"}]}
event = self.store_event(data=data, project_id=self.project.id)
group = event.group
assert isinstance(group, Group)
for et in ["oldest", "latest", "recommended", uuid.uuid4().hex]:
result = get_issue_and_event_details(
issue_id=str(group.id),
organization_id=self.organization.id,
selected_event=et,
)
assert result is None, et
def test_get_ie_details_no_event_found_null_issue_id(self):
"""Test returns None when issue_id is not provided and selected_event is not found."""
_ = self.project # Create an active project.
result = get_issue_and_event_details(
issue_id=None,
organization_id=self.organization.id,
selected_event=uuid.uuid4().hex,
)
assert result is None
@patch("sentry.seer.explorer.tools.get_all_tags_overview")
def test_get_ie_details_tags_exception(self, mock_get_tags):
"""Test other fields are returned with null tags_overview when tag util fails."""
mock_get_tags.side_effect = Exception("Test exception")
# Create a valid group.
data = load_data("python", timestamp=before_now(minutes=5))
data["exception"] = {"values": [{"type": "Exception", "value": "Test exception"}]}
event = self.store_event(data=data, project_id=self.project.id)
group = event.group
assert isinstance(group, Group)
result = get_issue_and_event_details(
issue_id=str(group.id),
organization_id=self.organization.id,
selected_event="latest",
)
assert result is not None
assert result["tags_overview"] is None
assert "event_trace_id" in result
assert isinstance(result.get("project_id"), int)
assert isinstance(result.get("issue"), dict)
_IssueMetadata.parse_obj(result.get("issue", {}))
@patch("sentry.models.group.get_recommended_event")
@patch("sentry.seer.explorer.tools.get_all_tags_overview")
def test_get_ie_details_with_assigned_user(
self,
mock_get_tags,
mock_get_recommended_event,
):
mock_get_tags.return_value = {"tags_overview": [{"key": "test_tag", "top_values": []}]}
data = load_data("python", timestamp=before_now(minutes=5))
event = self.store_event(data=data, project_id=self.project.id)
mock_get_recommended_event.return_value = event
group = event.group
assert isinstance(group, Group)
# Create assignee.
GroupAssignee.objects.create(group=group, project=self.project, user_id=self.user.id)
result = get_issue_and_event_details(
issue_id=str(group.id),
organization_id=self.organization.id,
selected_event="recommended",
)
assert result is not None
md = _IssueMetadata.parse_obj(result["issue"])
assert md.assignedTo is not None
assert md.assignedTo.type == "user"
assert md.assignedTo.id == str(self.user.id)
assert md.assignedTo.email == self.user.email
assert md.assignedTo.name == self.user.get_display_name()
@patch("sentry.models.group.get_recommended_event")
@patch("sentry.seer.explorer.tools.get_all_tags_overview")
def test_get_ie_details_with_assigned_team(self, mock_get_tags, mock_get_recommended_event):
mock_get_tags.return_value = {"tags_overview": [{"key": "test_tag", "top_values": []}]}
data = load_data("python", timestamp=before_now(minutes=5))
event = self.store_event(data=data, project_id=self.project.id)
mock_get_recommended_event.return_value = event
group = event.group
assert isinstance(group, Group)
# Create assignee.
GroupAssignee.objects.create(group=group, project=self.project, team=self.team)
result = get_issue_and_event_details(
issue_id=str(group.id),
organization_id=self.organization.id,
selected_event="recommended",
)
assert result is not None
md = _IssueMetadata.parse_obj(result["issue"])
assert md.assignedTo is not None
assert md.assignedTo.type == "team"
assert md.assignedTo.id == str(self.team.id)
assert md.assignedTo.name == self.team.slug
assert md.assignedTo.email is None
@patch("sentry.seer.explorer.tools.client")
@patch("sentry.models.group.get_recommended_event")
@patch("sentry.seer.explorer.tools.get_all_tags_overview")
def test_get_ie_details_timeseries_resolution(
self,
mock_get_tags,
mock_get_recommended_event,
mock_api_client,
):
"""Test groups with different first_seen dates"""
mock_get_tags.return_value = {"tags_overview": [{"key": "test_tag", "top_values": []}]}
# Passthrough to real client - allows testing call args
mock_api_client.get.side_effect = client.get
for stats_period, interval in EVENT_TIMESERIES_RESOLUTIONS:
delta = parse_stats_period(stats_period)
assert delta is not None
if delta > timedelta(days=30):
# Skip the 90d test as the retention for testutils is 30d.
continue
# Set a first_seen date slightly newer than the stats period we're testing.
first_seen = datetime.now(UTC) - delta + timedelta(minutes=6, seconds=7)
data = load_data("python", timestamp=first_seen)
data["exception"] = {"values": [{"type": "Exception", "value": "Test exception"}]}
event = self.store_event(data=data, project_id=self.project.id)
mock_get_recommended_event.return_value = event
# Second newer event
data = load_data("python", timestamp=first_seen + timedelta(minutes=6, seconds=7))
data["exception"] = {"values": [{"type": "Exception", "value": "Test exception"}]}
self.store_event(data=data, project_id=self.project.id)
group = event.group
assert isinstance(group, Group)
assert group.first_seen == first_seen
result = get_issue_and_event_details(
issue_id=str(group.id),
organization_id=self.organization.id,
selected_event="recommended",
)
# Assert expected stats params were passed to the API.
_, kwargs = mock_api_client.get.call_args
assert kwargs["path"] == f"/organizations/{self.organization.slug}/events-stats/"
assert kwargs["params"]["statsPeriod"] == stats_period
assert kwargs["params"]["interval"] == interval
# Validate final results.
assert result is not None
self._validate_event_timeseries(result["event_timeseries"])
assert result["timeseries_stats_period"] == stats_period
assert result["timeseries_interval"] == interval
# Ensure next iteration makes a fresh group.
group.delete()
| TestGetIssueAndEventDetails |
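For readability, an illustrative sketch of the `event_timeseries` payload shape that `_validate_event_timeseries` above accepts; the timestamps and counts below are made-up values, not captured Sentry output.
# Hypothetical payload satisfying the assertions in _validate_event_timeseries:
# "count()" -> "data" -> list of [unix_timestamp, [{"count": int}]] pairs.
event_timeseries = {
    "count()": {
        "data": [
            [1700000000, [{"count": 3}]],
            [1700003600, [{"count": 0}]],
        ]
    }
}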
python | tiangolo__fastapi | fastapi/security/oauth2.py | {
"start": 14465,
"end": 17920
} | class ____(OAuth2):
"""
OAuth2 flow for authentication using a bearer token obtained with a password.
An instance of it would be used as a dependency.
Read more about it in the
[FastAPI docs for Simple OAuth2 with Password and Bearer](https://fastapi.tiangolo.com/tutorial/security/simple-oauth2/).
"""
def __init__(
self,
tokenUrl: Annotated[
str,
Doc(
"""
The URL to obtain the OAuth2 token. This would be the *path operation*
that has `OAuth2PasswordRequestForm` as a dependency.
"""
),
],
scheme_name: Annotated[
Optional[str],
Doc(
"""
Security scheme name.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
scopes: Annotated[
Optional[Dict[str, str]],
Doc(
"""
The OAuth2 scopes that would be required by the *path operations* that
use this dependency.
"""
),
] = None,
description: Annotated[
Optional[str],
Doc(
"""
Security scheme description.
It will be included in the generated OpenAPI (e.g. visible at `/docs`).
"""
),
] = None,
auto_error: Annotated[
bool,
Doc(
"""
By default, if no HTTP Authorization header is provided, required for
OAuth2 authentication, it will automatically cancel the request and
send the client an error.
If `auto_error` is set to `False`, when the HTTP Authorization header
is not available, instead of erroring out, the dependency result will
be `None`.
This is useful when you want to have optional authentication.
It is also useful when you want to have authentication that can be
provided in one of multiple optional ways (for example, with OAuth2
or in a cookie).
"""
),
] = True,
refreshUrl: Annotated[
Optional[str],
Doc(
"""
The URL to refresh the token and obtain a new one.
"""
),
] = None,
):
if not scopes:
scopes = {}
flows = OAuthFlowsModel(
password=cast(
Any,
{
"tokenUrl": tokenUrl,
"refreshUrl": refreshUrl,
"scopes": scopes,
},
)
)
super().__init__(
flows=flows,
scheme_name=scheme_name,
description=description,
auto_error=auto_error,
)
async def __call__(self, request: Request) -> Optional[str]:
authorization = request.headers.get("Authorization")
scheme, param = get_authorization_scheme_param(authorization)
if not authorization or scheme.lower() != "bearer":
if self.auto_error:
raise self.make_not_authenticated_error()
else:
return None
return param
| OAuth2PasswordBearer |
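For reference, a minimal sketch of how this scheme is typically wired into an application; the `/token` route name, the credential handling, and the returned token are illustrative assumptions, not part of the record above.
from fastapi import Depends, FastAPI
from fastapi.security import OAuth2PasswordBearer, OAuth2PasswordRequestForm
app = FastAPI()
# tokenUrl points at the *path operation* that issues tokens, as the docstring notes.
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="token")
@app.post("/token")
async def login(form_data: OAuth2PasswordRequestForm = Depends()):
    # Illustrative only: a real application would verify the credentials here.
    return {"access_token": form_data.username, "token_type": "bearer"}
@app.get("/items/")
async def read_items(token: str = Depends(oauth2_scheme)):
    # `token` is the raw bearer string returned by OAuth2PasswordBearer.__call__.
    return {"token": token}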
python | dask__distributed | distributed/utils.py | {
"start": 55462,
"end": 56873
} | class ____(logging.Filter):
"""A Logging filter that ensures a matching message is emitted at most every
`rate` seconds"""
pattern: re.Pattern
rate: float
_last_seen: float
def __init__(self, pattern: str, *, name: str = "", rate: str | float = "10s"):
super().__init__(name)
self.pattern = re.compile(pattern)
self.rate = _parse_timedelta(rate)
self._last_seen = -self.rate
def filter(self, record: logging.LogRecord) -> bool:
if self.pattern.match(record.msg):
now = monotonic()
if now - self._last_seen < self.rate:
return False
self._last_seen = now
return True
@classmethod
def reset_timer(cls, logger: logging.Logger | str) -> None:
"""Reset the timer on all RateLimiterFilters on a logger.
Useful in unit testing.
"""
if isinstance(logger, str):
logger = logging.getLogger(logger)
for filter in logger.filters:
if isinstance(filter, cls):
filter._last_seen = -filter.rate
if sys.version_info >= (3, 11):
async def wait_for(fut: Awaitable[T], timeout: float) -> T:
async with asyncio.timeout(timeout):
return await fut
else:
async def wait_for(fut: Awaitable[T], timeout: float) -> T:
return await asyncio.wait_for(fut, timeout)
| RateLimiterFilter |
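A small usage sketch for the filter above; the import path follows the record's `distributed/utils.py` location, and the message pattern and rate are arbitrary examples.
import logging
from distributed.utils import RateLimiterFilter  # path assumed from the record above
logger = logging.getLogger("distributed.example")
logger.addFilter(RateLimiterFilter(r"^Unmanaged memory", rate="10s"))
for _ in range(100):
    # Only the first matching message in each 10-second window is emitted.
    logger.warning("Unmanaged memory use is high")
RateLimiterFilter.reset_timer(logger)  # reset between unit tests, per the classmethod above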
python | django__django | tests/forms_tests/models.py | {
"start": 2060,
"end": 2878
} | class ____(models.Model):
"""Model with ForeignKey to another model, for testing ModelForm
generation with ModelChoiceField."""
choice = models.ForeignKey(
ChoiceOptionModel,
models.CASCADE,
blank=False,
default=choice_default,
)
choice_int = models.ForeignKey(
ChoiceOptionModel,
models.CASCADE,
blank=False,
related_name="choice_int",
default=int_default,
)
multi_choice = models.ManyToManyField(
ChoiceOptionModel,
blank=False,
related_name="multi_choice",
default=choice_default_list,
)
multi_choice_int = models.ManyToManyField(
ChoiceOptionModel,
blank=False,
related_name="multi_choice_int",
default=int_list_default,
)
| ChoiceFieldModel |
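To make the docstring concrete, a hedged sketch of the ModelForm generation it refers to; the import path for `ChoiceFieldModel` is assumed from the record's `tests/forms_tests/models.py` location, and the snippet presumes configured Django settings.
from django.forms import modelform_factory
from forms_tests.models import ChoiceFieldModel  # path assumed from the record above
# ForeignKey fields become ModelChoiceField and ManyToManyField fields become
# ModelMultipleChoiceField when the form class is generated.
ChoiceFieldForm = modelform_factory(ChoiceFieldModel, fields="__all__")
print(type(ChoiceFieldForm.base_fields["choice"]).__name__)        # ModelChoiceField
print(type(ChoiceFieldForm.base_fields["multi_choice"]).__name__)  # ModelMultipleChoiceField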
python | MongoEngine__mongoengine | mongoengine/errors.py | {
"start": 410,
"end": 464
} | class ____(MongoEngineException):
pass
| NotRegistered |
python | ray-project__ray | python/ray/data/tests/test_pandas_block.py | {
"start": 618,
"end": 3326
} | class ____:
@pytest.mark.parametrize(
"ignore_nulls, expected",
[
(True, 3),
(False, 4),
],
)
def test_count(self, arr, ignore_nulls, expected):
accessor = PandasBlockColumnAccessor(arr)
result = accessor.count(ignore_nulls=ignore_nulls, as_py=True)
assert result == expected
@pytest.mark.parametrize(
"ignore_nulls, expected",
[
(True, 9),
(False, np.nan),
],
)
def test_sum(self, arr, ignore_nulls, expected):
accessor = PandasBlockColumnAccessor(arr)
result = accessor.sum(ignore_nulls=ignore_nulls, as_py=True)
assert result == expected or is_null(result) and is_null(expected)
@pytest.mark.parametrize(
"ignore_nulls, expected",
[
(True, 1),
(False, np.nan),
],
)
def test_min(self, arr, ignore_nulls, expected):
accessor = PandasBlockColumnAccessor(arr)
result = accessor.min(ignore_nulls=ignore_nulls, as_py=True)
assert result == expected or is_null(result) and is_null(expected)
@pytest.mark.parametrize(
"ignore_nulls, expected",
[
(True, 6),
(False, np.nan),
],
)
def test_max(self, arr, ignore_nulls, expected):
accessor = PandasBlockColumnAccessor(arr)
result = accessor.max(ignore_nulls=ignore_nulls, as_py=True)
assert result == expected or is_null(result) and is_null(expected)
@pytest.mark.parametrize(
"ignore_nulls, expected",
[
(True, 3.0),
(False, np.nan),
],
)
def test_mean(self, arr, ignore_nulls, expected):
accessor = PandasBlockColumnAccessor(arr)
result = accessor.mean(ignore_nulls=ignore_nulls, as_py=True)
assert result == expected or is_null(result) and is_null(expected)
@pytest.mark.parametrize(
"provided_mean, expected",
[
(3.0, 14.0),
(None, 14.0),
],
)
def test_sum_of_squared_diffs_from_mean(self, arr, provided_mean, expected):
accessor = PandasBlockColumnAccessor(arr)
result = accessor.sum_of_squared_diffs_from_mean(
ignore_nulls=True, mean=provided_mean, as_py=True
)
assert result == expected or is_null(result) and is_null(expected)
def test_to_pylist(self, arr):
accessor = PandasBlockColumnAccessor(arr)
result = accessor.to_pylist()
expected = arr.to_list()
assert all(
    a == b or (is_null(a) and is_null(b)) for a, b in zip(expected, result)
)
| TestPandasBlockColumnAccessor |
python | pytorch__pytorch | test/inductor/test_compiled_autograd.py | {
"start": 147780,
"end": 204027
} | class ____(torch.nn.Module):
def forward(self, inputs, sizes, scalars, hooks, packed_data):
getitem = inputs[0]
getitem_1 = inputs[1]; inputs = None
getitem_2 = sizes[0]; getitem_2 = None
getitem_3 = sizes[1]; sizes = None
validate_outputs = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem], [((None, None, device(type='cpu'), 6, 0, None), [], False)]); getitem = None
getitem_4 = validate_outputs[0]; validate_outputs = None
sum_backward0 = torch__dynamo_compiled_autograd_ops_SumBackward0([getitem_4], [True], []); getitem_4 = None
getitem_5 = sum_backward0[0]; sum_backward0 = None
validate_outputs_1 = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem_5], [((None, None, device(type='cpu'), 6, 0, None), [], False)]); getitem_5 = None
getitem_6 = validate_outputs_1[0]; validate_outputs_1 = None
getitem_7 = hooks[0]
getitem_8 = packed_data[0]; packed_data = None
getitem_9 = hooks[1]; hooks = None
call_hook = torch__dynamo_external_utils_call_hook(getitem_7, getitem_8, hook_type = 'unpack_hook'); getitem_7 = getitem_8 = None
call_backward = torch__dynamo_external_utils_call_backward(getitem_9, (call_hook,), getitem_6); getitem_9 = call_hook = getitem_6 = None
getitem_11 = call_backward[0]; call_backward = None
validate_outputs_2 = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem_11], [((None, None, device(type='cpu'), 6, 0, None), [getitem_3], False)]); getitem_11 = getitem_3 = None
getitem_12 = validate_outputs_2[0]; validate_outputs_2 = None
accumulate_grad__default = torch.ops.inductor.accumulate_grad_.default(getitem_1, getitem_12); getitem_1 = getitem_12 = accumulate_grad__default = None
_exec_final_callbacks_stub = torch__dynamo_external_utils__exec_final_callbacks_stub(); _exec_final_callbacks_stub = None
return []
""", # noqa: B950
)
# 1 graph break on torch.load -> 2 dynamo graphs
self.check_output_and_recompiles(
fn,
count=[1, 2],
compiler_fn=make_compiler_fn(fullgraph=False, gm_hook=check),
)
@skipIfWindows(msg="node name demangling inconsistent on windows")
def test_backward_hook_relative_ordering_partial(self):
# test backward hooks for cases that CA matches eager
def fn():
order = []
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.linear = torch.nn.Linear(10, 10, bias=False)
def forward(self, x):
return self.linear(x)
x = torch.randn(10, 10)
module = MyModule()
def make_pre_hook(id):
return lambda _: order.append(f"pre_hook_{id}")
def make_post_hook(id):
return lambda _1, _2: order.append(f"post_hook_{id}")
count = 0
def register_hooks_on_all_nodes(nodes):
nonlocal count
for node, _ in nodes:
if node is None:
continue
count += 1
id = f"{node.name()}_{count}"
node.register_prehook(make_pre_hook(id))
node.register_hook(make_post_hook(id))
register_hooks_on_all_nodes(node.next_functions)
loss = module(x).sum()
register_hooks_on_all_nodes(((loss.grad_fn, None),))
def make_tensor_pre_hook(id):
return lambda _: order.append(f"tensor_pre_hook_{id}")
def make_post_acc_grad_hook(id):
return lambda _: order.append(f"post_acc_grad_hook_{id}")
module.linear.weight.register_hook(make_tensor_pre_hook("weight"))
module.linear.weight.register_post_accumulate_grad_hook(
make_post_acc_grad_hook("weight")
)
loss.backward()
yield tuple(order)
self.check_output_and_recompiles(fn)
def test_checkpointing_sac(self):
# circular import
from torch.utils.checkpoint import (
checkpoint,
CheckpointPolicy,
create_selective_checkpoint_contexts,
)
def fn():
class mlp(nn.Module):
def __init__(self):
super().__init__()
self.layer1 = nn.Linear(10, 10)
self.layer2 = nn.Linear(10, 10)
self.layer3 = nn.Linear(10, 10)
self.layer4 = nn.Linear(10, 10)
def forward(self, x):
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
return x
recompute_list = [torch.ops.aten.addmm.default]
def recompute_policy(ctx, op, *args, **kwargs):
if op in recompute_list:
return CheckpointPolicy.MUST_RECOMPUTE
else:
return CheckpointPolicy.PREFER_SAVE
def context_fn():
return create_selective_checkpoint_contexts(recompute_policy)
model = mlp()
input = torch.randn(1, 10)
out = checkpoint(model, input, use_reentrant=False, context_fn=context_fn)
out.sum().backward()
yield model.layer1.weight.grad
yield model.layer1.bias.grad
yield model.layer2.weight.grad
yield model.layer2.bias.grad
yield model.layer3.weight.grad
yield model.layer3.bias.grad
yield model.layer4.weight.grad
yield model.layer4.bias.grad
self.check_output_and_recompiles(
fn, count=[1, 5], compiler_fn=make_compiler_fn(fullgraph=False)
)
def test_dont_dce_side_effects(self):
class SideEffectfulBackward(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
torch.randn(10, 10)
return gO
x = torch.randn(10, 10, requires_grad=True)
# https://github.com/pytorch/pytorch/issues/147171
torch._inductor.config.fallback_random = True
@torch.compile(backend="aot_eager")
def fn(x):
return SideEffectfulBackward.apply(x).sum()
gm = None
def extract(ca_gm):
nonlocal gm
gm = ca_gm
return ca_gm
with compiled_autograd._enable(extract):
fn(x).backward()
self.assertTrue("aten.randn" in str(gm))
def test_aot_bwd_gm_runnable(self):
# This test ensures that the bw_module saved in
# CompiledFunction._lazy_backward_info is executable,
# by ensuring post grad passes have not ran on it.
post_grad_graphs = []
def post_grad_pass(graph):
nonlocal post_grad_graphs
post_grad_graphs.append(graph)
return graph
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=True)
# forces symints to be saved for backward
# and forces aot compilation of the backward
torch._dynamo.mark_dynamic(x, 0)
torch._dynamo.mark_dynamic(y, 1)
@torch.compile
def fn(x, y):
return torch.matmul(x, y).sum()
with inductor_config.patch(post_grad_custom_post_pass=post_grad_pass):
loss = fn(x, y)
self.assertEqual(len(post_grad_graphs), 2) # 1 fwd and 1 bwd
self.assertTrue(loss.grad_fn.name(), "CompiledFunctionBackward")
self.assertIsNot(
post_grad_graphs[1],
loss.grad_fn._forward_cls._lazy_backward_info.bw_module.graph,
)
with compiled_autograd._enable(lambda gm: gm):
loss.backward()
def test_anomaly_mode_already_nan(self):
def fn():
with torch.autograd.detect_anomaly():
a = torch.randn(5, 5, requires_grad=True)
a.grad = torch.full((5, 5), float("nan"))
b = torch.randn(5, 5)
out = torch.matmul(a, b)
loss = out.sum()
with torch._dynamo.compiled_autograd._enable(lambda gm: gm):
loss.backward()
with self.assertRaisesRegex(
AssertionError, "already having NaN gradient. This is not supported."
):
fn()
def test_anomaly_mode_backward(self):
def fn():
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return torch.full(gO.size(), float("nan"))
with torch.autograd.detect_anomaly():
a = torch.randn(5, 5, requires_grad=True)
out = MyFn.apply(a)
loss = out.sum()
with torch._dynamo.compiled_autograd._enable(lambda gm: gm):
loss.backward()
with self.assertRaisesRegex(
RuntimeError, "Compiled Autograd returned NaN gradients for parameters"
):
fn()
def test_anomaly_mode_grad(self):
def fn():
class MyFn(torch.autograd.Function):
@staticmethod
def forward(ctx, x):
return x
@staticmethod
def backward(ctx, gO):
return torch.full(gO.size(), float("nan"))
with torch.autograd.detect_anomaly():
a = torch.randn(5, 5, requires_grad=True)
out = MyFn.apply(a)
loss = out.sum()
with torch._dynamo.compiled_autograd._enable(lambda gm: gm):
torch.autograd.grad(loss, inputs=a)
with self.assertRaisesRegex(
RuntimeError, "Compiled Autograd returned NaN gradients for output nodes"
):
fn()
def test_higher_order_gradients(self):
def f(x):
return x**3
def fn(fwd_compiler, ca_compiler):
torch.manual_seed(123)
x = torch.tensor(2.0, requires_grad=True)
first, second, third, fourth = None, None, None, None
try:
with compiled_autograd._enable(ca_compiler):
first = torch.autograd.grad(
fwd_compiler(f)(x), x, create_graph=True
)[0]
second = torch.autograd.grad(first, x, create_graph=True)[0]
third = torch.autograd.grad(second, x, create_graph=True)[0]
fourth = torch.autograd.grad(third, x, create_graph=True)[0]
except RuntimeError as e:
assert "does not currently support higher order gradients" in str(e)
return (first, second, third, fourth)
return (first, second, third, fourth)
def eager():
return torch.compile(backend="eager")
def aot_eager():
return torch.compile(backend="aot_eager")
# Without AOTAutograd, no problem
first, second, third, fourth = fn(eager(), eager())
self.assertEqual(counters["compiled_autograd"]["captures"], 4)
self.assertEqual(first, 12) # 3x^2
self.assertEqual(second, 12) # 6x
self.assertEqual(third, 6) # 6
self.assertEqual(fourth, 0)
# and should cache hit
counters.clear()
_ = fn(eager(), eager())
self.assertEqual(counters["compiled_autograd"]["captures"], 0)
torch._dynamo.reset()
# With AOTAutograd, can't create_graph
first, second, third, fourth = fn(aot_eager(), aot_eager())
self.assertIsNone(second)
first, second, third, fourth = fn(aot_eager(), eager())
self.assertIsNone(second)
first, second, third, fourth = fn(eager(), aot_eager())
self.assertIsNone(third)
@unittest.skipIf(
not torch.distributed.is_available(),
"FakePG relies on distributed build",
)
def test_ddp_cpp_reducer_error(self):
from torch.testing._internal.distributed.fake_pg import FakeStore
store = FakeStore()
dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
try:
model = torch.nn.Sequential(nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 10))
model = DDP(model)
inputs = torch.randn(10, 10)
loss = model(inputs).sum()
with (
compiled_autograd._enable(compiler_fn),
self.assertRaisesRegex(
RuntimeError,
(
r"Compiled autograd is not compatible with C\+\+ DDP Reducer, "
r'please use torch._dynamo.config.optimize_ddp="python_reducer"'
),
),
):
loss.backward()
finally:
dist.destroy_process_group()
@unittest.skipIf(
not torch.distributed.is_available(),
"FakePG relies on distributed build",
)
@config.patch(optimize_ddp="python_reducer")
def test_ddp_python_reducer(self):
from torch.testing._internal.distributed.fake_pg import FakeStore
store = FakeStore()
dist.init_process_group(backend="fake", rank=0, world_size=2, store=store)
try:
model = torch.nn.Sequential(nn.Linear(10, 10), nn.ReLU(), nn.Linear(10, 10))
model = DDP(model)
inputs = torch.randn(10, 10)
loss = model(inputs).sum()
with compiled_autograd._enable(compiler_fn):
# no error expected
loss.backward()
self.assertEqual(counters["compiled_autograd"]["captures"], 1)
finally:
dist.destroy_process_group()
# Case 1.1: Stealable dense new_grad
# if (!GradMode::is_enabled() && !new_grad.is_sparse() &&
# !new_grad.is_sparse_csr() &&
# !(variable.is_sparse_csr() && new_grad.layout() == at::kStrided) &&
# at::caching::adjusted_use_count(new_grad) <= num_expected_refs &&
# (new_grad.is_mkldnn() || utils::obeys_layout_contract(new_grad, variable))) {
@unittest.expectedFailure
def test_accumulate_grad_polyfill_case_1_1(self):
def fn():
class StealableDenseOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
return torch.ones_like(grad_output, requires_grad=False) * 5
pre_hook_storage_id = None
def check(grad):
nonlocal pre_hook_storage_id
assert pre_hook_storage_id is None
pre_hook_storage_id = id(grad.untyped_storage())
var = torch.randn(2, 2, requires_grad=True)
var.register_hook(check)
output = StealableDenseOp.apply(var)
output.backward(torch.ones_like(output))
assert var.grad is not None, "Grad should be defined"
assert torch.equal(var.grad, torch.ones_like(var) * 5), (
"Grad content should be as returned by backward"
)
assert var.grad.requires_grad is False, (
"Detached grad should not require grad"
)
assert id(var.grad.untyped_storage()) == pre_hook_storage_id, (
"Should be stolen"
)
yield var.grad
self.check_output_and_recompiles(
fn,
compiler_fn=make_compiler_fn(fullgraph=False),
count=[1, 2],
)
# Case 1.2: Stealable sparse new_grad
# } else if (!GradMode::is_enabled() && new_grad.is_sparse() &&
# new_grad._indices().is_contiguous() &&
# new_grad._values().is_contiguous() &&
# new_grad._indices().use_count() <= 1 &&
# new_grad._values().use_count() <= 1 &&
# new_grad.use_count() <= num_expected_refs) {
@unittest.expectedFailure
def test_accumulate_grad_polyfill_case_1_2(self):
def fn():
class StealableSparseOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
size = grad_output.size()
indices = torch.tensor([[0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([5.0, 5.0])
return torch.sparse_coo_tensor(
indices, values, size, requires_grad=False
)
pre_hook_storages_id = None
def check(grad):
nonlocal pre_hook_storages_id
assert pre_hook_storages_id is None
pre_hook_storages_id = [
id(grad._indices().untyped_storage()),
id(grad._values().untyped_storage()),
]
var = torch.randn(2, 2, requires_grad=True)
var.register_hook(check)
output = StealableSparseOp.apply(var)
output.backward(torch.ones_like(output))
assert var.grad is not None, "Grad should be defined"
assert var.grad.is_sparse, "Grad should be sparse"
expected_dense_grad = torch.tensor([[5.0, 0.0], [0.0, 5.0]])
assert torch.equal(var.grad.to_dense(), expected_dense_grad), (
"Content should be equal after shallow copy"
)
assert var.grad.requires_grad is False, (
"Detached grad should not require grad"
)
assert (
id(var.grad._indices().untyped_storage()) == pre_hook_storages_id[0]
), "Should be stolen"
assert (
id(var.grad._values().untyped_storage()) == pre_hook_storages_id[1]
), "Should be stolen"
yield var.grad
self.check_output_and_recompiles(
fn,
compiler_fn=make_compiler_fn(fullgraph=False),
count=[1, 2],
)
# Case 1.3: Cloning sparse/nested new_grad
# else {
# if (new_grad.is_sparse() || new_grad.is_sparse_csr() ||
# new_grad.is_nested()) {
def test_accumulate_grad_polyfill_case_1_3(self):
def fn():
class CloneSparseGradOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
size = grad_output.size()
indices = torch.tensor([[0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor(
[5.0, 5.0], requires_grad=True
) # Requires grad
return torch.sparse_coo_tensor(
indices, values, size, requires_grad=True
)
pre_hook_storages_id = None
def check(grad):
nonlocal pre_hook_storages_id
assert pre_hook_storages_id is None
pre_hook_storages_id = [
id(grad._indices().untyped_storage()),
id(grad._values().untyped_storage()),
]
var = torch.randn(2, 2, requires_grad=True)
var.register_hook(check)
output = CloneSparseGradOp.apply(var)
output.backward(
torch.ones_like(output), create_graph=True
) # grad mode == create_graph
assert var.grad is not None, "Grad should be defined"
assert var.grad.is_sparse, "Grad should be sparse"
expected_dense_grad = torch.tensor([[5.0, 0.0], [0.0, 5.0]])
assert torch.equal(var.grad.to_dense(), expected_dense_grad), (
"Content should be equal after clone"
)
assert var.grad.requires_grad, (
"Grad should require grad for double backward"
)
assert (
id(var.grad._indices().untyped_storage()) != pre_hook_storages_id[0]
), "Should be copied"
assert (
id(var.grad._values().untyped_storage()) != pre_hook_storages_id[1]
), "Should be copied"
yield var.grad
self.check_output_and_recompiles(
fn,
compiler_fn=make_compiler_fn(fullgraph=False),
count=[1, 2],
)
# Case 1.5.1: Dense variable gradient layout contract
# else { // Covers various deep copy scenarios not covered by specific stealable paths
# ...
# if (new_grad.is_mkldnn()) {
# ...
# } else {
# // Deep copies new_grad according to the "Gradient Layout Contract."
# update_grad(utils::clone_obey_contract(new_grad, variable));
# }
# }
def test_accumulate_grad_polyfill_case_1_5_1(self):
def fn():
class NotStealableRefsOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
return torch.ones_like(grad_output, requires_grad=False) * 10.0
var = torch.randn(2, 2, requires_grad=True)
grad_ref_holder = [None]
def check(grad):
# forces a clone due to refcount
grad_ref_holder[0] = grad
return grad
var.register_hook(check)
output = NotStealableRefsOp.apply(var)
output.backward(torch.ones_like(output))
assert var.grad is not None, "Grad should be defined"
assert torch.equal(var.grad, torch.ones_like(var) * 10.0), (
"Grad content should be as returned by backward"
)
assert (
grad_ref_holder[0].untyped_storage() is not var.grad.untyped_storage()
), "Should be copied"
yield var.grad
self.check_output_and_recompiles(fn)
# Case 1.5.2: Non-dense variable gradient layout contract
# else { // Covers various deep copy scenarios not covered by specific stealable paths
# ...
# if (new_grad.is_mkldnn()) {
# ...
# } else {
# // Deep copies new_grad according to the "Gradient Layout Contract."
# update_grad(utils::clone_obey_contract(new_grad, variable));
# }
# }
def test_accumulate_grad_polyfill_case_1_5_2(self):
def fn():
class SimpleDenseGradOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
return torch.ones_like(grad_output, requires_grad=False) * 7.0
# Create a non-contiguous variable
base_tensor = torch.randn(4, 4)
var = base_tensor[::2, ::2]
assert not var.is_contiguous(), (
"Variable should be non-contiguous for this test"
)
var.requires_grad_(True)
grad_ref_holder = [None]
def check(grad):
# forces a clone due to refcount
grad_ref_holder[0] = grad
return grad
var.register_hook(check)
output = SimpleDenseGradOp.apply(var)
output.backward(torch.ones_like(output))
assert var.grad is not None, "Grad should be defined"
# The `clone_obey_contract` branch 2 (`new_grad.clone(at::MemoryFormat::Contiguous)`)
# will make the resulting grad contiguous.
assert var.grad.is_contiguous(), (
"Resulting grad should be contiguous due to branch 2 of clone_obey_contract"
)
assert torch.equal(var.grad, torch.ones_like(var) * 7.0), (
"Grad content should be as returned by backward"
)
assert (
grad_ref_holder[0].untyped_storage() is not var.grad.untyped_storage()
), "Should be copied"
yield var.grad
self.check_output_and_recompiles(
fn,
)
# Case 2.1: Sparse variable_grad + Dense new_grad
# } else if (!GradMode::is_enabled()) {
# if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
# auto result = new_grad + variable_grad;
def test_accumulate_grad_polyfill_case_2_1(self):
def fn():
class SparseVarGradDenseNewGradOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
return torch.ones_like(grad_output) * 3.0
var = torch.randn(2, 2, requires_grad=True)
indices = torch.tensor([[0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([1.0, 1.0])
var.grad = torch.sparse_coo_tensor(
indices, values, var.size(), requires_grad=False
)
initial_grad_ref = var.grad
output = SparseVarGradDenseNewGradOp.apply(var)
expected_sum = (torch.ones_like(var) * 3.0) + initial_grad_ref.to_dense()
output.backward(torch.ones_like(output))
assert var.grad is not None, "Grad should be defined"
assert not var.grad.is_sparse, "Resulting grad should be dense"
assert torch.equal(var.grad, expected_sum), "Grad content should be the sum"
assert var.grad is not initial_grad_ref, (
"Grad object should be replaced (out-of-place)"
)
yield var.grad
self.check_output_and_recompiles(
fn,
compiler_fn=lambda gm: gm, # https://github.com/pytorch/pytorch/issues/154161
count=[1, 0],
)
# Case 2.3.1: Dense/Dense in-place addition
# } else if (!GradMode::is_enabled()) {
# ...
# } else {
# variable_grad += new_grad;
def test_accumulate_grad_polyfill_case_2_3_1(self):
def fn():
class DenseVarGradDenseNewGradOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
return torch.ones_like(grad_output) * 3.0
var = torch.randn(2, 2, requires_grad=True)
var.grad = torch.ones_like(var) * 1.0
initial_grad_ref = var.grad
output = DenseVarGradDenseNewGradOp.apply(var)
expected_sum = initial_grad_ref + (torch.ones_like(var) * 3.0)
output.backward(torch.ones_like(output))
assert var.grad is not None, "Grad should be defined"
assert not var.grad.is_sparse, "Resulting grad should be dense"
assert torch.equal(var.grad, expected_sum), "Grad content should be the sum"
assert var.grad is initial_grad_ref, (
"Grad object should be modified in-place (same object)"
)
yield var.grad
self.check_output_and_recompiles(fn)
# Case 2.3.2: Sparse/Sparse in-place addition
# } else if (!GradMode::is_enabled()) {
# ...
# } else {
# variable_grad += new_grad;
def test_accumulate_grad_polyfill_case_2_3_2(self):
def fn():
class SparseVarGradSparseNewGradOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
size = grad_output.size()
indices = torch.tensor([[0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([3.0, 3.0])
return torch.sparse_coo_tensor(
indices, values, size, requires_grad=False
)
var = torch.randn(2, 2, requires_grad=True)
indices_v = torch.tensor([[0, 0], [0, 1]], dtype=torch.int64)
values_v = torch.tensor([1.0, 2.0])
var.grad = torch.sparse_coo_tensor(
indices_v, values_v, var.size(), requires_grad=False
)
initial_grad_ref = var.grad
output = SparseVarGradSparseNewGradOp.apply(var)
new_grad_for_sum = torch.sparse_coo_tensor(
torch.tensor([[0, 1], [0, 1]], dtype=torch.int64),
torch.tensor([3.0, 3.0]),
var.size(),
)
expected_sum_dense = (
initial_grad_ref.to_dense() + new_grad_for_sum.to_dense()
)
output.backward(torch.ones_like(output))
assert var.grad is not None, "Grad should be defined"
assert var.grad.is_sparse, "Resulting grad should remain sparse"
assert torch.equal(var.grad.to_dense(), expected_sum_dense), (
"Grad content should be the sum of sparse grads"
)
assert var.grad is initial_grad_ref, (
"Grad object should be modified in-place (same object)"
)
yield var.grad
self.check_output_and_recompiles(
fn,
compiler_fn=lambda gm: gm, # https://github.com/pytorch/pytorch/issues/154161
count=[1, 0],
)
# Case 2.3.3: Dense/Sparse in-place addition
# } else if (!GradMode::is_enabled()) {
# ...
# } else {
# variable_grad += new_grad;
def test_accumulate_grad_polyfill_case_2_3_3(self):
def fn():
class DenseVarGradSparseNewGradOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
size = grad_output.size()
indices = torch.tensor([[0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([3.0, 3.0]) # New sparse values
return torch.sparse_coo_tensor(
indices, values, size, requires_grad=False
)
var = torch.randn(2, 2, requires_grad=True)
var.grad = torch.ones_like(var) * 1.0 # Initial value
initial_grad_ref = var.grad
output = DenseVarGradSparseNewGradOp.apply(var)
new_grad_for_sum = torch.sparse_coo_tensor(
torch.tensor([[0, 1], [0, 1]], dtype=torch.int64),
torch.tensor([3.0, 3.0]),
var.size(),
).to_dense()
expected_sum = initial_grad_ref + new_grad_for_sum
output.backward(torch.ones_like(output))
assert var.grad is not None, "Grad should be defined"
assert not var.grad.is_sparse, "Resulting grad should be dense"
assert torch.equal(var.grad, expected_sum), "Grad content should be the sum"
assert var.grad is initial_grad_ref, (
"Grad object should be modified in-place (same object)"
)
yield var.grad
self.check_output_and_recompiles(
fn,
compiler_fn=make_compiler_fn(fullgraph=False),
count=[1, 2],
)
# Case 3.1: Sparse variable_grad + Dense new_grad (reorder into Dense + Sparse)
# } else { // if GradMode::is_enabled()
# at::Tensor result;
# if (variable_grad.is_sparse() && !new_grad.is_sparse()) {
# result = new_grad + variable_grad;
# }
# }
def test_accumulate_grad_polyfill_case_3_1(self):
def fn():
class SparseVarGradDenseNewGradDoubleBackwardOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
return torch.ones_like(grad_output, requires_grad=True) * 3.0
var = torch.randn(2, 2, requires_grad=True)
indices = torch.tensor([[0, 1], [0, 1]], dtype=torch.int64)
values = torch.tensor([1.0, 1.0], requires_grad=True)
var.grad = torch.sparse_coo_tensor(
indices, values, var.size(), requires_grad=True
)
initial_grad_ref = var.grad
output = SparseVarGradDenseNewGradDoubleBackwardOp.apply(var)
expected_sum = (
torch.ones_like(var, requires_grad=True) * 3.0
) + initial_grad_ref.to_dense()
output.backward(torch.ones_like(output), create_graph=True)
assert var.grad is not None, "Grad should be defined"
assert not var.grad.is_sparse, "Resulting grad should be dense"
assert torch.equal(var.grad, expected_sum), "Grad content should be the sum"
assert var.grad is not initial_grad_ref, (
"Grad object should be replaced (out-of-place)"
)
assert var.grad.requires_grad, (
"Resulting grad should track history for double backward"
)
yield var.grad
self.check_output_and_recompiles(
fn,
compiler_fn=lambda gm: gm, # https://github.com/pytorch/pytorch/issues/154161
count=[1, 0],
)
# Case 3.2: variable_grad.defined() & GradMode::is_enabled() - Double backward (dense variable_grad + dense new_grad)
# } else { // if GradMode::is_enabled()
# at::Tensor result;
# ...
# } else {
# result = variable_grad + new_grad;
# }
# }
def test_accumulate_grad_polyfill_case_3_2(self):
def fn():
class DenseVarGradDenseNewGradDoubleBackwardOp(BaseCustomOp):
@staticmethod
def backward(ctx, grad_output):
return torch.ones_like(grad_output, requires_grad=True) * 3.0
var = torch.randn(2, 2, requires_grad=True)
var.grad = torch.ones_like(var) * 1.0
initial_grad_ref = var.grad
output = DenseVarGradDenseNewGradDoubleBackwardOp.apply(var)
expected_sum = initial_grad_ref + (
torch.ones_like(var, requires_grad=True) * 3.0
)
output.backward(torch.ones_like(output), create_graph=True)
assert var.grad is not None, "Grad should be defined"
assert not var.grad.is_sparse, "Resulting grad should be dense"
assert torch.equal(var.grad, expected_sum), "Grad content should be the sum"
assert var.grad is not initial_grad_ref, (
"Grad object should be replaced (out-of-place)"
)
assert var.grad.requires_grad, (
"Resulting grad should track history for double backward"
)
yield var.grad
self.check_output_and_recompiles(
fn,
compiler_fn=make_compiler_fn(fullgraph=False),
count=[1, 3],
)
def test_torch_function_mode(self):
called_funcs = []
class LoggingTorchFunctionMode(BaseTorchFunctionMode):
def __torch_function__(self, func, types, args=(), kwargs=None):
called_funcs.append(str(func.__name__))
return super().__torch_function__(func, types, args, kwargs)
class MyLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, out):
ctx.save_for_backward(out)
return out.sum()
@staticmethod
def backward(ctx, grad_output):
(saved,) = ctx.saved_tensors
return torch.ones_like(saved) * grad_output
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2)
z = torch.randn(2, 2)
def fwd(x, y, z):
out = x * y * z
loss = MyLoss.apply(out)
return loss
with LoggingTorchFunctionMode():
called_funcs.append("Forward")
loss = fwd(x, y, z)
called_funcs.append("Backward")
with torch._dynamo.compiled_autograd._enable(torch.compile):
loss.backward()
self.assertExpectedInline(
"\n".join(called_funcs),
"""\
Forward
mul
mul
sum
Backward
_set_multithreading_enabled
backward
_set_multithreading_enabled""",
) # noqa: B950
def test_torch_dispatch_mode(self):
called_funcs = []
class LoggingTorchDispatchMode(TorchDispatchMode):
def __torch_dispatch__(self, func, types, args=(), kwargs=None):
called_funcs.append(str(func.__name__))
return func(*args, **kwargs)
class MyLoss(torch.autograd.Function):
@staticmethod
def forward(ctx, out):
ctx.save_for_backward(out)
return out.sum()
@staticmethod
def backward(ctx, grad_output):
(saved,) = ctx.saved_tensors
return torch.ones_like(saved) * grad_output
x = torch.randn(2, 2, requires_grad=True)
y = torch.randn(2, 2)
z = torch.randn(2, 2)
def fwd(x, y, z):
out = x * y * z
loss = MyLoss.apply(out)
return loss
with LoggingTorchDispatchMode():
called_funcs.append("Forward")
loss = fwd(x, y, z)
called_funcs.append("Backward")
with torch._dynamo.compiled_autograd._enable(lambda gm: gm):
loss.backward()
self.assertExpectedInline(
"\n".join(called_funcs),
"""\
Forward
mul.Tensor
mul.Tensor
sum.default
Backward
ones_like.default
empty.memory_format
empty.memory_format
empty.memory_format
empty.memory_format
empty.memory_format
empty.memory_format
ones_like.default
mul.Tensor
mul.Tensor
mul.Tensor
new_empty_strided.default
copy_.default""",
) # noqa: B950
def load_test_module(name):
testdir = Path(__file__).absolute().parent.parent
with mock.patch("sys.path", [*sys.path, str(testdir)]):
return SourceFileLoader(
name, str(testdir / f"{name.replace('.', '/')}.py")
).load_module()
def make_wrapped(fn, ctxs):
@functools.wraps(fn)
def wrapped(self):
torch._dynamo.reset()
stack = contextlib.ExitStack()
for ctx in ctxs:
stack.enter_context(ctx)
out = fn(self)
stack.close()
return out
return wrapped
def lookup_backend(test_name):
if test_name in xfail_by_backend["inductor"]:
return "aot_eager"
elif test_name in xfail_by_backend["aot_eager"]:
return "eager"
elif test_name in xfail_by_backend["eager"]:
return "ca_eager"
else:
assert test_name not in xfail_by_backend["ca_eager"]
return "inductor"
def wrap_test_class(orig_cls):
dct = orig_cls.__dict__.copy()
for name in list(dct.keys()):
fn = dct[name]
if not callable(fn) or name in skipped_tests:
continue
elif (
xfail_re.match(name)
or name in xfail_by_backend["ca_eager"]
or name in xfail_divergence_from_eager
):
dct[name] = unittest.expectedFailure
elif name.startswith("test_"):
backend = lookup_backend(name)
if not HAS_CUDA_AND_TRITON and backend == "inductor":
continue
ctxs = [
compiled_autograd._enable(
make_compiler_fn(
backend=backend,
fullgraph=name not in known_graph_breaks_tests,
)
),
test_contexts.get(name, contextlib.nullcontext()),
]
dct[name] = make_wrapped(fn, ctxs)
cls = type(
orig_cls.__name__ + "WithCompiledAutograd",
orig_cls.__bases__,
dct,
)
cls.__file__ = __file__
return cls
known_graph_breaks_tests = {
"test_hook_none", # uses assert in hook
"test_post_accumulate_grad_hook_e2e", # optim.Adam manually graph breaks
"test_tensor_hooks_inplace", # uses assert in hook
"test_tensor_hooks_inplace_over_view", # uses assert in hook
"test_grad_fn_prehooks", # uses assert in hook
"test_grad_fn_prehooks_multiple_outputs", # uses assert in hook
"test_grad_fn_prehooks_remove_hooks", # uses handle.remove() in hook
"test_tensor_hooks_inplace_multiple_outputs", # uses assert in hook
"test_hooks", # uses assert in hook
"test_accumulate_grad_posthooks_can_observe_tensor_prehook", # allclose
"test_saved_tensors_hook_version_counter_not_shared", # assertEqual
"test_post_accumulate_grad_hook_returns_not_None", # throws
"test_custom_function_cycle", # assertEqual
"test_mark_non_differentiable_mixed", # assertTrue
"test_materialize_grads", # assertEqual
"test_return_leaf", # assertEqual
"test_save_none_for_backward", # assertIsNone
"test_saved_variables_deprecated", # warnings.warn
"test_autograd_node_isinstance", # assertIsInstance
"test_set_materialize_non_diff_grads", # assertIsNone
"test_backward_dict_grad_for_nontensor", # torch/_custom_op/autograd.py in skip files
"test_backward_dict_invalid_keys", # torch/_custom_op/autograd.py in skip files
"test_backward_dict_requires_keys_for_input_optional_tensors", # torch/_custom_op/autograd.py in skip files
"test_backward_dict_requires_keys_for_input_tensors", # torch/_custom_op/autograd.py in skip files
"test_backward_grads_are_tensor_or_none", # torch/_custom_op/autograd.py in skip files
"test_backward_impl_on_existing_op", # torch/_custom_op/autograd.py in skip files
"test_backward_returns_dict", # torch/_custom_op/autograd.py in skip files
"test_backward_tensorlist_input_requires_list_grads", # torch/_custom_op/autograd.py in skip files
"test_backward_tensorlist_input_requires_list_grads_none_or_Tensor", # torch/_custom_op/autograd.py in skip files
"test_backward_tensorlist_input_requires_list_grads_with_same_numel", # torch/_custom_op/autograd.py in skip files
"test_save_for_backward_inputs_are_namedtuple", # torch/_custom_op/autograd.py in skip files
"test_reentrant_with_leaf_variable_hook", # reentrant .backward
"test_reentrant_with_non_leaf_variable_hook", # reentrant .backward
"test_reentrant_child_error", # reentrant .backward
"test_deep_reentrant", # reentrant .backward
"test_reentrant_priority", # reentrant .backward
"test_simple_reentrant", # reentrant .backward
"test_checkpoint_detects_non_determinism", # unpack hook in skip files
"test_checkpoint_valid_reset_on_error", # unpack hook in skip files
"test_checkpointing_non_reentrant_autocast_cpu", # unpack hook in skip files
"test_checkpointing_non_reentrant_autocast_gpu", # unpack hook in skip files
"test_checkpointing_without_reentrant_arbitrary_input_output", # unpack hook in skip files
"test_checkpointing_without_reentrant_correct_grad", # unpack hook in skip files
"test_checkpointing_without_reentrant_custom_function_works", # unpack hook in skip files
"test_checkpointing_without_reentrant_dataparallel", # _get_device_index in skip files
"test_checkpointing_without_reentrant_detached_tensor_use_reentrant_True", # reentrant .backward
"test_checkpointing_without_reentrant_parameter_used_in_an_out", # unpack hook in skip files
"test_checkpointing_without_reentrant_with_context_fn", # unpack hook in skip files
"test_save_on_cpu_and_checkpoint", # unpack hook in skip files
"test_saved_tensor_hooks_custom_error_propagation", # CustomError
"test_access_saved_tensor_twice_without_recomputation_works", # unpack hook in skip files
"test_saved_tensor_hooks_extra_enter_during_bw_no_leak", # ctx in skip files
"test_saved_tensor_hooks_extra_exit_during_bw_no_crash", # ctx in skip files
"test_checkpointing", # reentrant .backward
"test_checkpointing_without_reentrant_input_requires_grad_False", # reentrant .backward
"test_checkpointing_without_reentrant_input_requires_grad_True", # reentrant .backward
"test_checkpointing_without_reentrant_memory_savings", # reentrant .backward
"test_dtensor_basic", # torch._dynamo.exc.Unsupported: Failed to convert args/kwargs to proxy
"test_dtensor_contiguous_dtensor_noncontiguous_local_as_tangent", # subclass constructor
"test_retain_grad", # retains_grad_hooks
"test_retain_grad_cycle", # retains_grad_hooks
"test_retain_grad_inplace", # retains_grad_hooks
"test_retain_grad_inplace_over_view", # retains_grad_hooks
"test_retains_grad_can_always_observe_tensor_prehook", # retains_grad_hooks
"test_retains_grad_inplace_multiple_outputs", # retains_grad_hooks
"test_hook_edge_case_when_called_with_grad", # retains_grad_hooks
"test_multi_grad_all_hooks", # retains_grad_hooks
"test_prehook_ordering", # retains_grad_hooks
"test_will_engine_execute_node", # retains_grad_hooks
"test_backward_to_node", # retains_grad_hooks
"test_backward_with_nonleaf_inputs", # retains_grad_hook on non-leaf input
"test_create_graph_and_full_backward_hook_cycle", # _pack_with_none
"test_full_backward_hook_double_backward", # _pack_with_none
"test_grad_mode_restored_reentrant", # assertTrue
"test_multi_grad_any_hooks", # register_multi_grad_hook
"test_saved_variable_packing_unpacking_did_not_save_original_with_hooks", # register_hooks
"test_graph_save_on_cpu", # dynamo disabled
"test_nested_checkpoint_early_stop_False", # dynamo disable
"test_nested_checkpoint_early_stop_True", # dynamo disable
"test_nested_checkpoint_kwargs_early_stop_False", # dynamo disable
"test_nested_checkpoint_kwargs_early_stop_True", # dynamo disable
"test_nested_checkpoint_non_tensor_inputs_and_outputs_early_stop_False", # dynamo disable
"test_nested_checkpoint_non_tensor_inputs_and_outputs_early_stop_True", # dynamo disable
"test_nested_checkpoint_reentrant_backwards_early_stop_False", # dynamo disable
"test_nested_checkpoint_reentrant_backwards_early_stop_True", # dynamo disable
"test_nested_checkpoint_same_graph_early_stop_False", # dynamo disable
"test_nested_checkpoint_same_graph_early_stop_True", # dynamo disable
"test_nested_checkpoint_set_early_stop", # dynamo disable
"test_nested_checkpoint_two_children_early_stop_False", # dynamo disable
"test_nested_checkpoint_two_children_early_stop_True", # dynamo disable
"test_custom_autograd_ac_early_stop", # marked as skipped
"test_dropout", # dynamo disable
"test_dropout_inductor", # dynamo disable
"test_function_with_kwargs", # dynamo disable
"test_module", # dynamo disable
}
test_contexts = {
"test_setitem_mask": config.patch(capture_dynamic_output_shape_ops=True),
"test_index_backward_does_not_save_tensor": config.patch(
capture_dynamic_output_shape_ops=True
),
}
# These groups of tests aren't supported yet
xfail_re = re.compile(r"^test_(sparse|profiler|gradcheck|named_tensor)")
# Tests fail at different stages, we categorize them wrt to their backends
# We run only the last passing backend in this order:
# ca_eager -> eager -> aot_eager -> inductor
xfail_by_backend = {
"ca_eager": { # xfail
"test_callback_propagates_errors_from_device_thread", # fullgraph for queue_callback, but graph break for RuntimeError
"test_reentrant_with_callbacks_both_depths", # queue_callback
"test_reentrant_with_callbacks_depth_0", # queue_callback
"test_reentrant_with_callbacks_depth_1", # queue_callback
"test_checkpoint_graph_execution_group", # Attempted to call function marked as skipped
"test_current_graph_task_execution_order", # nodes are already freed by the time dynamo traces the lifted hook
"test_autograd_inplace_views_cross_dtype", # view_fn not supported by compiled autograd
"test_post_accumulate_grad_hook_ordering", # accuracy error
"test_current_graph_task_id", # autograd state already cleared once dynamo is called
"test_custom_function_forward_mode_forward_is_no_op", # forward AD
"test_custom_function_forward_mode_inplace_checks", # forward AD
"test_custom_function_forward_mode_view_checks", # forward AD
"test_custom_function_forward_mode_wrong_formula", # forward AD
"test_node_post_hook_registered_during_unpack_hook", # 'NoneType' object has no attribute 'register_hook'
"test_custom_function_error", # forward AD
"test_custom_function_save_for_forward", # forward AD
"test_dont_materialize_grads", # undefined grad
"test_no_grad_copy", # setting static member in lifted backward
"test_no_grad_copy_sparse", # setting static member in lifted backward
"test_node_ordering_when_none_returned", # torch._dynamo.exc.Unsupported: TypeError <built-in method clone
"test_save_output_nr", # output_nr grad passed as None
# IndexError: list index out of range (NB: x.grad = y where both x and y are input tensors)
"test_grad_nonleaf_register_hook",
"test_backward_twice_without_saved_values", # https://github.com/pytorch/pytorch/issues/129938
# Category: Higher Order Gradients
"test_default_saved_tensors_hooks_double_backward", # wrong when pack hook returns non-leaf
"test_saved_variable_packing_unpacking_saved_original_with_hooks", # wrong when pack hook returns non-leaf
"test_nested_anomaly_detect_nan", # nested anomaly
"test_select_sum", # batched gradients
"test_custom_autograd_no_early_free", # batched gradients
"test_grad_batched_grad", # batched gradients
# Uncategorized
"test_lobpcg", # NaNs
"test_autograd_simple_views_python", # gradient is None
"test_function_returns_undefined_tensor", # gradient is None
"test_input_buffer_accum", # add(sparse, dense)
"test_return_duplicate", # batched gradients
"test_return_duplicate_inplace", # batched gradients
"test_naughty_autograd_function_stashing_ctx", # error not raised
"test_unrelated_inputs", # batched gradients
"test_nested_checkpoint_early_stop_False", # unpack hook grad_fn semantics
"test_nested_checkpoint_early_stop_True", # unpack hook grad_fn semantics
"test_nested_checkpoint_two_children_early_stop_False", # unpack hook grad_fn semantics
"test_nested_checkpoint_two_children_early_stop_True", # unpack hook grad_fn semantics
"test_dropout", # functionalize_rng_ops not yet supported
"test_dropout_inductor", # functionalize_rng_ops not yet supported
"test_function_with_kwargs", # functionalize_rng_ops not yet supported
"test_module", # functionalize_rng_ops not yet supported
"test_grad_dtype", # AttributeError: args / Float did not match Double
},
"eager": { # will be run without torch.compiling the CA graph
"test_setup_context_when_forward_has_default_args", # autograd.Function with class methods
"test_accumulate_grad_tensor_reference", # Out of bounds: frame_state_entry.stride[i] is None
"test_custom_function_exception", # torch.no_grad(), torch._dynamo.exc.Unsupported: missing: WITH_EXCEPT_START
"test_to_sparse_backward", # Out of bounds: frame_state_entry.stride[i] is None
"test_custom_function_non_tensor_inputs_outputs", # gradient batching rule not implemented for aten::sym_size.int
"test_setitem", # CopySlices accuracy error
"test_checkpointing_without_reentrant_saved_object_identity", # same as https://github.com/pytorch/pytorch/issues/136193
"test_dtensor_different_gradient_placement", # Dynamo failed to run FX node with fake tensors
"test_dtensor_noncontiguous_output", # Dynamo failed to run FX node with fake tensors
"test_dtensor_partial_placement_graph_output", # Dynamo failed to run FX node with fake tensors
"test_unwrap_async_collective_tensor_tangent", # AttributeError: 'PlainTensorMeta' object has no attribute 'attrs'
"test_graph_save_on_cpu", # torch.save should no-op and be recorded in the graph
"test_saving_variable_to_disk", # torch.save should no-op and be recorded in the graph
"test_nested_checkpoint_early_stop_False", # AOT backward higher order gradients
# Slow tests, these tests are close to CI timeout if we try to torch.compile them
"test_checkpointing",
"test_checkpointing_without_reentrant_memory_savings",
"test_checkpointing_without_reentrant_input_requires_grad_True",
"test_checkpointing_without_reentrant_input_requires_grad_False",
},
"aot_eager": { # will be run with torch.compile(backend="eager")
# Category: FakeTensor
"test_wrapped_number_saved_tensors_hooks", # Proxy tensor should carryover is_wrapped_number_ of its original
"test_scalar_grad_mixed_device", # Fake Tensors aren't propagating device properly for 0-dim grads
"test_grad", # AOT backward higher order gradients
"test_grad_materialize_grads", # AOT backward higher order gradients
},
"inductor": {}, # will be run with torch.compile(backend="aot_eager")
# tests not present in this dict will be run with torch.compile(backend="inductor")
}
# These tests fail due to difference in semantics that we won't fix
xfail_divergence_from_eager = {
"test_invalid_gradients", # can't give autograd error due to inaccurate output metadata of lifted backward
"test_autograd_node_isinstance", # backward ctx is a fake cls and not directly a Node instance
"test_backward_hook_relative_ordering", # compiled autograd collects breadth first, and module backward hook not supported
"test_checkpointing_without_reentrant_custom_function_works", # ctx.saved_tensors are cached by CA
"test_anomaly_mode_no_check_nan", # different error messages
"test_anomaly_grad_warnings", # different error messages
"test_anomaly_detect_nan", # fake tensor errors on NaN
"test_once_differentiable", # different node name: CompiledFunctionBackward
"test_function", # different node name: CompiledFunctionBackward
"test_inplace_on_view_backward", # different node name: CompiledFunctionBackward
"test_nested_anomaly_printstack_cleanup", # anomaly NaN error message different
"test_not_implemented_grad", # Dynamo changes the types of exceptions
"test_grad_call_compiled_backward_fn", # different functorch error
"test_vjp_call_compiled_backward_fn", # different functorch error
"test_vmap_call_compiled_backward_fn", # different functorch error
"test_accumulate_grad", # always out of place add for compiled autograd
"test_current_node", # slightly different dispatched ops
}
skipped_tests = set()
if not HAS_CUDA_AND_TRITON:
# Found Tesla M60 which is too old to be supported by the triton GPU compiler
skipped_tests.add("test_type_conversions")
if IS_S390X:
skipped_tests.add("test_deep_reentrant")
test_autograd = load_test_module("test_autograd")
test_custom_ops = load_test_module("test_custom_ops")
test_higher_order_ops = load_test_module("dynamo/test_higher_order_ops")
if not HAS_XPU_AND_TRITON:
TestAutogradWithCompiledAutograd = wrap_test_class(test_autograd.TestAutograd)
TestNestedCheckpointWithCompiledAutograd = wrap_test_class(
test_autograd.TestNestedCheckpoint
)
if not HAS_XPU_AND_TRITON:
TestCustomOpWithCompiledAutograd = wrap_test_class(test_custom_ops.TestCustomOp)
HigherOrderOpTestsWithCompiledAutograd = wrap_test_class(
test_higher_order_ops.HigherOrderOpTests
)
FuncTorchHigherOrderOpTestsWithCompiledAutograd = wrap_test_class(
test_higher_order_ops.FuncTorchHigherOrderOpTests
)
ActivationCheckpointingTestsWithCompiledAutograd = wrap_test_class(
test_higher_order_ops.ActivationCheckpointingTests
)
if torch.distributed.is_available() and HAS_CUDA_AND_TRITON:
test_dtensor = load_test_module("distributed/tensor/test_dtensor_compile")
TestDTensorCompileWithCompiledAutograd = wrap_test_class(
test_dtensor.TestDTensorCompile
)
xfail_hops = {"local_map_hop"}
| CompiledAutograd1 |
python | langchain-ai__langchain | libs/partners/fireworks/tests/unit_tests/test_embeddings_standard.py | {
"start": 217,
"end": 742
} | class ____(EmbeddingsUnitTests):
@property
def embeddings_class(self) -> type[Embeddings]:
return FireworksEmbeddings
@property
def embeddings_params(self) -> dict:
return {"api_key": "test_api_key"}
@property
def init_from_env_params(self) -> tuple[dict, dict, dict]:
return (
{
"FIREWORKS_API_KEY": "api_key",
},
{},
{
"fireworks_api_key": "api_key",
},
)
| TestFireworksStandard |
python | PrefectHQ__prefect | src/prefect/server/schemas/responses.py | {
"start": 21502,
"end": 21641
} | class ____(BaseModel):
results: list[schemas.core.Flow]
count: int
limit: int
pages: int
page: int
| FlowPaginationResponse |
python | pypa__warehouse | warehouse/legacy/api/xmlrpc/views.py | {
"start": 5136,
"end": 5443
} | class ____(XmlRpcError):
# NOQA due to N815 'mixedCase variable in class scope',
# This is the interface for specifying fault code and string for XmlRpcError
faultCode = -32403 # NOQA: ignore=N815
faultString = "server error; service unavailable" # NOQA: ignore=N815
| XMLRPCServiceUnavailable |
python | modin-project__modin | modin/tests/pandas/native_df_interoperability/test_compiler_caster.py | {
"start": 5029,
"end": 5634
} | class ____(CalculatorTestQc):
    """Represents a local machine query compiler."""
def get_backend(self):
return "Local_Machine"
@classmethod
def max_cost(cls):
return QCCoercionCost.COST_MEDIUM
def move_to_cost(self, other_qc_cls, api_cls_name, op, arguments):
return {
CloudQC: QCCoercionCost.COST_MEDIUM,
CloudQCHighSelf: QCCoercionCost.COST_MEDIUM,
ClusterQC: QCCoercionCost.COST_LOW,
LocalMachineQC: QCCoercionCost.COST_ZERO,
PicoQC: QCCoercionCost.COST_MEDIUM,
}.get(other_qc_cls)
| LocalMachineQC |
python | pyparsing__pyparsing | examples/simpleBool.py | {
"start": 1617,
"end": 1685
} | class ____(BoolBinOp):
repr_symbol = "&"
eval_fn = all
| BoolAnd |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/backfill.py | {
"start": 1842,
"end": 2866
} | class ____(Enum):
REQUESTED = "REQUESTED"
COMPLETED = "COMPLETED" # deprecated. Use COMPLETED_SUCCESS or COMPLETED_FAILED instead
FAILED = "FAILED" # denotes when there is a daemon failure, or some other issue processing the backfill
CANCELING = "CANCELING"
CANCELED = "CANCELED"
COMPLETED_SUCCESS = "COMPLETED_SUCCESS"
COMPLETED_FAILED = "COMPLETED_FAILED" # denotes that the backfill daemon completed successfully, but some runs failed
    FAILING = "FAILING"  # denotes a daemon failure or some other issue processing the backfill; launched runs will be canceled and then the backfill marked FAILED
@staticmethod
def from_graphql_input(graphql_str):
return BulkActionStatus(graphql_str)
BULK_ACTION_TERMINAL_STATUSES = [
BulkActionStatus.COMPLETED,
BulkActionStatus.FAILED,
BulkActionStatus.CANCELED,
    BulkActionStatus.COMPLETED_SUCCESS,
BulkActionStatus.COMPLETED_FAILED,
]
@record
| BulkActionStatus |
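A brief, illustrative use of the enum sample above (the masked class resolves to `BulkActionStatus`); this call site is hypothetical and not taken from the dagster source:
# Hypothetical caller: map a GraphQL string to the enum, then check for a terminal state.
status = BulkActionStatus.from_graphql_input("CANCELED")
assert status is BulkActionStatus.CANCELED
assert status in BULK_ACTION_TERMINAL_STATUSES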
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/dataprep.py | {
"start": 1092,
"end": 1293
} | class ____(BaseGoogleLink):
"""Helper class for constructing Dataprep flow link."""
name = "Flow details page"
key = "dataprep_flow_page"
format_str = DATAPREP_FLOW_LINK
| DataprepFlowLink |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/constructor30.py | {
"start": 665,
"end": 812
} | class ____(Generic[TA]):
def __init__(self, _type: type[TA]) -> None: ...
c = B(C, A)
reveal_type(c, expected_text="B[(_type: type[A]), C[A]]")
| C |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/utils/mlengine_prediction_summary.py | {
"start": 4184,
"end": 7667
} | class ____(Coder):
"""JSON encoder/decoder."""
@staticmethod
def encode(x):
"""JSON encoder."""
return json.dumps(x).encode()
@staticmethod
def decode(x):
"""JSON decoder."""
return json.loads(x)
@beam.ptransform_fn
def MakeSummary(pcoll, metric_fn, metric_keys):
"""Summary PTransform used in Dataflow."""
return (
pcoll
| "ApplyMetricFnPerInstance" >> beam.Map(metric_fn)
| "PairWith1" >> beam.Map(lambda tup: (*tup, 1))
| "SumTuple" >> beam.CombineGlobally(beam.combiners.TupleCombineFn(*([sum] * (len(metric_keys) + 1))))
| "AverageAndMakeDict"
>> beam.Map(
lambda tup: dict(
[(name, tup[i] / tup[-1]) for i, name in enumerate(metric_keys)] + [("count", tup[-1])]
)
)
)
def run(argv=None):
"""Obtain prediction summary."""
parser = argparse.ArgumentParser()
parser.add_argument(
"--prediction_path",
required=True,
        help=(
            "The GCS folder that contains BatchPrediction results, containing "
            "prediction.results-NNNNN-of-NNNNN files in the json format. "
            "Output will also be stored in this folder, as a file "
            "'prediction.summary.json'."
),
)
parser.add_argument(
"--metric_fn_encoded",
required=True,
help=(
"An encoded function that calculates and returns a tuple of "
"metric(s) for a given instance (as a dictionary). It should be "
"encoded via base64.b64encode(dill.dumps(fn, recurse=True))."
),
)
parser.add_argument(
"--metric_keys",
required=True,
        help=(
            "A comma-separated list of keys of the aggregated metric(s) in the summary "
            "output. The order and the number of the keys must match the "
"output of metric_fn. The summary will have an additional key, "
"'count', to represent the total number of instances, so this flag "
"shouldn't include 'count'."
),
)
known_args, pipeline_args = parser.parse_known_args(argv)
metric_fn = dill.loads(base64.b64decode(known_args.metric_fn_encoded))
if not callable(metric_fn):
raise ValueError("--metric_fn_encoded must be an encoded callable.")
metric_keys = known_args.metric_keys.split(",")
with beam.Pipeline(options=beam.pipeline.PipelineOptions(pipeline_args)) as pipe:
prediction_result_pattern = os.path.join(known_args.prediction_path, "prediction.results-*-of-*")
prediction_summary_path = os.path.join(known_args.prediction_path, "prediction.summary.json")
# This is apache-beam ptransform's convention
_ = (
pipe
| "ReadPredictionResult" >> beam.io.ReadFromText(prediction_result_pattern, coder=JsonCoder())
| "Summary" >> MakeSummary(metric_fn, metric_keys)
| "Write"
>> beam.io.WriteToText(
prediction_summary_path,
shard_name_template="", # without trailing -NNNNN-of-NNNNN.
coder=JsonCoder(),
)
)
if __name__ == "__main__":
# Dataflow does not print anything on the screen by default. Good practice says to configure the logger
# to be able to track the progress. This code is run in a separate process, so it's safe.
logging.getLogger().setLevel(logging.INFO)
run()
| JsonCoder |
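The `--metric_fn_encoded` help text above already states the encoding recipe; below is a small hypothetical sketch of how a caller could build that argument (the metric function and the key name "err" are invented for illustration):
# Hypothetical: encode a single-metric function exactly as the help text asks.
import base64
import dill
def metric_fn(instance):
    # one prediction instance is a dict; return a tuple of metric values
    return (abs(instance["predicted"] - instance["label"]),)
metric_fn_encoded = base64.b64encode(dill.dumps(metric_fn, recurse=True)).decode()
# passed to the pipeline as, e.g.:
#   --prediction_path=gs://<bucket>/prediction_output --metric_fn_encoded=<metric_fn_encoded> --metric_keys=err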
python | doocs__leetcode | solution/1300-1399/1373.Maximum Sum BST in Binary Tree/Solution.py | {
"start": 192,
"end": 792
} | class ____:
def maxSumBST(self, root: Optional[TreeNode]) -> int:
def dfs(root: Optional[TreeNode]) -> tuple:
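            # Returns (is_bst, subtree_min, subtree_max, subtree_sum) for this subtree;
            # is_bst is 1 when the subtree is a valid BST, else 0.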
if root is None:
return 1, inf, -inf, 0
lbst, lmi, lmx, ls = dfs(root.left)
rbst, rmi, rmx, rs = dfs(root.right)
if lbst and rbst and lmx < root.val < rmi:
nonlocal ans
s = ls + rs + root.val
ans = max(ans, s)
return 1, min(lmi, root.val), max(rmx, root.val), s
return 0, 0, 0, 0
ans = 0
dfs(root)
return ans
| Solution |
python | walkccc__LeetCode | solutions/3320. Count The Number of Winning Sequences/3320.py | {
"start": 0,
"end": 1365
} | class ____:
def countWinningSequences(self, s: str) -> int:
MOD = 1_000_000_007
@functools.lru_cache(None)
def dp(i: int, prev: int, bob: int) -> int:
"""
            Returns the number of distinct sequences Bob can use to beat Alice for
            s[i..n), where Bob's previous summon is `prev` (0: F, 1: W, 2: E) and
            `bob` is Bob's current score relative to Alice.
"""
if i == len(s):
return int(bob > 0)
f = 0 # If Bob summons a Fire Dragon at i.
w = 0 # If Bob summons a Water Serpent at i.
            e = 0  # If Bob summons an Earth Golem at i.
match s[i]:
case 'F':
if prev != 0:
f = dp(i + 1, 0, bob) % MOD
if prev != 1:
w = dp(i + 1, 1, bob + 1) % MOD
if prev != 2:
e = dp(i + 1, 2, bob - 1) % MOD
case 'W':
if prev != 0:
f = dp(i + 1, 0, bob - 1) % MOD
if prev != 1:
w = dp(i + 1, 1, bob) % MOD
if prev != 2:
e = dp(i + 1, 2, bob + 1) % MOD
case 'E':
if prev != 0:
f = dp(i + 1, 0, bob + 1) % MOD
if prev != 1:
w = dp(i + 1, 1, bob - 1) % MOD
if prev != 2:
e = dp(i + 1, 2, bob) % MOD
return f + w + e
return (dp(0, 0, 0) + dp(0, 1, 0) + dp(0, 2, 0)) // 2 % MOD
| Solution |
python | gevent__gevent | src/greentest/3.10/test_httplib.py | {
"start": 14776,
"end": 19643
} | class ____(TestCase):
expected_body = b"It's just a flesh wound"
def test_endheaders_chunked(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.putrequest('POST', '/')
conn.endheaders(self._make_body(), encode_chunked=True)
_, _, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
def test_explicit_headers(self):
# explicit chunked
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
# this shouldn't actually be automatically chunk-encoded because the
# calling code has explicitly stated that it's taking care of it
conn.request(
'POST', '/', self._make_body(), {'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# explicit chunked, string body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self.expected_body.decode('latin-1'),
{'Transfer-Encoding': 'chunked'})
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers.keys()])
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertEqual(body, self.expected_body)
# User-specified TE, but request() does the chunk encoding
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/',
headers={'Transfer-Encoding': 'gzip, chunked'},
encode_chunked=True,
body=self._make_body())
_, headers, body = self._parse_request(conn.sock.data)
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(headers['Transfer-Encoding'], 'gzip, chunked')
self.assertEqual(self._parse_chunked(body), self.expected_body)
def test_request(self):
for empty_lines in (False, True,):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request(
'POST', '/', self._make_body(empty_lines=empty_lines))
_, headers, body = self._parse_request(conn.sock.data)
body = self._parse_chunked(body)
self.assertEqual(body, self.expected_body)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
# Content-Length and Transfer-Encoding SHOULD not be sent in the
# same request
self.assertNotIn('content-length', [k.lower() for k in headers])
def test_empty_body(self):
# Zero-length iterable should be treated like any other iterable
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(b'')
conn.request('POST', '/', ())
_, headers, body = self._parse_request(conn.sock.data)
self.assertEqual(headers['Transfer-Encoding'], 'chunked')
self.assertNotIn('content-length', [k.lower() for k in headers])
self.assertEqual(body, b"0\r\n\r\n")
def _make_body(self, empty_lines=False):
lines = self.expected_body.split(b' ')
for idx, line in enumerate(lines):
# for testing handling empty lines
if empty_lines and idx % 2:
yield b''
if idx < len(lines) - 1:
yield line + b' '
else:
yield line
def _parse_request(self, data):
lines = data.split(b'\r\n')
request = lines[0]
headers = {}
n = 1
while n < len(lines) and len(lines[n]) > 0:
key, val = lines[n].split(b':')
key = key.decode('latin-1').strip()
headers[key] = val.decode('latin-1').strip()
n += 1
return request, headers, b'\r\n'.join(lines[n + 1:])
def _parse_chunked(self, data):
body = []
trailers = {}
n = 0
lines = data.split(b'\r\n')
# parse body
while True:
size, chunk = lines[n:n+2]
size = int(size, 16)
if size == 0:
n += 1
break
self.assertEqual(size, len(chunk))
body.append(chunk)
n += 2
# we /should/ hit the end chunk, but check against the size of
# lines so we're not stuck in an infinite loop should we get
# malformed data
if n > len(lines):
break
return b''.join(body)
| TransferEncodingTest |
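For reference, the chunked wire format that `_parse_chunked` above decodes can be constructed like this (illustrative only, not part of the test file):
# Each chunk is b"<hex length>\r\n<data>\r\n", terminated by a zero-length chunk.
chunks = [b"It's just ", b"a flesh wound"]
wire = b"".join(b"%x\r\n%s\r\n" % (len(c), c) for c in chunks) + b"0\r\n\r\n"
# wire == b"a\r\nIt's just \r\nd\r\na flesh wound\r\n0\r\n\r\n"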
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/ccroot.py | {
"start": 5496,
"end": 16176
} | class ____(link_task):
run_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}'
chmod = Utils.O644
def rm_tgt(cls):
old = cls.run
def wrap(self):
try:
os.remove(self.outputs[0].abspath())
except OSError:
pass
return old(self)
setattr(cls, 'run', wrap)
rm_tgt(stlink_task)
@feature('skip_stlib_link_deps')
@before_method('process_use')
def apply_skip_stlib_link_deps(self):
self.env.SKIP_STLIB_LINK_DEPS = True
@feature('c', 'cxx', 'd', 'fc', 'asm')
@after_method('process_source')
def apply_link(self):
for x in self.features:
if x == 'cprogram' and 'cxx' in self.features:
x = 'cxxprogram'
elif x == 'cshlib' and 'cxx' in self.features:
x = 'cxxshlib'
if x in Task.classes:
if issubclass(Task.classes[x], link_task):
link = x
break
else:
return
objs = [t.outputs[0] for t in getattr(self, 'compiled_tasks', [])]
self.link_task = self.create_task(link, objs)
self.link_task.add_target(self.target)
try:
inst_to = self.install_path
except AttributeError:
inst_to = self.link_task.inst_to
if inst_to:
self.install_task = self.add_install_files(
install_to=inst_to, install_from=self.link_task.outputs[:], chmod=self.link_task.chmod, task=self.link_task
)
@taskgen_method
def use_rec(self, name, **kw):
if name in self.tmp_use_not or name in self.tmp_use_seen:
return
try:
y = self.bld.get_tgen_by_name(name)
except Errors.WafError:
self.uselib.append(name)
self.tmp_use_not.add(name)
return
self.tmp_use_seen.append(name)
y.post()
y.tmp_use_objects = objects = kw.get('objects', True)
y.tmp_use_stlib = stlib = kw.get('stlib', True)
try:
link_task = y.link_task
except AttributeError:
y.tmp_use_var = ''
else:
objects = False
if not isinstance(link_task, stlink_task):
stlib = False
y.tmp_use_var = 'LIB'
else:
y.tmp_use_var = 'STLIB'
p = self.tmp_use_prec
for x in self.to_list(getattr(y, 'use', [])):
if self.env["STLIB_" + x]:
continue
try:
p[x].append(name)
except KeyError:
p[x] = [name]
self.use_rec(x, objects=objects, stlib=stlib)
@feature('c', 'cxx', 'd', 'use', 'fc')
@before_method('apply_incpaths', 'propagate_uselib_vars')
@after_method('apply_link', 'process_source')
def process_use(self):
use_not = self.tmp_use_not = set()
self.tmp_use_seen = []
use_prec = self.tmp_use_prec = {}
self.uselib = self.to_list(getattr(self, 'uselib', []))
self.includes = self.to_list(getattr(self, 'includes', []))
names = self.to_list(getattr(self, 'use', []))
for x in names:
self.use_rec(x)
for x in use_not:
if x in use_prec:
del use_prec[x]
out = self.tmp_use_sorted = []
tmp = []
for x in self.tmp_use_seen:
for k in use_prec.values():
if x in k:
break
else:
tmp.append(x)
while tmp:
e = tmp.pop()
out.append(e)
try:
nlst = use_prec[e]
except KeyError:
pass
else:
del use_prec[e]
for x in nlst:
for y in use_prec:
if x in use_prec[y]:
break
else:
tmp.append(x)
if use_prec:
raise Errors.WafError('Cycle detected in the use processing %r' % use_prec)
out.reverse()
link_task = getattr(self, 'link_task', None)
for x in out:
y = self.bld.get_tgen_by_name(x)
var = y.tmp_use_var
if var and link_task:
if self.env.SKIP_STLIB_LINK_DEPS and isinstance(link_task, stlink_task):
pass
elif var == 'LIB' or y.tmp_use_stlib or x in names:
self.env.append_value(var, [y.target[y.target.rfind(os.sep) + 1:]])
self.link_task.dep_nodes.extend(y.link_task.outputs)
tmp_path = y.link_task.outputs[0].parent.path_from(self.get_cwd())
self.env.append_unique(var + 'PATH', [tmp_path])
else:
if y.tmp_use_objects:
self.add_objects_from_tgen(y)
if getattr(y, 'export_includes', None):
self.includes = self.includes + y.to_incnodes(y.export_includes)
if getattr(y, 'export_defines', None):
self.env.append_value('DEFINES', self.to_list(y.export_defines))
for x in names:
try:
y = self.bld.get_tgen_by_name(x)
except Errors.WafError:
if not self.env['STLIB_' + x] and not x in self.uselib:
self.uselib.append(x)
else:
for k in self.to_list(getattr(y, 'use', [])):
if not self.env['STLIB_' + k] and not k in self.uselib:
self.uselib.append(k)
@taskgen_method
def accept_node_to_link(self, node):
return not node.name.endswith('.pdb')
@taskgen_method
def add_objects_from_tgen(self, tg):
try:
link_task = self.link_task
except AttributeError:
pass
else:
for tsk in getattr(tg, 'compiled_tasks', []):
for x in tsk.outputs:
if self.accept_node_to_link(x):
link_task.inputs.append(x)
@taskgen_method
def get_uselib_vars(self):
_vars = set()
for x in self.features:
if x in USELIB_VARS:
_vars |= USELIB_VARS[x]
return _vars
@feature('c', 'cxx', 'd', 'fc', 'javac', 'cs', 'uselib', 'asm')
@after_method('process_use')
def propagate_uselib_vars(self):
_vars = self.get_uselib_vars()
env = self.env
app = env.append_value
feature_uselib = self.features + self.to_list(getattr(self, 'uselib', []))
for var in _vars:
y = var.lower()
val = getattr(self, y, [])
if val:
app(var, self.to_list(val))
for x in feature_uselib:
val = env['%s_%s' % (var, x)]
if val:
app(var, val)
@feature('cshlib', 'cxxshlib', 'fcshlib')
@after_method('apply_link')
def apply_implib(self):
if not self.env.DEST_BINFMT == 'pe':
return
dll = self.link_task.outputs[0]
if isinstance(self.target, Node.Node):
name = self.target.name
else:
name = os.path.split(self.target)[1]
implib = self.env.implib_PATTERN % name
implib = dll.parent.find_or_declare(implib)
self.env.append_value('LINKFLAGS', self.env.IMPLIB_ST % implib.bldpath())
self.link_task.outputs.append(implib)
if getattr(self, 'defs', None) and self.env.DEST_BINFMT == 'pe':
node = self.path.find_resource(self.defs)
if not node:
raise Errors.WafError('invalid def file %r' % self.defs)
if self.env.def_PATTERN:
self.env.append_value('LINKFLAGS', self.env.def_PATTERN % node.path_from(self.get_cwd()))
self.link_task.dep_nodes.append(node)
else:
self.link_task.inputs.append(node)
if getattr(self, 'install_task', None):
try:
inst_to = self.install_path_implib
except AttributeError:
try:
inst_to = self.install_path
except AttributeError:
inst_to = '${IMPLIBDIR}'
self.install_task.install_to = '${BINDIR}'
if not self.env.IMPLIBDIR:
self.env.IMPLIBDIR = self.env.LIBDIR
self.implib_install_task = self.add_install_files(
install_to=inst_to, install_from=implib, chmod=self.link_task.chmod, task=self.link_task
)
re_vnum = re.compile('^([1-9]\\d*|0)([.]([1-9]\\d*|0)){0,2}?$')
@feature('cshlib', 'cxxshlib', 'dshlib', 'fcshlib', 'vnum')
@after_method('apply_link', 'propagate_uselib_vars')
def apply_vnum(self):
if not getattr(self, 'vnum', '') or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'):
return
link = self.link_task
if not re_vnum.match(self.vnum):
raise Errors.WafError('Invalid vnum %r for target %r' % (self.vnum, getattr(self, 'name', self)))
nums = self.vnum.split('.')
node = link.outputs[0]
cnum = getattr(self, 'cnum', str(nums[0]))
cnums = cnum.split('.')
if len(cnums) > len(nums) or nums[0:len(cnums)] != cnums:
raise Errors.WafError('invalid compatibility version %s' % cnum)
libname = node.name
if libname.endswith('.dylib'):
name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum)
name2 = libname.replace('.dylib', '.%s.dylib' % cnum)
else:
name3 = libname + '.' + self.vnum
name2 = libname + '.' + cnum
if self.env.SONAME_ST:
v = self.env.SONAME_ST % name2
self.env.append_value('LINKFLAGS', v.split())
if self.env.DEST_OS != 'openbsd':
outs = [node.parent.make_node(name3)]
if name2 != name3:
outs.append(node.parent.make_node(name2))
self.create_task('vnum', node, outs)
if getattr(self, 'install_task', None):
self.install_task.hasrun = Task.SKIPPED
self.install_task.no_errcheck_out = True
path = self.install_task.install_to
if self.env.DEST_OS == 'openbsd':
libname = self.link_task.outputs[0].name
t1 = self.add_install_as(
install_to='%s/%s' % (path, libname), install_from=node, chmod=self.link_task.chmod
)
self.vnum_install_task = (t1,)
else:
t1 = self.add_install_as(install_to=path + os.sep + name3, install_from=node, chmod=self.link_task.chmod)
t3 = self.add_symlink_as(install_to=path + os.sep + libname, install_from=name3)
if name2 != name3:
t2 = self.add_symlink_as(install_to=path + os.sep + name2, install_from=name3)
self.vnum_install_task = (t1, t2, t3)
else:
self.vnum_install_task = (t1, t3)
if '-dynamiclib' in self.env.LINKFLAGS:
try:
inst_to = self.install_path
except AttributeError:
inst_to = self.link_task.inst_to
if inst_to:
p = Utils.subst_vars(inst_to, self.env)
path = os.path.join(p, name2)
self.env.append_value('LINKFLAGS', ['-install_name', path])
self.env.append_value('LINKFLAGS', '-Wl,-compatibility_version,%s' % cnum)
self.env.append_value('LINKFLAGS', '-Wl,-current_version,%s' % self.vnum)
| stlink_task |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/triggers/base.py | {
"start": 1213,
"end": 6820
} | class ____(BaseTrigger):
"""
Base class for all AWS Triggers that follow the "standard" model of just waiting on a waiter.
Subclasses need to implement the hook() method.
:param serialized_fields: Fields that are specific to the subclass trigger and need to be serialized
to be passed to the __init__ method on deserialization.
The conn id, region, and waiter delay & attempts are always serialized.
format: {<parameter_name>: <parameter_value>}
:param waiter_name: The name of the (possibly custom) boto waiter to use.
:param waiter_args: The arguments to pass to the waiter.
:param failure_message: The message to log if a failure state is reached.
:param status_message: The message logged when printing the status of the service.
:param status_queries: A list containing the JMESPath queries to retrieve status information from
the waiter response. See https://jmespath.org/tutorial.html
:param return_key: The key to use for the return_value in the TriggerEvent this emits on success.
Defaults to "value".
:param return_value: A value that'll be returned in the return_key field of the TriggerEvent.
Set to None if there is nothing to return.
:param waiter_delay: The amount of time in seconds to wait between attempts.
:param waiter_max_attempts: The maximum number of attempts to be made.
:param waiter_config_overrides: A dict to update waiter's default configuration. Only specified keys will
be updated.
:param aws_conn_id: The Airflow connection used for AWS credentials. To be used to build the hook.
:param region_name: The AWS region where the resources to watch are. To be used to build the hook.
:param verify: Whether or not to verify SSL certificates. To be used to build the hook.
See: https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client.
To be used to build the hook. For available key-values see:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
def __init__(
self,
*,
serialized_fields: dict[str, Any],
waiter_name: str,
waiter_args: dict[str, Any],
failure_message: str,
status_message: str,
status_queries: list[str],
return_key: str = "value",
return_value: Any,
waiter_delay: int,
waiter_max_attempts: int,
waiter_config_overrides: dict[str, Any] | None = None,
aws_conn_id: str | None = "aws_default",
region_name: str | None = None,
verify: bool | str | None = None,
botocore_config: dict | None = None,
):
super().__init__()
        # parameters that should be hardcoded in the child's implementation
self.serialized_fields = serialized_fields
self.waiter_name = waiter_name
self.waiter_args = waiter_args
self.failure_message = failure_message
self.status_message = status_message
self.status_queries = status_queries
self.waiter_config_overrides = waiter_config_overrides
self.return_key = return_key
self.return_value = return_value
# parameters that should be passed directly from the child's parameters
self.waiter_delay = waiter_delay
self.attempts = waiter_max_attempts
self.aws_conn_id = aws_conn_id
self.region_name = region_name
self.verify = verify
self.botocore_config = botocore_config
def serialize(self) -> tuple[str, dict[str, Any]]:
# here we put together the "common" params,
# and whatever extras we got from the subclass in serialized_fields
params = dict(
{
"waiter_delay": self.waiter_delay,
"waiter_max_attempts": self.attempts,
"aws_conn_id": self.aws_conn_id,
},
**self.serialized_fields,
)
# if we serialize the None value from this, it breaks subclasses that don't have it in their ctor.
params.update(
prune_dict(
{
# Keep previous behaviour when empty string in region_name evaluated as `None`
"region_name": self.region_name or None,
"verify": self.verify,
"botocore_config": self.botocore_config,
}
)
)
return (
# remember that self is an instance of the subclass here, not of this class.
self.__class__.__module__ + "." + self.__class__.__qualname__,
params,
)
@abstractmethod
def hook(self) -> AwsGenericHook:
"""Override in subclasses to return the right hook."""
async def run(self) -> AsyncIterator[TriggerEvent]:
hook = self.hook()
async with await hook.get_async_conn() as client:
waiter = hook.get_waiter(
self.waiter_name,
deferrable=True,
client=client,
config_overrides=self.waiter_config_overrides,
)
await async_wait(
waiter,
self.waiter_delay,
self.attempts,
self.waiter_args,
self.failure_message,
self.status_message,
self.status_queries,
)
yield TriggerEvent({"status": "success", self.return_key: self.return_value})
| AwsBaseWaiterTrigger |
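The docstring above says subclasses only need to provide `hook()` plus their own serialized fields; a minimal, hypothetical subclass could look like the following (the waiter name, waiter arguments, and hook choice are invented for illustration, not taken from the provider):
# Hypothetical sketch of a concrete trigger built on the base class above.
from airflow.providers.amazon.aws.hooks.base_aws import AwsGenericHook
from airflow.providers.amazon.aws.triggers.base import AwsBaseWaiterTrigger
class ExampleClusterActiveTrigger(AwsBaseWaiterTrigger):
    def __init__(self, *, cluster_id: str, waiter_delay: int = 30, waiter_max_attempts: int = 60, aws_conn_id: str = "aws_default"):
        super().__init__(
            serialized_fields={"cluster_id": cluster_id},  # extra ctor args restored on deserialization
            waiter_name="cluster_active",  # assumed custom waiter name
            waiter_args={"ClusterId": cluster_id},
            failure_message="Cluster failed to become active",
            status_message="Cluster status is",
            status_queries=["Cluster.Status"],
            return_value=cluster_id,
            waiter_delay=waiter_delay,
            waiter_max_attempts=waiter_max_attempts,
            aws_conn_id=aws_conn_id,
        )
        self.cluster_id = cluster_id
    def hook(self) -> AwsGenericHook:
        # Any AwsGenericHook subclass works; run() only needs get_async_conn() and get_waiter().
        return AwsGenericHook(aws_conn_id=self.aws_conn_id, client_type="emr")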
python | python-poetry__poetry | src/poetry/console/commands/source/remove.py | {
"start": 286,
"end": 1280
} | class ____(Command):
name = "source remove"
description = "Remove source configured for the project."
arguments: ClassVar[list[Argument]] = [
argument(
"name",
"Source repository name.",
),
]
def handle(self) -> int:
name = self.argument("name")
lower_name = name.lower()
sources = AoT([])
removed = False
for source in self.poetry.get_sources():
if source.name.lower() == lower_name:
self.line(f"Removing source with name <c1>{source.name}</c1>.")
removed = True
continue
sources.append(source.to_toml_table())
if not removed:
self.line_error(
f"<error>Source with name <c1>{name}</c1> was not found.</error>"
)
return 1
self.poetry.pyproject.poetry_config["source"] = sources
self.poetry.pyproject.save()
return 0
| SourceRemoveCommand |
python | instagram__MonkeyType | tests/test_stubs.py | {
"start": 21321,
"end": 31232
} | class ____:
def test_render(self):
cm_stub = _func_stub_from_callable(Dummy.a_class_method)
im_stub = _func_stub_from_callable(Dummy.an_instance_method)
sig_stub = _func_stub_from_callable(Dummy.has_complex_signature)
func_stubs = (cm_stub, im_stub, sig_stub)
test_stub = ClassStub('Test', function_stubs=func_stubs)
test2_stub = ClassStub('Test2', function_stubs=func_stubs)
other_class_stubs = module_stub_for_method_with_typed_dict['tests.util'].class_stubs.values()
class_stubs = (*other_class_stubs, test_stub, test2_stub)
typed_dict_class_stubs = module_stub_for_method_with_typed_dict['tests.util'].typed_dict_class_stubs
mod_stub = ModuleStub(function_stubs=func_stubs,
class_stubs=class_stubs,
typed_dict_class_stubs=typed_dict_class_stubs)
expected = '\n'.join([
'class DummyAnInstanceMethodTypedDict__RENAME_ME__(TypedDict):',
' c: int',
'',
'',
'class FooTypedDict__RENAME_ME__(TypedDict):',
' a: int',
' b: str',
'',
'',
'@classmethod',
'def a_class_method(foo: Any) -> Optional[frame]: ...',
'',
'',
'def an_instance_method(self, foo: Any, bar: Any) -> Optional[frame]: ...',
'',
'',
'def has_complex_signature(',
' self,',
' a: Any,',
' b: Any,',
' /,',
' c: Any,',
' d: Any = ...,',
' *e: Any,',
' f: Any,',
' g: Any = ...,',
' **h: Any',
') -> Optional[frame]: ...',
'',
'',
'class Dummy:',
' def an_instance_method(',
' self,',
' foo: \'FooTypedDict__RENAME_ME__\',',
' bar: int',
' ) -> \'DummyAnInstanceMethodTypedDict__RENAME_ME__\': ...',
'',
'',
'class Test:',
' @classmethod',
' def a_class_method(foo: Any) -> Optional[frame]: ...',
' def an_instance_method(self, foo: Any, bar: Any) -> Optional[frame]: ...',
' def has_complex_signature(',
' self,',
' a: Any,',
' b: Any,',
' /,',
' c: Any,',
' d: Any = ...,',
' *e: Any,',
' f: Any,',
' g: Any = ...,',
' **h: Any',
' ) -> Optional[frame]: ...',
'',
'',
'class Test2:',
' @classmethod',
' def a_class_method(foo: Any) -> Optional[frame]: ...',
' def an_instance_method(self, foo: Any, bar: Any) -> Optional[frame]: ...',
' def has_complex_signature(',
' self,',
' a: Any,',
' b: Any,',
' /,',
' c: Any,',
' d: Any = ...,',
' *e: Any,',
' f: Any,',
' g: Any = ...,',
' **h: Any',
' ) -> Optional[frame]: ...',
])
assert mod_stub.render() == expected
def test_render_nested_typed_dict(self):
function = FunctionDefinition.from_callable_and_traced_types(
Dummy.an_instance_method,
{
'foo': make_typed_dict(required_fields={
# Naming the key 'z' to test a class name
# that comes last in alphabetical order.
'z': make_typed_dict(required_fields={'a': int, 'b': str}),
'b': str,
}),
'bar': int,
},
int,
None,
existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE
)
entries = [function]
expected = '\n'.join([
'from mypy_extensions import TypedDict',
'',
'',
'class FooTypedDict__RENAME_ME__(TypedDict):',
' b: str',
# We can forward-reference a class that is defined afterwards.
' z: \'ZTypedDict__RENAME_ME__\'',
'',
'',
'class ZTypedDict__RENAME_ME__(TypedDict):',
' a: int',
' b: str',
'',
'',
'class Dummy:',
' def an_instance_method(self, foo: \'FooTypedDict__RENAME_ME__\', bar: int) -> int: ...'])
self.maxDiff = None
assert build_module_stubs(entries)['tests.util'].render() == expected
def test_render_return_typed_dict(self):
function = FunctionDefinition.from_callable_and_traced_types(
Dummy.an_instance_method,
{
'foo': int,
'bar': int,
},
make_typed_dict(required_fields={'a': int, 'b': str}),
yield_type=None,
existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE
)
entries = [function]
expected = '\n'.join([
'from mypy_extensions import TypedDict',
'',
'',
'class DummyAnInstanceMethodTypedDict__RENAME_ME__(TypedDict):',
' a: int',
' b: str',
'',
'',
'class Dummy:',
' def an_instance_method(self, foo: int, bar: int)'
' -> \'DummyAnInstanceMethodTypedDict__RENAME_ME__\': ...',
])
self.maxDiff = None
assert build_module_stubs(entries)['tests.util'].render() == expected
def test_render_yield_typed_dict(self):
function = FunctionDefinition.from_callable_and_traced_types(
Dummy.an_instance_method,
{
'foo': int,
'bar': int,
},
int,
yield_type=make_typed_dict(required_fields={'a': int, 'b': str}),
existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE
)
entries = [function]
expected = '\n'.join([
'from mypy_extensions import TypedDict',
'from typing import Generator',
'',
'',
'class DummyAnInstanceMethodYieldTypedDict__RENAME_ME__(TypedDict):',
' a: int',
' b: str',
'',
'',
'class Dummy:',
' def an_instance_method(',
' self,',
' foo: int,',
' bar: int',
' ) -> Generator[\'DummyAnInstanceMethodYieldTypedDict__RENAME_ME__\', None, int]: ...',
])
self.maxDiff = None
assert build_module_stubs(entries)['tests.util'].render() == expected
def test_render_typed_dict_in_list(self):
function = FunctionDefinition.from_callable_and_traced_types(
Dummy.an_instance_method,
{
'foo': List[make_typed_dict(required_fields={'a': int})],
'bar': int,
},
int,
None,
existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE,
)
entries = [function]
expected = '\n'.join([
'from mypy_extensions import TypedDict',
'from typing import List',
'',
'',
'class FooTypedDict__RENAME_ME__(TypedDict):',
' a: int',
'',
'',
'class Dummy:',
' def an_instance_method(self, foo: List[\'FooTypedDict__RENAME_ME__\'], bar: int) -> int: ...'])
self.maxDiff = None
assert build_module_stubs(entries)['tests.util'].render() == expected
def test_render_typed_dict_base_and_subclass(self):
function = FunctionDefinition.from_callable_and_traced_types(
Dummy.an_instance_method,
{
'foo': make_typed_dict(required_fields={'a': int}, optional_fields={'b': str}),
'bar': int,
},
int,
None,
existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE,
)
entries = [function]
expected = '\n'.join([
'from mypy_extensions import TypedDict',
'',
'',
'class FooTypedDict__RENAME_ME__(TypedDict):',
' a: int',
'',
'',
'class FooTypedDict__RENAME_ME__NonTotal(FooTypedDict__RENAME_ME__, total=False):',
' b: str',
'',
'',
'class Dummy:',
' def an_instance_method(self, foo: \'FooTypedDict__RENAME_ME__NonTotal\', bar: int) -> int: ...'])
assert build_module_stubs(entries)['tests.util'].render() == expected
def test_render_return_empty_tuple(self):
"""Regression test for #190."""
function = FunctionDefinition.from_callable_and_traced_types(
Dummy.an_instance_method,
{
'foo': int,
'bar': int,
},
Tuple[()],
yield_type=None,
existing_annotation_strategy=ExistingAnnotationStrategy.IGNORE
)
entries = [function]
expected = '\n'.join([
'from typing import Tuple',
'',
'',
'class Dummy:',
' def an_instance_method(self, foo: int, bar: int)'
' -> Tuple[()]: ...',
])
self.maxDiff = None
assert build_module_stubs(entries)['tests.util'].render() == expected
| TestModuleStub |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_data_labels31.py | {
"start": 315,
"end": 1699
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_data_labels31.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [71248896, 71373568]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
[10, 20, 30, 40, 50],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
worksheet.write_column("D1", data[3])
chart.add_series(
{
"values": "=Sheet1!$A$1:$A$5",
"data_labels": {
"value": True,
"category": True,
"series_name": True,
"custom": [{"value": 33}],
},
}
)
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | tensorflow__tensorflow | tensorflow/python/lib/io/file_io_test.py | {
"start": 1411,
"end": 27361
} | class ____(test.TestCase, parameterized.TestCase):
def setUp(self):
self._base_dir = file_io.join(self.get_temp_dir(), "base_dir")
file_io.create_dir(self._base_dir)
def tearDown(self):
file_io.delete_recursively(self._base_dir)
def testEmptyFilename(self):
f = file_io.FileIO("", mode="r")
with self.assertRaises(errors.NotFoundError):
_ = f.read()
def testJoinUrlLike(self):
"""file_io.join joins url-like filesystems with '/' on all platform."""
for fs in ("ram://", "gcs://", "file://"):
expected = fs + "exists/a/b/c.txt"
self.assertEqual(file_io.join(fs, "exists", "a", "b", "c.txt"), expected)
self.assertEqual(file_io.join(fs + "exists", "a", "b", "c.txt"), expected)
self.assertEqual(file_io.join(fs, "exists/a", "b", "c.txt"), expected)
self.assertEqual(file_io.join(fs, "exists", "a", "b/c.txt"), expected)
def testJoinFilesystem(self):
"""file_io.join respects the os.path.join behavior for native filesystems."""
for sep in ("/", "\\", os.sep):
self.assertEqual(os.path.join("a", "b", "c"), file_io.join("a", "b", "c"))
self.assertEqual(
os.path.join(sep + "a", "b", "c"), file_io.join(sep + "a", "b", "c"))
self.assertEqual(
os.path.join("a", sep + "b", "c"), file_io.join("a", sep + "b", "c"))
self.assertEqual(
os.path.join("a", "b", sep + "c"), file_io.join("a", "b", sep + "c"))
self.assertEqual(
os.path.join("a", "b", "c" + sep), file_io.join("a", "b", "c" + sep))
@run_all_path_types
def testFileDoesntExist(self, join):
file_path = join(self._base_dir, "temp_file")
self.assertFalse(file_io.file_exists(file_path))
with self.assertRaises(errors.NotFoundError):
_ = file_io.read_file_to_string(file_path)
@run_all_path_types
def testWriteToString(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testAtomicWriteStringToFile(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.atomic_write_string_to_file(file_path, "testing")
self.assertTrue(file_io.file_exists(file_path))
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("testing", file_contents)
def testAtomicWriteStringToFileOverwriteFalse(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.atomic_write_string_to_file(file_path, "old", overwrite=False)
with self.assertRaises(errors.AlreadyExistsError):
file_io.atomic_write_string_to_file(file_path, "new", overwrite=False)
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("old", file_contents)
file_io.delete_file(file_path)
file_io.atomic_write_string_to_file(file_path, "new", overwrite=False)
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("new", file_contents)
@run_all_path_types
def testReadBinaryMode(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.write_string_to_file(file_path, "testing")
with file_io.FileIO(file_path, mode="rb") as f:
self.assertEqual(b"testing", f.read())
@run_all_path_types
def testWriteBinaryMode(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, "wb").write("testing")
with file_io.FileIO(file_path, mode="r") as f:
self.assertEqual("testing", f.read())
def testAppend(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("begin\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a1\n")
with file_io.FileIO(file_path, mode="a") as f:
f.write("a2\n")
with file_io.FileIO(file_path, mode="r") as f:
file_contents = f.read()
self.assertEqual("begin\na1\na2\n", file_contents)
def testMultipleFiles(self):
file_prefix = file_io.join(self._base_dir, "temp_file")
for i in range(5000):
f = file_io.FileIO(file_prefix + str(i), mode="w+")
f.write("testing")
f.flush()
self.assertEqual("testing", f.read())
f.close()
def testMultipleWrites(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="w") as f:
f.write("line1\n")
f.write("line2")
file_contents = file_io.read_file_to_string(file_path)
self.assertEqual("line1\nline2", file_contents)
def testFileWriteBadMode(self):
file_path = file_io.join(self._base_dir, "temp_file")
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="r").write("testing")
def testFileReadBadMode(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
with self.assertRaises(errors.PermissionDeniedError):
file_io.FileIO(file_path, mode="w").read()
@run_all_path_types
def testFileDelete(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_io.delete_file(file_path)
self.assertFalse(file_io.file_exists(file_path))
def testFileDeleteFail(self):
file_path = file_io.join(self._base_dir, "temp_file")
with self.assertRaises(errors.NotFoundError):
file_io.delete_file(file_path)
def testGetMatchingFiles(self):
dir_path = file_io.join(self._base_dir, "temp_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt", "file*.txt"]
for name in files:
file_path = file_io.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
expected_match = [file_io.join(dir_path, name) for name in files]
self.assertItemsEqual(
file_io.get_matching_files(file_io.join(dir_path, "file*.txt")),
expected_match)
self.assertItemsEqual(file_io.get_matching_files(tuple()), [])
files_subset = [
file_io.join(dir_path, files[0]),
file_io.join(dir_path, files[2])
]
self.assertItemsEqual(
file_io.get_matching_files(files_subset), files_subset)
file_io.delete_recursively(dir_path)
self.assertFalse(file_io.file_exists(file_io.join(dir_path, "file3.txt")))
  def testGetMatchingFilesWhenParentDirContainsParentheses(self):
dir_path = file_io.join(self._base_dir, "dir_(special)")
file_io.create_dir(dir_path)
files = ["file1.txt", "file(2).txt"]
for name in files:
file_path = file_io.join(dir_path, name)
file_io.FileIO(file_path, mode="w").write("testing")
expected_match = [file_io.join(dir_path, name) for name in files]
glob_pattern = file_io.join(dir_path, "*")
self.assertItemsEqual(
file_io.get_matching_files(glob_pattern), expected_match)
@run_all_path_types
def testCreateRecursiveDir(self, join):
dir_path = join(self._base_dir, "temp_dir/temp_dir1/temp_dir2")
file_io.recursive_create_dir(dir_path)
file_io.recursive_create_dir(dir_path) # repeat creation
file_path = file_io.join(str(dir_path), "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
self.assertTrue(file_io.file_exists(file_path))
file_io.delete_recursively(file_io.join(self._base_dir, "temp_dir"))
self.assertFalse(file_io.file_exists(file_path))
@run_all_path_types
def testCopy(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = join(self._base_dir, "copy_file")
file_io.copy(file_path, copy_path)
self.assertTrue(file_io.file_exists(copy_path))
f = file_io.FileIO(file_path, mode="r")
self.assertEqual("testing", f.read())
self.assertEqual(7, f.tell())
def testCopyOverwrite(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = file_io.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
file_io.copy(file_path, copy_path, overwrite=True)
self.assertTrue(file_io.file_exists(copy_path))
self.assertEqual("testing", file_io.FileIO(file_path, mode="r").read())
def testCopyOverwriteFalse(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
copy_path = file_io.join(self._base_dir, "copy_file")
file_io.FileIO(copy_path, mode="w").write("copy")
with self.assertRaises(errors.AlreadyExistsError):
file_io.copy(file_path, copy_path, overwrite=False)
@run_all_path_types
def testRename(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = join(self._base_dir, "rename_file")
file_io.rename(file_path, rename_path)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwrite(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = file_io.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
file_io.rename(file_path, rename_path, overwrite=True)
self.assertTrue(file_io.file_exists(rename_path))
self.assertFalse(file_io.file_exists(file_path))
def testRenameOverwriteFalse(self):
file_path = file_io.join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
rename_path = file_io.join(self._base_dir, "rename_file")
file_io.FileIO(rename_path, mode="w").write("rename")
with self.assertRaises(errors.AlreadyExistsError):
file_io.rename(file_path, rename_path, overwrite=False)
self.assertTrue(file_io.file_exists(rename_path))
self.assertTrue(file_io.file_exists(file_path))
def testDeleteRecursivelyFail(self):
fake_dir_path = file_io.join(self._base_dir, "temp_dir")
with self.assertRaises(errors.NotFoundError):
file_io.delete_recursively(fake_dir_path)
@run_all_path_types
def testIsDirectory(self, join):
dir_path = join(self._base_dir, "test_dir")
# Failure for a non-existing dir.
self.assertFalse(file_io.is_directory(dir_path))
file_io.create_dir(dir_path)
self.assertTrue(file_io.is_directory(dir_path))
file_path = join(str(dir_path), "test_file")
file_io.FileIO(file_path, mode="w").write("test")
# False for a file.
self.assertFalse(file_io.is_directory(file_path))
# Test that the value returned from `stat()` has `is_directory` set.
file_statistics = file_io.stat(dir_path)
self.assertTrue(file_statistics.is_directory)
@run_all_path_types
def testListDirectory(self, join):
dir_path = join(self._base_dir, "test_dir")
file_io.create_dir(dir_path)
files = ["file1.txt", "file2.txt", "file3.txt"]
for name in files:
file_path = join(str(dir_path), name)
file_io.FileIO(file_path, mode="w").write("testing")
subdir_path = join(str(dir_path), "sub_dir")
file_io.create_dir(subdir_path)
subdir_file_path = join(str(subdir_path), "file4.txt")
file_io.FileIO(subdir_file_path, mode="w").write("testing")
dir_list = file_io.list_directory(dir_path)
self.assertItemsEqual(files + ["sub_dir"], dir_list)
def testListDirectoryFailure(self):
dir_path = file_io.join(self._base_dir, "test_dir")
with self.assertRaises(errors.NotFoundError):
file_io.list_directory(dir_path)
def _setupWalkDirectories(self, dir_path):
# Creating a file structure as follows
# test_dir -> file: file1.txt; dirs: subdir1_1, subdir1_2, subdir1_3
# subdir1_1 -> file: file3.txt
# subdir1_2 -> dir: subdir2
file_io.create_dir(dir_path)
file_io.FileIO(
file_io.join(dir_path, "file1.txt"), mode="w").write("testing")
sub_dirs1 = ["subdir1_1", "subdir1_2", "subdir1_3"]
for name in sub_dirs1:
file_io.create_dir(file_io.join(dir_path, name))
file_io.FileIO(
file_io.join(dir_path, "subdir1_1/file2.txt"),
mode="w").write("testing")
file_io.create_dir(file_io.join(dir_path, "subdir1_2/subdir2"))
@run_all_path_types
def testWalkInOrder(self, join):
dir_path_str = file_io.join(self._base_dir, "test_dir")
dir_path = join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path_str)
# Now test the walk (in_order = True)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=True):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [dir_path_str] + [
file_io.join(dir_path_str, item) for item in
["subdir1_1", "subdir1_2", "subdir1_2/subdir2", "subdir1_3"]
])
self.assertEqual(dir_path_str, all_dirs[0])
self.assertLess(
all_dirs.index(file_io.join(dir_path_str, "subdir1_2")),
all_dirs.index(file_io.join(dir_path_str, "subdir1_2/subdir2")))
self.assertItemsEqual(all_subdirs[1:5], [[], ["subdir2"], [], []])
self.assertItemsEqual(all_subdirs[0],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file1.txt"], ["file2.txt"], [], [], []])
self.assertLess(
all_files.index(["file1.txt"]), all_files.index(["file2.txt"]))
def testWalkPostOrder(self):
dir_path = file_io.join(self._base_dir, "test_dir")
self._setupWalkDirectories(dir_path)
# Now test the walk (in_order = False)
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [
file_io.join(dir_path, item) for item in
["subdir1_1", "subdir1_2/subdir2", "subdir1_2", "subdir1_3"]
] + [dir_path])
self.assertEqual(dir_path, all_dirs[4])
self.assertLess(
all_dirs.index(file_io.join(dir_path, "subdir1_2/subdir2")),
all_dirs.index(file_io.join(dir_path, "subdir1_2")))
self.assertItemsEqual(all_subdirs[0:4], [[], [], ["subdir2"], []])
self.assertItemsEqual(all_subdirs[4],
["subdir1_1", "subdir1_2", "subdir1_3"])
self.assertItemsEqual(all_files, [["file2.txt"], [], [], [], ["file1.txt"]])
self.assertLess(
all_files.index(["file2.txt"]), all_files.index(["file1.txt"]))
def testWalkFailure(self):
dir_path = file_io.join(self._base_dir, "test_dir")
# Try walking a directory that wasn't created.
all_dirs = []
all_subdirs = []
all_files = []
for (w_dir, w_subdirs, w_files) in file_io.walk(dir_path, in_order=False):
all_dirs.append(w_dir)
all_subdirs.append(w_subdirs)
all_files.append(w_files)
self.assertItemsEqual(all_dirs, [])
self.assertItemsEqual(all_subdirs, [])
self.assertItemsEqual(all_files, [])
@run_all_path_types
def testStat(self, join):
file_path = join(self._base_dir, "temp_file")
file_io.FileIO(file_path, mode="w").write("testing")
file_statistics = file_io.stat(file_path)
os_statistics = os.stat(str(file_path))
self.assertEqual(7, file_statistics.length)
self.assertEqual(
int(os_statistics.st_mtime), int(file_statistics.mtime_nsec / 1e9))
self.assertFalse(file_statistics.is_directory)
def testReadLine(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.readline())
self.assertEqual("testing2\n", f.readline())
self.assertEqual("testing3\n", f.readline())
self.assertEqual("\n", f.readline())
self.assertEqual("testing5", f.readline())
self.assertEqual("", f.readline())
def testRead(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(36, f.size())
self.assertEqual("testing1\n", f.read(9))
self.assertEqual("testing2\n", f.read(9))
self.assertEqual("t", f.read(1))
self.assertEqual("esting3\n\ntesting5", f.read())
def testReadErrorReacquiresGil(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
with self.assertRaises(errors.InvalidArgumentError):
# At present, this is sufficient to convince ourselves that the change
# fixes the problem. That is, this test will seg fault without the change,
# and pass with it. Unfortunately, this is brittle, as it relies on the
# Python layer to pass the argument along to the wrapped C++ without
# checking the argument itself.
f.read(-2)
def testTell(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
self.assertEqual(27, f.tell())
self.assertEqual("\n", f.readline())
self.assertEqual(28, f.tell())
self.assertEqual("testing5", f.readline())
self.assertEqual(36, f.tell())
self.assertEqual("", f.readline())
self.assertEqual(36, f.tell())
def testSeek(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(18)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(0)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(-1)
with self.assertRaises(TypeError):
f.seek()
# TODO(jhseu): Delete after position deprecation.
with self.assertRaises(TypeError):
f.seek(offset=0, position=0)
f.seek(position=9)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
def testSeekFromWhat(self):
file_path = file_io.join(self._base_dir, "temp_file")
with file_io.FileIO(file_path, mode="r+") as f:
f.write("testing1\ntesting2\ntesting3\n\ntesting5")
self.assertEqual("testing1\n", f.readline())
self.assertEqual(9, f.tell())
# Seek to 18
f.seek(9, 1)
self.assertEqual(18, f.tell())
self.assertEqual("testing3\n", f.readline())
# Seek back to 9
f.seek(9, 0)
self.assertEqual(9, f.tell())
self.assertEqual("testing2\n", f.readline())
f.seek(-f.size(), 2)
self.assertEqual(0, f.tell())
self.assertEqual("testing1\n", f.readline())
with self.assertRaises(errors.InvalidArgumentError):
f.seek(0, 3)
def testReadingIterator(self):
file_path = file_io.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
with file_io.FileIO(file_path, mode="r+") as f:
f.write("".join(data))
actual_data = []
for line in f:
actual_data.append(line)
self.assertSequenceEqual(actual_data, data)
def testReadlines(self):
file_path = file_io.join(self._base_dir, "temp_file")
data = ["testing1\n", "testing2\n", "testing3\n", "\n", "testing5"]
f = file_io.FileIO(file_path, mode="r+")
f.write("".join(data))
f.flush()
lines = f.readlines()
self.assertSequenceEqual(lines, data)
def testUTF8StringPath(self):
file_path = file_io.join(self._base_dir, "UTF8测试_file")
file_io.write_string_to_file(file_path, "testing")
with file_io.FileIO(file_path, mode="rb") as f:
self.assertEqual(b"testing", f.read())
def testEof(self):
"""Test that reading past EOF does not raise an exception."""
file_path = file_io.join(self._base_dir, "temp_file")
f = file_io.FileIO(file_path, mode="r+")
content = "testing"
f.write(content)
f.flush()
self.assertEqual(content, f.read(len(content) + 1))
@run_all_path_types
def testUTF8StringPathExists(self, join):
file_path = join(self._base_dir, "UTF8测试_file_exist")
file_io.write_string_to_file(file_path, "testing")
v = file_io.file_exists(file_path)
self.assertEqual(v, True)
def testFilecmp(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
file2 = file_io.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is another sentence\n" * 100)
file3 = file_io.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, u"This is another sentence\n" * 100)
self.assertFalse(file_io.filecmp(file1, file2))
self.assertTrue(file_io.filecmp(file2, file3))
def testFilecmpSameSize(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
file2 = file_io.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is b sentence\n" * 100)
file3 = file_io.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, u"This is b sentence\n" * 100)
self.assertFalse(file_io.filecmp(file1, file2))
self.assertTrue(file_io.filecmp(file2, file3))
def testFilecmpBinary(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.FileIO(file1, "wb").write("testing\n\na")
file2 = file_io.join(self._base_dir, "file2")
file_io.FileIO(file2, "wb").write("testing\n\nb")
file3 = file_io.join(self._base_dir, "file3")
file_io.FileIO(file3, "wb").write("testing\n\nb")
file4 = file_io.join(self._base_dir, "file4")
file_io.FileIO(file4, "wb").write("testing\n\ntesting")
self.assertFalse(file_io.filecmp(file1, file2))
self.assertFalse(file_io.filecmp(file1, file4))
self.assertTrue(file_io.filecmp(file2, file3))
def testFileCrc32(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
crc1 = file_io.file_crc32(file1)
file2 = file_io.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is another sentence\n" * 100)
crc2 = file_io.file_crc32(file2)
file3 = file_io.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, "This is another sentence\n" * 100)
crc3 = file_io.file_crc32(file3)
self.assertTrue(crc1 != crc2)
self.assertEqual(crc2, crc3)
def testFileCrc32WithBytes(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.write_string_to_file(file1, "This is a sentence\n" * 100)
crc1 = file_io.file_crc32(file1, block_size=24)
file2 = file_io.join(self._base_dir, "file2")
file_io.write_string_to_file(file2, "This is another sentence\n" * 100)
crc2 = file_io.file_crc32(file2, block_size=24)
file3 = file_io.join(self._base_dir, "file3")
file_io.write_string_to_file(file3, "This is another sentence\n" * 100)
crc3 = file_io.file_crc32(file3, block_size=-1)
self.assertTrue(crc1 != crc2)
self.assertEqual(crc2, crc3)
def testFileCrc32Binary(self):
file1 = file_io.join(self._base_dir, "file1")
file_io.FileIO(file1, "wb").write("testing\n\n")
crc1 = file_io.file_crc32(file1)
file2 = file_io.join(self._base_dir, "file2")
file_io.FileIO(file2, "wb").write("testing\n\n\n")
crc2 = file_io.file_crc32(file2)
file3 = file_io.join(self._base_dir, "file3")
file_io.FileIO(file3, "wb").write("testing\n\n\n")
crc3 = file_io.file_crc32(file3)
self.assertTrue(crc1 != crc2)
self.assertEqual(crc2, crc3)
def testFileSeekableWithZip(self):
# Note: Test case for GitHub issue 27276, issue only exposed in python 3.7+.
filename = file_io.join(self._base_dir, "a.npz")
np.savez_compressed(filename, {"a": 1, "b": 2})
with gfile.GFile(filename, "rb") as f:
info = np.load(f, allow_pickle=True) # pylint: disable=unexpected-keyword-arg
_ = [i for i in info.items()]
def testHasAtomicMove(self):
self.assertTrue(file_io.has_atomic_move("/a/b/c"))
def testGetRegisteredSchemes(self):
expected = ["", "file", "ram"]
actual = file_io.get_registered_schemes()
# Be flexible about additional schemes that may sometimes be registered when
# this test is run, while still verifying each scheme appears just once.
maybe_expected = ["gs", "hypercomputer"]
for scheme in maybe_expected:
if scheme in actual:
expected.append(scheme)
self.assertCountEqual(expected, actual)
def testReadWriteWithEncoding(self):
file_path = file_io.join(self._base_dir, "temp_file")
with open(file_path, mode="w", encoding="cp932") as f:
f.write("今日はいい天気")
with file_io.FileIO(file_path, mode="r", encoding="cp932") as f:
self.assertEqual(f.read(), "今日はいい天気")
with file_io.FileIO(file_path, mode="w", encoding="cp932") as f:
f.write("今日はいい天気")
with open(file_path, mode="r", encoding="cp932") as f:
self.assertEqual(f.read(), "今日はいい天気")
if __name__ == "__main__":
test.main()
| FileIoTest |
python | django__django | tests/field_defaults/tests.py | {
"start": 7731,
"end": 8960
} | class ____(SimpleTestCase):
def test_allowed(self):
class Max(Func):
function = "MAX"
tests = [
Value(10),
Max(1, 2),
RawSQL("Now()", ()),
Value(10) + Value(7), # Combined expression.
ExpressionList(Value(1), Value(2)),
ExpressionWrapper(Value(1), output_field=FloatField()),
Case(When(GreaterThan(2, 1), then=3), default=4),
]
for expression in tests:
with self.subTest(expression=expression):
self.assertIs(expression.allowed_default, True)
def test_disallowed(self):
class Max(Func):
function = "MAX"
tests = [
Expression(),
F("field"),
Max(F("count"), 1),
Value(10) + F("count"), # Combined expression.
ExpressionList(F("count"), Value(2)),
ExpressionWrapper(F("count"), output_field=FloatField()),
Collate(Value("John"), "nocase"),
OrderByList("field"),
]
for expression in tests:
with self.subTest(expression=expression):
self.assertIs(expression.allowed_default, False)
| AllowedDefaultTests |
python | huggingface__transformers | examples/pytorch/question-answering/run_qa.py | {
"start": 1792,
"end": 3497
} | class ____:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
token: str = field(
default=None,
metadata={
"help": (
"The token to use as HTTP bearer authorization for remote files. If not specified, will use the token "
"generated when running `hf auth login` (stored in `~/.huggingface`)."
)
},
)
trust_remote_code: bool = field(
default=False,
metadata={
"help": (
"Whether to trust the execution of code from datasets/models defined on the Hub."
" This option should only be set to `True` for repositories you trust and in which you have read the"
" code, as it will execute code present on the Hub on your local machine."
)
},
)
@dataclass
| ModelArguments |
python | getsentry__sentry | tests/sentry/lang/native/test_sources.py | {
"start": 5296,
"end": 9866
} | class ____:
@pytest.fixture
def sources(self):
builtins = [
{
"id": "sentry:microsoft",
"name": "Microsoft",
"type": "gcs",
},
{
"id": "sentry:electron",
"name": "Electron",
"type": "s3",
},
{
"id": "sentry:ios-source",
"name": "iOS",
"type": "http",
},
{
"id": "sentry:tvos-source",
"name": "iOS",
"type": "http",
},
{
"type": "http",
"id": "custom",
"layout": {"type": "symstore"},
"url": "https://msdl.microsoft.com/download/symbols/",
},
]
return builtins
@pytest.fixture
def reversed_alias_map(self):
return {"sentry:ios-source": "sentry:ios", "sentry:tvos-source": "sentry:ios"}
# Explicitly empty list of sources
@django_db_all
def test_sources_included_and_ignored_empty(self) -> None:
with override_options({"symbolicator.ignored_sources": []}):
sources = filter_ignored_sources([])
assert sources == []
# Default/unset list of sources
@django_db_all
def test_sources_ignored_unset(self, sources) -> None:
sources = filter_ignored_sources(sources)
source_ids = list(map(lambda s: s["id"], sources))
assert source_ids == [
"sentry:microsoft",
"sentry:electron",
"sentry:ios-source",
"sentry:tvos-source",
"custom",
]
@django_db_all
def test_sources_ignored_empty(self, sources) -> None:
with override_options({"symbolicator.ignored_sources": []}):
sources = filter_ignored_sources(sources)
source_ids = list(map(lambda s: s["id"], sources))
assert source_ids == [
"sentry:microsoft",
"sentry:electron",
"sentry:ios-source",
"sentry:tvos-source",
"custom",
]
@django_db_all
def test_sources_ignored_builtin(self, sources) -> None:
with override_options({"symbolicator.ignored_sources": ["sentry:microsoft"]}):
sources = filter_ignored_sources(sources)
source_ids = list(map(lambda s: s["id"], sources))
assert source_ids == [
"sentry:electron",
"sentry:ios-source",
"sentry:tvos-source",
"custom",
]
@django_db_all
def test_sources_ignored_alias(self, sources, reversed_alias_map) -> None:
with override_options({"symbolicator.ignored_sources": ["sentry:ios"]}):
sources = filter_ignored_sources(sources, reversed_alias_map)
source_ids = list(map(lambda s: s["id"], sources))
assert source_ids == ["sentry:microsoft", "sentry:electron", "custom"]
@django_db_all
def test_sources_ignored_bypass_alias(self, sources, reversed_alias_map) -> None:
with override_options({"symbolicator.ignored_sources": ["sentry:ios-source"]}):
sources = filter_ignored_sources(sources, reversed_alias_map)
source_ids = list(map(lambda s: s["id"], sources))
assert source_ids == [
"sentry:microsoft",
"sentry:electron",
"sentry:tvos-source",
"custom",
]
@django_db_all
def test_sources_ignored_custom(self, sources) -> None:
with override_options({"symbolicator.ignored_sources": ["custom"]}):
sources = filter_ignored_sources(sources)
source_ids = list(map(lambda s: s["id"], sources))
assert source_ids == [
"sentry:microsoft",
"sentry:electron",
"sentry:ios-source",
"sentry:tvos-source",
]
@django_db_all
def test_sources_ignored_unrecognized(self, sources) -> None:
with override_options({"symbolicator.ignored_sources": ["honk"]}):
sources = filter_ignored_sources(sources)
source_ids = list(map(lambda s: s["id"], sources))
assert source_ids == [
"sentry:microsoft",
"sentry:electron",
"sentry:ios-source",
"sentry:tvos-source",
"custom",
]
| TestIgnoredSourcesFiltering |
python | weaviate__weaviate-python-client | weaviate/backup/backup.py | {
"start": 1459,
"end": 1883
} | class ____(_BackupConfigBase):
"""Options to configure the backup when creating a backup."""
ChunkSize: Optional[int] = Field(
default=None,
alias="chunk_size",
description="DEPRECATED: This parameter no longer has any effect.",
exclude=True,
)
CompressionLevel: Optional[BackupCompressionLevel] = Field(
default=None, alias="compression_level"
)
| BackupConfigCreate |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/_collections.py | {
"start": 8410,
"end": 8803
} | class ____(Dict[_KT, _VT]):
"""A dict which populates missing values via a creation function.
Note the creation function takes a key, unlike
collections.defaultdict.
"""
def __init__(self, creator: Callable[[_KT], _VT]):
self.creator = creator
def __missing__(self, key: Any) -> Any:
self[key] = val = self.creator(key)
return val
| PopulateDict |
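# A minimal usage sketch of the pattern above (assumes the class is available
# under the name given for this entry, PopulateDict): the creator callable
# receives the missing key, and the computed value is cached in the dict itself.
squares = PopulateDict(lambda key: key * key)
assert squares[4] == 16   # computed by the creator on first access
assert 4 in squares       # later lookups hit the cached entry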
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/oracle/cx_oracle.py | {
"start": 25442,
"end": 25969
} | class ____(_LOBDataType, sqltypes.LargeBinary):
def get_dbapi_type(self, dbapi):
# previously, this was dbapi.BLOB.
# DB_TYPE_RAW will instead be passed to setinputsizes()
# when this datatype is used.
return dbapi.DB_TYPE_RAW
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
if not dialect.auto_convert_lobs:
return None
else:
return super().result_processor(dialect, coltype)
| _OracleBinary |
python | huggingface__transformers | src/transformers/modeling_gguf_pytorch_utils.py | {
"start": 1748,
"end": 1939
} | class ____:
def __init__(self, config=None):
self.config = config or {}
def process(self, weights, name, **kwargs):
return GGUFTensor(weights, name, {})
| TensorProcessor |
python | numba__numba | numba/tests/test_dispatcher.py | {
"start": 19254,
"end": 21929
} | class ____(BaseTest):
"""
Test support for various parameter passing styles.
"""
def test_named_args(self):
"""
Test passing named arguments to a dispatcher.
"""
f, check = self.compile_func(addsub)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# All calls above fall under the same specialization
self.assertEqual(len(f.overloads), 1)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected 3, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6)
self.assertIn("missing argument 'z'", str(cm.exception))
def test_default_args(self):
"""
Test omitting arguments with a default value.
"""
f, check = self.compile_func(addsub_defaults)
check(3, z=10, y=4)
check(3, 4, 10)
check(x=3, y=4, z=10)
# Now omitting some values
check(3, z=10)
check(3, 4)
check(x=3, y=4)
check(3)
check(x=3)
# Errors
with self.assertRaises(TypeError) as cm:
f(3, 4, y=6, z=7)
self.assertIn("too many arguments: expected 3, got 4",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f()
self.assertIn("not enough arguments: expected at least 1, got 0",
str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(y=6, z=7)
self.assertIn("missing argument 'x'", str(cm.exception))
def test_star_args(self):
"""
Test a compiled function with starargs in the signature.
"""
f, check = self.compile_func(star_defaults)
check(4)
check(4, 5)
check(4, 5, 6)
check(4, 5, 6, 7)
check(4, 5, 6, 7, 8)
check(x=4)
check(x=4, y=5)
check(4, y=5)
with self.assertRaises(TypeError) as cm:
f(4, 5, y=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, 5, z=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
with self.assertRaises(TypeError) as cm:
f(4, x=6)
self.assertIn("some keyword arguments unexpected", str(cm.exception))
| TestSignatureHandling |
python | simonw__datasette | tests/conftest.py | {
"start": 5134,
"end": 8143
} | class ____:
__name__ = "TrackEventPlugin"
@dataclass
class OneEvent(Event):
name = "one"
extra: str
@hookimpl
def register_events(self, datasette):
async def inner():
return [self.OneEvent]
return inner
@hookimpl
def track_event(self, datasette, event):
datasette._tracked_events = getattr(datasette, "_tracked_events", [])
datasette._tracked_events.append(event)
@pytest.fixture(scope="session", autouse=True)
def install_event_tracking_plugin():
from datasette.plugins import pm
pm.register(TrackEventPlugin(), name="TrackEventPlugin")
@pytest.fixture(scope="session")
def ds_localhost_http_server():
ds_proc = subprocess.Popen(
[sys.executable, "-m", "datasette", "--memory", "-p", "8041"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
# Avoid FileNotFoundError: [Errno 2] No such file or directory:
cwd=tempfile.gettempdir(),
)
wait_until_responds("http://localhost:8041/")
# Check it started successfully
assert not ds_proc.poll(), ds_proc.stdout.read().decode("utf-8")
yield ds_proc
# Shut it down at the end of the pytest session
ds_proc.terminate()
@pytest.fixture(scope="session")
def ds_unix_domain_socket_server(tmp_path_factory):
# This used to use tmp_path_factory.mktemp("uds") but that turned out to
# produce paths that were too long to use as UDS on macOS, see
# https://github.com/simonw/datasette/issues/1407 - so I switched to
# using tempfile.gettempdir()
uds = str(pathlib.Path(tempfile.gettempdir()) / "datasette.sock")
ds_proc = subprocess.Popen(
[sys.executable, "-m", "datasette", "--memory", "--uds", uds],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tempfile.gettempdir(),
)
# Poll until available
transport = httpx.HTTPTransport(uds=uds)
client = httpx.Client(transport=transport)
wait_until_responds("http://localhost/_memory.json", client=client)
# Check it started successfully
assert not ds_proc.poll(), ds_proc.stdout.read().decode("utf-8")
yield ds_proc, uds
# Shut it down at the end of the pytest session
ds_proc.terminate()
# Import fixtures from fixtures.py to make them available
from .fixtures import ( # noqa: E402, F401
app_client,
app_client_base_url_prefix,
app_client_conflicting_database_names,
app_client_csv_max_mb_one,
app_client_immutable_and_inspect_file,
app_client_larger_cache_size,
app_client_no_files,
app_client_returned_rows_matches_page_size,
app_client_shorter_time_limit,
app_client_two_attached_databases,
app_client_two_attached_databases_crossdb_enabled,
app_client_two_attached_databases_one_immutable,
app_client_with_cors,
app_client_with_dot,
app_client_with_trace,
generate_compound_rows,
generate_sortable_rows,
make_app_client,
TEMP_PLUGIN_SECRET_FILE,
)
| TrackEventPlugin |
python | nedbat__coveragepy | tests/test_api.py | {
"start": 29733,
"end": 30583
} | class ____(UsingModulesMixin, CoverageTest):
"""Test PEP-420 namespace modules."""
def test_explicit_namespace_module(self) -> None:
self.make_file("main.py", "import namespace_420\n")
cov = coverage.Coverage()
self.start_import_stop(cov, "main")
with pytest.raises(CoverageException, match=r"Module .* has no file"):
cov.analysis(sys.modules["namespace_420"])
def test_bug_572(self) -> None:
self.make_file("main.py", "import namespace_420\n")
# Use source=namespace_420 to trigger the check that used to fail,
# and use source=main so that something is measured.
cov = coverage.Coverage(source=["namespace_420", "main"])
with self.assert_warnings(cov, []):
self.start_import_stop(cov, "main")
cov.report()
| NamespaceModuleTest |
python | davidhalter__jedi | jedi/inference/base_value.py | {
"start": 12494,
"end": 13759
} | class ____:
def __init__(self, context, node):
self.context = context
self.node = node
def get_root_context(self):
return self.context.get_root_context()
def infer(self):
return self.context.infer_node(self.node)
def __repr__(self):
return '<%s: %s in %s>' % (self.__class__.__name__, self.node, self.context)
def _getitem(value, index_values, contextualized_node):
# The actual getitem call.
result = NO_VALUES
unused_values = set()
for index_value in index_values:
index = index_value.get_safe_value(default=None)
if type(index) in (float, int, str, slice, bytes):
try:
result |= value.py__simple_getitem__(index)
continue
except SimpleGetItemNotFound:
pass
unused_values.add(index_value)
# The index was somehow not good enough or simply a wrong type.
# Therefore we now iterate through all the values and just take
# all results.
if unused_values or not index_values:
result |= value.py__getitem__(
ValueSet(unused_values),
contextualized_node
)
debug.dbg('py__getitem__ result: %s', result)
return result
| ContextualizedNode |
python | pyodide__pyodide | src/py/_pyodide/_core_docs.py | {
"start": 34963,
"end": 35239
} | class ____(JsBuffer, JsArray[int]):
_js_type_flags = ["IS_TYPEDARRAY"]
BYTES_PER_ELEMENT: int
def subarray(
self, start: int | None = None, stop: int | None = None
) -> "JsTypedArray":
raise NotImplementedError
buffer: JsBuffer
| JsTypedArray |
python | walkccc__LeetCode | solutions/2409. Count Days Spent Together/2409.py | {
"start": 0,
"end": 690
} | class ____:
def countDaysTogether(
self,
arriveAlice: str,
leaveAlice: str,
arriveBob: str,
leaveBob: str,
) -> int:
days = [0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
def toDays(s: str) -> int:
month = int(s[:2])
day = int(s[3:])
prevDays = 0
for m in range(1, month):
prevDays += days[m]
return prevDays + day
arriveA = toDays(arriveAlice)
leaveA = toDays(leaveAlice)
arriveB = toDays(arriveBob)
leaveB = toDays(leaveBob)
ans = 0
for day in range(1, 366):
if arriveA <= day and day <= leaveA and arriveB <= day and day <= leaveB:
ans += 1
return ans
| Solution |
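# A quick worked check of the day-count logic above, using made-up dates:
# the shared window is 09-01 through 09-10, i.e. 10 days inclusive.
assert Solution().countDaysTogether("08-15", "09-10", "09-01", "10-20") == 10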
python | pydantic__pydantic | tests/test_json_schema.py | {
"start": 203897,
"end": 203975
} | class ____:
name: str
@pydantic.dataclasses.dataclass
| BuiltinDataclassParent |
python | miyuchina__mistletoe | test/test_span_token.py | {
"start": 883,
"end": 1438
} | class ____(TestBranchToken):
def test_parse(self):
self._test_parse(span_token.Strong, '**some text**', 'some text')
self._test_parse(span_token.Strong, '__some text__', 'some text')
def test_strong_when_both_delimiter_run_lengths_are_multiples_of_3(self):
tokens = iter(span_token.tokenize_inner('foo******bar*********baz'))
self._test_token(next(tokens), 'foo', children=False)
self._test_token(next(tokens), 'bar', children=True)
self._test_token(next(tokens), '***baz', children=False)
| TestStrong |
python | pydantic__pydantic | pydantic/v1/networks.py | {
"start": 18488,
"end": 19127
} | class ____(_BaseAddress):
__slots__ = ()
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
field_schema.update(type='string', format='ipvanyaddress')
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
@classmethod
def validate(cls, value: Union[str, bytes, int]) -> Union[IPv4Address, IPv6Address]:
try:
return IPv4Address(value)
except ValueError:
pass
try:
return IPv6Address(value)
except ValueError:
raise errors.IPvAnyAddressError()
| IPvAnyAddress |
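# A minimal sketch of the fallback order shown above (IPv4 is tried first,
# then IPv6), assuming the class is importable as IPvAnyAddress:
from ipaddress import IPv4Address, IPv6Address
assert isinstance(IPvAnyAddress.validate("192.168.0.1"), IPv4Address)
assert isinstance(IPvAnyAddress.validate("::1"), IPv6Address)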
python | dagster-io__dagster | examples/project_analytics/dagster_pypi/resources.py | {
"start": 2284,
"end": 2657
} | class ____(GithubResource):
input_file: str = Field(description="Path to the sample input file")
def get_github_stars(self, date) -> pd.DataFrame:
print("Pretending to fetch Github data for a given date: ", date)
df = pd.read_csv(self.input_file)
df["date"] = datetime.datetime.strptime(date, "%Y-%m-%d")
return df
| GithubLocalResource |
python | ray-project__ray | python/ray/tests/test_actor_retry_2.py | {
"start": 210,
"end": 606
} | class ____:
def __init__(self) -> None:
self._counts = defaultdict(int)
def increment(self, key: Optional[str] = None) -> int:
key = key or "default"
c = self._counts[key]
self._counts[key] += 1
return c
def get_count(self, key: Optional[str] = None) -> int:
return self._counts[key or "default"]
@ray.remote(max_task_retries=3)
| Counter |
python | huggingface__transformers | src/transformers/models/bitnet/modeling_bitnet.py | {
"start": 2106,
"end": 2831
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
BitNetRMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| BitNetRMSNorm |
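# A shape-only sketch, assuming the module above is constructed directly:
# RMS normalization acts over the last (hidden) dimension, so the output
# shape matches the input shape.
import torch
norm = BitNetRMSNorm(hidden_size=8)
x = torch.randn(2, 4, 8)
assert norm(x).shape == x.shape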
python | hyperopt__hyperopt | hyperopt/tests/unit/test_rdists.py | {
"start": 3488,
"end": 4594
} | class ____(unittest.TestCase):
def test_smallq(self):
low, high, q = (0, 1, 0.1)
qu = quniform_gen(low, high, q)
check_d_samples(qu, n=10000)
def test_bigq(self):
low, high, q = (-20, -1, 3)
qu = quniform_gen(low, high, q)
check_d_samples(qu, n=10000)
def test_offgrid_int(self):
qn = quniform_gen(0, 2, 2)
assert qn.pmf(0) > 0.0
assert qn.pmf(1) == 0.0
assert qn.pmf(2) > 0.0
assert qn.pmf(3) == 0.0
assert qn.pmf(-1) == 0.0
def test_offgrid_float(self):
qn = quniform_gen(0, 1, 0.2)
assert qn.pmf(0) > 0.0
assert qn.pmf(0.1) == 0.0
assert qn.pmf(0.2) > 0.0
assert qn.pmf(0.4) > 0.0
assert qn.pmf(0.8) > 0.0
assert qn.pmf(-0.2) == 0.0
assert qn.pmf(0.99) == 0.0
assert qn.pmf(-0.99) == 0.0
def test_output_type_int(self):
result = quniform_gen(0, 10, 1).rvs()
assert int == type(result)
def test_output_type_float(self):
assert float == type(quniform_gen(0, 10, 1.0).rvs())
| TestQUniform |
python | falconry__falcon | falcon/routing/compiled.py | {
"start": 43916,
"end": 44056
} | class ____(_CxChild):
def src(self, indentation: int) -> str:
return '{0}return None'.format(_TAB_STR * indentation)
| _CxReturnNone |
python | plotly__plotly.py | plotly/graph_objs/heatmap/_stream.py | {
"start": 233,
"end": 3511
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "heatmap"
_path_str = "heatmap.stream"
_valid_props = {"maxpoints", "token"}
@property
def maxpoints(self):
"""
Sets the maximum number of points to keep on the plots from an
incoming stream. If `maxpoints` is set to 50, only the newest
50 points will be displayed on the plot.
The 'maxpoints' property is a number and may be specified as:
- An int or float in the interval [0, 10000]
Returns
-------
int|float
"""
return self["maxpoints"]
@maxpoints.setter
def maxpoints(self, val):
self["maxpoints"] = val
@property
def token(self):
"""
The stream id number links a data trace on a plot with a
stream. See https://chart-studio.plotly.com/settings for more
details.
The 'token' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["token"]
@token.setter
def token(self, val):
self["token"] = val
@property
def _prop_descriptions(self):
return """\
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
"""
def __init__(self, arg=None, maxpoints=None, token=None, **kwargs):
"""
Construct a new Stream object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.heatmap.Stream`
maxpoints
Sets the maximum number of points to keep on the plots
from an incoming stream. If `maxpoints` is set to 50,
only the newest 50 points will be displayed on the
plot.
token
The stream id number links a data trace on a plot with
a stream. See https://chart-studio.plotly.com/settings
for more details.
Returns
-------
Stream
"""
super().__init__("stream")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.heatmap.Stream
constructor must be a dict or
an instance of :class:`plotly.graph_objs.heatmap.Stream`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("maxpoints", arg, maxpoints)
self._set_property("token", arg, token)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Stream |
python | keras-team__keras | keras/src/ops/nn.py | {
"start": 27812,
"end": 30974
} | class ____(Operation):
def __init__(
self,
pool_size,
strides=None,
padding="valid",
data_format=None,
*,
name=None,
):
super().__init__(name=name)
self.pool_size = pool_size
self.strides = strides
self.padding = padding.lower()
self.data_format = data_format
def call(self, inputs):
return backend.nn.max_pool(
inputs,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
def compute_output_spec(self, inputs):
output_shape = operation_utils.compute_pooling_output_shape(
inputs.shape,
self.pool_size,
self.strides,
self.padding,
self.data_format,
)
return KerasTensor(output_shape, dtype=inputs.dtype)
@keras_export(["keras.ops.max_pool", "keras.ops.nn.max_pool"])
def max_pool(
inputs,
pool_size,
strides=None,
padding="valid",
data_format=None,
):
"""Max pooling operation.
Args:
inputs: Tensor of rank N+2. `inputs` has shape
`(batch_size,) + inputs_spatial_shape + (num_channels,)` if
`data_format="channels_last"`, or
`(batch_size, num_channels) + inputs_spatial_shape` if
`data_format="channels_first"`. Pooling happens over the spatial
dimensions only.
pool_size: int or tuple/list of integers of size
`len(inputs_spatial_shape)`, specifying the size of the pooling
window for each spatial dimension of the input tensor. If
`pool_size` is int, then every spatial dimension shares the same
`pool_size`.
strides: int or tuple/list of integers of size
`len(inputs_spatial_shape)`. The stride of the sliding window for
each spatial dimension of the input tensor. If `strides` is int,
then every spatial dimension shares the same `strides`.
padding: string, either `"valid"` or `"same"`. `"valid"` means no
padding is applied, and `"same"` results in padding evenly to the
left/right or up/down of the input such that output has the
same height/width dimension as the input when `strides=1`.
data_format: A string, either `"channels_last"` or `"channels_first"`.
`data_format` determines the ordering of the dimensions in the
inputs. If `data_format="channels_last"`, `inputs` is of shape
`(batch_size, ..., channels)` while if
`data_format="channels_first"`, `inputs` is of shape
`(batch_size, channels, ...)`.
Returns:
A tensor of rank N+2, the result of the max pooling operation.
"""
data_format = standardize_data_format(data_format)
padding = padding.lower()
if any_symbolic_tensors((inputs,)):
return MaxPool(
pool_size,
strides,
padding,
data_format,
).symbolic_call(inputs)
return backend.nn.max_pool(inputs, pool_size, strides, padding, data_format)
| MaxPool |
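# A shape-only sketch, assuming the default channels_last data format and that
# a NumPy array input is accepted by the ops layer:
import numpy as np
from keras import ops
x = np.zeros((1, 8, 8, 3))          # (batch, height, width, channels)
y = ops.max_pool(x, pool_size=2, strides=2, padding="valid")
# "valid" pooling: output spatial size is (8 - 2) // 2 + 1 = 4 per dimension
assert tuple(y.shape) == (1, 4, 4, 3)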
python | mkdocstrings__mkdocstrings | tests/fixtures/nesting.py | {
"start": 0,
"end": 197
} | class ____:
"""A class.
## ::: tests.fixtures.nesting.Class.method
options:
show_root_heading: true
"""
def method(self) -> None:
"""A method."""
| Class |
python | allegroai__clearml | clearml/backend_api/services/v2_20/tasks.py | {
"start": 130112,
"end": 151695
} | class ____(Request):
"""
Create a new task
:param name: Task name. Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param type: Type of task
:type type: TaskTypeEnum
:param comment: Free text comment
:type comment: str
    :param parent: Parent task ID. Must be a completed task.
    :type parent: str
    :param project: Project ID of the project to which this task is assigned.
        Must exist.
    :type project: str
    :param output_dest: Output storage ID. Must be a reference to an existing
        storage.
:type output_dest: str
:param execution: Task execution params
:type execution: Execution
:param script: Script info
:type script: Script
:param hyperparams: Task hyper params per section
:type hyperparams: dict
:param configuration: Task configuration params
:type configuration: dict
:param models: Task models
:type models: TaskModels
:param container: Docker container parameters
:type container: dict
"""
_service = "tasks"
_action = "create"
_version = "2.20"
_schema = {
"definitions": {
"artifact": {
"properties": {
"content_size": {
"description": "Raw data length in bytes",
"type": "integer",
},
"display_data": {
"description": "User-defined list of key/value pairs, sorted",
"items": {"items": {"type": "string"}, "type": "array"},
"type": "array",
},
"hash": {
"description": "Hash of entire raw data",
"type": "string",
},
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
"timestamp": {
"description": "Epoch time when artifact was created",
"type": "integer",
},
"type": {"description": "System defined type", "type": "string"},
"type_data": {
"$ref": "#/definitions/artifact_type_data",
"description": "Additional fields defined by the system",
},
"uri": {"description": "Raw data location", "type": "string"},
},
"required": ["key", "type"],
"type": "object",
},
"artifact_mode_enum": {
"default": "output",
"enum": ["input", "output"],
"type": "string",
},
"artifact_type_data": {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
},
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"execution": {
"properties": {
"artifacts": {
"description": "Task artifacts",
"items": {"$ref": "#/definitions/artifact"},
"type": ["array", "null"],
},
"framework": {
"description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
"type": ["string", "null"],
},
"model_desc": {
"additionalProperties": True,
"description": "Json object representing the Model descriptors",
"type": ["object", "null"],
},
"model_labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
"type": ["object", "null"],
},
"parameters": {
"additionalProperties": True,
"description": "Json object containing the Task parameters",
"type": ["object", "null"],
},
"queue": {
"description": "Queue ID where task was queued.",
"type": ["string", "null"],
},
},
"type": "object",
},
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"script": {
"properties": {
"binary": {
"default": "python",
"description": "Binary to use when running the script",
"type": ["string", "null"],
},
"branch": {
"description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
"type": ["string", "null"],
},
"diff": {
"description": "Uncommitted changes found in the repository when task was run",
"type": ["string", "null"],
},
"entry_point": {
"description": "Path to execute within the repository",
"type": ["string", "null"],
},
"repository": {
"description": "Name of the repository where the script is located",
"type": ["string", "null"],
},
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": ["object", "null"],
},
"tag": {
"description": "Repository tag",
"type": ["string", "null"],
},
"version_num": {
"description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
"type": ["string", "null"],
},
"working_dir": {
"description": "Path to the folder from which to run the script Default - root folder of repository",
"type": ["string", "null"],
},
},
"type": "object",
},
"section_params": {
"additionalProperties": {"$ref": "#/definitions/params_item"},
"description": "Task section params",
"type": "object",
},
"task_model_item": {
"properties": {
"model": {"description": "The model ID", "type": "string"},
"name": {"description": "The task model name", "type": "string"},
},
"required": ["name", "model"],
"type": "object",
},
"task_models": {
"properties": {
"input": {
"description": "The list of task input models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
"output": {
"description": "The list of task output models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
},
"type": "object",
},
"task_type_enum": {
"enum": [
"training",
"testing",
"inference",
"data_processing",
"application",
"monitor",
"controller",
"optimizer",
"service",
"qc",
"custom",
],
"type": "string",
},
},
"properties": {
"comment": {"description": "Free text comment ", "type": "string"},
"configuration": {
"additionalProperties": {"$ref": "#/definitions/configuration_item"},
"description": "Task configuration params",
"type": "object",
},
"container": {
"additionalProperties": {"type": ["string", "null"]},
"description": "Docker container parameters",
"type": "object",
},
"execution": {
"$ref": "#/definitions/execution",
"description": "Task execution params",
},
"hyperparams": {
"additionalProperties": {"$ref": "#/definitions/section_params"},
"description": "Task hyper params per section",
"type": "object",
},
"models": {
"$ref": "#/definitions/task_models",
"description": "Task models",
},
"name": {
"description": "Task name. Unique within the company.",
"type": "string",
},
"output_dest": {
"description": "Output storage id Must be a reference to an existing storage.",
"type": "string",
},
"parent": {
"description": "Parent task id Must be a completed task.",
"type": "string",
},
"project": {
"description": "Project ID of the project to which this task is assigned Must exist[ab]",
"type": "string",
},
"script": {"$ref": "#/definitions/script", "description": "Script info"},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"type": {
"$ref": "#/definitions/task_type_enum",
"description": "Type of task",
},
},
"required": ["name", "type"],
"type": "object",
}
def __init__(
self,
name: str,
type: Any,
tags: Optional[List[str]] = None,
system_tags: Optional[List[str]] = None,
comment: Optional[str] = None,
parent: Optional[str] = None,
project: Optional[str] = None,
input: Any = None,
output_dest: Optional[str] = None,
execution: Any = None,
script: Any = None,
hyperparams: Optional[dict] = None,
configuration: Optional[dict] = None,
models: Any = None,
container: Optional[dict] = None,
**kwargs: Any
) -> None:
super(CreateRequest, self).__init__(**kwargs)
self.name = name
self.tags = tags
self.system_tags = system_tags
self.type = type
self.comment = comment
self.parent = parent
self.project = project
self.input = input
self.output_dest = output_dest
self.execution = execution
self.script = script
self.hyperparams = hyperparams
self.configuration = configuration
self.models = models
self.container = container
@schema_property("name")
def name(self) -> str:
return self._property_name
@name.setter
def name(self, value: str) -> None:
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("tags")
def tags(self) -> Optional[List[str]]:
return self._property_tags
@tags.setter
def tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self) -> Optional[List[str]]:
return self._property_system_tags
@system_tags.setter
def system_tags(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("type")
def type(self) -> Any:
return self._property_type
@type.setter
def type(self, value: Any) -> None:
if value is None:
self._property_type = None
return
if isinstance(value, six.string_types):
try:
value = TaskTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "type", enum.Enum)
self._property_type = value
@schema_property("comment")
def comment(self) -> Optional[str]:
return self._property_comment
@comment.setter
def comment(self, value: Optional[str]) -> None:
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("parent")
def parent(self) -> Optional[str]:
return self._property_parent
@parent.setter
def parent(self, value: Optional[str]) -> None:
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("project")
def project(self) -> Optional[str]:
return self._property_project
@project.setter
def project(self, value: Optional[str]) -> None:
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("input")
def input(self) -> Any:
return self._property_input
@input.setter
def input(self, value: Any) -> None:
self._property_input = value
@schema_property("output_dest")
def output_dest(self) -> Optional[str]:
return self._property_output_dest
@output_dest.setter
def output_dest(self, value: Optional[str]) -> None:
if value is None:
self._property_output_dest = None
return
self.assert_isinstance(value, "output_dest", six.string_types)
self._property_output_dest = value
@schema_property("execution")
def execution(self) -> Any:
return self._property_execution
@execution.setter
def execution(self, value: Any) -> None:
if value is None:
self._property_execution = None
return
if isinstance(value, dict):
value = Execution.from_dict(value)
else:
self.assert_isinstance(value, "execution", Execution)
self._property_execution = value
@schema_property("script")
def script(self) -> Any:
return self._property_script
@script.setter
def script(self, value: Any) -> None:
if value is None:
self._property_script = None
return
if isinstance(value, dict):
value = Script.from_dict(value)
else:
self.assert_isinstance(value, "script", Script)
self._property_script = value
@schema_property("hyperparams")
def hyperparams(self) -> Optional[dict]:
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value: Optional[dict]) -> None:
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", dict)
self.assert_isinstance(value.keys(), "hyperparams_keys", six.string_types, is_array=True)
self.assert_isinstance(value.values(), "hyperparams_values", (SectionParams, dict), is_array=True)
value = dict(((k, SectionParams(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_hyperparams = value
@schema_property("configuration")
def configuration(self) -> Optional[dict]:
return self._property_configuration
@configuration.setter
def configuration(self, value: Optional[dict]) -> None:
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", dict)
self.assert_isinstance(value.keys(), "configuration_keys", six.string_types, is_array=True)
self.assert_isinstance(
value.values(),
"configuration_values",
(ConfigurationItem, dict),
is_array=True,
)
value = dict(((k, ConfigurationItem(**v) if isinstance(v, dict) else v) for (k, v) in value.items()))
self._property_configuration = value
@schema_property("models")
def models(self) -> Any:
return self._property_models
@models.setter
def models(self, value: Any) -> None:
if value is None:
self._property_models = None
return
if isinstance(value, dict):
value = TaskModels.from_dict(value)
else:
self.assert_isinstance(value, "models", TaskModels)
self._property_models = value
@schema_property("container")
def container(self) -> Optional[dict]:
return self._property_container
@container.setter
def container(self, value: Optional[dict]) -> None:
if value is None:
self._property_container = None
return
self.assert_isinstance(value, "container", (dict,))
self._property_container = value
| CreateRequest |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_test.py | {
"start": 39024,
"end": 39335
} | class ____(test_lib.TestCase):
def test(self):
np.random.seed(1) # Make it reproducible.
x = np.random.randn(3, 4).astype(np.float32)
y = np.concatenate([x * (x > 0), -x * (x < 0)], axis=1)
z = self.evaluate(nn_ops.crelu(constant_op.constant(x)))
self.assertAllClose(y, z, 1e-4)
| CReluTest |
python | astropy__astropy | astropy/modeling/rotations.py | {
"start": 10027,
"end": 12568
} | class ____(_SkyRotation):
"""
Transform from Native to Celestial Spherical Coordinates.
Parameters
----------
lon : float or `~astropy.units.Quantity` ['angle']
Celestial longitude of the fiducial point.
lat : float or `~astropy.units.Quantity` ['angle']
Celestial latitude of the fiducial point.
lon_pole : float or `~astropy.units.Quantity` ['angle']
Longitude of the celestial pole in the native system.
Notes
-----
If ``lon``, ``lat`` and ``lon_pole`` are numerical values they
should be in units of deg. Inputs are angles on the native sphere.
Outputs are angles on the celestial sphere.
"""
n_inputs = 2
n_outputs = 2
@property
def input_units(self):
"""Input units."""
return {self.inputs[0]: u.deg, self.inputs[1]: u.deg}
@property
def return_units(self):
"""Output units."""
return {self.outputs[0]: u.deg, self.outputs[1]: u.deg}
def __init__(self, lon, lat, lon_pole, **kwargs):
super().__init__(lon, lat, lon_pole, **kwargs)
self.inputs = ("phi_N", "theta_N")
self.outputs = ("alpha_C", "delta_C")
def evaluate(self, phi_N, theta_N, lon, lat, lon_pole):
"""
Parameters
----------
phi_N, theta_N : float or `~astropy.units.Quantity` ['angle']
Angles in the Native coordinate system.
            It is assumed that numerical-only (float) inputs are in degrees.
lon, lat, lon_pole : float or `~astropy.units.Quantity` ['angle']
Parameter values when the model was initialized.
If float, assumed in degrees.
Returns
-------
alpha_C, delta_C : float or `~astropy.units.Quantity` ['angle']
Angles on the Celestial sphere.
If float, in degrees.
"""
# The values are in radians since they have already been through the setter.
if isinstance(lon, u.Quantity):
lon = lon.value
lat = lat.value
lon_pole = lon_pole.value
# Convert to Euler angles
phi = lon_pole - np.pi / 2
theta = -(np.pi / 2 - lat)
psi = -(np.pi / 2 + lon)
alpha_C, delta_C = super()._evaluate(phi_N, theta_N, phi, theta, psi)
return alpha_C, delta_C
@property
def inverse(self):
# convert to angles on the celestial sphere
return RotateCelestial2Native(self.lon, self.lat, self.lon_pole)
| RotateNative2Celestial |
python | astropy__astropy | astropy/time/formats.py | {
"start": 80314,
"end": 80453
} | class ____(TimeDeltaNumeric, TimeUnique):
"""Time delta in Julian days (86400 SI seconds)."""
name = "jd"
unit = 1.0
| TimeDeltaJD |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 68405,
"end": 68738
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("id", "emails")
id = sgqlc.types.Field(ID, graphql_name="id")
emails = sgqlc.types.Field(
sgqlc.types.list_of(sgqlc.types.non_null(String)), graphql_name="emails"
)
| CommitAuthor |
python | walkccc__LeetCode | solutions/1858. Longest Word With All Prefixes/1858.py | {
"start": 0,
"end": 701
} | class ____:
def __init__(self):
self.root = {}
def longestWord(self, words: list[str]) -> str:
ans = ''
for word in words:
self.insert(word)
for word in words:
if not self.allPrefixed(word):
continue
if len(ans) < len(word) or (len(ans) == len(word) and ans > word):
ans = word
return ans
def insert(self, word: str) -> None:
node = self.root
for c in word:
if c not in node:
node[c] = {}
node = node[c]
node['isWord'] = True
def allPrefixed(self, word: str) -> bool:
node = self.root
for c in word:
node = node[c]
if 'isWord' not in node:
return False
return True
| Solution |
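# A quick check of the "all prefixes present" rule above: both "apple" and
# "apply" have every prefix in the list, and the lexicographically smaller
# word wins the tie on length.
words = ["a", "banana", "app", "appl", "ap", "apply", "apple"]
assert Solution().longestWord(words) == "apple"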
python | dagster-io__dagster | python_modules/dagster/dagster/_utils/aiodataloader.py | {
"start": 1277,
"end": 3098
} | class ____(Generic[KeyT, ReturnT]):
"""Currently, the cache is not shared between blocking and non-blocking DataLoaders, as it is
challenging to drive the event loop properly while managing a shared cache.
"""
def __init__(
self,
batch_load_fn: Callable[[Iterable[KeyT]], Iterable[ReturnT]],
get_cache_key: Optional[Callable[[KeyT], Union[CacheKeyT, KeyT]]] = None,
max_batch_size: Optional[int] = None,
):
self._cache = {}
self._to_query = {}
self.get_cache_key = get_cache_key or (lambda x: x)
self.batch_load_fn = batch_load_fn
self.max_batch_size = max_batch_size
def prepare(self, keys: Iterable[KeyT]) -> None:
# ensure that the provided keys will be fetched as a unit in the next fetch
for key in keys:
cache_key = self.get_cache_key(key)
if cache_key not in self._cache:
self._to_query[cache_key] = key
def blocking_load(self, key: KeyT) -> ReturnT:
"""Loads the provided key synchronously, pulling from the cache if possible."""
self.prepare([key])
if self._to_query:
for chunk in get_chunks(
list(self._to_query.values()),
self.max_batch_size or len(self._to_query),
):
# uses independent event loop from the async system
chunk_results = self.batch_load_fn(chunk)
for k, v in zip(chunk, chunk_results):
self._cache[self.get_cache_key(k)] = v
self._to_query = {}
return self._cache[self.get_cache_key(key)]
def blocking_load_many(self, keys: Iterable[KeyT]) -> Iterable[ReturnT]:
self.prepare(keys)
return [self.blocking_load(key) for key in keys]
| BlockingDataLoader |
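# A minimal sketch of the blocking path above: the batch function receives all
# pending keys at once, and results are cached per key afterwards.
loader = BlockingDataLoader(batch_load_fn=lambda keys: [k.upper() for k in keys])
assert loader.blocking_load("a") == "A"                     # single-key fetch
assert loader.blocking_load_many(["b", "c"]) == ["B", "C"]  # one batch, then cache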
python | astropy__astropy | astropy/modeling/projections.py | {
"start": 39064,
"end": 39265
} | class ____(Pix2SkyProjection, QuadCube):
r"""
Quadrilateralized spherical cube projection - pixel to sky.
Corresponds to the ``QSC`` projection in FITS WCS.
"""
| Pix2Sky_QuadSphericalCube |
python | numpy__numpy | numpy/_core/tests/test_multiarray.py | {
"start": 358208,
"end": 363351
} | class ____:
def test_array_scalar_relational_operation(self):
# All integer
for dt1 in np.typecodes['AllInteger']:
assert_(1 > np.array(0, dtype=dt1), f"type {dt1} failed")
assert_(not 1 < np.array(0, dtype=dt1), f"type {dt1} failed")
for dt2 in np.typecodes['AllInteger']:
assert_(np.array(1, dtype=dt1) > np.array(0, dtype=dt2),
f"type {dt1} and {dt2} failed")
assert_(not np.array(1, dtype=dt1) < np.array(0, dtype=dt2),
f"type {dt1} and {dt2} failed")
# Unsigned integers
for dt1 in 'BHILQP':
assert_(-1 < np.array(1, dtype=dt1), f"type {dt1} failed")
assert_(not -1 > np.array(1, dtype=dt1), f"type {dt1} failed")
assert_(-1 != np.array(1, dtype=dt1), f"type {dt1} failed")
# Unsigned vs signed
for dt2 in 'bhilqp':
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed")
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed")
assert_(np.array(1, dtype=dt1) != np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed")
# Signed integers and floats
for dt1 in 'bhlqp' + np.typecodes['Float']:
assert_(1 > np.array(-1, dtype=dt1), f"type {dt1} failed")
assert_(not 1 < np.array(-1, dtype=dt1), f"type {dt1} failed")
assert_(-1 == np.array(-1, dtype=dt1), f"type {dt1} failed")
for dt2 in 'bhlqp' + np.typecodes['Float']:
assert_(np.array(1, dtype=dt1) > np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed")
assert_(not np.array(1, dtype=dt1) < np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed")
assert_(np.array(-1, dtype=dt1) == np.array(-1, dtype=dt2),
f"type {dt1} and {dt2} failed")
def test_to_bool_scalar(self):
assert_equal(bool(np.array([False])), False)
assert_equal(bool(np.array([True])), True)
assert_equal(bool(np.array([[42]])), True)
def test_to_bool_scalar_not_convertible(self):
class NotConvertible:
def __bool__(self):
raise NotImplementedError
assert_raises(NotImplementedError, bool, np.array(NotConvertible()))
assert_raises(NotImplementedError, bool, np.array([NotConvertible()]))
if IS_PYSTON:
pytest.skip("Pyston disables recursion checking")
if IS_WASM:
pytest.skip("Pyodide/WASM has limited stack size")
self_containing = np.array([None])
self_containing[0] = self_containing
Error = RecursionError
assert_raises(Error, bool, self_containing) # previously stack overflow
self_containing[0] = None # resolve circular reference
def test_to_bool_scalar_size_errors(self):
with pytest.raises(ValueError, match=".*one element is ambiguous"):
bool(np.array([1, 2]))
with pytest.raises(ValueError, match=".*empty array is ambiguous"):
bool(np.empty((3, 0)))
with pytest.raises(ValueError, match=".*empty array is ambiguous"):
bool(np.empty((0,)))
def test_to_int_scalar(self):
# gh-9972 means that these aren't always the same
int_funcs = (int, lambda x: x.__int__())
for int_func in int_funcs:
assert_equal(int_func(np.array(0)), 0)
assert_raises(TypeError, int_func, np.array([1]))
assert_raises(TypeError, int_func, np.array([[42]]))
assert_raises(TypeError, int_func, np.array([1, 2]))
# gh-9972
assert_equal(4, int_func(np.array('4')))
assert_equal(5, int_func(np.bytes_(b'5')))
assert_equal(6, int_func(np.str_('6')))
class NotConvertible:
def __int__(self):
raise NotImplementedError
assert_raises(NotImplementedError,
int_func, np.array(NotConvertible()))
assert_raises(TypeError,
int_func, np.array([NotConvertible()]))
def test_to_float_scalar(self):
float_funcs = (float, lambda x: x.__float__())
for float_func in float_funcs:
assert_equal(float_func(np.array(0)), 0.0)
assert_equal(float_func(np.array(1.0, np.float64)), 1.0)
assert_raises(TypeError, float_func, np.array([2]))
assert_raises(TypeError, float_func, np.array([3.14]))
assert_raises(TypeError, float_func, np.array([[4.0]]))
assert_equal(5.0, float_func(np.array('5')))
assert_equal(5.1, float_func(np.array('5.1')))
assert_equal(6.0, float_func(np.bytes_(b'6')))
assert_equal(6.1, float_func(np.bytes_(b'6.1')))
assert_equal(7.0, float_func(np.str_('7')))
assert_equal(7.1, float_func(np.str_('7.1')))
| TestConversion |
python | astropy__astropy | astropy/cosmology/_src/tests/funcs/test_comparison.py | {
"start": 3003,
"end": 5861
} | class ____(ComparisonFunctionTestBase):
"""Test functions ``_parse_format``."""
@pytest.fixture(scope="class")
def converted(self, to_format, format):
if format == "astropy.model": # special case Model
return to_format(format, method="comoving_distance")
converted = to_format(format)
# Some raise a segfault! TODO: figure out why
if isinstance(converted, _CANT_BROADCAST):
converted = _CosmologyWrapper(converted)
return converted
# ========================================================================
def test_shortcut(self, cosmo):
"""Test the already-a-cosmology shortcut."""
# A Cosmology
for fmt in (None, True, False, "astropy.cosmology"):
assert _parse_format(cosmo, fmt) is cosmo, f"{fmt} failed"
# A Cosmology, but improperly formatted
# see ``test_parse_format_error_wrong_format``.
def test_convert(self, converted, format, cosmo):
"""Test converting a cosmology-like object"""
out = _parse_format(converted, format)
assert isinstance(out, Cosmology)
assert out == cosmo
def test_parse_format_error_wrong_format(self, cosmo):
"""
Test ``_parse_format`` errors when given a Cosmology object and format
is not compatible.
"""
with pytest.raises(
ValueError, match=re.escape("for parsing a Cosmology, 'format'")
):
_parse_format(cosmo, "mapping")
def test_parse_format_error_noncosmology_cant_convert(self):
"""
Test ``_parse_format`` errors when given a non-Cosmology object
and format is `False`.
"""
notacosmo = object()
with pytest.raises(TypeError, match=re.escape("if 'format' is False")):
_parse_format(notacosmo, False)
def test_parse_format_vectorized(self, cosmo, format, converted):
# vectorized on cosmos
out = _parse_format([cosmo, cosmo], None)
assert len(out) == 2
assert np.all(out == cosmo)
# vectorized on formats
out = _parse_format(cosmo, [None, None])
assert len(out) == 2
assert np.all(out == cosmo)
# more complex broadcast
out = _parse_format(
[[cosmo, converted], [converted, cosmo]], [[None, format], [format, None]]
)
assert out.shape == (2, 2)
assert np.all(out == cosmo)
def test_parse_formats_vectorized(self, cosmo):
# vectorized on cosmos
out = _parse_formats(cosmo, cosmo, format=None)
assert len(out) == 2
assert np.all(out == cosmo)
# does NOT vectorize on formats
with pytest.raises(ValueError, match="operands could not be broadcast"):
_parse_formats(cosmo, format=[None, None])
| Test_parse_format |
python | pandas-dev__pandas | asv_bench/benchmarks/tslibs/period.py | {
"start": 2944,
"end": 3734
} | class ____:
params = [
_sizes,
_freq_ints,
_tzs,
]
param_names = ["size", "freq", "tz"]
def setup(self, size, freq, tz):
if size == 10**6 and tz is tzlocal_obj:
# tzlocal is cumbersomely slow, so skip to keep runtime in check
raise NotImplementedError
# we pick 2**55 because smaller values end up returning
# -1 from npy_datetimestruct_to_datetime with NPY_FR_Y frequency
# this artificially slows down functions since -1 is also the
# error sentinel
arr = np.arange(2**55, 2**55 + 10, dtype="i8").repeat(size // 10)
self.i8values = arr
def time_dt64arr_to_periodarr(self, size, freq, tz):
dt64arr_to_periodarr(self.i8values, freq, tz)
| TimeDT64ArrToPeriodArr |
python | getsentry__sentry | src/sentry/integrations/utils/metrics.py | {
"start": 13853,
"end": 14687
} | class ____(StrEnum):
"""A specific step in an integration's pipeline that is not a static page."""
# IdentityPipeline
IDENTITY_LOGIN = "identity_login"
IDENTITY_LINK = "identity_link"
TOKEN_EXCHANGE = "token_exchange"
# GitHub
OAUTH_LOGIN = "oauth_login"
GITHUB_INSTALLATION = "github_installation"
ORGANIZATION_SELECTION = "organization_selection"
# Bitbucket
VERIFY_INSTALLATION = "verify_installation"
# Bitbucket Server
# OAUTH_LOGIN = "OAUTH_LOGIN"
OAUTH_CALLBACK = "oauth_callback"
# Azure DevOps
ACCOUNT_CONFIG = "account_config"
# Jira Server
WEBHOOK_CREATION = "webhook_creation"
# All Integrations
FINISH_PIPELINE = "finish_pipeline"
# Opsgenie
INSTALLATION_CONFIGURATION = "installation_configuration"
| IntegrationPipelineViewType |
python | Netflix__metaflow | metaflow/decorators.py | {
"start": 2407,
"end": 2822
} | class ____(MetaflowException):
headline = "Duplicate decorators"
def __init__(self, deco, func):
msg = (
"Step '{step}' already has a decorator '{deco}'. "
"You can specify this decorator only once.".format(
step=func.__name__, deco=deco
)
)
super(DuplicateStepDecoratorException, self).__init__(msg)
| DuplicateStepDecoratorException |
python | neetcode-gh__leetcode | python/1220-count-vowels-permutation.py | {
"start": 0,
"end": 1789
} | class ____:
Memo = {}
def countVowelPermutation(self, n, c = '') -> int:
if (c, n) in self.Memo:
return self.Memo[(c, n)]
if n == 1:
if c == 'a':
return 1
if c == 'e':
return 2
if c == 'i':
return 4
if c == 'o':
return 2
if c == 'u':
return 1
if c == '':
return 5
else:
if c == 'a':
self.Memo[('a', n)] = self.countVowelPermutation(n - 1, 'e')
return self.Memo[('a', n)]
if c == 'e':
self.Memo[('e', n)] = self.countVowelPermutation(n - 1, 'a') + self.countVowelPermutation(n - 1, 'i')
return self.Memo[('e', n)]
if c == 'i':
self.Memo[('i', n)] = self.countVowelPermutation(n - 1, 'a') + self.countVowelPermutation(n - 1, 'e') + self.countVowelPermutation(n - 1, 'o') + self.countVowelPermutation(n - 1, 'u')
return self.Memo[('i', n)]
if c == 'o':
self.Memo[('o', n)] = self.countVowelPermutation(n - 1, 'i') + self.countVowelPermutation(n - 1, 'u')
return self.Memo[('o', n)]
if c == 'u':
self.Memo[('u', n)] = self.countVowelPermutation(n - 1, 'a')
return self.Memo[('u', n)]
if c == '':
Tot = 0
for i in ['a', 'e', 'i', 'o', 'u']:
Tot = Tot + self.countVowelPermutation(n - 1, i)
return Tot % 1000000007
| Solution |
python | FactoryBoy__factory_boy | examples/django_demo/generic_foreignkey/factories.py | {
"start": 712,
"end": 855
} | class ____(TaggedItemFactory):
content_object = factory.SubFactory(UserFactory)
class Meta:
model = TaggedItem
| TaggedUserFactory |
python | apache__airflow | airflow-core/src/airflow/exceptions.py | {
"start": 3839,
"end": 3957
} | class ____(AirflowException):
"""Raise when skipping dag is needed in Cluster Policy."""
| AirflowClusterPolicySkipDag |
python | jd__tenacity | tests/test_tenacity.py | {
"start": 46534,
"end": 51899
} | class ____(unittest.TestCase):
_attempt_number = 0
def test_before_attempts(self):
TestBeforeAfterAttempts._attempt_number = 0
def _before(retry_state):
TestBeforeAfterAttempts._attempt_number = retry_state.attempt_number
@retry(
wait=tenacity.wait_fixed(1),
stop=tenacity.stop_after_attempt(1),
before=_before,
)
def _test_before():
pass
_test_before()
self.assertTrue(TestBeforeAfterAttempts._attempt_number == 1)
def test_after_attempts(self):
TestBeforeAfterAttempts._attempt_number = 0
def _after(retry_state):
TestBeforeAfterAttempts._attempt_number = retry_state.attempt_number
@retry(
wait=tenacity.wait_fixed(0.1),
stop=tenacity.stop_after_attempt(3),
after=_after,
)
def _test_after():
if TestBeforeAfterAttempts._attempt_number < 2:
raise Exception("testing after_attempts handler")
else:
pass
_test_after()
self.assertTrue(TestBeforeAfterAttempts._attempt_number == 2)
def test_before_sleep(self):
def _before_sleep(retry_state):
self.assertGreater(retry_state.next_action.sleep, 0)
_before_sleep.attempt_number = retry_state.attempt_number
@retry(
wait=tenacity.wait_fixed(0.01),
stop=tenacity.stop_after_attempt(3),
before_sleep=_before_sleep,
)
def _test_before_sleep():
if _before_sleep.attempt_number < 2:
raise Exception("testing before_sleep_attempts handler")
_test_before_sleep()
self.assertEqual(_before_sleep.attempt_number, 2)
def _before_sleep_log_raises(self, get_call_fn):
thing = NoIOErrorAfterCount(2)
logger = logging.getLogger(self.id())
logger.propagate = False
logger.setLevel(logging.INFO)
handler = CapturingHandler()
logger.addHandler(handler)
try:
_before_sleep = tenacity.before_sleep_log(logger, logging.INFO)
retrying = Retrying(
wait=tenacity.wait_fixed(0.01),
stop=tenacity.stop_after_attempt(3),
before_sleep=_before_sleep,
)
get_call_fn(retrying)(thing.go)
finally:
logger.removeHandler(handler)
etalon_re = (
r"^Retrying .* in 0\.01 seconds as it raised "
r"(IO|OS)Error: Hi there, I'm an IOError\.$"
)
self.assertEqual(len(handler.records), 2)
fmt = logging.Formatter().format
self.assertRegex(fmt(handler.records[0]), etalon_re)
self.assertRegex(fmt(handler.records[1]), etalon_re)
def test_before_sleep_log_raises(self):
self._before_sleep_log_raises(lambda x: x)
def test_before_sleep_log_raises_with_exc_info(self):
thing = NoIOErrorAfterCount(2)
logger = logging.getLogger(self.id())
logger.propagate = False
logger.setLevel(logging.INFO)
handler = CapturingHandler()
logger.addHandler(handler)
try:
_before_sleep = tenacity.before_sleep_log(
logger, logging.INFO, exc_info=True
)
retrying = Retrying(
wait=tenacity.wait_fixed(0.01),
stop=tenacity.stop_after_attempt(3),
before_sleep=_before_sleep,
)
retrying(thing.go)
finally:
logger.removeHandler(handler)
etalon_re = re.compile(
r"^Retrying .* in 0\.01 seconds as it raised "
r"(IO|OS)Error: Hi there, I'm an IOError\.{0}"
r"Traceback \(most recent call last\):{0}"
r".*$".format("\n"),
flags=re.MULTILINE,
)
self.assertEqual(len(handler.records), 2)
fmt = logging.Formatter().format
self.assertRegex(fmt(handler.records[0]), etalon_re)
self.assertRegex(fmt(handler.records[1]), etalon_re)
def test_before_sleep_log_returns(self, exc_info=False):
thing = NoneReturnUntilAfterCount(2)
logger = logging.getLogger(self.id())
logger.propagate = False
logger.setLevel(logging.INFO)
handler = CapturingHandler()
logger.addHandler(handler)
try:
_before_sleep = tenacity.before_sleep_log(
logger, logging.INFO, exc_info=exc_info
)
_retry = tenacity.retry_if_result(lambda result: result is None)
retrying = Retrying(
wait=tenacity.wait_fixed(0.01),
stop=tenacity.stop_after_attempt(3),
retry=_retry,
before_sleep=_before_sleep,
)
retrying(thing.go)
finally:
logger.removeHandler(handler)
etalon_re = r"^Retrying .* in 0\.01 seconds as it returned None\.$"
self.assertEqual(len(handler.records), 2)
fmt = logging.Formatter().format
self.assertRegex(fmt(handler.records[0]), etalon_re)
self.assertRegex(fmt(handler.records[1]), etalon_re)
def test_before_sleep_log_returns_with_exc_info(self):
self.test_before_sleep_log_returns(exc_info=True)
| TestBeforeAfterAttempts |
python | google__pytype | pytype/ast/visitor_test.py | {
"start": 1334,
"end": 1840
} | class ____: # pylint: disable=invalid-name
"""Tests a custom ast module."""
class AST:
pass
class Thing(AST):
pass
def __getattr__(self, name):
return type(name, (custom_ast.AST,), {})
def iter_fields(self, node):
if isinstance(node, custom_ast.Thing):
return []
elif isinstance(node, custom_ast.AST):
return [("thing", node.thing)]
def parse(self, unused_src):
module = custom_ast.AST()
module.thing = custom_ast.Thing()
return module
| custom_ast |
python | pyca__cryptography | src/cryptography/hazmat/primitives/asymmetric/ec.py | {
"start": 12123,
"end": 13288
} | class ____:
pass
_OID_TO_CURVE = {
EllipticCurveOID.SECP192R1: SECP192R1,
EllipticCurveOID.SECP224R1: SECP224R1,
EllipticCurveOID.SECP256K1: SECP256K1,
EllipticCurveOID.SECP256R1: SECP256R1,
EllipticCurveOID.SECP384R1: SECP384R1,
EllipticCurveOID.SECP521R1: SECP521R1,
EllipticCurveOID.BRAINPOOLP256R1: BrainpoolP256R1,
EllipticCurveOID.BRAINPOOLP384R1: BrainpoolP384R1,
EllipticCurveOID.BRAINPOOLP512R1: BrainpoolP512R1,
EllipticCurveOID.SECT163K1: SECT163K1,
EllipticCurveOID.SECT163R2: SECT163R2,
EllipticCurveOID.SECT233K1: SECT233K1,
EllipticCurveOID.SECT233R1: SECT233R1,
EllipticCurveOID.SECT283K1: SECT283K1,
EllipticCurveOID.SECT283R1: SECT283R1,
EllipticCurveOID.SECT409K1: SECT409K1,
EllipticCurveOID.SECT409R1: SECT409R1,
EllipticCurveOID.SECT571K1: SECT571K1,
EllipticCurveOID.SECT571R1: SECT571R1,
}
def get_curve_for_oid(oid: ObjectIdentifier) -> type[EllipticCurve]:
try:
return _OID_TO_CURVE[oid]
except KeyError:
raise LookupError(
"The provided object identifier has no matching elliptic "
"curve class"
)
| ECDH |