language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pytorch__pytorch | test/dynamo/cpython/3_13/seq_tests.py | {
"start": 2528,
"end": 2810
} | class ____:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __next__(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
| IterNextOnly |
python | sympy__sympy | sympy/matrices/common.py | {
"start": 6493,
"end": 24287
} | class ____(MatrixRequired):
"""Provides basic matrix shaping and extracting of submatrices"""
def _eval_col_del(self, col):
def entry(i, j):
return self[i, j] if j < col else self[i, j + 1]
return self._new(self.rows, self.cols - 1, entry)
def _eval_col_insert(self, pos, other):
def entry(i, j):
if j < pos:
return self[i, j]
elif pos <= j < pos + other.cols:
return other[i, j - pos]
return self[i, j - other.cols]
return self._new(self.rows, self.cols + other.cols, entry)
def _eval_col_join(self, other):
rows = self.rows
def entry(i, j):
if i < rows:
return self[i, j]
return other[i - rows, j]
return classof(self, other)._new(self.rows + other.rows, self.cols,
entry)
def _eval_extract(self, rowsList, colsList):
mat = list(self)
cols = self.cols
indices = (i * cols + j for i in rowsList for j in colsList)
return self._new(len(rowsList), len(colsList),
[mat[i] for i in indices])
def _eval_get_diag_blocks(self):
sub_blocks = []
def recurse_sub_blocks(M):
for i in range(1, M.shape[0] + 1):
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[:i, i:]
to_the_bottom = M[i:, :i]
if any(to_the_right) or any(to_the_bottom):
continue
sub_blocks.append(M[:i, :i])
if M.shape != M[:i, :i].shape:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def _eval_row_del(self, row):
def entry(i, j):
return self[i, j] if i < row else self[i + 1, j]
return self._new(self.rows - 1, self.cols, entry)
def _eval_row_insert(self, pos, other):
entries = list(self)
insert_pos = pos * self.cols
entries[insert_pos:insert_pos] = list(other)
return self._new(self.rows + other.rows, self.cols, entries)
def _eval_row_join(self, other):
cols = self.cols
def entry(i, j):
if j < cols:
return self[i, j]
return other[i, j - cols]
return classof(self, other)._new(self.rows, self.cols + other.cols,
entry)
def _eval_tolist(self):
return [list(self[i,:]) for i in range(self.rows)]
def _eval_todok(self):
dok = {}
rows, cols = self.shape
for i in range(rows):
for j in range(cols):
val = self[i, j]
if val != self.zero:
dok[i, j] = val
return dok
def _eval_vec(self):
rows = self.rows
def entry(n, _):
# we want to read off the columns first
j = n // rows
i = n - j * rows
return self[i, j]
return self._new(len(self), 1, entry)
def _eval_vech(self, diagonal):
c = self.cols
v = []
if diagonal:
for j in range(c):
for i in range(j, c):
v.append(self[i, j])
else:
for j in range(c):
for i in range(j + 1, c):
v.append(self[i, j])
return self._new(len(v), 1, v)
def col_del(self, col):
"""Delete the specified column."""
if col < 0:
col += self.cols
if not 0 <= col < self.cols:
raise IndexError("Column {} is out of range.".format(col))
return self._eval_col_del(col)
def col_insert(self, pos, other):
"""Insert one or more columns at the given column position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.col_insert(1, V)
Matrix([
[0, 1, 0, 0],
[0, 1, 0, 0],
[0, 1, 0, 0]])
See Also
========
col
row_insert
"""
# Allows you to build a matrix even if it is null matrix
if not self:
return type(self)(other)
pos = as_int(pos)
if pos < 0:
pos = self.cols + pos
if pos < 0:
pos = 0
elif pos > self.cols:
pos = self.cols
if self.rows != other.rows:
raise ShapeError(
"The matrices have incompatible number of rows ({} and {})"
.format(self.rows, other.rows))
return self._eval_col_insert(pos, other)
def col_join(self, other):
"""Concatenates two matrices along self's last and other's first row.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.col_join(V)
Matrix([
[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[1, 1, 1]])
See Also
========
col
row_join
"""
# A null matrix can always be stacked (see #10770)
if self.rows == 0 and self.cols != other.cols:
return self._new(0, other.cols, []).col_join(other)
if self.cols != other.cols:
raise ShapeError(
"The matrices have incompatible number of columns ({} and {})"
.format(self.cols, other.cols))
return self._eval_col_join(other)
def col(self, j):
"""Elementary column selector.
Examples
========
>>> from sympy import eye
>>> eye(2).col(0)
Matrix([
[1],
[0]])
See Also
========
row
col_del
col_join
col_insert
"""
return self[:, j]
def extract(self, rowsList, colsList):
r"""Return a submatrix by specifying a list of rows and columns.
Negative indices can be given. All indices must be in the range
$-n \le i < n$ where $n$ is the number of rows or columns.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(4, 3, range(12))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8],
[9, 10, 11]])
>>> m.extract([0, 1, 3], [0, 1])
Matrix([
[0, 1],
[3, 4],
[9, 10]])
Rows or columns can be repeated:
>>> m.extract([0, 0, 1], [-1])
Matrix([
[2],
[2],
[5]])
Every other row can be taken by using range to provide the indices:
>>> m.extract(range(0, m.rows, 2), [-1])
Matrix([
[2],
[8]])
RowsList or colsList can also be a list of booleans, in which case
the rows or columns corresponding to the True values will be selected:
>>> m.extract([0, 1, 2, 3], [True, False, True])
Matrix([
[0, 2],
[3, 5],
[6, 8],
[9, 11]])
"""
if not is_sequence(rowsList) or not is_sequence(colsList):
raise TypeError("rowsList and colsList must be iterable")
# ensure rowsList and colsList are lists of integers
if rowsList and all(isinstance(i, bool) for i in rowsList):
rowsList = [index for index, item in enumerate(rowsList) if item]
if colsList and all(isinstance(i, bool) for i in colsList):
colsList = [index for index, item in enumerate(colsList) if item]
# ensure everything is in range
rowsList = [a2idx(k, self.rows) for k in rowsList]
colsList = [a2idx(k, self.cols) for k in colsList]
return self._eval_extract(rowsList, colsList)
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Examples
========
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
Matrix([
[1, 3],
[y, z**2]])
>>> a2
Matrix([[x]])
>>> a3
Matrix([[0]])
"""
return self._eval_get_diag_blocks()
@classmethod
def hstack(cls, *args):
"""Return a matrix formed by joining args horizontally (i.e.
by repeated application of row_join).
Examples
========
>>> from sympy import Matrix, eye
>>> Matrix.hstack(eye(2), 2*eye(2))
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2]])
"""
if len(args) == 0:
return cls._new()
kls = type(args[0])
return reduce(kls.row_join, args)
def reshape(self, rows, cols):
"""Reshape the matrix. Total number of elements must remain the same.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(2, 3, lambda i, j: 1)
>>> m
Matrix([
[1, 1, 1],
[1, 1, 1]])
>>> m.reshape(1, 6)
Matrix([[1, 1, 1, 1, 1, 1]])
>>> m.reshape(3, 2)
Matrix([
[1, 1],
[1, 1],
[1, 1]])
"""
if self.rows * self.cols != rows * cols:
raise ValueError("Invalid reshape parameters %d %d" % (rows, cols))
return self._new(rows, cols, lambda i, j: self[i * cols + j])
def row_del(self, row):
"""Delete the specified row."""
if row < 0:
row += self.rows
if not 0 <= row < self.rows:
raise IndexError("Row {} is out of range.".format(row))
return self._eval_row_del(row)
def row_insert(self, pos, other):
"""Insert one or more rows at the given row position.
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(1, 3)
>>> M.row_insert(1, V)
Matrix([
[0, 0, 0],
[1, 1, 1],
[0, 0, 0],
[0, 0, 0]])
See Also
========
row
col_insert
"""
# Allows you to build a matrix even if it is null matrix
if not self:
return self._new(other)
pos = as_int(pos)
if pos < 0:
pos = self.rows + pos
if pos < 0:
pos = 0
elif pos > self.rows:
pos = self.rows
if self.cols != other.cols:
raise ShapeError(
"The matrices have incompatible number of columns ({} and {})"
.format(self.cols, other.cols))
return self._eval_row_insert(pos, other)
def row_join(self, other):
"""Concatenates two matrices along self's last and rhs's first column
Examples
========
>>> from sympy import zeros, ones
>>> M = zeros(3)
>>> V = ones(3, 1)
>>> M.row_join(V)
Matrix([
[0, 0, 0, 1],
[0, 0, 0, 1],
[0, 0, 0, 1]])
See Also
========
row
col_join
"""
# A null matrix can always be stacked (see #10770)
if self.cols == 0 and self.rows != other.rows:
return self._new(other.rows, 0, []).row_join(other)
if self.rows != other.rows:
raise ShapeError(
"The matrices have incompatible number of rows ({} and {})"
.format(self.rows, other.rows))
return self._eval_row_join(other)
def diagonal(self, k=0):
"""Returns the kth diagonal of self. The main diagonal
corresponds to `k=0`; diagonals above and below correspond to
`k > 0` and `k < 0`, respectively. The values of `self[i, j]`
for which `j - i = k`, are returned in order of increasing
`i + j`, starting with `i + j = |k|`.
Examples
========
>>> from sympy import Matrix
>>> m = Matrix(3, 3, lambda i, j: j - i); m
Matrix([
[ 0, 1, 2],
[-1, 0, 1],
[-2, -1, 0]])
>>> _.diagonal()
Matrix([[0, 0, 0]])
>>> m.diagonal(1)
Matrix([[1, 1]])
>>> m.diagonal(-2)
Matrix([[-2]])
Even though the diagonal is returned as a Matrix, the element
retrieval can be done with a single index:
>>> Matrix.diag(1, 2, 3).diagonal()[1] # instead of [0, 1]
2
See Also
========
diag
"""
rv = []
k = as_int(k)
r = 0 if k > 0 else -k
c = 0 if r else k
while True:
if r == self.rows or c == self.cols:
break
rv.append(self[r, c])
r += 1
c += 1
if not rv:
raise ValueError(filldedent('''
The %s diagonal is out of range [%s, %s]''' % (
k, 1 - self.rows, self.cols - 1)))
return self._new(1, len(rv), rv)
def row(self, i):
"""Elementary row selector.
Examples
========
>>> from sympy import eye
>>> eye(2).row(0)
Matrix([[1, 0]])
See Also
========
col
row_del
row_join
row_insert
"""
return self[i, :]
@property
def shape(self):
"""The shape (dimensions) of the matrix as the 2-tuple (rows, cols).
Examples
========
>>> from sympy import zeros
>>> M = zeros(2, 3)
>>> M.shape
(2, 3)
>>> M.rows
2
>>> M.cols
3
"""
return (self.rows, self.cols)
def todok(self):
"""Return the matrix as dictionary of keys.
Examples
========
>>> from sympy import Matrix
>>> M = Matrix.eye(3)
>>> M.todok()
{(0, 0): 1, (1, 1): 1, (2, 2): 1}
"""
return self._eval_todok()
def tolist(self):
"""Return the Matrix as a nested Python list.
Examples
========
>>> from sympy import Matrix, ones
>>> m = Matrix(3, 3, range(9))
>>> m
Matrix([
[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
>>> ones(3, 0).tolist()
[[], [], []]
When there are no rows then it will not be possible to tell how
many columns were in the original matrix:
>>> ones(0, 3).tolist()
[]
"""
if not self.rows:
return []
if not self.cols:
return [[] for i in range(self.rows)]
return self._eval_tolist()
def todod(M):
"""Returns matrix as dict of dicts containing non-zero elements of the Matrix
Examples
========
>>> from sympy import Matrix
>>> A = Matrix([[0, 1],[0, 3]])
>>> A
Matrix([
[0, 1],
[0, 3]])
>>> A.todod()
{0: {1: 1}, 1: {1: 3}}
"""
rowsdict = {}
Mlol = M.tolist()
for i, Mi in enumerate(Mlol):
row = {j: Mij for j, Mij in enumerate(Mi) if Mij}
if row:
rowsdict[i] = row
return rowsdict
def vec(self):
"""Return the Matrix converted into a one column matrix by stacking columns
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 3], [2, 4]])
>>> m
Matrix([
[1, 3],
[2, 4]])
>>> m.vec()
Matrix([
[1],
[2],
[3],
[4]])
See Also
========
vech
"""
return self._eval_vec()
def vech(self, diagonal=True, check_symmetry=True):
"""Reshapes the matrix into a column vector by stacking the
elements in the lower triangle.
Parameters
==========
diagonal : bool, optional
If ``True``, it includes the diagonal elements.
check_symmetry : bool, optional
If ``True``, it checks whether the matrix is symmetric.
Examples
========
>>> from sympy import Matrix
>>> m=Matrix([[1, 2], [2, 3]])
>>> m
Matrix([
[1, 2],
[2, 3]])
>>> m.vech()
Matrix([
[1],
[2],
[3]])
>>> m.vech(diagonal=False)
Matrix([[2]])
Notes
=====
This should work for symmetric matrices and ``vech`` can
represent symmetric matrices in vector form with less size than
``vec``.
See Also
========
vec
"""
if not self.is_square:
raise NonSquareMatrixError
if check_symmetry and not self.is_symmetric():
raise ValueError("The matrix is not symmetric.")
return self._eval_vech(diagonal)
@classmethod
def vstack(cls, *args):
"""Return a matrix formed by joining args vertically (i.e.
by repeated application of col_join).
Examples
========
>>> from sympy import Matrix, eye
>>> Matrix.vstack(eye(2), 2*eye(2))
Matrix([
[1, 0],
[0, 1],
[2, 0],
[0, 2]])
"""
if len(args) == 0:
return cls._new()
kls = type(args[0])
return reduce(kls.col_join, args)
| MatrixShaping |
python | chroma-core__chroma | chromadb/telemetry/opentelemetry/__init__.py | {
"start": 1404,
"end": 6021
} | class ____(Component):
def __init__(self, system: System):
super().__init__(system)
otel_init(
system.settings.chroma_otel_service_name,
system.settings.chroma_otel_collection_endpoint,
system.settings.chroma_otel_collection_headers,
OpenTelemetryGranularity(
system.settings.chroma_otel_granularity
if system.settings.chroma_otel_granularity
else "none"
),
)
tracer: Optional[trace.Tracer] = None
granularity: OpenTelemetryGranularity = OpenTelemetryGranularity("none")
def otel_init(
otel_service_name: Optional[str],
otel_collection_endpoint: Optional[str],
otel_collection_headers: Optional[Dict[str, str]],
otel_granularity: OpenTelemetryGranularity,
) -> None:
"""Initializes module-level state for OpenTelemetry.
Parameters match the environment variables which configure OTel as documented
at https://docs.trychroma.com/deployment/observability.
- otel_service_name: The name of the service for OTel tagging and aggregation.
- otel_collection_endpoint: The endpoint to which OTel spans are sent
(e.g. api.honeycomb.com).
- otel_collection_headers: The headers to send with OTel spans
(e.g. {"x-honeycomb-team": "abc123"}).
- otel_granularity: The granularity of the spans to emit.
"""
if otel_granularity == OpenTelemetryGranularity.NONE:
return
resource = Resource(attributes={SERVICE_NAME: str(otel_service_name)})
provider = TracerProvider(resource=resource)
provider.add_span_processor(
BatchSpanProcessor(
# TODO: we may eventually want to make this configurable.
OTLPSpanExporter(
endpoint=str(otel_collection_endpoint),
headers=otel_collection_headers,
)
)
)
trace.set_tracer_provider(provider)
global tracer, granularity
tracer = trace.get_tracer(__name__)
granularity = otel_granularity
T = TypeVar("T", bound=Callable) # type: ignore[type-arg]
def trace_method(
trace_name: str,
trace_granularity: OpenTelemetryGranularity,
attributes: Optional[
Dict[
str,
Union[
str,
bool,
float,
int,
Sequence[str],
Sequence[bool],
Sequence[float],
Sequence[int],
],
]
] = None,
) -> Callable[[T], T]:
"""A decorator that traces a method."""
def decorator(f: T) -> T:
if asyncio.iscoroutinefunction(f):
@wraps(f)
async def async_wrapper(*args, **kwargs): # type: ignore[no-untyped-def]
global tracer, granularity
if trace_granularity < granularity:
return await f(*args, **kwargs)
if not tracer:
return await f(*args, **kwargs)
with tracer.start_as_current_span(trace_name, attributes=attributes):
add_attributes_to_current_span(
{"pod_name": os.environ.get("HOSTNAME")}
)
return await f(*args, **kwargs)
return async_wrapper # type: ignore
else:
@wraps(f)
def wrapper(*args, **kwargs): # type: ignore[no-untyped-def]
global tracer, granularity
if trace_granularity < granularity:
return f(*args, **kwargs)
if not tracer:
return f(*args, **kwargs)
with tracer.start_as_current_span(trace_name, attributes=attributes):
add_attributes_to_current_span(
{"pod_name": os.environ.get("HOSTNAME")}
)
return f(*args, **kwargs)
return wrapper # type: ignore
return decorator
def add_attributes_to_current_span(
attributes: Dict[
str,
Union[
str,
bool,
float,
int,
Sequence[str],
Sequence[bool],
Sequence[float],
Sequence[int],
None,
],
]
) -> None:
"""Add attributes to the current span."""
global tracer, granularity
if granularity == OpenTelemetryGranularity.NONE:
return
if not tracer:
return
span = trace.get_current_span()
span.set_attributes({k: v for k, v in attributes.items() if v is not None})
| OpenTelemetryClient |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/sensors/rds.py | {
"start": 4019,
"end": 5788
} | class ____(RdsBaseSensor):
"""
Waits for RDS export task with a specific status.
.. seealso::
For more information on how to use this sensor, take a look at the guide:
:ref:`howto/sensor:RdsExportTaskExistenceSensor`
:param export_task_identifier: A unique identifier for the snapshot export task.
:param target_statuses: Target status of export task
:param error_statuses: Target error status of export task to fail the sensor
"""
template_fields: Sequence[str] = aws_template_fields(
"export_task_identifier", "target_statuses", "error_statuses"
)
def __init__(
self,
*,
export_task_identifier: str,
target_statuses: list[str] | None = None,
error_statuses: list[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.export_task_identifier = export_task_identifier
self.target_statuses = target_statuses or [
"starting",
"in_progress",
"complete",
"canceling",
"canceled",
]
self.error_statuses = error_statuses or ["failed"]
def poke(self, context: Context):
self.log.info(
"Poking for statuses : %s\nfor export task %s", self.target_statuses, self.export_task_identifier
)
try:
state = self.hook.get_export_task_state(self.export_task_identifier)
if state in self.error_statuses:
raise AirflowException(
f"Export task {self.export_task_identifier} failed with status {state}"
)
except AirflowNotFoundException:
return False
return state in self.target_statuses
| RdsExportTaskExistenceSensor |
python | airbytehq__airbyte | airbyte-ci/connectors/connectors_qa/tests/unit_tests/test_models.py | {
"start": 237,
"end": 3340
} | class ____:
def test_fail_when_requires_metadata_and_metata_is_missing(self, mocker):
# Arrange
connector = mocker.MagicMock(metadata={}, is_released=False)
# Act
results = []
for check in ENABLED_CHECKS:
if check.requires_metadata:
results.append(check.run(connector))
# Assert
assert all(result.status == CheckStatus.FAILED for result in results)
assert all(
result.message
== f"This checks requires metadata file to run. Please add {consts.METADATA_FILE_NAME} file to the connector code directory."
for result in results
)
def test_fail_when_language_is_missing(self, mocker):
# Arrange
connector = mocker.MagicMock(language=None, is_released=False)
# Act
results = []
for check in ENABLED_CHECKS:
results.append(check.run(connector))
# Assert
assert all(result.status == CheckStatus.FAILED for result in results)
assert all(result.message == "Connector language could not be inferred" for result in results)
def test_skip_when_language_does_not_apply(self, mocker):
# Arrange
connector = mocker.MagicMock(language=ConnectorLanguage.JAVA)
# Act
results = []
for check in ENABLED_CHECKS:
if connector.language not in check.applies_to_connector_languages:
results.append(check.run(connector))
# Assert
assert all(result.status == CheckStatus.SKIPPED for result in results)
def test_skip_when_type_does_not_apply(self, mocker):
# Arrange
connector = mocker.MagicMock(connector_type="destination")
# Act
results = []
for check in ENABLED_CHECKS:
if connector.connector_type not in check.applies_to_connector_types:
results.append(check.run(connector))
# Assert
assert all(result.status == CheckStatus.SKIPPED for result in results)
def test_skip_when_check_does_not_apply_to_released_connectors(self, mocker):
# Arrange
connector = mocker.MagicMock(is_released=True)
# Act
results = []
for check in ENABLED_CHECKS:
if not check.runs_on_released_connectors:
results.append(check.run(connector))
# Assert
assert all(result.status == CheckStatus.SKIPPED for result in results)
assert all(result.message == "Check does not apply to released connectors" for result in results)
def test_skip_when_connector_support_level_does_not_apply_to(self, mocker):
# Arrange
connector = mocker.MagicMock(support_level="community")
# Act
results = []
for check in ENABLED_CHECKS:
if check.applies_to_connector_support_levels and connector.support_level not in check.applies_to_connector_support_levels:
results.append(check.run(connector))
# Assert
assert all(result.status == CheckStatus.SKIPPED for result in results)
| TestCheck |
python | ray-project__ray | python/ray/serve/schema.py | {
"start": 51878,
"end": 53109
} | class ____(BaseModel):
"""
Task processor config. You can use it to configure the task processor for your Serve application.
"""
queue_name: str = Field(
..., description="The name of the queue to use for task processing."
)
adapter: Union[str, Callable] = Field(
default="ray.serve.task_processor.CeleryTaskProcessorAdapter",
description="The adapter to use for task processing. By default, Celery is used.",
)
adapter_config: Any = Field(..., description="The adapter config.")
max_retries: Optional[int] = Field(
default=3,
description="The maximum number of times to retry a task before marking it as failed.",
)
failed_task_queue_name: Optional[str] = Field(
default=None,
description="The name of the failed task queue. This is used to move failed tasks to a dead-letter queue after max retries.",
)
unprocessable_task_queue_name: Optional[str] = Field(
default=None,
description="The name of the unprocessable task queue. This is used to move unprocessable tasks(like tasks with serialization issue, or missing handler) to a dead-letter queue.",
)
@PublicAPI(stability="alpha")
| TaskProcessorConfig |
python | keras-team__keras | keras/src/ops/operation_test.py | {
"start": 1210,
"end": 1553
} | class ____(operation.Operation):
def __init__(self, alpha, beta=1.0):
super().__init__()
self.alpha = alpha
self.beta = beta
def call(self, x):
return self.alpha * x + self.beta
def compute_output_spec(self, x):
return keras_tensor.KerasTensor(x.shape, x.dtype)
| OpWithCustomConstructorNoName |
python | kamyu104__LeetCode-Solutions | Python/remove-duplicates-from-sorted-list-ii.py | {
"start": 280,
"end": 826
} | class ____(object):
def deleteDuplicates(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
dummy = ListNode(0)
pre, cur = dummy, head
while cur:
if cur.next and cur.next.val == cur.val:
val = cur.val
while cur and cur.val == val:
cur = cur.next
pre.next = cur
else:
pre.next = cur
pre = cur
cur = cur.next
return dummy.next
| Solution |
python | redis__redis-py | redis/commands/bf/__init__.py | {
"start": 3612,
"end": 4600
} | class ____(TOPKCommands, AbstractBloom):
def __init__(self, client, **kwargs):
"""Create a new RedisBloom client."""
# Set the module commands' callbacks
_MODULE_CALLBACKS = {
TOPK_RESERVE: bool_ok,
# TOPK_QUERY: spaceHolder,
# TOPK_COUNT: spaceHolder,
}
_RESP2_MODULE_CALLBACKS = {
TOPK_ADD: parse_to_list,
TOPK_INCRBY: parse_to_list,
TOPK_INFO: TopKInfo,
TOPK_LIST: parse_to_list,
}
_RESP3_MODULE_CALLBACKS = {}
self.client = client
self.commandmixin = TOPKCommands
self.execute_command = client.execute_command
if get_protocol_version(self.client) in ["3", 3]:
_MODULE_CALLBACKS.update(_RESP3_MODULE_CALLBACKS)
else:
_MODULE_CALLBACKS.update(_RESP2_MODULE_CALLBACKS)
for k, v in _MODULE_CALLBACKS.items():
self.client.set_response_callback(k, v)
| TOPKBloom |
python | apache__airflow | task-sdk/src/airflow/sdk/execution_time/comms.py | {
"start": 30653,
"end": 30873
} | class ____(BaseModel):
dag_id: str
logical_dates: list[AwareDatetime] | None = None
run_ids: list[str] | None = None
states: list[str] | None = None
type: Literal["GetDRCount"] = "GetDRCount"
| GetDRCount |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Filters.py | {
"start": 4556,
"end": 5024
} | class ____(CtrlNode):
"""Filters data by taking the median of a sliding window"""
nodeName = 'MedianFilter'
uiTemplate = [
('n', 'intSpin', {'min': 1, 'max': 1000000})
]
def processData(self, data):
try:
import scipy.ndimage
except ImportError:
raise Exception("MedianFilter node requires the package scipy.ndimage.")
return scipy.ndimage.median_filter(data, self.ctrls['n'].value())
| Median |
python | django__django | tests/inspectdb/models.py | {
"start": 3053,
"end": 3436
} | class ____(models.Model):
json_field = models.JSONField()
null_json_field = models.JSONField(blank=True, null=True)
class Meta:
required_db_features = {
"can_introspect_json_field",
"supports_json_field",
}
test_collation = SimpleLazyObject(
lambda: connection.features.test_collations.get("non_default")
)
| JSONFieldColumnType |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/workflows.py | {
"start": 17403,
"end": 20324
} | class ____(GoogleCloudBaseOperator):
"""
Creates a new execution using the latest revision of the given workflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsCreateExecutionOperator`
:param execution: Required. Execution to be created.
:param workflow_id: Required. The ID of the workflow.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id", "execution")
template_fields_renderers = {"execution": "json"}
operator_extra_links = (WorkflowsExecutionLink(),)
def __init__(
self,
*,
workflow_id: str,
execution: dict,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.execution = execution
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Creating execution")
execution = hook.create_execution(
workflow_id=self.workflow_id,
execution=self.execution,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
execution_id = execution.name.split("/")[-1]
context["task_instance"].xcom_push(key="execution_id", value=execution_id)
WorkflowsExecutionLink.persist(
context=context,
location_id=self.location,
workflow_id=self.workflow_id,
execution_id=execution_id,
project_id=self.project_id or hook.project_id,
)
return Execution.to_dict(execution)
| WorkflowsCreateExecutionOperator |
python | django__django | django/forms/widgets.py | {
"start": 12537,
"end": 12944
} | class ____(Input):
input_type = "password"
template_name = "django/forms/widgets/password.html"
def __init__(self, attrs=None, render_value=False):
super().__init__(attrs)
self.render_value = render_value
def get_context(self, name, value, attrs):
if not self.render_value:
value = None
return super().get_context(name, value, attrs)
| PasswordInput |
python | allegroai__clearml | clearml/backend_api/api_proxy.py | {
"start": 164,
"end": 2041
} | class ____(object):
_main_services_module = "clearml.backend_api.services"
_available_versions = None
def __init__(self, module: str) -> None:
self.__wrapped_name__ = module
self.__wrapped_version__ = Session.api_version
def __getattr__(self, attr: str) -> Any:
if attr in ["__wrapped_name__", "__wrapped__", "__wrapped_version__"]:
return self.__dict__.get(attr)
if not self.__dict__.get("__wrapped__") or self.__dict__.get("__wrapped_version__") != Session.api_version:
if not ApiServiceProxy._available_versions:
services = self._import_module(self._main_services_module, None)
ApiServiceProxy._available_versions = sorted(
Version(name[1:].replace("_", "."))
for name in [
module_name
for _, module_name, _ in pkgutil.iter_modules(services.__path__)
if re.match(r"^v[0-9]+_[0-9]+$", module_name)
]
)
# get the most advanced service version that supports our api
version = [str(v) for v in ApiServiceProxy._available_versions if Session.check_min_api_version(v)][-1]
Session.api_version = version
self.__dict__["__wrapped_version__"] = Session.api_version
name = ".v{}.{}".format(version.replace(".", "_"), self.__dict__.get("__wrapped_name__"))
self.__dict__["__wrapped__"] = self._import_module(name, self._main_services_module)
return getattr(self.__dict__["__wrapped__"], attr)
def _import_module(self, name: str, package: Optional[str]) -> Any:
# noinspection PyBroadException
try:
return importlib.import_module(name, package=package)
except Exception:
return None
| ApiServiceProxy |
python | Netflix__metaflow | metaflow/_vendor/yaml/__init__.py | {
"start": 865,
"end": 11776
} | class ____(RuntimeWarning):
pass
def load_warning(method):
if _warnings_enabled['YAMLLoadWarning'] is False:
return
import warnings
message = (
"calling yaml.%s() without Loader=... is deprecated, as the "
"default Loader is unsafe. Please read "
"https://msg.pyyaml.org/load for full details."
) % method
warnings.warn(message, YAMLLoadWarning, stacklevel=3)
#------------------------------------------------------------------------------
def scan(stream, Loader=Loader):
"""
Scan a YAML stream and produce scanning tokens.
"""
loader = Loader(stream)
try:
while loader.check_token():
yield loader.get_token()
finally:
loader.dispose()
def parse(stream, Loader=Loader):
"""
Parse a YAML stream and produce parsing events.
"""
loader = Loader(stream)
try:
while loader.check_event():
yield loader.get_event()
finally:
loader.dispose()
def compose(stream, Loader=Loader):
"""
Parse the first YAML document in a stream
and produce the corresponding representation tree.
"""
loader = Loader(stream)
try:
return loader.get_single_node()
finally:
loader.dispose()
def compose_all(stream, Loader=Loader):
"""
Parse all YAML documents in a stream
and produce corresponding representation trees.
"""
loader = Loader(stream)
try:
while loader.check_node():
yield loader.get_node()
finally:
loader.dispose()
def load(stream, Loader=None):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
"""
if Loader is None:
load_warning('load')
Loader = FullLoader
loader = Loader(stream)
try:
return loader.get_single_data()
finally:
loader.dispose()
def load_all(stream, Loader=None):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
"""
if Loader is None:
load_warning('load_all')
Loader = FullLoader
loader = Loader(stream)
try:
while loader.check_data():
yield loader.get_data()
finally:
loader.dispose()
def full_load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve all tags except those known to be
unsafe on untrusted input.
"""
return load(stream, FullLoader)
def full_load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve all tags except those known to be
unsafe on untrusted input.
"""
return load_all(stream, FullLoader)
def safe_load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve only basic YAML tags. This is known
to be safe for untrusted input.
"""
return load(stream, SafeLoader)
def safe_load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve only basic YAML tags. This is known
to be safe for untrusted input.
"""
return load_all(stream, SafeLoader)
def unsafe_load(stream):
"""
Parse the first YAML document in a stream
and produce the corresponding Python object.
Resolve all tags, even those known to be
unsafe on untrusted input.
"""
return load(stream, UnsafeLoader)
def unsafe_load_all(stream):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
Resolve all tags, even those known to be
unsafe on untrusted input.
"""
return load_all(stream, UnsafeLoader)
def emit(events, stream=None, Dumper=Dumper,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
"""
Emit YAML parsing events into a stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
stream = io.StringIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
try:
for event in events:
dumper.emit(event)
finally:
dumper.dispose()
if getvalue:
return getvalue()
def serialize_all(nodes, stream=None, Dumper=Dumper,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None):
"""
Serialize a sequence of representation trees into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
if encoding is None:
stream = io.StringIO()
else:
stream = io.BytesIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end)
try:
dumper.open()
for node in nodes:
dumper.serialize(node)
dumper.close()
finally:
dumper.dispose()
if getvalue:
return getvalue()
def serialize(node, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a representation tree into a YAML stream.
If stream is None, return the produced string instead.
"""
return serialize_all([node], stream, Dumper=Dumper, **kwds)
def dump_all(documents, stream=None, Dumper=Dumper,
default_style=None, default_flow_style=False,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None,
encoding=None, explicit_start=None, explicit_end=None,
version=None, tags=None, sort_keys=True):
"""
Serialize a sequence of Python objects into a YAML stream.
If stream is None, return the produced string instead.
"""
getvalue = None
if stream is None:
if encoding is None:
stream = io.StringIO()
else:
stream = io.BytesIO()
getvalue = stream.getvalue
dumper = Dumper(stream, default_style=default_style,
default_flow_style=default_flow_style,
canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end, sort_keys=sort_keys)
try:
dumper.open()
for data in documents:
dumper.represent(data)
dumper.close()
finally:
dumper.dispose()
if getvalue:
return getvalue()
def dump(data, stream=None, Dumper=Dumper, **kwds):
"""
Serialize a Python object into a YAML stream.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=Dumper, **kwds)
def safe_dump_all(documents, stream=None, **kwds):
"""
Serialize a sequence of Python objects into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all(documents, stream, Dumper=SafeDumper, **kwds)
def safe_dump(data, stream=None, **kwds):
"""
Serialize a Python object into a YAML stream.
Produce only basic YAML tags.
If stream is None, return the produced string instead.
"""
return dump_all([data], stream, Dumper=SafeDumper, **kwds)
def add_implicit_resolver(tag, regexp, first=None,
Loader=None, Dumper=Dumper):
"""
Add an implicit scalar detector.
If an implicit scalar value matches the given regexp,
the corresponding tag is assigned to the scalar.
first is a sequence of possible initial characters or None.
"""
if Loader is None:
loader.Loader.add_implicit_resolver(tag, regexp, first)
loader.FullLoader.add_implicit_resolver(tag, regexp, first)
loader.UnsafeLoader.add_implicit_resolver(tag, regexp, first)
else:
Loader.add_implicit_resolver(tag, regexp, first)
Dumper.add_implicit_resolver(tag, regexp, first)
def add_path_resolver(tag, path, kind=None, Loader=None, Dumper=Dumper):
"""
Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None.
"""
if Loader is None:
loader.Loader.add_path_resolver(tag, path, kind)
loader.FullLoader.add_path_resolver(tag, path, kind)
loader.UnsafeLoader.add_path_resolver(tag, path, kind)
else:
Loader.add_path_resolver(tag, path, kind)
Dumper.add_path_resolver(tag, path, kind)
def add_constructor(tag, constructor, Loader=None):
"""
Add a constructor for the given tag.
Constructor is a function that accepts a Loader instance
and a node object and produces the corresponding Python object.
"""
if Loader is None:
loader.Loader.add_constructor(tag, constructor)
loader.FullLoader.add_constructor(tag, constructor)
loader.UnsafeLoader.add_constructor(tag, constructor)
else:
Loader.add_constructor(tag, constructor)
def add_multi_constructor(tag_prefix, multi_constructor, Loader=None):
"""
Add a multi-constructor for the given tag prefix.
Multi-constructor is called for a node if its tag starts with tag_prefix.
Multi-constructor accepts a Loader instance, a tag suffix,
and a node object and produces the corresponding Python object.
"""
if Loader is None:
loader.Loader.add_multi_constructor(tag_prefix, multi_constructor)
loader.FullLoader.add_multi_constructor(tag_prefix, multi_constructor)
loader.UnsafeLoader.add_multi_constructor(tag_prefix, multi_constructor)
else:
Loader.add_multi_constructor(tag_prefix, multi_constructor)
def add_representer(data_type, representer, Dumper=Dumper):
"""
Add a representer for the given type.
Representer is a function accepting a Dumper instance
and an instance of the given data type
and producing the corresponding representation node.
"""
Dumper.add_representer(data_type, representer)
def add_multi_representer(data_type, multi_representer, Dumper=Dumper):
"""
Add a representer for the given type.
Multi-representer is a function accepting a Dumper instance
and an instance of the given data type or subtype
and producing the corresponding representation node.
"""
Dumper.add_multi_representer(data_type, multi_representer)
| YAMLLoadWarning |
python | PyCQA__pylint | tests/functional/m/misplaced_bare_raise.py | {
"start": 1044,
"end": 1433
} | class ____:
try:
pass
except Exception:
raise
raise # [misplaced-bare-raise]
# This works in Python 2, but the intent is nevertheless
# unclear. It will also not work on Python 3, so it's best
# not to rely on it.
exc = None
try:
1/0
except ZeroDivisionError as exc:
pass
if exc:
raise # [misplaced-bare-raise]
# Don't emit if we're in ``__exit__``.
| A |
python | vyperlang__vyper | tests/functional/builtins/codegen/test_convert.py | {
"start": 5828,
"end": 20857
} | class ____(enum.Enum):
Left = enum.auto()
Right = enum.auto()
def _padding_direction(typ):
if isinstance(typ, (BytesM_T, StringT, BytesT)):
return _PadDirection.Right
return _PadDirection.Left
# TODO this could be a function in vyper.builtins._convert
# which implements literal folding and also serves as a reference/spec
def _padconvert(val_bits, direction, n, padding_byte=None):
"""
Takes the ABI representation of a value, and convert the padding if needed.
If fill_zeroes is false, the two halves of the bytestring are just swapped
and the dirty bytes remain dirty. If fill_zeroes is true, the padding
bytes get set to 0
"""
assert len(val_bits) == 32
# convert left-padded to right-padded
if direction == _PadDirection.Right:
tail = val_bits[:-n]
if padding_byte is not None:
tail = padding_byte * len(tail)
return val_bits[-n:] + tail
# right- to left- padded
if direction == _PadDirection.Left:
head = val_bits[n:]
if padding_byte is not None:
head = padding_byte * len(head)
return head + val_bits[:n]
def _from_bits(val_bits, o_typ):
# o_typ: the type to convert to
try:
ret = abi.decode(o_typ.abi_type.selector_name(), val_bits)
if isinstance(o_typ, DecimalT):
return Decimal(ret) / o_typ.divisor
return ret
except eth.codecs.abi.exceptions.DecodeError:
raise _OutOfBounds() from None
def _to_bits(val, i_typ):
# i_typ: the type to convert from
if isinstance(i_typ, DecimalT):
val = val * i_typ.divisor
assert math.ceil(val) == math.floor(val)
val = int(val)
return abi.encode(i_typ.abi_type.selector_name(), val)
def _signextend(val_bytes, bits):
as_uint = int.from_bytes(val_bytes, byteorder="big")
as_sint = unsigned_to_signed(as_uint, bits)
return (as_sint % 2**256).to_bytes(32, byteorder="big")
def _convert_int_to_int(val, o_typ):
lo, hi = o_typ.int_bounds
if not lo <= val <= hi:
return None
return val
def _convert_decimal_to_int(val, o_typ):
# note special behavior for decimal: catch OOB before truncation.
lo, hi = o_typ.int_bounds
if not lo <= val <= hi:
return None
return round_towards_zero(val)
def _convert_int_to_decimal(val, o_typ):
ret = Decimal(val)
lo, hi = o_typ.ast_bounds
if not lo <= ret <= hi:
return None
return ret
def _py_convert(val, i_typ, o_typ):
"""
Perform conversion on the Python representation of a Vyper value.
Returns None if the conversion is invalid (i.e., would revert in Vyper)
"""
if isinstance(i_typ, IntegerT) and isinstance(o_typ, IntegerT):
return _convert_int_to_int(val, o_typ)
if isinstance(i_typ, DecimalT) and isinstance(o_typ, IntegerT):
return _convert_decimal_to_int(val, o_typ)
if isinstance(i_typ, (BoolT, IntegerT)) and isinstance(o_typ, DecimalT):
# Note: Decimal(True) == Decimal("1")
return _convert_int_to_decimal(val, o_typ)
val_bits = _to_bits(val, i_typ)
if isinstance(i_typ, (BytesT, StringT)):
val_bits = val_bits[32:]
if _padding_direction(i_typ) != _padding_direction(o_typ):
# subtle! the padding conversion follows the bytes argument
if isinstance(i_typ, (BytesM_T, BytesT)):
n = bytes_of_type(i_typ)
padding_byte = None
else:
# output type is bytes
n = bytes_of_type(o_typ)
padding_byte = b"\x00"
val_bits = _padconvert(val_bits, _padding_direction(o_typ), n, padding_byte)
if getattr(o_typ, "is_signed", False) and isinstance(i_typ, BytesM_T):
n_bits = _bits_of_type(i_typ)
val_bits = _signextend(val_bits, n_bits)
try:
if isinstance(o_typ, BoolT):
return _from_bits(val_bits, UINT256_T) != 0
ret = _from_bits(val_bits, o_typ)
if isinstance(o_typ, AddressT):
return checksum_encode(ret)
return ret
except _OutOfBounds:
return None
# the matrix of all type pairs
def all_pairs():
return sorted(itertools.product(BASE_TYPES, BASE_TYPES))
# pairs which can compile
def convertible_pairs():
return [(i, o) for (i, o) in all_pairs() if can_convert(i, o)]
# pairs which shouldn't even compile
def non_convertible_pairs():
return [(i, o) for (i, o) in all_pairs() if not can_convert(i, o) and i != o]
# _CASES_CACHE = {}
def cases_for_pair(i_typ, o_typ):
"""
Helper function to generate all cases for pair
"""
# if (i_typ, o_typ) in _CASES_CACHE:
# # cache the cases for reproducibility, to ensure test_passing_cases and test_failing_cases
# # test exactly the two halves of the produced cases.
# return _CASES_CACHE[(i_typ, o_typ)]
cases = interesting_cases_for_type(i_typ)
# only return cases which are valid for the input type
cases = _filter_cases(cases, i_typ)
for c in interesting_cases_for_type(o_typ):
# convert back into i_typ
try:
c = _py_convert(c, o_typ, i_typ)
if c is not None:
cases.append(c)
except eth.codecs.abi.exceptions.EncodeError:
pass
# _CASES_CACHE[(i_typ, o_typ)] = cases
return cases
def generate_passing_cases():
ret = []
for i_typ, o_typ in convertible_pairs():
cases = cases_for_pair(i_typ, o_typ)
for c in cases:
# only add convertible cases
if _py_convert(c, i_typ, o_typ) is not None:
ret.append((i_typ, o_typ, c))
return sorted(ret)
def generate_reverting_cases():
ret = []
for i_typ, o_typ in convertible_pairs():
cases = cases_for_pair(i_typ, o_typ)
for c in cases:
if _py_convert(c, i_typ, o_typ) is None:
ret.append((i_typ, o_typ, c))
return sorted(ret)
def _vyper_literal(val, typ):
if isinstance(typ, BytesM_T):
return "0x" + val.hex()
if isinstance(typ, DecimalT):
tmp = val
val = quantize(val)
assert tmp == val
return str(val)
@pytest.mark.parametrize("i_typ,o_typ,val", generate_passing_cases())
@pytest.mark.fuzzing
def test_convert_passing(get_contract, assert_compile_failed, i_typ, o_typ, val):
expected_val = _py_convert(val, i_typ, o_typ)
if isinstance(o_typ, DecimalT):
expected_val = decimal_to_int(expected_val)
input_val = val
if isinstance(i_typ, DecimalT):
input_val = decimal_to_int(val)
contract_1 = f"""
@external
def test_convert() -> {o_typ}:
return convert({_vyper_literal(val, i_typ)}, {o_typ})
"""
skip_c1 = False
# Skip bytes20 literals when there is ambiguity with `address` since address takes precedence.
# generally happens when there are only digits in the literal.
if i_typ == BYTES20_T and is_checksum_encoded(_vyper_literal(val, BYTES20_T)):
skip_c1 = True
# typechecker inference borked, ambiguity with bytes20
if isinstance(i_typ, AddressT) and o_typ == BYTES20_T and val == val.lower():
skip_c1 = True
if not skip_c1:
c1 = get_contract(contract_1)
assert c1.test_convert() == expected_val
contract_2 = f"""
@external
def test_input_convert(x: {i_typ}) -> {o_typ}:
return convert(x, {o_typ})
"""
c2 = get_contract(contract_2)
assert c2.test_input_convert(input_val) == expected_val
contract_3 = f"""
bar: {i_typ}
@external
def test_state_variable_convert() -> {o_typ}:
self.bar = {_vyper_literal(val, i_typ)}
return convert(self.bar, {o_typ})
"""
c3 = get_contract(contract_3)
assert c3.test_state_variable_convert() == expected_val
contract_4 = f"""
@external
def test_memory_variable_convert(x: {i_typ}) -> {o_typ}:
y: {i_typ} = x
return convert(y, {o_typ})
"""
c4 = get_contract(contract_4)
assert c4.test_memory_variable_convert(input_val) == expected_val
@pytest.mark.parametrize("typ", ["uint8", "int128", "int256", "uint256"])
@pytest.mark.parametrize("val", [1, 2, 2**128, 2**256 - 1, 2**256 - 2])
def test_flag_conversion(get_contract, assert_compile_failed, val, typ):
roles = "\n ".join([f"ROLE_{i}" for i in range(256)])
contract = f"""
flag Roles:
{roles}
@external
def foo(a: Roles) -> {typ}:
return convert(a, {typ})
@external
def bar(a: uint256) -> Roles:
return convert(a, Roles)
"""
if typ == "uint256":
c = get_contract(contract)
assert c.foo(val) == val
assert c.bar(val) == val
else:
assert_compile_failed(lambda: get_contract(contract), TypeMismatch)
@pytest.mark.parametrize("typ", ["uint8", "int128", "int256", "uint256"])
@pytest.mark.parametrize("val", [1, 2, 3, 4, 2**128, 2**256 - 1, 2**256 - 2])
def test_flag_conversion_2(get_contract, assert_compile_failed, tx_failed, val, typ):
contract = f"""
flag Status:
STARTED
PAUSED
STOPPED
@external
def foo(a: {typ}) -> Status:
return convert(a, Status)
"""
if typ == "uint256":
c = get_contract(contract)
lo, hi = int_bounds(signed=False, bits=3)
if lo <= val <= hi:
assert c.foo(val) == val
else:
with tx_failed():
c.foo(val)
else:
assert_compile_failed(lambda: get_contract(contract), TypeMismatch)
# uint256 conversion is currently valid due to type inference on literals
# not quite working yet
same_type_conversion_blocked = sorted(TEST_TYPES - {UINT256_T})
@pytest.mark.parametrize("typ", same_type_conversion_blocked)
def test_same_type_conversion_blocked(get_contract, assert_compile_failed, typ):
code = f"""
@external
def foo(x: {typ}) -> {typ}:
return convert(x, {typ})
"""
assert_compile_failed(lambda: get_contract(code), InvalidType)
@pytest.mark.parametrize("i_typ,o_typ", non_convertible_pairs())
def test_type_conversion_blocked(get_contract, assert_compile_failed, i_typ, o_typ):
code = f"""
@external
def foo(x: {i_typ}) -> {o_typ}:
return convert(x, {o_typ})
"""
assert_compile_failed(lambda: get_contract(code), TypeMismatch)
@pytest.mark.parametrize("typ", sorted(BASE_TYPES))
def test_bytes_too_large_cases(typ):
code_1 = f"""
@external
def foo(x: Bytes[33]) -> {typ}:
return convert(x, {typ})
"""
with pytest.raises(TypeMismatch):
compile_code(code_1)
bytes_33 = b"1" * 33
code_2 = f"""
@external
def foo() -> {typ}:
return convert({bytes_33}, {typ})
"""
with pytest.raises(TypeMismatch):
compile_code(code_2)
@pytest.mark.parametrize("cls1,cls2", itertools.product((StringT, BytesT), (StringT, BytesT)))
def test_bytestring_conversions(cls1, cls2, get_contract, tx_failed):
typ1 = cls1(33)
typ2 = cls2(32)
def bytestring(cls, string):
if cls == BytesT:
return string.encode("utf-8")
return string
code_1 = f"""
@external
def foo(x: {typ1}) -> {typ2}:
return convert(x, {typ2})
"""
c = get_contract(code_1)
for i in range(33): # inclusive 32
s = "1" * i
arg = bytestring(cls1, s)
out = bytestring(cls2, s)
assert c.foo(arg) == out
with tx_failed():
# TODO: sanity check it is convert which is reverting, not arg clamping
c.foo(bytestring(cls1, "1" * 33))
code_2_template = """
@external
def foo() -> {typ}:
return convert({arg}, {typ})
"""
# test literals
for i in range(33): # inclusive 32
s = "1" * i
arg = bytestring(cls1, s)
out = bytestring(cls2, s)
code = code_2_template.format(typ=typ2, arg=repr(arg))
if cls1 == cls2: # ex.: can't convert "" to String[32]
with pytest.raises(InvalidType):
compile_code(code)
else:
c = get_contract(code)
assert c.foo() == out
failing_code = code_2_template.format(typ=typ2, arg=bytestring(cls1, "1" * 33))
with pytest.raises(TypeMismatch):
compile_code(failing_code)
@pytest.mark.parametrize("n", range(1, 33))
def test_Bytes_to_bytes(get_contract, n: int):
t_bytes = f"bytes{n}"
t_Bytes = f"Bytes[{n}]"
test_data = b"\xff" * n
code1 = f"""
@external
def foo() -> {t_bytes}:
x: {t_Bytes} = {test_data}
return convert(x, {t_bytes})
"""
c1 = get_contract(code1)
assert c1.foo() == test_data
code2 = f"""
bar: {t_Bytes}
@external
def foo() -> {t_bytes}:
self.bar = {test_data}
return convert(self.bar, {t_bytes})
"""
c2 = get_contract(code2)
assert c2.foo() == test_data
@pytest.mark.parametrize("i_typ,o_typ,val", generate_reverting_cases())
@pytest.mark.fuzzing
def test_conversion_failures(get_contract, assert_compile_failed, tx_failed, i_typ, o_typ, val):
"""
Test multiple contracts and check for a specific exception.
If no exception is provided, a runtime revert is expected (e.g. clamping).
"""
contract_1 = f"""
@external
def foo() -> {o_typ}:
return convert({_vyper_literal(val, i_typ)}, {o_typ})
"""
c1_exception = InvalidLiteral
if isinstance(i_typ, IntegerT) and isinstance(o_typ, BytesM_T):
# integer literals get upcasted to uint256 / int256 types, so the convert
# will not compile unless it is bytes32
if o_typ != BYTES32_T:
c1_exception = TypeMismatch
# compile-time folding not implemented for these:
skip_c1 = False
# if isinstance(o_typ, IntegerT.signeds()) and isinstance(i_typ, Address()):
# skip_c1 = True
if isinstance(o_typ, BytesM_T):
skip_c1 = True
# if o_typ in (AddressT(), BYTES20_T):
# skip_c1 = True
if not skip_c1:
assert_compile_failed(lambda: get_contract(contract_1), c1_exception)
contract_2 = f"""
@external
def foo():
bar: {i_typ} = {_vyper_literal(val, i_typ)}
foobar: {o_typ} = convert(bar, {o_typ})
"""
c2 = get_contract(contract_2)
with tx_failed():
c2.foo()
contract_3 = f"""
@external
def foo(bar: {i_typ}) -> {o_typ}:
return convert(bar, {o_typ})
"""
c3 = get_contract(contract_3)
input_val = val
if isinstance(i_typ, DecimalT):
input_val = decimal_to_int(input_val)
with tx_failed():
c3.foo(input_val)
@pytest.mark.parametrize(
"val",
[
"a000",
"0880",
"deadbeef",
"cafebabe",
"0123456789abcdef",
# test cases that would trigger sign extension
"80",
"8000",
"800000",
"80000000",
"8000000000000000",
"ff",
"ffff",
"ffffffff",
"ffffffffffffffff",
],
)
def test_convert_bytes_literal_int(get_contract, val):
expected = int(val, 16)
source = f"""
@external
def test() -> uint256:
return convert(x'{val}', uint256)
"""
c = get_contract(source)
assert c.test() == expected
| _PadDirection |
python | google__jax | tests/memories_test.py | {
"start": 28448,
"end": 62038
} | class ____(jtu.BufferDonationTestCase):
def setUp(self):
if not jtu.test_device_matches(["tpu", "gpu"]):
self.skipTest("Memories do not work on CPU backends yet.")
super().setUp()
def _check_mem_kind(self, executable_kind, out_sharding, expected_kind):
out_kind = out_sharding.memory_kind
self.assertEqual(executable_kind, out_kind)
self.assertEqual(out_kind, expected_kind)
self.assertEqual(executable_kind, expected_kind)
def test_compute_no_inputs(self):
mesh = jtu.create_mesh((4,), ('data'))
tpu_sharding = NamedSharding(mesh, P('data'))
cpu_sharding = NamedSharding(mesh, P('data'), memory_kind='pinned_host')
@functools.partial(jax.jit, out_shardings=(tpu_sharding, cpu_sharding))
def init():
tpu_array = jax.random.normal(jax.random.key(42), (16,16))
cpu_array = jax.random.normal(jax.random.key(42), (16,16))
return tpu_array, cpu_array
tpu_array, cpu_array = init()
self.assertEqual(tpu_array.sharding, tpu_sharding)
self.assertEqual(cpu_array.sharding, cpu_sharding)
def test_compute_no_inputs_host_replicated(self):
mesh = jtu.create_mesh((4,), ('data'))
tpu_sharding = NamedSharding(mesh, P('data'))
cpu_sharding = NamedSharding(mesh, P(), memory_kind='pinned_host')
@functools.partial(jax.jit, out_shardings=(tpu_sharding, cpu_sharding))
def init():
tpu_array = jax.random.normal(jax.random.key(42), (16, 16))
cpu_array = jax.random.normal(jax.random.key(42), (16, 16))
return tpu_array, cpu_array
tpu_array, cpu_array = init()
self.assertEqual(tpu_array.sharding, tpu_sharding)
self.assertEqual(cpu_array.sharding, cpu_sharding)
def test_compute_on_basic(self):
out_s = SingleDeviceSharding(jax.devices()[0], memory_kind='pinned_host')
@compute_on2(compute_type='device_host',
out_memory_spaces=jax.memory.Space.Device)
def g(x):
return x * 2
@jax.jit
def f(x):
y = g(x)
return y * 3
inp = jnp.arange(8)
out = f(inp)
self.assertArraysEqual(out, inp * 6)
lowered_text = f.lower(jnp.arange(8)).as_text()
self.assertIn('_xla_compute_type', lowered_text)
@functools.partial(jax.jit, out_shardings=out_s)
def h(x):
y = g(x)
return y * 3
out2 = h(inp)
self.assertArraysEqual(out2, inp * 6)
self.assertEqual(out2.sharding.memory_kind, "pinned_host")
def test_compute_on_2d(self):
out_s = SingleDeviceSharding(jax.devices()[0], memory_kind="pinned_host")
@compute_on("device_host")
@jax.jit
def g(x):
return x * 2
@jax.jit
def f(x):
y = g(x)
return y * 3
inp = jnp.arange(9943.0)
inp = jnp.reshape(inp, (61, 163))
out = f(inp)
self.assertArraysEqual(out, inp * 6)
lowered_text = f.lower(inp).as_text()
self.assertIn("_xla_compute_type", lowered_text)
@functools.partial(jax.jit, out_shardings=out_s)
def h(x):
y = g(x)
return y * 3
out2 = h(inp)
self.assertArraysEqual(out2, inp * 6)
self.assertEqual(out2.sharding.memory_kind, 'pinned_host')
def test_compute_on_host_shared_sharding(self):
mesh = jtu.create_mesh((2,), ("x"))
device_sharding = NamedSharding(mesh, P("x"))
host_sharding = device_sharding.with_memory_kind("pinned_host")
@compute_on("device_host")
@jax.jit
def host_func(x, y):
y = jax.device_put(y, host_sharding)
out1 = x * y
out2 = (x ** 2) * (y ** 2)
return (jax.device_put(out1, host_sharding),
jax.device_put(out2, device_sharding))
@functools.partial(
jax.jit,
out_shardings=(host_sharding, device_sharding),
donate_argnums=(0),
)
def device_func(host_data, device_data):
host_data, device_data = host_func(host_data, device_data)
device_data = device_data * 2
host_data, device_data = host_func(host_data, device_data)
return host_data, device_data
input_host = jax.device_put(jnp.ones(8), host_sharding)
input_device = jnp.arange(8)
input_device = jnp.where(input_device < 4, 0, 1)
input_device = jax.device_put(input_device, device_sharding)
output_host, output_device = device_func(input_host, input_device)
self.assertEqual(output_host.sharding.memory_kind, 'pinned_host')
self.assertEqual(output_device.sharding.memory_kind, 'device')
self.assertArraysEqual(output_host, [0., 0., 0., 0., 2., 2., 2., 2.])
self.assertArraysEqual(output_device, [0., 0., 0., 0., 4., 4., 4., 4.])
def test_compute_on_basic_inline(self):
@compute_on('device_host')
@jax.jit
def g(x):
return x * 2
@functools.partial(jax.jit, inline=True)
def h(x):
y = g(x)
return y * 3
@jax.jit
def f(x):
return h(x)
inp = jnp.arange(8)
out = f(inp)
self.assertArraysEqual(out, inp * 6)
lowered_text = f.lower(jnp.arange(8)).as_text('hlo')
self.assertRegex(lowered_text,
'to_apply=g.*frontend_attributes={_xla_compute_type="host"}')
def test_compute_on_reduction(self):
out_s = SingleDeviceSharding(jax.devices()[0], memory_kind='pinned_host')
@compute_on('device_host')
@jax.jit
def g(x):
# Reduction generates multiple host computations (inside a single host
# computation module): the main one and a reduction body.
return jnp.sum(x)
@jax.jit
def f(x):
y = g(x)
z = jnp.sum(x)
return y * z
inp = jnp.arange(8)
out = f(inp)
self.assertArraysEqual(out, np.sum(inp) * np.sum(inp))
lowered_text = f.lower(jnp.arange(8)).as_text()
self.assertIn('_xla_compute_type', lowered_text)
@functools.partial(jax.jit, out_shardings=out_s)
def h(x):
y = g(x)
z = jnp.sum(x)
return y * z
out2 = h(inp)
self.assertArraysEqual(out2, np.sum(inp) * np.sum(inp))
self.assertEqual(out2.sharding.memory_kind, 'pinned_host')
def test_compute_host_loop(self):
@compute_on('device_host')
@jax.jit
def fn():
k = jax.random.key(0)
return jax.nn.initializers.lecun_normal()(k, (2, 2), jnp.float32)
fn() # doesn't crash
@compute_on('device_host')
def fn():
k = jax.random.key(0)
return jax.nn.initializers.lecun_normal()(k, (2, 2), jnp.float32)
fn() # doesn't crash
def test_nested_compute_error(self):
@compute_on('device')
@jax.jit
def f0(x):
return x * 2
@compute_on('device_host')
@jax.jit
def f1(x):
return f0(x)
@jax.jit
def f2(x):
return f1(x)
with self.assertRaisesRegex(
NotImplementedError,
"Nesting `compute_on` with different compute types is not supported"
" yet."):
f2(jnp.arange(8))
def test_compute_on_grad(self):
@compute_on2(compute_type='device_host',
out_memory_spaces=jax.memory.Space.Device)
def g(x):
return jnp.sin(x)
def f(x):
y = g(x)
return jnp.sum(y)
inp = jnp.arange(8.)
jf = jax.jit(jax.grad(f))
jtu.check_grads(jf, (inp,), order=2)
lowered_text = jf.lower(inp).as_text('hlo')
out = re.findall(r"call.*to_apply.*_xla_compute_type", lowered_text)
self.assertLen(out, 1)
def test_compute_on_remat(self):
inp = jnp.arange(16.)
def policy(prim, *avals, **params):
return Recompute
@compute_on2(compute_type='device_host',
out_memory_spaces=jax.memory.Space.Device)
def g(x):
x = jnp.sin(x)
x = jnp.sin(x)
x = jnp.sin(x)
return x
@functools.partial(remat, policy=policy)
def f(x):
x = g(x)
return jnp.sum(x)
# Execution test.
jf = jax.jit(jax.grad(f))
jf(inp) # doesn't crash
lowered_text = jf.lower(inp).as_text('hlo')
out = re.findall(r"call.*to_apply.*_xla_compute_type", lowered_text)
self.assertLen(out, 1)
def test_nested_no_op_compute(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
np_inp = np.arange(16).reshape(8, 2)
arr = jax.device_put(np_inp, s)
@compute_on('device_host')
@jax.jit
def f0(x):
return x * 2
@compute_on('device_host')
@jax.jit
def f1(x):
x = x * 3
return f0(x)
@jax.jit
def f2(x):
return f1(x)
out = f2(arr)
self.assertArraysEqual(out, arr * 6)
self.assertEqual(out.sharding, s)
def test_sharded_compute_on_host(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
np_inp = np.arange(16).reshape(8, 2)
arr = jax.device_put(np_inp, s)
@compute_on2(compute_type='device_host',
out_memory_spaces=jax.memory.Space.Device)
def g(x, y):
return x * y
@jax.jit
def f(x):
x = x * 3
return g(x, x)
out = f(arr)
expected_out = (np_inp * 3) * (np_inp * 3)
self.assertEqual(out.sharding, s)
self.assertArraysEqual(out, expected_out)
def test_host_offload_in_custom_vjp(self):
if xb.backend_xla_version() is not None and xb.backend_xla_version() < 2:
self.skipTest("This test requires an xla_version >= 2.")
@jax.custom_vjp
def f(x):
return jnp.sin(x)
@compute_on('device_host')
@jax.jit
def eq(x, y):
return (x == y).astype(jnp.float32)
def f_fwd(x):
y = x * 2
z = jax.device_put(y, jax.memory.Space.Host)
return y, (x, z)
def f_bwd(res, tx):
x, z = res
y = x * 2
z2 = jax.device_put(y, jax.memory.Space.Host)
return (eq(z, z2),)
f.defvjp(f_fwd, f_bwd)
g = jax.jit(jax.grad(lambda x: f(x).sum()))
x = jnp.ones(3) * 4
all_true = jnp.ones(3, jnp.float32)
self.assertArraysEqual(g(x), all_true)
def test_host_offload_in_custom_vjp_sharded(self):
if xb.backend_xla_version() is not None and xb.backend_xla_version() < 2:
self.skipTest("This test requires an xla_version >= 2.")
mesh = jtu.create_mesh((2, 2), ("x", "y"))
s = NamedSharding(mesh, P('x'))
@jax.custom_vjp
def f(x):
return jnp.sin(x)
@compute_on('device_host')
@jax.jit
def eq(x, y):
return (x == y).astype(jnp.float32)
def f_fwd(x):
y = x * 2
z = jax.device_put(y, s.with_memory_kind('pinned_host'))
return y, (x, z)
def f_bwd(res, tx):
x, z = res
y = x * 2
z2 = jax.device_put(y, s.with_memory_kind('pinned_host'))
return (eq(z, z2),)
f.defvjp(f_fwd, f_bwd)
g = jax.jit(jax.grad(lambda x: f(x).sum()))
arr = jax.device_put(jnp.ones(4) * 4, s)
all_true = jnp.ones(4, dtype=jnp.float32)
self.assertArraysEqual(g(arr), all_true)
def test_scan_offload(self):
np_inp = jnp.arange(4096).reshape(16, 16, 16)
@jax.jit
def f(xs):
def body(carry, x):
with compute_on('device_host'):
out_tpu = x + carry
return carry, out_tpu
_, res = jax.lax.scan(body, 1, xs)
return res
out = f(np_inp)
self.assertArraysEqual(out, np_inp + 1)
@compute_on('device_host')
@jax.jit
def body2(carry, x):
out_tpu = x + carry
return carry, out_tpu
@jax.jit
def f2(xs):
_, res = jax.lax.scan(body2, 1, xs)
return res
out2 = f2(np_inp)
self.assertArraysEqual(out2, np_inp + 1)
@parameterized.parameters(True, False)
def test_copy_offload(self, jit_compute_fn: bool):
# test an explicit copy within the host computation.
def g(x):
return jnp.copy(x) * 2
@jax.jit
def f(x):
if jit_compute_fn:
y = compute_on("device_host")(jax.jit(g))(x)
else:
y = compute_on("device_host")(g)(x)
return y * 3
inp = jnp.arange(8)
out = f(inp)
self.assertArraysEqual(out, inp * 6)
lowered_text = f.lower(jnp.arange(8)).as_text()
self.assertIn('_xla_compute_type', lowered_text)
def test_pure_host_data_and_compute(self):
if xb.backend_xla_version() is not None and xb.backend_xla_version() < 2:
self.skipTest("This test requires an xla_version >= 2.")
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'), memory_kind='pinned_host')
np_inp = np.arange(16).reshape(8, 2)
arr_host = jax.device_put(np_inp, s)
@compute_on('device_host')
@jax.jit
def g(x):
return x * x
@functools.partial(jax.jit, out_shardings=s)
def f(x):
return g(x)
out = f(arr_host)
self.assertEqual(out.sharding, s)
self.assertEqual(out.sharding.memory_kind, 'pinned_host')
self.assertArraysEqual(out, np_inp * np_inp)
def test_eager_compute(self):
inp = jnp.arange(8.)
with compute_on('device_host'):
out = inp * 2
out = jnp.sin(out)
self.assertArraysAllClose(out, jnp.sin(inp * 2))
def test_compute_per_annotation(self):
mesh = jtu.create_mesh((2, 2), ("x", "y"))
s = NamedSharding(mesh, P("x", "y"))
np_inp = np.arange(16.).reshape(8, 2)
arr = jax.device_put(np_inp, s)
@jax.jit
@compute_on('device_host')
def f(x):
return jnp.sin(x * 2)
# # sharded input
out = f(arr)
self.assertArraysAllClose(out, np.sin(np_inp * 2))
out2 = f(np_inp)
self.assertArraysAllClose(out2, np.sin(np_inp * 2))
def test_jit_host_multi_outputs(self):
if xb.backend_xla_version() is not None and xb.backend_xla_version() < 2:
self.skipTest("This test requires an xla_version >= 2.")
_, s, np_inp, inp = _create_inputs((8, 2), P("x"))
@jax.jit
def f(x, y):
x, y = jnp.sin(x), jnp.cos(y)
x = jax.device_put(x, s.with_memory_kind("pinned_host"))
y = jax.device_put(y, s.with_memory_kind("device"))
return x, y
out1, out2 = f(inp, inp)
self.assertArraysAllClose(out1, np.sin(np_inp))
self.assertArraysAllClose(out2, np.cos(np_inp))
self.assertEqual(out1.sharding, s.with_memory_kind("pinned_host"))
self.assertEqual(out2.sharding, s.with_memory_kind("device"))
def test_jit_out_shardings_single_output(self):
mesh, _, _, inp = _create_inputs((8, 2), P("x", "y"))
out_s = NamedSharding(mesh, P(), memory_kind="pinned_host")
@functools.partial(jax.jit, out_shardings=out_s)
def g(x):
return jnp.sum(x * 2)
out = g(inp)
self.assertEqual(out.sharding, out_s)
executable_mk = get_memory_kinds_from_executable(g, [inp])
self._check_mem_kind(executable_mk[0], out.sharding, "pinned_host")
@jax.jit
def h(x):
x = jnp.sum(x * 2)
out = jax.device_put(x, out_s)
return out
out = h(inp)
self.assertEqual(out.sharding, out_s)
executable_mk = get_memory_kinds_from_executable(h, [inp])
self._check_mem_kind(executable_mk[0], out.sharding, "pinned_host")
def test_jit_in_shardings(self):
_, s, np_inp, inp = _create_inputs((8, 2), P("x", "y"))
@functools.partial(jax.jit, in_shardings=s.with_memory_kind("pinned_host"))
def f(x):
return x * 2
with self.assertRaisesRegex(
ValueError,
"Memory kinds passed to jax.jit does not match memory kind on the"
" respective arg. Got jit memory kind: pinned_host, arg memory kind:"
" device for arg.*"):
f(jnp.arange(16).reshape(8, 2)) # uncommitted inp also raises error
with self.assertRaisesRegex(
ValueError,
"Memory kinds passed to jax.jit does not match memory kind on the"
" respective arg. Got jit memory kind: pinned_host, arg memory kind:"
" device for arg.*"):
f(inp) # committed inp raises error.
@functools.partial(jax.jit, in_shardings=s.with_memory_kind("device"))
def g(x):
return x * 2
out = g(inp)
executable_kind = get_memory_kinds_from_executable(g, [inp])
self.assertArraysEqual(out, np_inp * 2)
self._check_mem_kind(executable_kind[0], out.sharding, "device")
def test_jit_in_out_shardings(self):
mesh, s, np_inp, inp = _create_inputs((8, 2), P("x", "y"), mem_kind="device")
out_s = NamedSharding(mesh, P(), memory_kind="device")
@functools.partial(jax.jit, in_shardings=s, out_shardings=out_s)
def f(x):
return jnp.sum(x)
out = f(inp)
executable_kind = get_memory_kinds_from_executable(f, [inp])
self.assertArraysEqual(out, np.sum(np_inp))
self._check_mem_kind(executable_kind[0], out.sharding, "device")
@functools.partial(
jax.jit,
in_shardings=s,
out_shardings=out_s.with_memory_kind("pinned_host"),
)
def g(x):
return jnp.sum(x)
out = g(inp)
executable_kind = get_memory_kinds_from_executable(g, [inp])
self.assertArraysEqual(out, np.sum(np_inp))
self._check_mem_kind(executable_kind[0], out.sharding, "pinned_host")
def test_device_put_different_devices(self):
_, _, _, inp = _create_inputs((8, 2), P("x", "y"))
@jax.jit
def f(x):
return jax.device_put(
x, SingleDeviceSharding(jax.devices()[0], memory_kind="pinned_host"))
with self.assertRaisesRegex(
ValueError, "Received incompatible devices for jitted computation"):
f(inp)
def test_jit_cpp_cache_hit(self):
mesh, _, np_inp, inp = _create_inputs((8, 2), P("x", "y"))
inp2 = jax.device_put(
np_inp, NamedSharding(mesh, P("x", "y"), memory_kind="device"))
f = jax.jit(lambda x: x @ x.T)
with jtu.count_pjit_cpp_cache_miss() as count:
out = f(inp)
out2 = f(inp2)
self.assertEqual(count(), 1)
self.assertArraysEqual(out, np_inp @ np_inp.T)
self.assertArraysEqual(out2, np_inp @ np_inp.T)
def test_jit_compilation_cache_hit(self):
if config.use_shardy_partitioner.value:
self.skipTest("Shardy doesn't support GSPMDSharding")
mesh, s, np_inp, inp = _create_inputs((8, 2), P("x", "y"))
inp2 = jax.device_put(
np_inp, GSPMDSharding(tuple(mesh.devices.flat),
s._to_xla_hlo_sharding(inp.ndim),
memory_kind="device")
)
f = jax.jit(lambda x: x @ x.T)
with (jtu.count_pjit_cpp_cache_miss() as cpp_count,
jtu.count_jit_and_pmap_lowerings() as lowering_count):
f(inp)
f(inp2)
self.assertEqual(cpp_count(), 2)
self.assertEqual(lowering_count(), 2)
def test_jit_cpp_cache_output_hit(self):
_, _, _, inp = _create_inputs((8, 2), P("x"), mem_kind="device")
@jax.jit
def mul_two(x):
return x * 2
with jtu.count_pjit_cpp_cache_miss() as count:
out = mul_two(inp)
mul_two(out)
self.assertEqual(count(), 1)
def test_jit_cache_hit_with_default_and_specified_mem_kind(self):
_, s, np_inp, _ = _create_inputs((8, 2), P("x", "y"))
_, s2, np_inp2, _ = _create_inputs((8, 2), P("x", "y"), mem_kind="device")
def mul(x):
return x @ x.T
f = jax.jit(mul, in_shardings=s)
g = jax.jit(mul, in_shardings=s2)
with jtu.count_jit_and_pmap_lowerings() as count:
out = f(np_inp)
out2 = g(np_inp2)
self.assertEqual(count(), 1)
self.assertArraysEqual(out, np_inp @ np_inp.T)
self.assertArraysEqual(out2, np_inp2 @ np_inp2.T)
def test_sharding_devices_indices_map_cache_hit(self):
mesh = jtu.create_mesh((2, 2), ("x", "y"))
shape = (8, 2)
s1 = NamedSharding(mesh, P("x", "y"))
s2 = NamedSharding(mesh, P("x", "y"), memory_kind="device")
s1.devices_indices_map(shape)
cache_info1 = common_devices_indices_map.cache_info()
s2.devices_indices_map(shape)
cache_info2 = common_devices_indices_map.cache_info()
self.assertEqual(cache_info2.hits, cache_info1.hits + 1)
self.assertEqual(cache_info2.misses, cache_info1.misses)
def test_no_donation_across_memory_kinds(self):
mesh = jtu.create_mesh((2, 1), ("x", "y"))
np_inp = np.arange(16).reshape(8, 2)
s_hbm = NamedSharding(mesh, P("x"))
s_host = s_hbm.with_memory_kind("pinned_host")
inp = jax.device_put(np_inp, s_hbm)
@functools.partial(jax.jit, out_shardings=s_host, donate_argnums=0)
def f(x):
return x * 2
with self.assertWarnsRegex(
UserWarning, "Some donated buffers were not usable"):
f(inp)
lowered_text = f.lower(inp).as_text("hlo")
self.assertNotIn("input_output_alias", lowered_text)
self.assertNotDeleted(inp)
def test_single_mem_kind_donation_default_mem_kind(self):
mesh = jtu.create_mesh((2,), "x")
s = NamedSharding(mesh, P())
@functools.partial(jax.jit, out_shardings=s, donate_argnums=0)
def f(inp1):
return inp1 * 2
x = jax.device_put(np.arange(16).reshape(8, 2), s)
f(x)
lowered_text = f.lower(x).as_text("hlo")
self.assertIn("input_output_alias", lowered_text)
self.assertDeleted(x)
def test_compute_offload_inside_shmap(self):
mesh = jtu.create_mesh((2, 2), ('x', 'y'))
s = NamedSharding(mesh, P('x', 'y'))
np_inp = np.arange(16).reshape(8, 2)
arr = jax.device_put(np_inp, s)
@compute_on('device_host')
@jax.jit
def g(x):
return x * 2
def f(x):
x = x * 3
y = g(x)
return y * 4
out = jax.jit(shard_map(f, mesh=mesh, in_specs=P('x', 'y'),
out_specs=P('x', 'y')))(arr)
self.assertArraysEqual(out, np_inp * 24)
def test_qr_decomposition_offload(self):
if jtu.is_cloud_tpu():
self.skipTest("Test fails on cloud TPU")
if jtu.test_device_matches(["gpu"]):
# TODO(b/446898771) This test fails on GPU in OSS, it will work
# internally.
self.skipTest("Test doesn't work on GPU in OSS.")
shape = (3, 3)
dtype = np.float32
operand = jnp.reshape(jnp.arange(math.prod(shape), dtype=dtype), shape)
@compute_on("device_host")
@jax.jit
def g(x):
return lax.linalg.qr(x, full_matrices=True)
@jax.jit
def f(x):
x, _ = lax.linalg.qr(x, full_matrices=True)
x, _ = g(x)
return x
out = f(operand) # doesn't crash
lowered_text = f.lower(operand).as_text()
self.assertIn('@lapack_sgeqrf', lowered_text)
if jtu.test_device_matches(["tpu"]):
self.assertIn("@Qr", lowered_text)
@jax.jit
def h(x):
x, _ = lax.linalg.qr(x, full_matrices=True)
x, _ = lax.linalg.qr(x, full_matrices=True)
return x
expected_out = h(operand)
self.assertArraysAllClose(out, expected_out, rtol=1e-3)
def test_mem_kind_donation_pinned_host(self):
mesh = jtu.create_mesh((2,), "x")
s = NamedSharding(mesh, P(), memory_kind='pinned_host')
s_dev = s.with_memory_kind('device')
@functools.partial(jax.jit, out_shardings=(s, s_dev), donate_argnums=(0, 1))
@compute_on('device_host')
def f(inp1, inp2):
return inp1 * 2, inp2 * 2
np_inp = np.arange(16).reshape(8, 2)
x = jax.device_put(np_inp, s)
x_dev = jax.device_put(np_inp, s_dev)
f(x, x_dev)
lowered_text = f.lower(x, x_dev).as_text("hlo")
self.assertIn("input_output_alias", lowered_text)
self.assertDeleted(x)
self.assertDeleted(x_dev)
@parameterized.parameters("pinned_host", "device")
def test_identity_mem_kind_donation(self, mem_kind):
mesh = jtu.create_mesh((2,), "x")
s = NamedSharding(mesh, P(), memory_kind=mem_kind)
@functools.partial(jax.jit, out_shardings=s, donate_argnums=0)
def f(inp):
return inp
np_inp = np.arange(16).reshape(8, 2)
x = jax.device_put(np_inp, s)
f(x)
lowered_text = f.lower(x).as_text("hlo")
self.assertIn("input_output_alias", lowered_text)
self.assertDeleted(x)
def test_compute_offload_with_donation(self):
sharding = jax.sharding.SingleDeviceSharding(jax.devices()[0])
p_sharding = jax.sharding.SingleDeviceSharding(
jax.devices()[0], memory_kind="pinned_host"
)
@compute_on("device_host")
@jax.jit
def host_fn(x_in, y_in):
return x_in * x_in, y_in + y_in
def test_fn(x_in, y_in):
x_out, y_out = host_fn(x_in, y_in)
return x_out, y_out
x = jnp.arange(0, 1024, dtype=jnp.float32)
y = jnp.arange(0, 1024, dtype=jnp.float32)
y = jax.device_put(y, p_sharding)
x1 = jnp.arange(0, 1024, dtype=jnp.float32)
y1 = jnp.arange(0, 1024, dtype=jnp.float32)
jit_fn = jax.jit(
test_fn,
in_shardings=(sharding, p_sharding),
out_shardings=(sharding, p_sharding),
donate_argnums=(0, 1),
)
x_out, y_out = jit_fn(x, y)
self.assertArraysEqual(x_out, x1 * x1)
self.assertArraysEqual(y_out, y1 + y1)
def test_compute_offload_with_linear_layout(self):
if jtu.test_device_matches(["gpu"]):
self.skipTest("GPU does not support tiling.")
sharding = jax.sharding.SingleDeviceSharding(jax.devices()[0])
p_sharding = jax.sharding.SingleDeviceSharding(
jax.devices()[0], memory_kind="pinned_host"
)
@compute_on("device_host")
@jax.jit
def host_fn(x_in, y_in):
return x_in * x_in, y_in + y_in
def test_fn(x_in, y_in):
x_out, y_out = host_fn(x_in, y_in)
return x_out, y_out
x = jnp.arange(0, 1024, dtype=jnp.float32)
x = jnp.reshape(x, (16, 64))
y = jnp.arange(0, 1024, dtype=jnp.float32)
y = jnp.reshape(y, (16, 64))
custom_dll = DLL(major_to_minor=(0, 1), tiling=((8, 128),))
custom_dll_linear = DLL(major_to_minor=(0, 1), tiling=((1,),))
x = jax.device_put(x, Format(custom_dll, sharding))
y = jax.device_put(y, Format(custom_dll_linear, p_sharding))
x1 = jnp.arange(0, 1024, dtype=jnp.float32)
x1 = jnp.reshape(x1, (16, 64))
y1 = jnp.arange(0, 1024, dtype=jnp.float32)
y1 = jnp.reshape(y1, (16, 64))
jit_fn = jax.jit(
test_fn,
out_shardings=(
Format(custom_dll, sharding),
Format(custom_dll_linear, p_sharding),
),
)
x_out, y_out = jit_fn(x, y)
self.assertArraysEqual(x_out, x1 * x1)
self.assertArraysEqual(y_out, y1 + y1)
def test_compute_offload_mesh_with_linear_layout(self):
if jtu.test_device_matches(["gpu"]):
self.skipTest("GPU does not support tiling.")
mesh = jtu.create_mesh((2, 2), ("x", "y"))
sharding = NamedSharding(mesh, P("x", "y"))
p_sharding = NamedSharding(mesh, P("x", "y"), memory_kind="pinned_host")
@compute_on("device_host")
@jax.jit
def host_fn(x_in, y_in):
return x_in * x_in, y_in + y_in
def test_fn(x_in, y_in):
x_out, y_out = host_fn(x_in, y_in)
return x_out, y_out
x = jnp.arange(0, 2048, dtype=jnp.float32)
x = jnp.reshape(x, (32, 64))
y = jnp.arange(0, 2048, dtype=jnp.float32)
y = jnp.reshape(y, (32, 64))
custom_dll = DLL(major_to_minor=(0, 1), tiling=((8, 128),))
custom_dll_linear = DLL(major_to_minor=(0, 1), tiling=((1,),))
x = jax.device_put(x, Format(custom_dll, sharding))
y = jax.device_put(y, Format(custom_dll_linear, p_sharding))
x1 = jnp.arange(0, 2048, dtype=jnp.float32)
x1 = jnp.reshape(x1, (32, 64))
y1 = jnp.arange(0, 2048, dtype=jnp.float32)
y1 = jnp.reshape(y1, (32, 64))
jit_fn = jax.jit(
test_fn,
out_shardings=(
Format(custom_dll, sharding),
Format(custom_dll_linear, p_sharding),
),
)
x_out, y_out = jit_fn(x, y)
self.assertArraysEqual(x_out, x1 * x1)
self.assertArraysEqual(y_out, y1 + y1)
def test_indexing_on_host(self):
@jax.jit
@compute_on("device_host")
def fn2(x):
x = jax.device_put(x, jax.memory.Space.Host)
y = jnp.ones((2, 1, 4))
y = jax.device_put(y, jax.memory.Space.Host)
z = x.at[:, 1:2, :].set(y)
return z
x_host = jax.device_put(jnp.ones((2,3,4)), jax.memory.Space.Host)
fn2(x_host) # doesn't crash
def test_compute_on_cache_miss(self):
@jax.jit
def f(x):
return x * 2
inp = jnp.arange(10)
with jtu.count_jit_tracing_cache_miss() as count:
with compute_on('device_host'):
f(inp)
with compute_on('device'):
f(inp)
# 2 for `f` and `2` for `mul` (compute type changes for `mul`)
self.assertEqual(count(), 4)
def test_compute_on_aot(self):
operand = np.float32(0.)
@jax.jit
@compute_on("device_host")
def f_host(x):
# Adds 1 on CPU and adds 2 on other platforms
return jax.lax.platform_dependent(x,
cpu=lambda x: x + 1.,
default=lambda x: x + 2.)
self.assertAllClose(jnp.float32(1.0), f_host(operand))
self.assertAllClose(
jnp.float32(1.0), f_host.lower(operand).compile()(operand)
)
def test_offload_take_host(self):
@compute_on('device_host')
@jax.jit
def peer_forward(x, experts, indices, scores):
w = jnp.take(experts, indices.astype(int), axis=0)
w_gate, w_down, w_up = w[..., 0], w[..., 1], w[..., 2]
g = jnp.einsum('btd, bthkd->bthk', x, w_gate)
x = jnp.einsum('btd, bthkd->bthk', x, w_down)
x = x * jax.nn.gelu(g) * scores
return jnp.einsum('bthk, bthkd->btd', x, w_up)
x = jnp.ones((16, 4, 32))
experts = jnp.ones((128, 32, 3))
indices = jnp.ones((16, 4, 4, 2), dtype=jnp.int32)
scores = jnp.ones((16, 4, 4, 2))
jax.jit(peer_forward)(x, experts, indices, scores) # doesn't crash
def test_int4_host_compute(self):
@compute_on("device_host")
@jax.jit
def g(x):
return x + x
@jax.jit
def f(x):
y = g(x)
return 2 * y
inp = jnp.arange(4, dtype=jnp.uint4)
out = f(inp)
self.assertArraysEqual(out, 4 * inp)
lowered_text = f.lower(inp).as_text()
self.assertIn("_xla_compute_type", lowered_text)
def test_sparsecore_unsupported_gather(self):
if not (
jax.devices()[0].device_kind == "TPU v5"
or jtu.is_device_tpu_at_least(6)
):
self.skipTest("Does not have a sparsecore present")
dnums = jax.lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)
)
slice_sizes = (1, 3)
@compute_on("tpu_sparsecore")
@jax.jit
def f_sc(operand, indices):
return jax.lax.gather(operand, indices, dnums, slice_sizes)
inputs = (
np.linspace(0, 1, 10 * 5).reshape(10, 5),
np.array([[4, 2], [3, 2]]),
)
unsupported_gather = False
error_msg = None
try:
jax.jit(f_sc).lower(*inputs).compile()
except jax.errors.JaxRuntimeError as e:
unsupported_gather = True
error_msg = str(e)
self.assertTrue(unsupported_gather)
self.assertIn("UNIMPLEMENTED", error_msg)
def test_sparsecore_supported_gather(self):
if not (
jax.devices()[0].device_kind == "TPU v5"
or jtu.is_device_tpu_at_least(6)
):
self.skipTest("Does not have a sparsecore present")
dnums = jax.lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)
)
slice_sizes = (1, 128)
@jax.jit
def f_tc(operand, indices):
return jax.lax.gather(operand, indices, dnums, slice_sizes)
@compute_on("tpu_sparsecore")
@jax.jit
def f_sc(operand, indices):
return jax.lax.gather(operand, indices, dnums, slice_sizes)
inputs = (
np.linspace(0, 1, 122479 * 128).reshape(122479, 128),
np.random.randint(2, size=32768).reshape(32768, 1),
)
self.assertAllClose(f_tc(*inputs), f_sc(*inputs))
compiled_f_sc = jax.jit(f_sc).lower(*inputs).compile()
compiled_text = compiled_f_sc.as_text()
self.assertIn('async_execution_thread="sparsecore"', compiled_text)
def test_sparsecore_unsupported_scatter(self):
if not (
jax.devices()[0].device_kind == "TPU v5"
or jtu.is_device_tpu_at_least(6)
):
self.skipTest("Does not have a sparsecore present")
dnums = jax.lax.ScatterDimensionNumbers(
update_window_dims=(),
inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,),
)
@compute_on("tpu_sparsecore")
@jax.jit
def f_sc(operand, indices, updates):
return jax.lax.scatter(operand, indices, updates, dnums)
inputs = (
np.linspace(0, 1, 15677312).reshape(15677312),
np.random.randint(15677312, size=524288).reshape(524288, 1),
np.linspace(0, 1, 524288).reshape(524288),
)
unsupported_scatter = False
error_msg = None
try:
jax.jit(f_sc).lower(*inputs).compile()
except jax.errors.JaxRuntimeError as e:
unsupported_scatter = True
error_msg = str(e)
self.assertTrue(unsupported_scatter)
self.assertIn("UNIMPLEMENTED", error_msg)
def test_sparsecore_supported_scatter(self):
if not (
jax.devices()[0].device_kind == "TPU v5"
or jtu.is_device_tpu_at_least(6)
):
self.skipTest("Does not have a sparsecore present")
dnums = jax.lax.ScatterDimensionNumbers(
update_window_dims=(),
inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,),
)
@jax.jit
def f_tc(operand, indices, updates):
return jax.lax.scatter_add(operand, indices, updates, dnums)
@compute_on("tpu_sparsecore")
@jax.jit
def f_sc(operand, indices, updates):
return jax.lax.scatter_add(operand, indices, updates, dnums)
inputs = (
np.linspace(0, 1, 15677312).reshape(15677312),
np.random.randint(15677312, size=524288).reshape(524288, 1),
np.linspace(0, 1, 524288).reshape(524288),
)
self.assertAllClose(f_tc(*inputs), f_sc(*inputs))
compiled_f_sc = jax.jit(f_sc).lower(*inputs).compile()
compiled_text = compiled_f_sc.as_text()
self.assertIn('async_execution_thread="sparsecore"', compiled_text)
| ComputeOffload |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/metrics/data_profiler_metrics/data_profiler_profile_report.py | {
"start": 394,
"end": 1521
} | class ____(DataProfilerProfileMetricProvider):
metric_name = "data_profiler.profile_report"
value_keys = ("profile_path",)
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine,
metric_domain_kwargs,
metric_value_kwargs,
metrics,
runtime_configuration,
):
profile_path = metric_value_kwargs["profile_path"]
try:
profile: dp.profilers.profile_builder.BaseProfiler = dp.Profiler.load(profile_path)
profile_report = profile.report(report_options={"output_format": "serializable"})
profile_report["global_stats"]["profile_schema"] = dict(
profile_report["global_stats"]["profile_schema"]
)
return profile_report
except FileNotFoundError:
raise ValueError( # noqa: TRY003
"'profile_path' does not point to a valid DataProfiler stored profile."
)
except Exception as e:
raise gx_exceptions.MetricError(
message=str(e),
) from e
| DataProfilerProfileReport |
python | cython__cython | Cython/Coverage.py | {
"start": 15605,
"end": 16907
} | class ____(FileTracer):
"""
Find the Python/Cython source file for a Cython module.
"""
def __init__(self, module_file, py_file, c_file, c_files_map, file_path_map):
super().__init__()
self.module_file = module_file
self.py_file = py_file
self.c_file = c_file
self._c_files_map = c_files_map
self._file_path_map = file_path_map
def has_dynamic_source_filename(self):
return True
def dynamic_source_filename(self, filename, frame):
"""
Determine source file path. Called by the function call tracer.
"""
source_file = frame.f_code.co_filename
try:
return self._file_path_map[source_file]
except KeyError:
pass
abs_path = _find_dep_file_path(filename, source_file)
if self.py_file and source_file[-3:].lower() == '.py':
# always let coverage.py handle this case itself
self._file_path_map[source_file] = self.py_file
return self.py_file
assert self._c_files_map is not None
if abs_path not in self._c_files_map:
self._c_files_map[abs_path] = (self.c_file, source_file, None)
self._file_path_map[source_file] = abs_path
return abs_path
| CythonModuleTracer |
python | pyinstaller__pyinstaller | PyInstaller/utils/hooks/gi.py | {
"start": 950,
"end": 19112
} | class ____:
def __init__(self, module, version, hook_api=None):
self.name = module
self.version = version
self.available = False
self.sharedlibs = []
self.typelib = None
self.dependencies = []
# If hook API is available, use it to override the version from hookconfig.
if hook_api is not None:
module_versions = get_hook_config(hook_api, 'gi', 'module-versions')
if module_versions:
self.version = module_versions.get(module, version)
logger.debug("Gathering GI module info for %s %s", module, self.version)
@isolated.decorate
def _get_module_info(module, version):
import gi
# Ideally, we would use gi.Repository, which provides common abstraction for some of the functions we use in
# this codepath (e.g., `require`, `get_typelib_path`, `get_immediate_dependencies`). However, it lacks the
# `get_shared_library` function, which is why we are using "full" bindings via `gi.repository.GIRepository`.
#
# PyGObject 3.52.0 switched from girepository-1.0 to girepository-2.0, which means that GIRepository version
# has changed from 2.0 to 3.0 and some of the API has changed.
try:
gi.require_version("GIRepository", "3.0")
new_api = True
except ValueError:
gi.require_version("GIRepository", "2.0")
new_api = False
from gi.repository import GIRepository
# The old API had `get_default` method to obtain global singleton object; it was removed in the new API,
# which requires creation of separate GIRepository instances.
if new_api:
repo = GIRepository.Repository()
try:
repo.require(module, version, GIRepository.RepositoryLoadFlags.LAZY)
except ValueError:
return None # Module not available
# The new API returns the list of shared libraries.
sharedlibs = repo.get_shared_libraries(module)
else:
repo = GIRepository.Repository.get_default()
try:
repo.require(module, version, GIRepository.RepositoryLoadFlags.IREPOSITORY_LOAD_FLAG_LAZY)
except ValueError:
return None # Module not available
# Shared library/libraries
# Comma-separated list of paths to shared libraries, or None if none are associated. Convert to list.
sharedlibs = repo.get_shared_library(module)
sharedlibs = [lib.strip() for lib in sharedlibs.split(",")] if sharedlibs else []
# Path to .typelib file
typelib = repo.get_typelib_path(module)
# Dependencies
# GIRepository.Repository.get_immediate_dependencies is available from gobject-introspection v1.44 on
if hasattr(repo, 'get_immediate_dependencies'):
dependencies = repo.get_immediate_dependencies(module)
else:
dependencies = repo.get_dependencies(module)
return {
'sharedlibs': sharedlibs,
'typelib': typelib,
'dependencies': dependencies,
}
# Try to query information; if this fails, mark module as unavailable.
try:
info = _get_module_info(module, self.version)
if info is None:
logger.debug("GI module info %s %s not found.", module, self.version)
else:
logger.debug("GI module info %s %s found.", module, self.version)
self.sharedlibs = info['sharedlibs']
self.typelib = info['typelib']
self.dependencies = info['dependencies']
self.available = True
except Exception as e:
logger.warning("Failed to query GI module %s %s: %s", module, self.version, e)
def get_libdir(self):
"""
Return the path to shared library used by the module. If no libraries are associated with the typelib, None is
returned. If multiple library names are associated with the typelib, the path to the first resolved shared
library is returned. Raises exception if module is unavailable or none of the shared libraries could be
resolved.
"""
# Module unavailable
if not self.available:
raise ValueError(f"Module {self.name} {self.version} is unavailable!")
# Module has no associated shared libraries
if not self.sharedlibs:
return None
for lib in self.sharedlibs:
path = findSystemLibrary(lib)
if path:
return os.path.normpath(os.path.dirname(path))
raise ValueError(f"Could not resolve any shared library of {self.name} {self.version}: {self.sharedlibs}!")
def collect_typelib_data(self):
"""
Return a tuple of (binaries, datas, hiddenimports) to be used by PyGObject related hooks.
"""
datas = []
binaries = []
hiddenimports = []
logger.debug("Collecting module data for %s %s", self.name, self.version)
# Module unavailable
if not self.available:
raise ValueError(f"Module {self.name} {self.version} is unavailable!")
# Find shared libraries
resolved_libs = _resolveCtypesImports(self.sharedlibs)
for resolved_lib in resolved_libs:
logger.debug("Collecting shared library %s at %s", resolved_lib[0], resolved_lib[1])
binaries.append((resolved_lib[1], "."))
# Find and collect .typelib file. Run it through the `gir_library_path_fix` to fix the library path, if
# necessary.
typelib_entry = gir_library_path_fix(self.typelib)
if typelib_entry:
logger.debug('Collecting gir typelib at %s', typelib_entry[0])
datas.append(typelib_entry)
# Overrides for the module
hiddenimports += collect_submodules('gi.overrides', lambda name: name.endswith('.' + self.name))
# Module dependencies
for dep in self.dependencies:
dep_module, _ = dep.rsplit('-', 1)
hiddenimports += [f'gi.repository.{dep_module}']
return binaries, datas, hiddenimports
# The old function, provided for backwards compatibility in 3rd party hooks.
def get_gi_libdir(module, version):
module_info = GiModuleInfo(module, version)
return module_info.get_libdir()
# The old function, provided for backwards compatibility in 3rd party hooks.
def get_gi_typelibs(module, version):
"""
Return a tuple of (binaries, datas, hiddenimports) to be used by PyGObject related hooks. Searches for and adds
dependencies recursively.
:param module: GI module name, as passed to 'gi.require_version()'
:param version: GI module version, as passed to 'gi.require_version()'
"""
module_info = GiModuleInfo(module, version)
return module_info.collect_typelib_data()
def gir_library_path_fix(path):
import subprocess
# 'PyInstaller.config' cannot be imported as other top-level modules.
from PyInstaller.config import CONF
path = os.path.abspath(path)
# On macOS we need to recompile the GIR files to reference the loader path,
# but this is not necessary on other platforms.
if compat.is_darwin:
# If using a virtualenv, the base prefix and the path of the typelib
# have really nothing to do with each other, so try to detect that.
common_path = os.path.commonprefix([compat.base_prefix, path])
if common_path == '/':
logger.debug("virtualenv detected? fixing the gir path...")
common_path = os.path.abspath(os.path.join(path, '..', '..', '..'))
gir_path = os.path.join(common_path, 'share', 'gir-1.0')
typelib_name = os.path.basename(path)
gir_name = os.path.splitext(typelib_name)[0] + '.gir'
gir_file = os.path.join(gir_path, gir_name)
if not os.path.exists(gir_path):
logger.error(
"Unable to find gir directory: %s.\nTry installing your platform's gobject-introspection package.",
gir_path
)
return None
if not os.path.exists(gir_file):
logger.error(
"Unable to find gir file: %s.\nTry installing your platform's gobject-introspection package.", gir_file
)
return None
with open(gir_file, 'r', encoding='utf-8') as f:
lines = f.readlines()
# GIR files are `XML encoded <https://developer.gnome.org/gi/stable/gi-gir-reference.html>`_,
# which means they are by definition encoded using UTF-8.
with open(os.path.join(CONF['workpath'], gir_name), 'w', encoding='utf-8') as f:
for line in lines:
if 'shared-library' in line:
split = re.split('(=)', line)
files = re.split('(["|,])', split[2])
for count, item in enumerate(files):
if 'lib' in item:
files[count] = '@loader_path/' + os.path.basename(item)
line = ''.join(split[0:2]) + ''.join(files)
f.write(line)
# g-ir-compiler expects a file so we cannot just pipe the fixed file to it.
command = subprocess.Popen((
'g-ir-compiler', os.path.join(CONF['workpath'], gir_name),
'-o', os.path.join(CONF['workpath'], typelib_name)
)) # yapf: disable
command.wait()
return os.path.join(CONF['workpath'], typelib_name), 'gi_typelibs'
else:
return path, 'gi_typelibs'
@isolated.decorate
def get_glib_system_data_dirs():
import gi
gi.require_version('GLib', '2.0')
from gi.repository import GLib
return GLib.get_system_data_dirs()
def get_glib_sysconf_dirs():
"""
Try to return the sysconf directories (e.g., /etc).
"""
if compat.is_win:
# On Windows, if you look at gtkwin32.c, sysconfdir is actually relative to the location of the GTK DLL. Since
# that is what we are actually interested in (not the user path), we have to do that the hard way...
return [os.path.join(get_gi_libdir('GLib', '2.0'), 'etc')]
@isolated.call
def data_dirs():
import gi
gi.require_version('GLib', '2.0')
from gi.repository import GLib
return GLib.get_system_config_dirs()
return data_dirs
def collect_glib_share_files(*path):
"""
Path is relative to the system data directory (e.g., /usr/share).
"""
glib_data_dirs = get_glib_system_data_dirs()
if glib_data_dirs is None:
return []
destdir = os.path.join('share', *path)
# TODO: will this return too much?
collected = []
for data_dir in glib_data_dirs:
p = os.path.join(data_dir, *path)
collected += collect_system_data_files(p, destdir=destdir, include_py_files=False)
return collected
def collect_glib_etc_files(*path):
"""
Path is relative to the system config directory (e.g., /etc).
"""
glib_config_dirs = get_glib_sysconf_dirs()
if glib_config_dirs is None:
return []
destdir = os.path.join('etc', *path)
# TODO: will this return too much?
collected = []
for config_dir in glib_config_dirs:
p = os.path.join(config_dir, *path)
collected += collect_system_data_files(p, destdir=destdir, include_py_files=False)
return collected
_glib_translations = None
def collect_glib_translations(prog, lang_list=None):
"""
Return a list of translations in the system locale directory whose names equal prog.mo.
"""
global _glib_translations
if _glib_translations is None:
if lang_list is not None:
trans = []
for lang in lang_list:
trans += collect_glib_share_files(os.path.join("locale", lang))
_glib_translations = trans
else:
_glib_translations = collect_glib_share_files('locale')
names = [os.sep + prog + '.mo', os.sep + prog + '.po']
namelen = len(names[0])
return [(src, dst) for src, dst in _glib_translations if src[-namelen:] in names]
# Not a hook utility function per-se (used by main Analysis class), but kept here to have all GLib/GObject functions
# in one place...
def compile_glib_schema_files(datas_toc, workdir, collect_source_files=False):
    """
    Compile collected GLib schema files. Extracts the list of GLib schema files from the given input datas TOC, copies
    them to temporary working directory, and compiles them. The resulting `gschemas.compiled` file is added to the
    output TOC, replacing any existing entry with that name. If `collect_source_files` flag is set, the source XML
    schema files are also (re)added to the output TOC; by default, they are not. This function is no-op (returns the
    original TOC) if no GLib schemas are found in TOC or if `glib-compile-schemas` executable is not found in `PATH`.

    :param datas_toc: input TOC of (dest_name, src_name, typecode) data entries.
    :param workdir: temporary working directory in which schemas are copied and compiled.
    :param collect_source_files: whether to re-add the source XML schema files to the output TOC.
    :return: output TOC; either a new list with the compiled schema entry, or the original ``datas_toc`` on no-op.
    """
    SCHEMA_DEST_DIR = pathlib.PurePath("share/glib-2.0/schemas")

    workdir = pathlib.Path(workdir)

    # Split the input TOC into schema files (collected for compilation) and everything else (passed through).
    schema_files = []
    output_toc = []
    for toc_entry in datas_toc:
        dest_name, src_name, typecode = toc_entry
        dest_name = pathlib.PurePath(dest_name)
        src_name = pathlib.PurePath(src_name)

        # Pass-through for non-schema files, identified based on the destination directory.
        if dest_name.parent != SCHEMA_DEST_DIR:
            output_toc.append(toc_entry)
            continue

        # It seems schemas directory contains different files with different suffices:
        #  - .gschema.xml
        #  - .schema.override
        #  - .enums.xml
        # To avoid omitting anything, simply collect everything into temporary directory.
        # Exemptions are gschema.dtd (which should be unnecessary) and gschemas.compiled (which we will generate
        # ourselves in this function).
        if src_name.name in {"gschema.dtd", "gschemas.compiled"}:
            continue

        schema_files.append(src_name)

    # If there are no schema files available, simply return the input datas TOC.
    if not schema_files:
        return datas_toc

    # Ensure that `glib-compile-schemas` executable is in PATH, just in case...
    schema_compiler_exe = shutil.which('glib-compile-schemas')
    if not schema_compiler_exe:
        logger.warning("GLib schema compiler (glib-compile-schemas) not found! Skipping GLib schema recompilation...")
        return datas_toc

    # If `gschemas.compiled` file already exists in the temporary working directory, record its modification time and
    # hash. This will allow us to restore the modification time on the newly-compiled copy, if the latter turns out
    # to be identical to the existing old one. Just in case, if the file becomes subject to timestamp-based caching
    # mechanism.
    compiled_file = workdir / "gschemas.compiled"
    old_compiled_file_hash = None
    old_compiled_file_stat = None
    if compiled_file.is_file():
        # Record creation/modification time
        old_compiled_file_stat = compiled_file.stat()
        # Compute SHA1 hash; since compiled schema files are relatively small, do it in single step.
        old_compiled_file_hash = hashlib.sha1(compiled_file.read_bytes()).digest()

    # Ensure that temporary working directory exists, and is empty.
    if workdir.exists():
        shutil.rmtree(workdir)
    # Use parents=True so that the call does not fail when workdir's parent directories do not exist yet (e.g., on
    # the first build, before the build directory tree has been fully created).
    workdir.mkdir(parents=True, exist_ok=True)

    # Copy schema (source) files to temporary working directory
    for schema_file in schema_files:
        shutil.copy(schema_file, workdir)

    # Compile. The glib-compile-schema might produce warnings on its own (e.g., schemas using deprecated paths, or
    # overrides for non-existent keys). Since these are non-actionable, capture and display them only as a DEBUG
    # message, or as a WARNING one if the command fails.
    logger.info("Compiling collected GLib schema files in %r...", str(workdir))
    try:
        cmd_args = [schema_compiler_exe, str(workdir), '--targetdir', str(workdir)]
        p = subprocess.run(
            cmd_args,
            stdin=subprocess.DEVNULL,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            check=True,
            errors='ignore',
            encoding='utf-8',
        )
        logger.debug("Output from glib-compile-schemas:\n%s", p.stdout)
    except subprocess.CalledProcessError as e:
        # The called glib-compile-schema returned error. Display stdout/stderr, and return original datas TOC to
        # minimize damage.
        logger.warning("Failed to recompile GLib schemas! Returning collected files as-is!", exc_info=True)
        logger.warning("Output from glib-compile-schemas:\n%s", e.stdout)
        return datas_toc
    except Exception:
        # Compilation failed for whatever reason. Return original datas TOC to minimize damage.
        logger.warning("Failed to recompile GLib schemas! Returning collected files as-is!", exc_info=True)
        return datas_toc

    # Compute the checksum of the new compiled file, and if it matches the old checksum, restore the modification time.
    if old_compiled_file_hash is not None:
        new_compiled_file_hash = hashlib.sha1(compiled_file.read_bytes()).digest()
        if new_compiled_file_hash == old_compiled_file_hash:
            os.utime(compiled_file, ns=(old_compiled_file_stat.st_atime_ns, old_compiled_file_stat.st_mtime_ns))

    # Add the resulting gschemas.compiled file to the output TOC
    output_toc.append((str(SCHEMA_DEST_DIR / compiled_file.name), str(compiled_file), "DATA"))

    # Include source schema files in the output TOC (optional)
    if collect_source_files:
        for schema_file in schema_files:
            output_toc.append((str(SCHEMA_DEST_DIR / schema_file.name), str(schema_file), "DATA"))

    return output_toc
| GiModuleInfo |
python | huggingface__transformers | src/transformers/models/aria/modeling_aria.py | {
"start": 34696,
"end": 36276
} | class ____(ModelOutput):
r"""
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
Language modeling loss (for next-token prediction).
logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
past_key_values (`Cache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.Cache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks) that can be used (see
`past_key_values` input) to speed up sequential decoding.
image_hidden_states (`torch.FloatTensor`, *optional*):
A `torch.FloatTensor` of size `(batch_size, num_images, sequence_length, hidden_size)`.
image_hidden_states of the model produced by the vision encoder and after projecting the last hidden state.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[Cache] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
image_hidden_states: Optional[torch.FloatTensor] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for Aria outputs, with hidden states and attentions.
"""
)
| AriaCausalLMOutputWithPast |
python | pytest-dev__pytest | testing/test_assertrewrite.py | {
"start": 70563,
"end": 72560
} | class ____:
@pytest.mark.parametrize(
"prefix, source, expected",
[
("c:/tmp/pycs", "d:/projects/src/foo.py", "c:/tmp/pycs/projects/src"),
(None, "d:/projects/src/foo.py", "d:/projects/src/__pycache__"),
("/tmp/pycs", "/home/projects/src/foo.py", "/tmp/pycs/home/projects/src"),
(None, "/home/projects/src/foo.py", "/home/projects/src/__pycache__"),
],
)
def test_get_cache_dir(self, monkeypatch, prefix, source, expected) -> None:
monkeypatch.delenv("PYTHONPYCACHEPREFIX", raising=False)
monkeypatch.setattr(sys, "pycache_prefix", prefix, raising=False)
assert get_cache_dir(Path(source)) == Path(expected)
def test_sys_pycache_prefix_integration(
self, tmp_path, monkeypatch, pytester: Pytester
) -> None:
"""Integration test for sys.pycache_prefix (#4730)."""
pycache_prefix = tmp_path / "my/pycs"
monkeypatch.setattr(sys, "pycache_prefix", str(pycache_prefix))
monkeypatch.setattr(sys, "dont_write_bytecode", False)
pytester.makepyfile(
**{
"src/test_foo.py": """
import bar
def test_foo():
pass
""",
"src/bar/__init__.py": "",
}
)
result = pytester.runpytest()
assert result.ret == 0
test_foo = pytester.path.joinpath("src/test_foo.py")
bar_init = pytester.path.joinpath("src/bar/__init__.py")
assert test_foo.is_file()
assert bar_init.is_file()
# test file: rewritten, custom pytest cache tag
test_foo_pyc = get_cache_dir(test_foo) / ("test_foo" + PYC_TAIL)
assert test_foo_pyc.is_file()
# normal file: not touched by pytest, normal cache tag
bar_init_pyc = (
get_cache_dir(bar_init) / f"__init__.{sys.implementation.cache_tag}.pyc"
)
assert bar_init_pyc.is_file()
| TestPyCacheDir |
python | pandas-dev__pandas | pandas/tests/tseries/offsets/test_halfyear.py | {
"start": 981,
"end": 7346
} | class ____:
def test_repr(self):
expected = "<HalfYearBegin: startingMonth=1>"
assert repr(HalfYearBegin()) == expected
expected = "<HalfYearBegin: startingMonth=3>"
assert repr(HalfYearBegin(startingMonth=3)) == expected
expected = "<HalfYearBegin: startingMonth=1>"
assert repr(HalfYearBegin(startingMonth=1)) == expected
def test_offset_corner_case(self):
# corner
offset = HalfYearBegin(n=-1, startingMonth=1)
assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)
offset_cases = []
offset_cases.append(
(
HalfYearBegin(startingMonth=1),
{
datetime(2007, 12, 1): datetime(2008, 1, 1),
datetime(2008, 1, 1): datetime(2008, 7, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 1): datetime(2008, 7, 1),
datetime(2008, 7, 1): datetime(2009, 1, 1),
datetime(2008, 7, 15): datetime(2009, 1, 1),
},
)
)
offset_cases.append(
(
HalfYearBegin(startingMonth=2),
{
datetime(2008, 1, 1): datetime(2008, 2, 1),
datetime(2008, 1, 31): datetime(2008, 2, 1),
datetime(2008, 1, 15): datetime(2008, 2, 1),
datetime(2008, 2, 29): datetime(2008, 8, 1),
datetime(2008, 3, 15): datetime(2008, 8, 1),
datetime(2008, 3, 31): datetime(2008, 8, 1),
datetime(2008, 4, 15): datetime(2008, 8, 1),
datetime(2008, 4, 30): datetime(2008, 8, 1),
},
)
)
offset_cases.append(
(
HalfYearBegin(startingMonth=1, n=0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 12, 1): datetime(2009, 1, 1),
datetime(2008, 2, 15): datetime(2008, 7, 1),
datetime(2008, 2, 29): datetime(2008, 7, 1),
datetime(2008, 3, 15): datetime(2008, 7, 1),
datetime(2008, 3, 31): datetime(2008, 7, 1),
datetime(2008, 4, 15): datetime(2008, 7, 1),
datetime(2008, 4, 30): datetime(2008, 7, 1),
datetime(2008, 7, 1): datetime(2008, 7, 1),
datetime(2008, 7, 15): datetime(2009, 1, 1),
},
)
)
offset_cases.append(
(
HalfYearBegin(startingMonth=1, n=-1),
{
datetime(2008, 1, 1): datetime(2007, 7, 1),
datetime(2008, 1, 31): datetime(2008, 1, 1),
datetime(2008, 2, 15): datetime(2008, 1, 1),
datetime(2008, 2, 29): datetime(2008, 1, 1),
datetime(2008, 3, 15): datetime(2008, 1, 1),
datetime(2008, 3, 31): datetime(2008, 1, 1),
datetime(2008, 4, 15): datetime(2008, 1, 1),
datetime(2008, 4, 30): datetime(2008, 1, 1),
datetime(2008, 7, 1): datetime(2008, 1, 1),
datetime(2008, 7, 15): datetime(2008, 7, 1),
},
)
)
offset_cases.append(
(
HalfYearBegin(startingMonth=1, n=2),
{
datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 2, 15): datetime(2009, 1, 1),
datetime(2008, 2, 29): datetime(2009, 1, 1),
datetime(2008, 3, 15): datetime(2009, 1, 1),
datetime(2008, 3, 31): datetime(2009, 1, 1),
datetime(2008, 4, 15): datetime(2009, 1, 1),
datetime(2008, 4, 1): datetime(2009, 1, 1),
datetime(2008, 7, 15): datetime(2009, 7, 1),
datetime(2008, 7, 1): datetime(2009, 7, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(HalfYearBegin(1, startingMonth=1), datetime(2008, 1, 1), True),
(HalfYearBegin(1, startingMonth=1), datetime(2007, 12, 1), False),
(HalfYearBegin(1, startingMonth=1), datetime(2008, 2, 1), False),
(HalfYearBegin(1, startingMonth=1), datetime(2007, 3, 1), False),
(HalfYearBegin(1, startingMonth=1), datetime(2008, 4, 1), False),
(HalfYearBegin(1, startingMonth=1), datetime(2008, 5, 1), False),
(HalfYearBegin(1, startingMonth=1), datetime(2007, 6, 1), False),
(HalfYearBegin(1, startingMonth=3), datetime(2008, 1, 1), False),
(HalfYearBegin(1, startingMonth=3), datetime(2007, 12, 1), False),
(HalfYearBegin(1, startingMonth=3), datetime(2008, 2, 1), False),
(HalfYearBegin(1, startingMonth=3), datetime(2007, 3, 1), True),
(HalfYearBegin(1, startingMonth=3), datetime(2008, 4, 1), False),
(HalfYearBegin(1, startingMonth=3), datetime(2008, 5, 1), False),
(HalfYearBegin(1, startingMonth=3), datetime(2008, 5, 2), False),
(HalfYearBegin(1, startingMonth=3), datetime(2007, 6, 1), False),
(HalfYearBegin(1, startingMonth=3), datetime(2007, 6, 2), False),
(HalfYearBegin(1, startingMonth=6), datetime(2008, 1, 1), False),
(HalfYearBegin(1, startingMonth=6), datetime(2007, 12, 1), True),
(HalfYearBegin(1, startingMonth=6), datetime(2008, 2, 1), False),
(HalfYearBegin(1, startingMonth=6), datetime(2007, 3, 1), False),
(HalfYearBegin(1, startingMonth=6), datetime(2007, 3, 2), False),
(HalfYearBegin(1, startingMonth=6), datetime(2008, 4, 1), False),
(HalfYearBegin(1, startingMonth=6), datetime(2008, 5, 1), False),
(HalfYearBegin(1, startingMonth=6), datetime(2008, 5, 2), False),
(HalfYearBegin(1, startingMonth=6), datetime(2007, 6, 1), True),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
| TestHalfYearBegin |
python | dagster-io__dagster | python_modules/libraries/dagster-tableau/dagster_tableau/translator.py | {
"start": 1727,
"end": 1961
} | class ____(Enum):
"""Enum representing each object in Tableau's ontology."""
WORKBOOK = "workbook"
SHEET = "sheet"
DASHBOARD = "dashboard"
DATA_SOURCE = "data_source"
@whitelist_for_serdes
@record
| TableauContentType |
python | getsentry__sentry-python | sentry_sdk/integrations/dramatiq.py | {
"start": 2716,
"end": 6564
} | class ____(Middleware): # type: ignore[misc]
"""
A Dramatiq middleware that automatically captures and sends
exceptions to Sentry.
This is automatically added to every instantiated broker via the
DramatiqIntegration.
"""
SENTRY_HEADERS_NAME = "_sentry_headers"
def before_enqueue(self, broker, message, delay):
# type: (Broker, Message[R], int) -> None
integration = sentry_sdk.get_client().get_integration(DramatiqIntegration)
if integration is None:
return
message.options[self.SENTRY_HEADERS_NAME] = {
BAGGAGE_HEADER_NAME: get_baggage(),
SENTRY_TRACE_HEADER_NAME: get_traceparent(),
}
def before_process_message(self, broker, message):
# type: (Broker, Message[R]) -> None
integration = sentry_sdk.get_client().get_integration(DramatiqIntegration)
if integration is None:
return
message._scope_manager = sentry_sdk.isolation_scope()
scope = message._scope_manager.__enter__()
scope.clear_breadcrumbs()
scope.set_extra("dramatiq_message_id", message.message_id)
scope.add_event_processor(_make_message_event_processor(message, integration))
sentry_headers = message.options.get(self.SENTRY_HEADERS_NAME) or {}
if "retries" in message.options:
# start new trace in case of retrying
sentry_headers = {}
transaction = continue_trace(
sentry_headers,
name=message.actor_name,
op=OP.QUEUE_TASK_DRAMATIQ,
source=TransactionSource.TASK,
origin=DramatiqIntegration.origin,
)
transaction.set_status(SPANSTATUS.OK)
sentry_sdk.start_transaction(
transaction,
name=message.actor_name,
op=OP.QUEUE_TASK_DRAMATIQ,
source=TransactionSource.TASK,
)
transaction.__enter__()
def after_process_message(self, broker, message, *, result=None, exception=None):
# type: (Broker, Message[R], Optional[Any], Optional[Exception]) -> None
integration = sentry_sdk.get_client().get_integration(DramatiqIntegration)
if integration is None:
return
actor = broker.get_actor(message.actor_name)
throws = message.options.get("throws") or actor.options.get("throws")
scope_manager = message._scope_manager
transaction = sentry_sdk.get_current_scope().transaction
if not transaction:
return None
is_event_capture_required = (
exception is not None
and not (throws and isinstance(exception, throws))
and not isinstance(exception, Retry)
)
if not is_event_capture_required:
# normal transaction finish
transaction.__exit__(None, None, None)
scope_manager.__exit__(None, None, None)
return
event, hint = event_from_exception(
exception, # type: ignore[arg-type]
client_options=sentry_sdk.get_client().options,
mechanism={
"type": DramatiqIntegration.identifier,
"handled": False,
},
)
sentry_sdk.capture_event(event, hint=hint)
# transaction error
transaction.__exit__(type(exception), exception, None)
scope_manager.__exit__(type(exception), exception, None)
def _make_message_event_processor(message, integration):
# type: (Message[R], DramatiqIntegration) -> Callable[[Event, Hint], Optional[Event]]
def inner(event, hint):
# type: (Event, Hint) -> Optional[Event]
with capture_internal_exceptions():
DramatiqMessageExtractor(message).extract_into_event(event)
return event
return inner
| SentryMiddleware |
python | huggingface__transformers | tests/models/bridgetower/test_modeling_bridgetower.py | {
"start": 3435,
"end": 5251
} | class ____:
def __init__(
self,
parent,
hidden_size=64,
initializer_factor=1,
layer_norm_eps=1e-05,
num_hidden_layers=2,
init_layernorm_from_vision_encoder=False,
output_hidden_states=False,
image_size=64,
):
self.parent = parent
self.hidden_size = hidden_size
self.initializer_factor = initializer_factor
self.layer_norm_eps = layer_norm_eps
self.num_hidden_layers = num_hidden_layers
self.init_layernorm_from_vision_encoder = init_layernorm_from_vision_encoder
self.num_channels = 3
self.num_image_features = 17
self.batch_size = 1
self.image_size = image_size
self.is_training = False
self.output_hidden_states = output_hidden_states
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
pixel_mask = random_attention_mask([self.batch_size, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values, pixel_mask
def get_config(self):
return BridgeTowerVisionConfig(
hidden_size=self.hidden_size,
initializer_factor=self.initializer_factor,
layer_norm_eps=self.layer_norm_eps,
num_hidden_layers=self.num_hidden_layers,
init_layernorm_from_vision_encoder=self.init_layernorm_from_vision_encoder,
num_channels=self.num_channels,
num_image_features=self.num_image_features,
batch_size=self.batch_size,
image_size=self.image_size,
is_training=self.is_training,
output_hidden_states=self.output_hidden_states,
)
| BridgeTowerImageModelTester |
python | mahmoud__boltons | boltons/socketutils.py | {
"start": 28230,
"end": 28346
} | class ____(Error):
"Base class for all of socketutils' Netstring exception types."
pass
| NetstringProtocolError |
python | fluentpython__example-code-2e | 10-dp-1class-func/untyped/strategy_param2.py | {
"start": 1854,
"end": 2068
} | class ____:
"""compute discount for order"""
def __init__(self, percent):
self.percent = percent
def __call__(self, order):
raise NotImplementedError("Subclass responsibility")
| Promotion |
python | astropy__astropy | astropy/io/votable/tree.py | {
"start": 12888,
"end": 14506
} | class ____:
"""
A base class for all classes that represent XML elements in the
VOTABLE file.
"""
_element_name = ""
_attr_list = []
def _add_unknown_tag(self, iterator, tag, data, config, pos):
warn_or_raise(W10, W10, tag, config, pos)
def _ignore_add(self, iterator, tag, data, config, pos):
warn_unknown_attrs(tag, data.keys(), config, pos)
def _add_definitions(self, iterator, tag, data, config, pos):
if config.get("version_1_1_or_later"):
warn_or_raise(W22, W22, (), config, pos)
warn_unknown_attrs(tag, data.keys(), config, pos)
def parse(self, iterator, config):
"""
For internal use. Parse the XML content of the children of the
element.
Parameters
----------
iterator : xml iterable
An iterator over XML elements as returned by
`~astropy.utils.xml.iterparser.get_xml_iterator`.
config : dict
The configuration dictionary that affects how certain
elements are read.
Returns
-------
self : `~astropy.io.votable.tree.Element`
Returns self as a convenience.
"""
raise NotImplementedError()
def to_xml(self, w, **kwargs):
"""
For internal use. Output the element to XML.
Parameters
----------
w : astropy.utils.xml.writer.XMLWriter object
An XML writer to write to.
**kwargs : dict
Any configuration parameters to control the output.
"""
raise NotImplementedError()
| Element |
python | sympy__sympy | sympy/core/tests/test_constructor_postprocessor.py | {
"start": 1015,
"end": 2441
} | class ____(SymbolRemovesOtherSymbols):
pass
def test_constructor_postprocessors1():
x = SymbolInMulOnce("x")
y = SymbolInMulOnce("y")
assert isinstance(3*x, Mul)
assert (3*x).args == (3, x)
assert x*x == x
assert 3*x*x == 3*x
assert 2*x*x + x == 3*x
assert x**3*y*y == x*y
assert x**5 + y*x**3 == x + x*y
w = SymbolRemovesOtherSymbols("w")
assert x*w == w
assert (3*w).args == (3, w)
assert set((w + x).args) == {x, w}
def test_constructor_postprocessors2():
x = SubclassSymbolInMulOnce("x")
y = SubclassSymbolInMulOnce("y")
assert isinstance(3*x, Mul)
assert (3*x).args == (3, x)
assert x*x == x
assert 3*x*x == 3*x
assert 2*x*x + x == 3*x
assert x**3*y*y == x*y
assert x**5 + y*x**3 == x + x*y
w = SubclassSymbolRemovesOtherSymbols("w")
assert x*w == w
assert (3*w).args == (3, w)
assert set((w + x).args) == {x, w}
@XFAIL
def test_subexpression_postprocessors():
# The postprocessors used to work with subexpressions, but the
# functionality was removed. See #15948.
a = symbols("a")
x = SymbolInMulOnce("x")
w = SymbolRemovesOtherSymbols("w")
assert 3*a*w**2 == 3*w**2
assert 3*a*x**3*w**2 == 3*w**2
x = SubclassSymbolInMulOnce("x")
w = SubclassSymbolRemovesOtherSymbols("w")
assert 3*a*w**2 == 3*w**2
assert 3*a*x**3*w**2 == 3*w**2
| SubclassSymbolRemovesOtherSymbols |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py | {
"start": 23528,
"end": 24137
} | class ____(
LegacyNamedTupleMixin,
MetadataValue["PythonArtifactMetadataValue"],
):
"""Container class for python artifact metadata entry data.
Args:
module (str): The module where the python artifact can be found
name (str): The name of the python artifact
"""
module: PublicAttr[str]
name: PublicAttr[str]
@public
@property
def value(self) -> Self:
"""PythonArtifactMetadataValue: Identity function."""
return self
@whitelist_for_serdes(storage_name="FloatMetadataEntryData")
@record(kw_only=False)
@public
| PythonArtifactMetadataValue |
python | getsentry__sentry | src/sentry/plugins/base/v2.py | {
"start": 548,
"end": 680
} | class ____(Protocol):
def __call__(self, data: MutableMapping[str, Any]) -> MutableMapping[str, Any] | None: ...
| EventPreprocessor |
python | scrapy__scrapy | scrapy/downloadermiddlewares/httpproxy.py | {
"start": 644,
"end": 3966
} | class ____:
def __init__(self, auth_encoding: str | None = "latin-1"):
self.auth_encoding: str | None = auth_encoding
self.proxies: dict[str, tuple[bytes | None, str]] = {}
for type_, url in getproxies().items():
try:
self.proxies[type_] = self._get_proxy(url, type_)
# some values such as '/var/run/docker.sock' can't be parsed
# by _parse_proxy and as such should be skipped
except ValueError:
continue
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
if not crawler.settings.getbool("HTTPPROXY_ENABLED"):
raise NotConfigured
auth_encoding: str | None = crawler.settings.get("HTTPPROXY_AUTH_ENCODING")
return cls(auth_encoding)
def _basic_auth_header(self, username: str, password: str) -> bytes:
user_pass = to_bytes(
f"{unquote(username)}:{unquote(password)}", encoding=self.auth_encoding
)
return base64.b64encode(user_pass)
def _get_proxy(self, url: str, orig_type: str) -> tuple[bytes | None, str]:
proxy_type, user, password, hostport = _parse_proxy(url)
proxy_url = urlunparse((proxy_type or orig_type, hostport, "", "", "", ""))
creds = self._basic_auth_header(user, password) if user else None
return creds, proxy_url
@_warn_spider_arg
def process_request(
self, request: Request, spider: Spider | None = None
) -> Request | Response | None:
creds, proxy_url, scheme = None, None, None
if "proxy" in request.meta:
if request.meta["proxy"] is not None:
creds, proxy_url = self._get_proxy(request.meta["proxy"], "")
elif self.proxies:
parsed = urlparse_cached(request)
_scheme = parsed.scheme
if (
# 'no_proxy' is only supported by http schemes
_scheme not in ("http", "https")
or (parsed.hostname and not proxy_bypass(parsed.hostname))
) and _scheme in self.proxies:
scheme = _scheme
creds, proxy_url = self.proxies[scheme]
self._set_proxy_and_creds(request, proxy_url, creds, scheme)
return None
def _set_proxy_and_creds(
self,
request: Request,
proxy_url: str | None,
creds: bytes | None,
scheme: str | None,
) -> None:
if scheme:
request.meta["_scheme_proxy"] = True
if proxy_url:
request.meta["proxy"] = proxy_url
elif request.meta.get("proxy") is not None:
request.meta["proxy"] = None
if creds:
request.headers[b"Proxy-Authorization"] = b"Basic " + creds
request.meta["_auth_proxy"] = proxy_url
elif "_auth_proxy" in request.meta:
if proxy_url != request.meta["_auth_proxy"]:
if b"Proxy-Authorization" in request.headers:
del request.headers[b"Proxy-Authorization"]
del request.meta["_auth_proxy"]
elif b"Proxy-Authorization" in request.headers:
if proxy_url:
request.meta["_auth_proxy"] = proxy_url
else:
del request.headers[b"Proxy-Authorization"]
| HttpProxyMiddleware |
python | apache__airflow | providers/opsgenie/src/airflow/providers/opsgenie/notifications/opsgenie.py | {
"start": 1229,
"end": 2673
} | class ____(BaseNotifier):
"""
This notifier allows you to post alerts to Opsgenie.
Accepts a connection that has an Opsgenie API key as the connection's password.
This notifier sets the domain to conn_id.host, and if not set will default
to ``https://api.opsgenie.com``.
Each Opsgenie API key can be pre-configured to a team integration.
You can override these defaults in this notifier.
.. seealso::
For more information on how to use this notifier, take a look at the guide:
:ref:`howto/notifier:OpsgenieNotifier`
:param payload: The payload necessary for creating an alert.
:param opsgenie_conn_id: Optional. The name of the Opsgenie connection to use. Default conn_id is opsgenie_default
"""
template_fields: Sequence[str] = ("payload",)
def __init__(
self,
*,
payload: CreateAlertPayload,
opsgenie_conn_id: str = "opsgenie_default",
) -> None:
super().__init__()
self.payload = payload
self.opsgenie_conn_id = opsgenie_conn_id
@cached_property
def hook(self) -> OpsgenieAlertHook:
"""Opsgenie alert Hook."""
return OpsgenieAlertHook(self.opsgenie_conn_id)
def notify(self, context: Context) -> None:
"""Call the OpsgenieAlertHook to post message."""
self.hook.get_conn().create_alert(self.payload)
send_opsgenie_notification = OpsgenieNotifier
| OpsgenieNotifier |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF033.py | {
"start": 658,
"end": 785
} | class ____:
def __post_init__(self, bar: int = 11, baz: Something[Whatever | None] = 11) -> None: ...
# RUF033
@dataclass
| Foo |
python | apache__airflow | providers/google/tests/unit/google/cloud/hooks/test_compute.py | {
"start": 25848,
"end": 38615
} | class ____:
def setup_method(self):
with mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.__init__",
new=mock_base_gcp_hook_default_project_id,
):
self.gce_hook = ComputeEngineHook(gcp_conn_id="test")
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
"airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete"
)
def test_start_instance(self, wait_for_operation_to_complete, get_conn, mock_project_id):
start_method = get_conn.return_value.instances.return_value.start
execute_method = start_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.start_instance(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
assert res is None
start_method.assert_called_once_with(instance="instance", project="example-project", zone="zone")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id="example-project", operation_name="operation_id", zone="zone"
)
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
"airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete"
)
def test_start_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
start_method = get_conn.return_value.instances.return_value.start
execute_method = start_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.start_instance(project_id="new-project", zone=GCE_ZONE, resource_id=GCE_INSTANCE)
assert res is None
start_method.assert_called_once_with(instance="instance", project="new-project", zone="zone")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id="new-project", operation_name="operation_id", zone="zone"
)
@mock.patch(
"airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
new_callable=PropertyMock,
return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
"airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete"
)
def test_stop_instance(self, wait_for_operation_to_complete, get_conn, mock_project_id):
stop_method = get_conn.return_value.instances.return_value.stop
execute_method = stop_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.stop_instance(
zone=GCE_ZONE,
resource_id=GCE_INSTANCE,
project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
assert res is None
stop_method.assert_called_once_with(instance="instance", project="example-project", zone="zone")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id="example-project", operation_name="operation_id", zone="zone"
)
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
"airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete"
)
def test_stop_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
stop_method = get_conn.return_value.instances.return_value.stop
execute_method = stop_method.return_value.execute
execute_method.return_value = {"name": "operation_id"}
wait_for_operation_to_complete.return_value = None
res = self.gce_hook.stop_instance(project_id="new-project", zone=GCE_ZONE, resource_id=GCE_INSTANCE)
assert res is None
stop_method.assert_called_once_with(instance="instance", project="new-project", zone="zone")
execute_method.assert_called_once_with(num_retries=5)
wait_for_operation_to_complete.assert_called_once_with(
project_id="new-project", operation_name="operation_id", zone="zone"
)
@mock.patch(
    "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
    new_callable=PropertyMock,
    return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
    "airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete"
)
def test_set_machine_type_instance(self, wait_for_operation_to_complete, get_conn, mock_project_id):
    """set_machine_type executes the request with retries and waits for the resulting operation."""
    mocked_execute = get_conn.return_value.instances.return_value.setMachineType.return_value.execute
    mocked_execute.return_value = {"name": "operation_id"}
    wait_for_operation_to_complete.return_value = None

    result = self.gce_hook.set_machine_type(
        body={},
        zone=GCE_ZONE,
        resource_id=GCE_INSTANCE,
        project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )

    assert result is None
    mocked_execute.assert_called_once_with(num_retries=5)
    wait_for_operation_to_complete.assert_called_once_with(
        project_id="example-project", operation_name="operation_id", zone="zone"
    )
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
    "airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete"
)
def test_set_machine_type_instance_overridden_project_id(self, wait_for_operation_to_complete, get_conn):
    """An explicitly passed ``project_id`` is used when waiting for the set_machine_type operation."""
    mocked_execute = get_conn.return_value.instances.return_value.setMachineType.return_value.execute
    mocked_execute.return_value = {"name": "operation_id"}
    wait_for_operation_to_complete.return_value = None

    result = self.gce_hook.set_machine_type(
        project_id="new-project", body={}, zone=GCE_ZONE, resource_id=GCE_INSTANCE
    )

    assert result is None
    mocked_execute.assert_called_once_with(num_retries=5)
    wait_for_operation_to_complete.assert_called_once_with(
        project_id="new-project", operation_name="operation_id", zone="zone"
    )
@mock.patch(
    "airflow.providers.google.common.hooks.base_google.GoogleBaseHook.project_id",
    new_callable=PropertyMock,
    return_value=GCP_PROJECT_ID_HOOK_UNIT_TEST,
)
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
    "airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete"
)
def test_patch_instance_group_manager(self, wait_for_operation_to_complete, get_conn, mock_project_id):
    """patch_instance_group_manager issues the expected patch request and waits for the operation."""
    mocked_patch = get_conn.return_value.instanceGroupManagers.return_value.patch
    mocked_execute = mocked_patch.return_value.execute
    mocked_execute.return_value = {"name": "operation_id"}
    wait_for_operation_to_complete.return_value = None

    result = self.gce_hook.patch_instance_group_manager(
        zone=GCE_ZONE,
        resource_id=GCE_INSTANCE_GROUP_MANAGER,
        body={},
        request_id=GCE_REQUEST_ID,
        project_id=GCP_PROJECT_ID_HOOK_UNIT_TEST,
    )

    assert result is None
    # The request id and body must be forwarded verbatim to the API client.
    mocked_patch.assert_called_once_with(
        body={},
        instanceGroupManager="instance_group_manager",
        project="example-project",
        requestId="request_id",
        zone="zone",
    )
    mocked_execute.assert_called_once_with(num_retries=5)
    wait_for_operation_to_complete.assert_called_once_with(
        operation_name="operation_id", project_id="example-project", zone="zone"
    )
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
    "airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._wait_for_operation_to_complete"
)
def test_patch_instance_group_manager_overridden_project_id(
    self, wait_for_operation_to_complete, get_conn
):
    """An explicitly passed ``project_id`` overrides the default for patch_instance_group_manager."""
    mocked_patch = get_conn.return_value.instanceGroupManagers.return_value.patch
    mocked_execute = mocked_patch.return_value.execute
    mocked_execute.return_value = {"name": "operation_id"}
    wait_for_operation_to_complete.return_value = None

    result = self.gce_hook.patch_instance_group_manager(
        project_id="new-project",
        zone=GCE_ZONE,
        resource_id=GCE_INSTANCE_GROUP_MANAGER,
        body={},
        request_id=GCE_REQUEST_ID,
    )

    assert result is None
    mocked_patch.assert_called_once_with(
        body={},
        instanceGroupManager="instance_group_manager",
        project="new-project",
        requestId="request_id",
        zone="zone",
    )
    mocked_execute.assert_called_once_with(num_retries=5)
    wait_for_operation_to_complete.assert_called_once_with(
        operation_name="operation_id", project_id="new-project", zone="zone"
    )
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
    "airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._check_global_operation_status"
)
def test_wait_for_operation_to_complete_no_zone(self, mock_operation_status, mock_get_conn):
    """Without a zone, _wait_for_operation_to_complete polls the *global* operation status."""
    fake_service = "test-service"
    target_project = "test-project"
    op_name = "test-operation"
    retries = self.gce_hook.num_retries

    # Success path: the first poll already reports DONE with no error attached.
    mock_get_conn.return_value = fake_service
    mock_operation_status.return_value = {"status": GceOperationStatus.DONE, "error": None}

    self.gce_hook._wait_for_operation_to_complete(
        project_id=target_project, operation_name=op_name, zone=None
    )

    mock_operation_status.assert_called_once_with(
        service=fake_service, operation_name=op_name, project_id=target_project, num_retries=retries
    )
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch(
    "airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._check_global_operation_status"
)
def test_wait_for_operation_to_complete_no_zone_error(self, mock_operation_status, mock_get_conn):
    """A DONE global operation that carries an error payload raises AirflowException."""
    mock_get_conn.return_value = "test-service"
    # Error path: status is DONE but the operation reports a failure.
    mock_operation_status.return_value = {
        "status": GceOperationStatus.DONE,
        "error": {"errors": "some nasty errors"},
        "httpErrorStatusCode": 400,
        "httpErrorMessage": "sample msg",
    }

    with pytest.raises(AirflowException):
        self.gce_hook._wait_for_operation_to_complete(
            project_id="test-project", operation_name="test-operation", zone=None
        )
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook.get_conn")
@mock.patch("airflow.providers.google.cloud.hooks.compute.ComputeEngineHook._check_zone_operation_status")
def test_wait_for_operation_to_complete_with_zone(self, mock_operation_status, mock_get_conn):
    """With a zone, _wait_for_operation_to_complete polls the *zonal* operation status."""
    fake_service = "test-service"
    target_project = "test-project"
    op_name = "test-operation"
    target_zone = "west-europe3"
    retries = self.gce_hook.num_retries

    # Success path: a single poll returns DONE with no error.
    mock_get_conn.return_value = fake_service
    mock_operation_status.return_value = {"status": GceOperationStatus.DONE, "error": None}

    self.gce_hook._wait_for_operation_to_complete(
        project_id=target_project, operation_name=op_name, zone=target_zone
    )

    # Note: the zonal checker is invoked positionally, unlike the global one.
    mock_operation_status.assert_called_once_with(fake_service, op_name, target_project, target_zone, retries)
| TestGcpComputeHookDefaultProjectId |
python | spack__spack | lib/spack/spack/vendor/jinja2/nodes.py | {
"start": 19822,
"end": 20227
} | class ____(Literal):
"""Any dict literal such as ``{1: 2, 3: 4}``. The items must be a list of
:class:`Pair` nodes.
"""
fields = ("items",)
items: t.List["Pair"]
def as_const(
self, eval_ctx: t.Optional[EvalContext] = None
) -> t.Dict[t.Any, t.Any]:
eval_ctx = get_eval_context(self, eval_ctx)
return dict(x.as_const(eval_ctx) for x in self.items)
| Dict |
python | bokeh__bokeh | src/bokeh/models/ui/menus.py | {
"start": 4396,
"end": 5521
} | class ____(UIElement):
""" An implicitly positioned panel containing a collection of items.
These items can include commands, checked items, dividers, etc.
"""
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
items = List(Either(Instance(MenuItem), Instance(DividerItem), Null), default=[], help="""
A collection of menu items representing the contents of this menu.
""")
reversed = Bool(default=False, help="""
Whether to keep the order of menu's items or reverse it.
""")
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| Menu |
python | great-expectations__great_expectations | tests/integration/metrics/column/test_distinct_values_count.py | {
"start": 508,
"end": 990
} | class ____:
@parameterize_batch_for_data_sources(
data_source_configs=ALL_DATA_SOURCES,
data=DATA_FRAME,
)
def test_distinct_values_count(self, batch_for_datasource: Batch) -> None:
metric = ColumnDistinctValuesCount(column=COLUMN_NAME)
metric_result = batch_for_datasource.compute_metrics(metric)
assert isinstance(metric_result, ColumnDistinctValuesCountResult)
assert metric_result.value == 3
| TestColumnDistinctValuesCount |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/hooks/kinesis.py | {
"start": 1108,
"end": 2126
} | class ____(_FirehoseHook):
"""
Interact with Amazon Kinesis Firehose.
Provide thick wrapper around :external+boto3:py:class:`boto3.client("firehose") <Firehose.Client>`.
:param delivery_stream: Name of the delivery stream
Additional arguments (such as ``aws_conn_id``) may be specified and
are passed down to the underlying AwsBaseHook.
.. seealso::
- :class:`airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook`
.. deprecated::
This hook was moved. Import from
:class:`airflow.providers.amazon.aws.hooks.firehose.FirehoseHook`
instead of kinesis.py
"""
def __init__(self, *args, **kwargs) -> None:
warnings.warn(
"Importing FirehoseHook from kinesis.py is deprecated "
"and will be removed in a future release. "
"Please import it from firehose.py instead.",
AirflowProviderDeprecationWarning,
stacklevel=2,
)
super().__init__(*args, **kwargs)
| FirehoseHook |
python | getsentry__sentry | src/sentry/sentry_apps/api/endpoints/sentry_app_requests.py | {
"start": 1486,
"end": 4230
} | class ____(RegionSentryAppBaseEndpoint):
owner = ApiOwner.INTEGRATIONS
publish_status = {
"GET": ApiPublishStatus.UNKNOWN,
}
permission_classes = (SentryAppStatsPermission,)
def get(self, request: Request, sentry_app) -> Response:
"""
:qparam string eventType: Optionally specify a specific event type to filter requests
:qparam bool errorsOnly: If this is true, only return error/warning requests (300-599)
:qparam string organizationSlug: Optionally specify an org slug to filter requests
:qparam string start: Optionally specify a date to begin at. Format must be YYYY-MM-DD HH:MM:SS
:qparam string end: Optionally specify a date to end at. Format must be YYYY-MM-DD HH:MM:SS
"""
date_format = "%Y-%m-%d %H:%M:%S"
start_time: datetime = datetime.strptime("2000-01-01 00:00:00", date_format)
end_time: datetime = datetime.now()
event_type = request.GET.get("eventType")
errors_only = request.GET.get("errorsOnly")
org_slug = request.GET.get("organizationSlug")
start_parameter = request.GET.get("start", None)
end_parameter = request.GET.get("end", None)
try:
start_time = (
datetime.strptime(start_parameter, date_format) if start_parameter else start_time
)
except ValueError:
return Response({"detail": INVALID_DATE_FORMAT_MESSAGE}, status=400)
try:
end_time = datetime.strptime(end_parameter, date_format) if end_parameter else end_time
except ValueError:
return Response({"detail": INVALID_DATE_FORMAT_MESSAGE}, status=400)
kwargs: dict[Any, Any] = {}
if event_type:
if event_type not in EXTENDED_VALID_EVENTS:
return Response({"detail": "Invalid event type."}, status=400)
kwargs["event"] = event_type
if errors_only:
kwargs["errors_only"] = True
buffer = SentryAppWebhookRequestsBuffer(sentry_app)
organization = None
if org_slug:
try:
organization = Organization.objects.get(slug=org_slug)
except Organization.DoesNotExist:
return Response({"detail": "Invalid organization."}, status=400)
filtered_requests = []
for i, req in enumerate(buffer.get_requests(**kwargs)):
if filter_by_date(req, start_time, end_time) and filter_by_organization(
req, organization
):
filtered_requests.append(BufferedRequest(id=i, data=req))
return Response(serialize(filtered_requests, request.user, RequestSerializer(sentry_app)))
| SentryAppRequestsEndpoint |
python | sphinx-doc__sphinx | sphinx/util/logging.py | {
"start": 11073,
"end": 11345
} | class ____:
def __init__(self) -> None:
self.logs: list[logging.LogRecord] = []
@contextmanager
def collect(self) -> Iterator[None]:
with pending_logging() as memhandler:
yield
self.logs = memhandler.clear()
| LogCollector |
python | realpython__materials | python-314/linked_list.py | {
"start": 126,
"end": 185
} | class ____:
value: Any
next: Optional[Node] = None
| Node |
python | getsentry__sentry | src/sentry/auth/password_validation.py | {
"start": 3762,
"end": 5209
} | class ____:
"""
Validate whether a password has previously appeared in a data breach.
"""
def __init__(self, threshold: int = 1, timeout: float = 0.200) -> None:
self.threshold = threshold
self.timeout = timeout
def validate(self, password: str, user: User | None = None) -> None:
digest = sha1(password.encode("utf-8")).hexdigest().upper()
prefix = digest[:5]
suffix = digest[5:]
url = f"https://api.pwnedpasswords.com/range/{prefix}"
headers = {
"User-Agent": "Sentry @ {}".format(options.get("system.url-prefix")),
}
try:
r = requests.get(url, headers=headers, timeout=self.timeout)
except Exception as e:
logger.warning(
"Unable to fetch PwnedPasswords API",
extra={
"exception": str(e),
"prefix": prefix,
},
)
return
for line in r.text.split("\n"):
if ":" not in line:
continue
breached_suffix, occurrences = line.rstrip().split(":")
if breached_suffix == suffix:
if int(occurrences) >= self.threshold:
raise ValidationError(
f"This password has previously appeared in data breaches {occurrences} times."
)
break
| PwnedPasswordsValidator |
python | hynek__structlog | src/structlog/threadlocal.py | {
"start": 4209,
"end": 9212
} | class ____:
"""
Wrap a dict-like class and keep the state *global* but *thread-local*.
Attempts to re-initialize only updates the wrapped dictionary.
Useful for short-lived threaded applications like requests in web app.
Use :func:`wrap` to instantiate and use
:func:`structlog.BoundLogger.new` to clear the context.
"""
_tl: Any
_dict_class: type[dict[str, Any]]
def __init__(self, *args: Any, **kw: Any) -> None:
"""
We cheat. A context dict gets never recreated.
"""
if args and isinstance(args[0], self.__class__):
# our state is global, no need to look at args[0] if it's of our
# class
self._dict.update(**kw)
else:
self._dict.update(*args, **kw)
@property
def _dict(self) -> Context:
"""
Return or create and return the current context.
"""
try:
return self.__class__._tl.dict_
except AttributeError:
self.__class__._tl.dict_ = self.__class__._dict_class()
return self.__class__._tl.dict_
def __repr__(self) -> str:
return f"<{self.__class__.__name__}({self._dict!r})>"
def __eq__(self, other: object) -> bool:
# Same class == same dictionary
return self.__class__ == other.__class__
def __ne__(self, other: object) -> bool:
return not self.__eq__(other)
# Proxy methods necessary for structlog.
# Dunder methods don't trigger __getattr__ so we need to proxy by hand.
def __iter__(self) -> Iterator[str]:
return self._dict.__iter__()
def __setitem__(self, key: str, value: Any) -> None:
self._dict[key] = value
def __delitem__(self, key: str) -> None:
self._dict.__delitem__(key)
def __len__(self) -> int:
return self._dict.__len__()
def __getattr__(self, name: str) -> Any:
return getattr(self._dict, name)
_CONTEXT = threading.local()
def get_threadlocal() -> Context:
"""
Return a copy of the current thread-local context.
.. versionadded:: 21.2.0
.. deprecated:: 22.1.0
"""
_deprecated()
return _get_context().copy()
def get_merged_threadlocal(bound_logger: BindableLogger) -> Context:
"""
Return a copy of the current thread-local context merged with the context
from *bound_logger*.
.. versionadded:: 21.2.0
.. deprecated:: 22.1.0
"""
_deprecated()
ctx = _get_context().copy()
ctx.update(structlog.get_context(bound_logger))
return ctx
def merge_threadlocal(
logger: WrappedLogger, method_name: str, event_dict: EventDict
) -> EventDict:
"""
A processor that merges in a global (thread-local) context.
Use this as your first processor in :func:`structlog.configure` to ensure
thread-local context is included in all log calls.
.. versionadded:: 19.2.0
.. versionchanged:: 20.1.0
This function used to be called ``merge_threadlocal_context`` and that
name is still kept around for backward compatibility.
.. deprecated:: 22.1.0
"""
_deprecated()
context = _get_context().copy()
context.update(event_dict)
return context
# Alias that shouldn't be used anymore.
merge_threadlocal_context = merge_threadlocal
def clear_threadlocal() -> None:
"""
Clear the thread-local context.
The typical use-case for this function is to invoke it early in
request-handling code.
.. versionadded:: 19.2.0
.. deprecated:: 22.1.0
"""
_deprecated()
_CONTEXT.context = {}
def bind_threadlocal(**kw: Any) -> None:
"""
Put keys and values into the thread-local context.
Use this instead of :func:`~structlog.BoundLogger.bind` when you want some
context to be global (thread-local).
.. versionadded:: 19.2.0
.. deprecated:: 22.1.0
"""
_deprecated()
_get_context().update(kw)
def unbind_threadlocal(*keys: str) -> None:
"""
Tries to remove bound *keys* from threadlocal logging context if present.
.. versionadded:: 20.1.0
.. deprecated:: 22.1.0
"""
_deprecated()
context = _get_context()
for key in keys:
context.pop(key, None)
@contextlib.contextmanager
def bound_threadlocal(**kw: Any) -> Generator[None, None, None]:
"""
Bind *kw* to the current thread-local context. Unbind or restore *kw*
afterwards. Do **not** affect other keys.
Can be used as a context manager or decorator.
.. versionadded:: 21.4.0
.. deprecated:: 22.1.0
"""
_deprecated()
context = get_threadlocal()
saved = {k: context[k] for k in context.keys() & kw.keys()}
bind_threadlocal(**kw)
try:
yield
finally:
unbind_threadlocal(*kw.keys())
bind_threadlocal(**saved)
def _get_context() -> Context:
try:
return _CONTEXT.context
except AttributeError:
_CONTEXT.context = {}
return _CONTEXT.context
| _ThreadLocalDictWrapper |
python | pyca__cryptography | tests/hazmat/asn1/test_serialization.py | {
"start": 2366,
"end": 2753
} | class ____:
def test_bytes(self) -> None:
assert_roundtrips(
[
(b"", b"\x04\x00"),
(b"hello", b"\x04\x05hello"),
(b"\x01\x02\x03", b"\x04\x03\x01\x02\x03"),
(
b"\x00\xff\x80\x7f",
b"\x04\x04\x00\xff\x80\x7f",
),
]
)
| TestBytes |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/workflows.py | {
"start": 23105,
"end": 26429
} | class ____(GoogleCloudBaseOperator):
"""
Returns a list of executions which belong to the workflow with the given name.
The method returns executions of all workflow revisions. Returned
executions are ordered by their start time (newest first).
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsListExecutionsOperator`
:param workflow_id: Required. The ID of the workflow to be created.
:param start_date_filter: If passed only executions older that this date will be returned.
By default, operators return executions from last 60 minutes.
Note that datetime object must specify a time zone, e.g. ``datetime.timezone.utc``.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id")
operator_extra_links = (WorkflowsWorkflowDetailsLink(),)
def __init__(
self,
*,
workflow_id: str,
location: str,
start_date_filter: datetime.datetime | None = None,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.location = location
self.start_date_filter = start_date_filter or datetime.datetime.now(
tz=datetime.timezone.utc
) - datetime.timedelta(minutes=60)
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Retrieving executions for workflow %s", self.workflow_id)
execution_iter = hook.list_executions(
workflow_id=self.workflow_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsWorkflowDetailsLink.persist(
context=context,
location_id=self.location,
workflow_id=self.workflow_id,
project_id=self.project_id or hook.project_id,
)
return [
Execution.to_dict(e)
for e in execution_iter
if e.start_time > self.start_date_filter # type: ignore
]
| WorkflowsListExecutionsOperator |
python | spyder-ide__spyder | spyder/widgets/onecolumntree.py | {
"start": 949,
"end": 10491
} | class ____(QTreeWidget, SpyderWidgetMixin):
"""
One-column tree widget with context menu.
"""
def __init__(self, parent):
if not PYSIDE2:
super().__init__(parent, class_parent=parent)
else:
QTreeWidget.__init__(self, parent)
SpyderWidgetMixin.__init__(self, class_parent=parent)
self.__expanded_state = None
# Widget setup
self.setItemsExpandable(True)
self.setColumnCount(1)
# Setup context menu
self.collapse_all_action = None
self.collapse_selection_action = None
self.expand_all_action = None
self.expand_selection_action = None
self.setup()
self.common_actions = self.setup_common_actions()
# Signals
self.itemActivated.connect(self.on_item_activated)
self.itemClicked.connect(self.on_item_clicked)
self.itemSelectionChanged.connect(self.on_item_selection_changed)
# To use mouseMoveEvent
self.setMouseTracking(True)
# Use horizontal scrollbar when needed
self.setHorizontalScrollMode(QAbstractItemView.ScrollPerPixel)
self.header().setSectionResizeMode(0, QHeaderView.ResizeToContents)
self.header().setStretchLastSection(False)
self.on_item_selection_changed()
# ---- SpyderWidgetMixin API
# -------------------------------------------------------------------------
def setup(self):
self.menu = self.create_menu("context_menu")
self.collapse_all_action = self.create_action(
OneColumnTreeActions.CollapseAllAction,
text=_("Collapse all"),
icon=ima.icon("collapse"),
triggered=self.collapseAll,
register_shortcut=False,
)
self.expand_all_action = self.create_action(
OneColumnTreeActions.ExpandAllAction,
text=_("Expand all"),
icon=ima.icon("expand"),
triggered=self.expandAll,
register_shortcut=False,
)
self.restore_action = self.create_action(
OneColumnTreeActions.RestoreAction,
text=_("Restore"),
tip=_("Restore original tree layout"),
icon=ima.icon("restore"),
triggered=self.restore,
register_shortcut=False,
)
self.collapse_selection_action = self.create_action(
OneColumnTreeActions.CollapseSelectionAction,
text=_("Collapse section"),
icon=ima.icon("collapse_selection"),
triggered=self.collapse_selection,
register_shortcut=False,
)
self.expand_selection_action = self.create_action(
OneColumnTreeActions.ExpandSelectionAction,
text=_("Expand section"),
icon=ima.icon("expand_selection"),
triggered=self.expand_selection,
register_shortcut=False,
)
for item in [self.collapse_all_action, self.expand_all_action]:
self.add_item_to_menu(
item,
self.menu,
section=OneColumnTreeContextMenuSections.Global,
)
self.add_item_to_menu(
self.restore_action,
self.menu,
section=OneColumnTreeContextMenuSections.Restore,
)
for item in [self.collapse_selection_action,
self.expand_selection_action]:
self.add_item_to_menu(
item,
self.menu,
section=OneColumnTreeContextMenuSections.Section,
)
def update_actions(self):
pass
# ---- Public API
# -------------------------------------------------------------------------
def on_item_activated(self, item):
"""Double-click event"""
raise NotImplementedError
def on_item_clicked(self, item):
pass
def set_title(self, title):
self.setHeaderLabels([title])
def setup_common_actions(self):
"""Setup context menu common actions"""
return [self.collapse_all_action, self.expand_all_action,
self.collapse_selection_action, self.expand_selection_action]
def get_menu_actions(self):
"""Returns a list of menu actions"""
items = self.selectedItems()
actions = self.get_actions_from_items(items)
if actions:
actions.append(None)
actions += self.common_actions
return actions
def get_actions_from_items(self, items):
# Right here: add other actions if necessary
# (reimplement this method)
return []
@Slot()
def restore(self):
self.collapseAll()
for item in self.get_top_level_items():
self.expandItem(item)
def is_item_expandable(self, item):
"""To be reimplemented in child class
See example in project explorer widget"""
return True
def __expand_item(self, item):
if self.is_item_expandable(item):
self.expandItem(item)
for index in range(item.childCount()):
child = item.child(index)
self.__expand_item(child)
@Slot()
def expand_selection(self):
items = self.selectedItems()
if not items:
items = self.get_top_level_items()
for item in items:
self.__expand_item(item)
if items:
self.scrollToItem(items[0])
def __collapse_item(self, item):
self.collapseItem(item)
for index in range(item.childCount()):
child = item.child(index)
self.__collapse_item(child)
@Slot()
def collapse_selection(self):
items = self.selectedItems()
if not items:
items = self.get_top_level_items()
for item in items:
self.__collapse_item(item)
if items:
self.scrollToItem(items[0])
def on_item_selection_changed(self):
"""Item selection has changed"""
is_selection = len(self.selectedItems()) > 0
self.expand_selection_action.setEnabled(is_selection)
self.collapse_selection_action.setEnabled(is_selection)
def get_top_level_items(self):
"""Iterate over top level items"""
return [self.topLevelItem(_i) for _i in range(self.topLevelItemCount())]
def get_items(self):
"""Return items (excluding top level items)"""
itemlist = []
def add_to_itemlist(item):
for index in range(item.childCount()):
citem = item.child(index)
itemlist.append(citem)
add_to_itemlist(citem)
for tlitem in self.get_top_level_items():
add_to_itemlist(tlitem)
return itemlist
def get_scrollbar_position(self):
return (self.horizontalScrollBar().value(),
self.verticalScrollBar().value())
def set_scrollbar_position(self, position):
hor, ver = position
self.horizontalScrollBar().setValue(hor)
self.verticalScrollBar().setValue(ver)
def get_expanded_state(self):
self.save_expanded_state()
return self.__expanded_state
def set_expanded_state(self, state):
self.__expanded_state = state
self.restore_expanded_state()
def save_expanded_state(self):
"""Save all items expanded state"""
self.__expanded_state = {}
def add_to_state(item):
user_text = get_item_user_text(item)
self.__expanded_state[hash(user_text)] = item.isExpanded()
def browse_children(item):
add_to_state(item)
for index in range(item.childCount()):
citem = item.child(index)
user_text = get_item_user_text(citem)
self.__expanded_state[hash(user_text)] = citem.isExpanded()
browse_children(citem)
for tlitem in self.get_top_level_items():
browse_children(tlitem)
def restore_expanded_state(self):
"""Restore all items expanded state"""
if self.__expanded_state is None:
return
for item in self.get_items()+self.get_top_level_items():
user_text = get_item_user_text(item)
is_expanded = self.__expanded_state.get(hash(user_text))
if is_expanded is not None:
item.setExpanded(is_expanded)
def sort_top_level_items(self, key):
"""Sorting tree wrt top level items"""
self.save_expanded_state()
items = sorted([self.takeTopLevelItem(0)
for index in range(self.topLevelItemCount())], key=key)
for index, item in enumerate(items):
self.insertTopLevelItem(index, item)
self.restore_expanded_state()
# ---- Qt methods
# -------------------------------------------------------------------------
def contextMenuEvent(self, event):
"""Override Qt method"""
self.menu.popup(event.globalPos())
def mouseMoveEvent(self, event):
"""Change cursor shape."""
index = self.indexAt(event.pos())
if index.isValid():
vrect = self.visualRect(index)
item_identation = vrect.x() - self.visualRect(self.rootIndex()).x()
if event.pos().x() > item_identation:
# When hovering over results
self.setCursor(Qt.PointingHandCursor)
else:
# On every other element
self.setCursor(Qt.ArrowCursor)
| OneColumnTree |
python | astropy__astropy | astropy/convolution/utils.py | {
"start": 433,
"end": 12151
} | class ____(KernelError):
"""Called when doing invalid arithmetic with a kernel."""
def has_even_axis(array):
if isinstance(array, (list, tuple)):
return not len(array) % 2
return any(not axes_size % 2 for axes_size in array.shape)
def add_kernel_arrays_1D(array_1, array_2):
"""
Add two 1D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = array_1.size // 2
slice_ = slice(center - array_2.size // 2, center + array_2.size // 2 + 1)
new_array[slice_] += array_2
return new_array
if array_2.size > array_1.size:
new_array = array_2.copy()
center = array_2.size // 2
slice_ = slice(center - array_1.size // 2, center + array_1.size // 2 + 1)
new_array[slice_] += array_1
return new_array
return array_2 + array_1
def add_kernel_arrays_2D(array_1, array_2):
"""
Add two 2D kernel arrays of different size.
The arrays are added with the centers lying upon each other.
"""
if array_1.size > array_2.size:
new_array = array_1.copy()
center = [axes_size // 2 for axes_size in array_1.shape]
slice_x = slice(
center[1] - array_2.shape[1] // 2, center[1] + array_2.shape[1] // 2 + 1
)
slice_y = slice(
center[0] - array_2.shape[0] // 2, center[0] + array_2.shape[0] // 2 + 1
)
new_array[slice_y, slice_x] += array_2
return new_array
if array_2.size > array_1.size:
new_array = array_2.copy()
center = [axes_size // 2 for axes_size in array_2.shape]
slice_x = slice(
center[1] - array_1.shape[1] // 2, center[1] + array_1.shape[1] // 2 + 1
)
slice_y = slice(
center[0] - array_1.shape[0] // 2, center[0] + array_1.shape[0] // 2 + 1
)
new_array[slice_y, slice_x] += array_1
return new_array
return array_2 + array_1
def discretize_model(model, x_range, y_range=None, mode="center", factor=10):
"""
Evaluate an analytical model function on a pixel grid.
Parameters
----------
model : `~astropy.modeling.Model` or callable.
Analytical model function to be discretized. A callable that is
not a `~astropy.modeling.Model` instance is converted to a model
using `~astropy.modeling.custom_model`.
x_range : 2-tuple
Lower and upper bounds of x pixel values at which the model is
evaluated. The upper bound is non-inclusive. A ``x_range`` of
``(0, 3)`` means the model will be evaluated at x pixels 0, 1,
and 2. The difference between the upper and lower bound must be
a whole number so that the output array size is well-defined.
y_range : 2-tuple or `None`, optional
Lower and upper bounds of y pixel values at which the model is
evaluated. The upper bound is non-inclusive. A ``y_range`` of
``(0, 3)`` means the model will be evaluated at y pixels of 0,
1, and 2. The difference between the upper and lower bound must
be a whole number so that the output array size is well-defined.
``y_range`` is necessary only for 2D models.
mode : {'center', 'linear_interp', 'oversample', 'integrate'}, optional
One of the following modes:
* ``'center'`` (default)
Discretize model by taking the value at the center of
the pixel bins.
* ``'linear_interp'``
Discretize model by linearly interpolating between the
values at the edges (1D) or corners (2D) of the pixel
bins. For 2D models, the interpolation is bilinear.
* ``'oversample'``
Discretize model by taking the average of model values
in the pixel bins on an oversampled grid. Use the
``factor`` keyword to set the integer oversampling
factor.
* ``'integrate'``
Discretize model by integrating the model over the pixel
bins using `scipy.integrate.quad`. This mode conserves
the model integral on a subpixel scale, but is very
slow.
factor : int, optional
The integer oversampling factor used when ``mode='oversample'``.
Ignored otherwise.
Returns
-------
array : `numpy.ndarray`
The discretized model array.
Examples
--------
In this example, we define a
`~astropy.modeling.functional_models.Gaussian1D` model that has been
normalized so that it sums to 1.0. We then discretize this model
using the ``'center'``, ``'linear_interp'``, and ``'oversample'``
(with ``factor=10``) modes.
.. plot::
:show-source-link:
import matplotlib.pyplot as plt
import numpy as np
from astropy.convolution.utils import discretize_model
from astropy.modeling.models import Gaussian1D
gauss_1D = Gaussian1D(1 / (0.5 * np.sqrt(2 * np.pi)), 0, 0.5)
x_range = (-2, 3)
x = np.arange(*x_range)
y_center = discretize_model(gauss_1D, x_range, mode='center')
y_edge = discretize_model(gauss_1D, x_range, mode='linear_interp')
y_oversample = discretize_model(gauss_1D, x_range, mode='oversample')
fig, ax = plt.subplots(figsize=(8, 6))
label = f'center (sum={y_center.sum():.3f})'
ax.plot(x, y_center, '.-', label=label)
label = f'linear_interp (sum={y_edge.sum():.3f})'
ax.plot(x, y_edge, '.-', label=label)
label = f'oversample (sum={y_oversample.sum():.3f})'
ax.plot(x, y_oversample, '.-', label=label)
ax.set_xlabel('x')
ax.set_ylabel('Value')
plt.legend()
"""
if not callable(model):
raise TypeError("Model must be callable.")
if not isinstance(model, Model):
model = custom_model(model)()
ndim = model.n_inputs
if ndim > 2:
raise ValueError("discretize_model supports only 1D and 2D models.")
dxrange = np.diff(x_range)[0]
if dxrange != int(dxrange):
raise ValueError(
"The difference between the upper and lower limit of"
" 'x_range' must be a whole number."
)
if y_range:
dyrange = np.diff(y_range)[0]
if dyrange != int(dyrange):
raise ValueError(
"The difference between the upper and lower limit of"
" 'y_range' must be a whole number."
)
if factor != int(factor):
raise ValueError("factor must have an integer value")
factor = int(factor)
if ndim == 2 and y_range is None:
raise ValueError("y_range must be specified for a 2D model")
if ndim == 1 and y_range is not None:
raise ValueError("y_range should not be input for a 1D model")
match (mode, ndim):
case ("center", 1):
result = discretize_center_1D(model, x_range)
case ("center", 2):
result = discretize_center_2D(model, x_range, y_range)
case ("linear_interp", 1):
result = discretize_linear_1D(model, x_range)
case ("linear_interp", 2):
result = discretize_bilinear_2D(model, x_range, y_range)
case ("oversample", 1):
result = discretize_oversample_1D(model, x_range, factor)
case ("oversample", 2):
result = discretize_oversample_2D(model, x_range, y_range, factor)
case ("integrate", 1):
result = discretize_integrate_1D(model, x_range)
case ("integrate", 2):
result = discretize_integrate_2D(model, x_range, y_range)
case _:
raise ValueError(
"Invalid (mode, ndim) combination for discretize_model. "
f"Got {mode=}, {ndim=}"
)
return result
def discretize_center_1D(model, x_range):
    """Discretize a 1D model by sampling it at the center of each bin."""
    # Bin centers coincide with the integer pixel coordinates.
    pixel_centers = np.arange(x_range[0], x_range[1])
    return model(pixel_centers)
def discretize_center_2D(model, x_range, y_range):
    """Discretize a 2D model by sampling it at each pixel center."""
    # meshgrid ('xy' indexing): x varies along columns, y along rows.
    xx, yy = np.meshgrid(np.arange(*x_range), np.arange(*y_range))
    return model(xx, yy)
def discretize_linear_1D(model, x_range):
    """Discretize a 1D model by linear interpolation between bin edges."""
    # Sample the model on the bin edges, half a pixel beyond each bound.
    edges = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
    edge_values = model(edges)
    # Each bin value is the mean of its two edge values.
    return 0.5 * (edge_values[:-1] + edge_values[1:])
def discretize_bilinear_2D(model, x_range, y_range):
    """Discretize a 2D model by bilinear interpolation between pixel corners."""
    # Sample the model on the pixel corners, half a pixel beyond each bound.
    x_edges = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
    y_edges = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
    xx, yy = np.meshgrid(x_edges, y_edges)
    corner_values = model(xx, yy)
    # Average the four corner values of every pixel: first along y, then x.
    y_means = 0.5 * (corner_values[:-1, :] + corner_values[1:, :])
    return 0.5 * (y_means[:, :-1] + y_means[:, 1:])
def discretize_oversample_1D(model, x_range, factor=10):
    """Discretize a 1D model by averaging over an oversampled grid."""
    num_samples = int((x_range[1] - x_range[0]) * factor)
    # Subpixel sample positions, symmetric about each pixel center.
    x_fine = np.linspace(
        x_range[0] - 0.5 * (1 - 1 / factor),
        x_range[1] - 0.5 * (1 + 1 / factor),
        num=num_samples,
    )
    fine_values = model(x_fine)
    # Average each group of `factor` subpixel samples into one pixel value.
    return np.reshape(fine_values, (-1, factor)).mean(axis=1)
def discretize_oversample_2D(model, x_range, y_range, factor=10):
    """Discretize a 2D model by averaging over an oversampled grid."""

    def _subpixel_grid(lo, hi):
        # Subpixel sample positions, symmetric about each pixel center.
        return np.linspace(
            lo - 0.5 * (1 - 1 / factor),
            hi - 0.5 * (1 + 1 / factor),
            num=int((hi - lo) * factor),
        )

    x_fine = _subpixel_grid(*x_range)
    y_fine = _subpixel_grid(*y_range)
    xx, yy = np.meshgrid(x_fine, y_fine)
    fine_values = model(xx, yy)
    # Group the subpixel samples per pixel and average over both axes.
    grouped = np.reshape(
        fine_values,
        (y_fine.size // factor, factor, x_fine.size // factor, factor),
    )
    return grouped.mean(axis=3).mean(axis=1)
def discretize_integrate_1D(model, x_range):
    """
    Discretize a 1D model by numerically integrating it over each bin.

    Parameters
    ----------
    model : callable
        Model evaluated as ``model(x)``; integrated with
        `scipy.integrate.quad` over each unit-width bin.
    x_range : 2-tuple
        Lower and upper bounds of the pixel range (upper bound
        non-inclusive); bin edges sit half a pixel beyond each bound.

    Returns
    -------
    values : `numpy.ndarray`
        One integral value per pixel.
    """
    from scipy.integrate import quad

    # Bin edges extend half a pixel beyond each bound.
    x = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
    # Collect the per-bin integrals in one pass instead of repeatedly
    # reallocating with np.append (which copies the array each iteration).
    return np.array(
        [quad(model, x[i], x[i + 1])[0] for i in range(x.size - 1)]
    )
def discretize_integrate_2D(model, x_range, y_range):
    """Discretize a 2D model by integrating it over each pixel."""
    from scipy.integrate import dblquad

    # Pixel edges extend half a pixel beyond each bound.
    x_edges = np.arange(x_range[0] - 0.5, x_range[1] + 0.5)
    y_edges = np.arange(y_range[0] - 0.5, y_range[1] + 0.5)
    values = np.empty((y_edges.size - 1, x_edges.size - 1))
    # dblquad integrates func(y, x) for x in [a, b], y in [gfun(x), hfun(x)].
    for j in range(y_edges.size - 1):
        y_lo = y_edges[j]
        y_hi = y_edges[j + 1]
        for i in range(x_edges.size - 1):
            values[j, i] = dblquad(
                lambda y, x: model(x, y),
                x_edges[i],
                x_edges[i + 1],
                lambda x, lo=y_lo: lo,
                lambda x, hi=y_hi: hi,
            )[0]
    return values
| KernelArithmeticError |
python | google__jax | jax/_src/lax/linalg.py | {
"start": 17268,
"end": 107125
class ____(enum.Enum):
  """Enum for SVD algorithm."""
  # "default" defers the algorithm choice to the backend; the remaining
  # members request a specific SVD implementation by name.
  DEFAULT = "default"
  QR = "QR"
  JACOBI = "Jacobi"
  POLAR = "polar"
# Typed overloads for ``svd``: the static return type depends on the value
# of ``compute_uv`` — a (u, s, vH) triple when True, a single array of
# singular values when False.
@overload
def svd(
    x: ArrayLike,
    *,
    full_matrices: bool = True,
    compute_uv: Literal[True],
    subset_by_index: tuple[int, int] | None = None,
    algorithm: SvdAlgorithm | None = None,
) -> tuple[Array, Array, Array]:
  ...
@overload
def svd(
    x: ArrayLike,
    *,
    full_matrices: bool = True,
    compute_uv: Literal[False],
    subset_by_index: tuple[int, int] | None = None,
    algorithm: SvdAlgorithm | None = None,
) -> Array:
  ...
@overload
def svd(
    x: ArrayLike,
    *,
    full_matrices: bool = True,
    compute_uv: bool = True,
    subset_by_index: tuple[int, int] | None = None,
    algorithm: SvdAlgorithm | None = None,
) -> Array | tuple[Array, Array, Array]:
  ...
# TODO: Add `max_qdwh_iterations` to the function signature for TPU SVD.
def svd(
    x: ArrayLike,
    *,
    full_matrices: bool = True,
    compute_uv: bool = True,
    subset_by_index: tuple[int, int] | None = None,
    algorithm: SvdAlgorithm | None = None,
) -> Array | tuple[Array, Array, Array]:
  """Singular value decomposition.
  Computes the singular value decomposition of an input matrix.
  Args:
    x: A batch of matrices with shape ``[..., m, n]``.
    full_matrices: Determines if full or reduced matrices are returned.
    compute_uv: If ``True``, returns the left singular vectors, the singular
      values and the adjoint of the right singular vectors. Otherwise, only
      the singular values are returned.
    subset_by_index: If ``None``, the entire matrix is returned. Otherwise,
      returns the singular values and vectors for the given range of indices.
    algorithm: The SVD algorithm to use. Must be ``None`` or a value from
      :class:`~jax.lax.linalg.SvdAlgorithm`.
  Returns:
    The singular values if ``compute_uv`` is ``False``, otherwise returns a
    triple containing the left singular vectors, the singular values, and the
    adjoint of the right singular vectors.
  """
  result = svd_p.bind(
      x,
      full_matrices=full_matrices,
      compute_uv=compute_uv,
      subset_by_index=subset_by_index,
      algorithm=algorithm,
  )
  if compute_uv:
    # The primitive yields (s, u, v); reorder to the documented (u, s, v).
    s, u, v = result
    return u, s, v
  else:
    s, = result
    return s
def symmetric_product(
    a_matrix: ArrayLike,
    c_matrix: ArrayLike,
    *,
    alpha: float = 1.,
    beta: float = 0.,
    symmetrize_output: bool = False
):
  r"""Symmetric product.
  Computes the symmetric product
  .. math::
    \alpha \, A \, A^T + \beta \, C
  where :math:`A` is a rectangular matrix and :math:`C` is a symmetric matrix.
  Args:
    a_matrix: A batch of matrices with shape ``[..., m, n]``.
    c_matrix: A batch of matrices with shape ``[..., m, m]``.
    alpha: A scalar.
    beta: A scalar.
    symmetrize_output: If ``True``, the upper triangle of the output is
      replaced with its transpose.
  Returns:
    A batch of matrices with shape ``[..., m, m]`` where only the lower
    triangle is guaranteed to include the correct values on all platforms. If
    ``symmetrize_output`` is ``True``, the upper triangle is filled with the
    transpose of the lower triangle, and the whole matrix is valid.
  """
  # NOTE(review): standard_insert_pvary appears to reconcile the operands'
  # varying-axes metadata before binding — confirm against jax.core.
  a_matrix, c_matrix = core.standard_insert_pvary(a_matrix, c_matrix)
  result = symmetric_product_p.bind(a_matrix, c_matrix, alpha=alpha, beta=beta)
  if symmetrize_output:
    # Mirror the (valid) strict lower triangle into the upper triangle so
    # the whole matrix is usable; only the lower triangle is guaranteed by
    # the primitive (see Returns above).
    upper_half = lax.transpose(
        _tril(result, k=-1),
        (*range(result.ndim - 2), result.ndim - 1, result.ndim - 2))
    result = _tril(result, k=0) + upper_half
  return result
def triangular_solve(
    a: ArrayLike,
    b: ArrayLike,
    *,
    left_side: bool = False,
    lower: bool = False,
    transpose_a: bool = False,
    conjugate_a: bool = False,
    unit_diagonal: bool = False,
) -> Array:
  r"""Triangular solve.
  Solves either the matrix equation
  .. math::
    \mathit{op}(A) . X = B
  if ``left_side`` is ``True`` or
  .. math::
    X . \mathit{op}(A) = B
  if ``left_side`` is ``False``.
  ``A`` must be a lower or upper triangular square matrix, and where
  :math:`\mathit{op}(A)` may either transpose :math:`A` if ``transpose_a``
  is ``True`` and/or take its complex conjugate if ``conjugate_a`` is ``True``.
  Args:
    a: A batch of matrices with shape ``[..., m, m]``.
    b: A batch of matrices with shape ``[..., m, n]`` if ``left_side`` is
      ``True`` or shape ``[..., n, m]`` otherwise.
    left_side: describes which of the two matrix equations to solve; see above.
    lower: describes which triangle of ``a`` should be used. The other triangle
      is ignored.
    transpose_a: if ``True``, the value of ``a`` is transposed.
    conjugate_a: if ``True``, the complex conjugate of ``a`` is used in the
      solve. Has no effect if ``a`` is real.
    unit_diagonal: if ``True``, the diagonal of ``a`` is assumed to be unit
      (all 1s) and not accessed.
  Returns:
    A batch of matrices the same shape and dtype as ``b``.
  """
  # Conjugation is documented to have no effect for real inputs, so drop
  # the flag early for real dtypes.
  conjugate_a = conjugate_a and dtypes.issubdtype(lax.dtype(a), np.complexfloating)
  # If b has one fewer dimension than a, it is a single vector: temporarily
  # promote it to a matrix with a unit column (left solve) or unit row.
  singleton = np.ndim(b) == np.ndim(a) - 1
  if singleton:
    b = lax.expand_dims(b, (-1 if left_side else -2,))
  a, b = core.standard_insert_pvary(a, b)
  out = triangular_solve_p.bind(
      a, b, left_side=left_side, lower=lower, transpose_a=transpose_a,
      conjugate_a=conjugate_a, unit_diagonal=unit_diagonal)
  if singleton:
    # Strip the unit dimension added above.
    out = out[..., 0] if left_side else out[..., 0, :]
  return out
def tridiagonal(
    a: ArrayLike, *, lower: bool=True
) -> tuple[Array, Array, Array, Array]:
  """Reduces a symmetric/Hermitian matrix to tridiagonal form.
  Currently implemented on CPU and GPU only.
  Args:
    a: A floating point or complex matrix or batch of matrices.
    lower: Describes which triangle of the input matrices to use.
      The other triangle is ignored and not accessed.
  Returns:
    A ``(a, d, e, taus)`` tuple. If ``lower=True``, the diagonal and first
    subdiagonal of matrix (or batch of matrices) ``a`` contain the tridiagonal
    representation, and elements below the first subdiagonal contain the
    elementary Householder reflectors, where additionally ``d`` contains the
    diagonal of the matrix and ``e`` contains the first subdiagonal. If
    ``lower=False`` the diagonal and first superdiagonal of the matrix contains
    the tridiagonal representation, and elements above the first superdiagonal
    contain the elementary Householder reflectors, where additionally ``d``
    contains the diagonal of the matrix and ``e`` contains the first
    superdiagonal. ``taus`` contains the scalar factors of the elementary
    Householder reflectors.
  """
  # lax.asarray normalizes the operand to a JAX array before binding.
  return tridiagonal_p.bind(lax.asarray(a), lower=lower)
def tridiagonal_solve(dl: Array, d: Array, du: Array, b: Array) -> Array:
  r"""Computes the solution of a tridiagonal linear system.
  This function computes the solution of a tridiagonal linear system:
  .. math::
    A \, X = B
  Args:
    dl: A batch of vectors with shape ``[..., m]``.
      The lower diagonal of A: ``dl[i] := A[i, i-1]`` for i in ``[0,m)``.
      Note that ``dl[0] = 0``.
    d: A batch of vectors with shape ``[..., m]``.
      The middle diagonal of A: ``d[i] := A[i, i]`` for i in ``[0,m)``.
    du: A batch of vectors with shape ``[..., m]``.
      The upper diagonal of A: ``du[i] := A[i, i+1]`` for i in ``[0,m)``.
      Note that ``dl[m - 1] = 0``.
    b: Right hand side matrix.
  Returns:
    Solution ``X`` of tridiagonal system.
  """
  # NOTE(review): standard_insert_pvary appears to reconcile the operands'
  # varying-axes metadata before binding — confirm against jax.core.
  dl, d, du, b = core.standard_insert_pvary(dl, d, du, b)
  return tridiagonal_solve_p.bind(dl, d, du, b)
# Primitive registration helper functions
# Maps JAX platform names to the prefix used to build FFI lowering target
# names (e.g. "cu" -> "cusolver_potrf_ffi"-style targets below).
_platform_prefix_map = {"cpu": "cpu", "cuda": "cu", "rocm": "hip"}
def register_cpu_gpu_lowering(
    prim, lowering_rule, supported_platforms=("cpu", "cuda", "rocm")
):
  """Register one platform-parameterized lowering rule per platform.

  The rule receives the platform's FFI target prefix (see
  ``_platform_prefix_map``) via the ``target_name_prefix`` keyword.
  """
  for platform in supported_platforms:
    prefix = _platform_prefix_map[platform]
    mlir.register_lowering(
        prim,
        partial(lowering_rule, target_name_prefix=prefix),
        platform=platform)
def linalg_shape_rule(multiple_results, supports_batching, ranks, result_shape,
                      name, *avals, **kwargs):
  """Shared shape rule for linalg primitives.

  Splits each operand shape into batch dims and core dims (the trailing
  ``rank`` dims), validates ranks and batch-rank agreement, then delegates
  the core output shape(s) to ``result_shape`` and re-attaches the batch
  dims.
  """
  batch_shapes = []
  core_shapes = []
  for i, (rank, aval) in enumerate(zip(ranks, avals)):
    shape = aval.shape
    ndim = len(shape)
    if ndim < rank:
      raise TypeError(
          f"Input {i} to {name} must have rank at least {rank}, but got "
          f"shape={shape}"
      )
    if not supports_batching and ndim != rank:
      raise TypeError(
          f"Input {i} to {name} must have a rank of exactly {rank}, but got "
          f"shape={shape}"
      )
    split = ndim - rank
    batch_shapes.append(shape[:split])
    core_shapes.append(shape[split:])
  if len({len(b) for b in batch_shapes}) > 1:
    raise TypeError(
        f"All inputs to {name} must have the same number of batch dimensions, "
        f"but got {[len(b) for b in batch_shapes]} batch dimensions for the "
        "inputs."
    )
  batch_shape = tuple(batch_shapes[0])
  core_out = result_shape(*core_shapes, **kwargs)
  if multiple_results:
    return tuple(batch_shape + tuple(d) for d in core_out)
  return batch_shape + tuple(core_out)
def linalg_sharding_rule(
    multiple_results, shape_rule, ranks, name, *avals, **kwargs
):
  """Shared sharding rule for linalg primitives.

  Only batch dimensions may be sharded; every input must agree on the batch
  sharding, and outputs inherit it with core dims unsharded.
  """
  output_shapes = shape_rule(*avals, **kwargs)
  batch_specs = []
  for i, (rank, aval) in enumerate(zip(ranks, avals)):
    spec = aval.sharding.spec
    # The trailing `rank` dims are the core (non-batch) dims.
    batch_spec, rest_spec = spec[:len(spec) - rank], spec[len(spec) - rank:]
    if not all(s is None for s in rest_spec):
      raise core.ShardingTypeError(
          f"Input {i} to {name} must be unsharded on non-batch dimensions, "
          f"but got {spec}."
      )
    batch_specs.append(batch_spec)
  batch_spec = batch_specs[0]
  if any(b != batch_spec for b in batch_specs[1:]):
    raise core.ShardingTypeError(
        f"All inputs to {name} must have the same batch sharding, but got "
        f"{batch_specs}."
    )
  sharding = avals[0].sharding
  if multiple_results:
    # Pad each output's spec with None on its core dims.
    return [
        sharding.update(spec=
          P(*(tuple(batch_spec) + (None,) * (len(s) - len(batch_spec))))
        )
        for s in output_shapes
    ]
  else:
    ndim = len(output_shapes) - len(batch_spec)
    return sharding.update(spec=P(*(tuple(batch_spec) + (None,) * ndim)))
def linalg_vma_rule(multiple_results, shape_rule, name, *avals, **kwargs):
  """Shared vma rule: replicate the standard vma result across all outputs."""
  output_shapes = shape_rule(*avals, **kwargs)
  out_vma = core.standard_vma_rule(name, *avals)
  if multiple_results:
    return [out_vma] * len(output_shapes)
  else:
    return out_vma
def linalg_primitive(result_dtype, accepted_dtypes, ranks, result_shape, name,
                     multiple_results=False, supports_batching=True,
                     require_same=True):
  """Construct a linalg primitive from dtype/shape rules.

  Wires up the standard abstract-eval machinery (shape, dtype, sharding, and
  vma rules) and, when batching is supported, the expand-dims batcher.
  """
  dtype_rule = partial(
      lax.naryop_dtype_rule, result_dtype, accepted_dtypes, name,
      require_same=require_same)
  shape_rule = partial(
      linalg_shape_rule, multiple_results, supports_batching, ranks,
      result_shape, name)
  if supports_batching:
    sharding_rule = partial(
        linalg_sharding_rule, multiple_results, shape_rule, ranks, name)
  else:
    # Without batching there are no batch dims to shard over.
    sharding_rule = None
  vma_rule = partial(linalg_vma_rule, multiple_results, shape_rule, name)
  prim = core.Primitive(name)
  prim.multiple_results = multiple_results
  prim.def_impl(partial(dispatch.apply_primitive, prim))
  if multiple_results:
    prim.def_abstract_eval(
        partial(lax_utils.standard_multi_result_abstract_eval, prim,
                shape_rule, dtype_rule, lax_utils._standard_weak_type_rule,
                sharding_rule, vma_rule))
  else:
    prim.def_abstract_eval(
        partial(lax_utils.standard_abstract_eval, prim, shape_rule, dtype_rule,
                lax_utils._standard_weak_type_rule, sharding_rule,
                partial(core.standard_vma_rule, name), None, None, None))
  if supports_batching:
    batching.primitive_batchers[prim] = partial(
        batching.expand_dims_batcher, prim)
  return prim
# Convenience wrapper: a linalg primitive whose result dtype rule is
# ``lax.input_dtype``.
standard_linalg_primitive = partial(linalg_primitive, lax.input_dtype)
# Primitive implementations
# Cholesky decomposition
def _cholesky_shape_rule(shape):
if shape[0] != shape[1]:
raise ValueError(
f"The input to cholesky must be a square matrix. Got shape {shape}.")
return shape
def _cholesky_jvp_rule(primals, tangents):
  """JVP of the Cholesky decomposition (forward-mode, arXiv:1602.07527)."""
  x, = primals
  sigma_dot, = tangents
  L = _tril(cholesky_p.bind(x))
  # Forward-mode rule from https://arxiv.org/pdf/1602.07527.pdf
  def phi(X):
    # Lower-triangular projection with the diagonal scaled by 1/2.
    l = _tril(X)
    return l / lax.expand_dims(
        lax._const(X, 1) + lax._eye(X.dtype, (X.shape[-1], X.shape[-1])),
        range(l.ndim - 2))
  tmp = triangular_solve(L, sigma_dot, left_side=False, transpose_a=True,
                         conjugate_a=True, lower=True)
  L_dot = lax.batch_matmul(L, phi(triangular_solve(
      L, tmp, left_side=True, transpose_a=False, lower=True)),
                           precision=lax.Precision.HIGHEST)
  return L, L_dot
def _cholesky_lowering(ctx, x):
  """Default (platform-independent) lowering: a single HLO cholesky op."""
  del ctx  # unused
  return [hlo.cholesky(x, lower=ir.BoolAttr.get(True))]
def _cholesky_cpu_lowering(ctx, operand):
  """CPU lowering: LAPACK potrf via FFI, with NaN fill on failure."""
  operand_aval, = ctx.avals_in
  out_aval, = ctx.avals_out
  batch_dims = operand_aval.shape[:-2]
  target_name = lapack.prepare_lapack_call("potrf_ffi", operand_aval.dtype)
  info_aval = ShapedArray(batch_dims, np.int32)
  # The kernel works in place: the operand buffer is aliased to the output.
  rule = _linalg_ffi_lowering(target_name, avals_out=[operand_aval, info_aval],
                              operand_output_aliases={0: 0})
  result, info = rule(ctx, operand, uplo=_matrix_uplo_attr(True))
  # info == 0 signals success; failed batch members are replaced with NaNs.
  ok = mlir.compare_hlo(info, mlir.full_like_aval(ctx, 0, info_aval), "EQ",
                        "SIGNED")
  return [_replace_not_ok_with_nan(ctx, batch_dims, ok, result, out_aval)]
def _cholesky_gpu_lowering(ctx, operand, *, target_name_prefix):
  """GPU lowering: cusolver/hipsolver potrf via FFI, with NaN fill on failure."""
  operand_aval, = ctx.avals_in
  out_aval, = ctx.avals_out
  batch_dims = operand_aval.shape[:-2]
  info_aval = ShapedArray(batch_dims, np.int32)
  # The kernel works in place: the operand buffer is aliased to the output.
  rule = _linalg_ffi_lowering(f"{target_name_prefix}solver_potrf_ffi",
                              avals_out=[operand_aval, info_aval],
                              operand_output_aliases={0: 0})
  result, info = rule(ctx, operand, lower=True)
  # info == 0 signals success; failed batch members are replaced with NaNs.
  ok = mlir.compare_hlo(info, mlir.full_like_aval(ctx, 0, info_aval), "EQ",
                        "SIGNED")
  return [_replace_not_ok_with_nan(ctx, batch_dims, ok, result, out_aval)]
# Primitive, JVP, and per-platform lowerings for the Cholesky decomposition.
cholesky_p = standard_linalg_primitive(
    (_float | _complex,), (2,), _cholesky_shape_rule, "cholesky")
ad.primitive_jvps[cholesky_p] = _cholesky_jvp_rule
mlir.register_lowering(cholesky_p, _cholesky_lowering)
mlir.register_lowering(cholesky_p, _cholesky_cpu_lowering, platform="cpu")
register_cpu_gpu_lowering(cholesky_p, _cholesky_gpu_lowering,
                          supported_platforms=("cuda", "rocm"))
# Cholesky update
def _cholesky_update_shape_rule(r_shape, w_shape):
if r_shape[0] != r_shape[1] or w_shape[0] != r_shape[1]:
raise ValueError(
"Rank-1 update to Cholesky decomposition takes a square matrix "
f"and a vector of the same size as input. Got shapes {r_shape} and "
f"{w_shape} instead")
return r_shape
def _cholesky_update_jax_fn(R, z):
  """Rank-1 Cholesky-factor update via Givens rotations (pure-JAX fallback)."""
  def _drotg(x, y):
    """Get coefs for Givens rotation in a numerically stable way."""
    def _drotg_nonzero(x, y):
      abs_x = abs(x)
      abs_y = abs(y)
      # Scale by the larger magnitude to avoid overflow in x**2 + y**2.
      denominator = lax.select(abs_x > abs_y, abs_x, abs_y)
      x /= denominator
      y /= denominator
      rh = 1 / lax.sqrt(x ** 2 + y ** 2)
      return x * rh, -y * rh
    one_and_zero = (
        np.array(1., dtype=x.dtype),
        np.array(0., dtype=x.dtype),
    )
    # y == 0 needs no rotation: return the identity coefficients.
    return control_flow.cond(
        y == 0, lambda x, y: one_and_zero, _drotg_nonzero, x, y)
  def _drot(
      first_vector: Array, second_vector: Array,
      c_coef: float, s_coef: float) -> tuple[Array, Array]:
    # Apply the plane rotation to a pair of row vectors.
    return (
        c_coef * first_vector - s_coef * second_vector,
        c_coef * second_vector + s_coef * first_vector)
  n = z.shape[0]
  # Zero out z one component at a time, rotating each row of R against z.
  for k in range(n):
    c, s = _drotg(R[k, k], z[k])
    row_k, z = _drot(R[k, :], z, c, s)
    R = R.at[k, :].set(row_k)
  return R
def _cholesky_update_gpu_lowering_rule(target_name_prefix, ctx, r_matrix,
                                       w_vector):
  """GPU lowering: dispatch to the custom cholesky_update FFI kernel."""
  # Both operands are aliased to outputs (updated in place); only the
  # updated factor is returned to the caller.
  rule = ffi.ffi_lowering(f"{target_name_prefix}_cholesky_update_ffi",
                          operand_output_aliases={0: 0, 1: 1})
  sub_ctx = ctx.replace(avals_out=ctx.avals_in)
  return rule(sub_ctx, r_matrix, w_vector)[:1]
# Primitive and lowerings for the rank-1 Cholesky update: a custom CUDA
# kernel on GPU, with the pure-JAX implementation as the generic fallback.
cholesky_update_p = standard_linalg_primitive(
    (_float, _float), (2, 1), _cholesky_update_shape_rule, "cholesky_update",
    supports_batching=False)
mlir.register_lowering(
    cholesky_update_p, partial(_cholesky_update_gpu_lowering_rule, "cu"),
    platform="cuda")
mlir.register_lowering(
    cholesky_update_p,
    mlir.lower_fun(_cholesky_update_jax_fn, multiple_results=False))
# General eigendecomposition
def _eig_dtype_rule(
    a_dtype, *, compute_left_eigenvectors, compute_right_eigenvectors, **_
):
  """Dtype rule for eig: all outputs are complex, one per requested result."""
  dtype = dtypes.to_complex_dtype(a_dtype)
  return (dtype,) * (1 + compute_left_eigenvectors + compute_right_eigenvectors)
def _eig_shape_rule(
shape, *, compute_left_eigenvectors, compute_right_eigenvectors, **_
):
if shape[0] != shape[1]:
raise ValueError(
f"The input to eig must be a square matrix. Got shape {shape}.")
count = compute_left_eigenvectors + compute_right_eigenvectors
return (shape[:-1],) + (shape,) * count
def _eig_compute_attr(compute):
  """Map a boolean 'compute eigenvectors?' flag to the LAPACK FFI enum attr."""
  return _enum_attr(
      lapack.eig.ComputationMode.kComputeEigenvectors if compute
      else lapack.eig.ComputationMode.kNoEigenvectors
  )
def _eig_cpu_lowering(ctx, operand, *, compute_left_eigenvectors,
                      compute_right_eigenvectors, implementation):
  """CPU lowering: LAPACK geev via FFI, with NaN fill on failure."""
  if implementation and implementation != EigImplementation.LAPACK:
    raise ValueError("Only the lapack implementation is supported on CPU.")
  operand_aval, = ctx.avals_in
  out_aval = ctx.avals_out[0]
  batch_dims = operand_aval.shape[:-2]
  real = operand_aval.dtype == np.float32 or operand_aval.dtype == np.float64
  eigvals_aval = ShapedArray(operand_aval.shape[:-1], operand_aval.dtype)
  eigvecs_aval = ShapedArray(operand_aval.shape,
                             dtypes.to_complex_dtype(operand_aval.dtype))
  info_aval = ShapedArray(batch_dims, np.int32)
  avals_out = [eigvals_aval, eigvecs_aval, eigvecs_aval, info_aval]
  if real:
    # For real inputs the kernel returns the eigenvalues' real and
    # imaginary parts as two separate arrays.
    avals_out = [eigvals_aval, *avals_out]
  target_name = lapack.prepare_lapack_call("geev_ffi", operand_aval.dtype)
  rule = _linalg_ffi_lowering(target_name, avals_out=avals_out)
  *w, vl, vr, info = rule(ctx, operand,
      compute_left=_eig_compute_attr(compute_left_eigenvectors),
      compute_right=_eig_compute_attr(compute_right_eigenvectors))
  # Recombine the (re, im) eigenvalue pair for real inputs.
  w = hlo.complex(w[0], w[1]) if real else w[0]
  # info == 0 signals success; failed batch members are replaced with NaNs.
  ok = mlir.compare_hlo(
      info, mlir.full_like_aval(ctx, 0, ShapedArray(batch_dims, np.dtype(np.int32))),
      "EQ", "SIGNED")
  w = _replace_not_ok_with_nan(ctx, batch_dims, ok, w, out_aval)
  output = [w]
  if compute_left_eigenvectors:
    aval = ctx.avals_out[len(output)]
    vl = _replace_not_ok_with_nan(ctx, batch_dims, ok, vl, aval)
    output.append(vl)
  if compute_right_eigenvectors:
    aval = ctx.avals_out[len(output)]
    vr = _replace_not_ok_with_nan(ctx, batch_dims, ok, vr, aval)
    output.append(vr)
  return output
def _unpack_conjugate_pairs(w, vr):
  """Expand the packed real-eigenvector layout into complex eigenvectors.

  cusolver, like LAPACK, uses a packed representation of the complex
  eigenvectors, where the (re, im) vectors are adjacent and shared by the
  conjugate pair:
  https://docs.nvidia.com/cuda/cusolver/index.html?highlight=geev#cusolverdnxgeev

  Args:
    w: complex eigenvalues, shape ``[..., n]``.
    vr: packed real eigenvector matrix, shape ``[..., n, n]``.

  Returns:
    The complex eigenvector matrix, shape ``[..., n, n]``.
  """
  if w.size == 0:
    return lax.complex(vr, lax.zeros_like_array(vr))
  # An eigenvalue is "real" when its imaginary part is zero or NaN (NaNs
  # arise when failed batch members are NaN-filled upstream). NaN is
  # detected via x != x: the previous `w.imag == np.nan` comparison was
  # always False, so NaN eigenvalues were misclassified as complex.
  is_real = ((w.imag == 0) | (w.imag != w.imag))
  # Finds the positions at which each conjugate pair starts, via the parity of
  # the count of the number of complex numbers seen.
  conj_pair_start = control_flow.cumsum((~is_real).astype(int),
                                        axis=len(w.shape) - 1)
  conj_pair_start = conj_pair_start % 2 == 1
  pads = [(0, 0, 0)] * (len(vr.shape))
  pads[-1] = (-1, 1, 0)
  vr_shifted_left = lax.pad(vr, lax._zero(vr), pads)
  pads[-1] = (1, -1, 0)
  vr_shifted_right = lax.pad(vr, lax._zero(vr), pads)
  # Broadcast the per-eigenvalue masks over the rows of vr.
  dims = np.delete(np.arange(len(vr.shape), dtype=np.int32), -2)
  is_real = lax.broadcast_in_dim(is_real, vr.shape, broadcast_dimensions=dims)
  conj_pair_start = lax.broadcast_in_dim(conj_pair_start, vr.shape,
                                         broadcast_dimensions=dims)
  re = lax.select(is_real | conj_pair_start, vr, vr_shifted_right)
  im = lax.select(conj_pair_start, vr_shifted_left, -vr)
  im = lax.select(is_real, lax.zeros_like_array(vr), im)
  return lax.complex(re, im)
def _eig_gpu_lowering(ctx, operand, *,
                      compute_left_eigenvectors, compute_right_eigenvectors,
                      implementation, target_name_prefix):
  """GPU lowering for eig.

  Dispatches either to cusolver's geev (when available and only right
  eigenvectors are requested, or explicitly selected) or to the hybrid
  MAGMA/CPU kernels; failed batch members are NaN-filled.
  """
  operand_aval, = ctx.avals_in
  batch_dims = operand_aval.shape[:-2]
  n, m = operand_aval.shape[-2:]
  assert n == m
  dtype = operand_aval.dtype
  complex_dtype = np.result_type(dtype, 1j)
  if dtype in (np.float32, np.float64):
    is_real = True
  elif dtype in (np.complex64, np.complex128):
    is_real = False
  else:
    raise ValueError(f"Unsupported dtype: {dtype}")
  # cusolver's nonsymmetric geev was added in cusolver 11.7.1.
  have_cusolver_geev = (
      target_name_prefix == "cu"
      and cuda_versions
      and cuda_versions.cusolver_get_version() >= 11701
  )
  if (
      implementation is None and have_cusolver_geev
      and not compute_left_eigenvectors
  ) or implementation == EigImplementation.CUSOLVER:
    if not have_cusolver_geev:
      raise RuntimeError(
          "Nonsymmetric eigendecomposition requires cusolver 11.7.1 or newer"
      )
    if compute_left_eigenvectors:
      raise NotImplementedError(
          "Left eigenvectors are not supported by cusolver")
    target_name = f"{target_name_prefix}solver_geev_ffi"
    avals_out = [
        ShapedArray(batch_dims + (n, n), dtype),
        ShapedArray(batch_dims + (n,), complex_dtype),
        ShapedArray(batch_dims + (n, n), dtype),
        ShapedArray(batch_dims + (n, n), dtype),
        ShapedArray(batch_dims, np.int32),
    ]
    rule = _linalg_ffi_lowering(target_name, avals_out=avals_out)
    _, w, vl, vr, info = rule(ctx, operand, left=compute_left_eigenvectors,
                              right=compute_right_eigenvectors)
    if is_real:
      # Real inputs come back in LAPACK's packed conjugate-pair layout;
      # expand them to complex eigenvector matrices.
      unpack = mlir.lower_fun(_unpack_conjugate_pairs, multiple_results=False)
      if compute_left_eigenvectors:
        sub_ctx = ctx.replace(
            primitive=None,
            avals_in=[
                ShapedArray(batch_dims + (n,), complex_dtype),
                ShapedArray(batch_dims + (n, n), dtype),
            ],
            avals_out=[ShapedArray(batch_dims + (n, n), complex_dtype)],
        )
        vl, = unpack(sub_ctx, w, vl)
      if compute_right_eigenvectors:
        sub_ctx = ctx.replace(
            primitive=None,
            avals_in=[
                ShapedArray(batch_dims + (n,), complex_dtype),
                ShapedArray(batch_dims + (n, n), dtype),
            ],
            avals_out=[ShapedArray(batch_dims + (n, n), complex_dtype)],
        )
        vr, = unpack(sub_ctx, w, vr)
  else:
    # Hybrid path: optionally use MAGMA, per config or explicit request.
    magma = config.gpu_use_magma.value
    if implementation is not None:
      magma = "on" if implementation == EigImplementation.MAGMA else "off"
    gpu_solver.initialize_hybrid_kernels()
    if is_real:
      target_name = f"{target_name_prefix}hybrid_eig_real"
      complex_dtype = np.complex64 if dtype == np.float32 else np.complex128
    else:
      target_name = f"{target_name_prefix}hybrid_eig_comp"
      assert dtype == np.complex64 or dtype == np.complex128
      complex_dtype = dtype
    avals_out = [
        ShapedArray(batch_dims + (n,), dtype),
        ShapedArray(batch_dims + (n, n), complex_dtype),
        ShapedArray(batch_dims + (n, n), complex_dtype),
        ShapedArray(batch_dims, np.int32),
    ]
    if is_real:
      # Real inputs yield separate (re, im) eigenvalue arrays.
      avals_out = [ShapedArray(batch_dims + (n,), dtype)] + avals_out
    rule = _linalg_ffi_lowering(target_name, avals_out=avals_out)
    *w, vl, vr, info = rule(ctx, operand, magma=magma,
                            left=compute_left_eigenvectors,
                            right=compute_right_eigenvectors)
    if is_real:
      assert len(w) == 2
      w = hlo.complex(*w)
    else:
      assert len(w) == 1
      w = w[0]
  # info == 0 signals success; failed batch members are replaced with NaNs.
  zeros = mlir.full_like_aval(ctx, 0, ShapedArray(batch_dims, np.int32))
  ok = mlir.compare_hlo(info, zeros, "EQ", "SIGNED")
  w_aval = ShapedArray(batch_dims + (n,), complex_dtype)
  w = _replace_not_ok_with_nan(ctx, batch_dims, ok, w, w_aval)
  output = [w]
  if compute_left_eigenvectors:
    vl_aval = ShapedArray(batch_dims + (n, n), complex_dtype)
    vl = _replace_not_ok_with_nan(ctx, batch_dims, ok, vl, vl_aval)
    output.append(vl)
  if compute_right_eigenvectors:
    vr_aval = ShapedArray(batch_dims + (n, n), complex_dtype)
    vr = _replace_not_ok_with_nan(ctx, batch_dims, ok, vr, vr_aval)
    output.append(vr)
  return output
def eig_jvp_rule(primals, tangents, *, compute_left_eigenvectors,
                 compute_right_eigenvectors, implementation):
  """JVP for eig: eigenvalues only; eigenvector derivatives are unsupported."""
  if compute_left_eigenvectors or compute_right_eigenvectors:
    raise NotImplementedError(
        'The derivatives of non-symmetric eigenvectors are not supported. '
        'Only first-order derivatives of eigenvalues are supported. See '
        'https://github.com/jax-ml/jax/issues/2748 for discussion.')
  # Formula for derivative of eigenvalues w.r.t. a is eqn 4.60 in
  # https://arxiv.org/abs/1701.00392
  a, = primals
  da, = tangents
  l, v = eig(a, compute_left_eigenvectors=False, implementation=implementation)
  return [l], [(_solve(v, da.astype(v.dtype)) * _T(v)).sum(-1)]
# Primitive, JVP, and per-platform lowerings for the nonsymmetric eig.
eig_p = linalg_primitive(
    _eig_dtype_rule, (_float | _complex,), (2,), _eig_shape_rule, "eig",
    multiple_results=True)
ad.primitive_jvps[eig_p] = eig_jvp_rule
mlir.register_lowering(eig_p, _eig_cpu_lowering, platform="cpu")
register_cpu_gpu_lowering(eig_p, _eig_gpu_lowering, ("cuda", "rocm"))
# Symmetric/Hermitian eigendecomposition
def _eigh_shape_rule(shape, *, subset_by_index, **_):
if shape[0] != shape[-1]:
raise ValueError(
"Argument to symmetric eigendecomposition must have shape [..., n, n], "
f"got shape {shape}"
)
n = shape[0]
d = (n if subset_by_index is None else
subset_by_index[1] - subset_by_index[0])
return (n, d), (d,)
def _eigh_dtype_rule(dtype, **_):
  # Eigenvectors keep the input dtype; eigenvalues use its real base dtype.
  return dtype, lax._complex_basetype(dtype)
def _eigh_cpu_gpu_lowering(
    ctx, operand, *, lower, sort_eigenvalues, subset_by_index, algorithm,
    target_name_prefix: str
):
  """CPU/GPU lowering for eigh.

  CPU dispatches to LAPACK syevd/heevd via FFI; GPU to the solver syevd FFI
  kernel. Failed batch members are NaN-filled.
  """
  del sort_eigenvalues  # The CPU/GPU implementations always sort.
  operand_aval, = ctx.avals_in
  v_aval, w_aval = ctx.avals_out
  n = operand_aval.shape[-1]
  if not (subset_by_index is None or subset_by_index == (0, n)):
    raise NotImplementedError("subset_by_index not supported on CPU and GPU")
  batch_dims = operand_aval.shape[:-2]
  if algorithm == EighImplementation.QDWH:
    raise NotImplementedError("QDWH implementation is only supported on TPU")
  if algorithm == EighImplementation.JACOBI and target_name_prefix == "cpu":
    raise NotImplementedError("Jacobi implementation is not supported on CPU")
  if target_name_prefix == "cpu":
    dtype = operand_aval.dtype
    # Hermitian (he) routines for complex dtypes, symmetric (sy) for real.
    prefix = "he" if dtypes.issubdtype(dtype, np.complexfloating) else "sy"
    target_name = lapack.prepare_lapack_call(f"{prefix}evd_ffi",
                                             operand_aval.dtype)
    kwargs = {
        "mode": np.uint8(ord("V")),
        "uplo": np.uint8(ord("L" if lower else "U")),
    }
  else:
    target_name = f"{target_name_prefix}solver_syevd_ffi"
    # Use Jacobi (algorithm=2) if requested, otherwise use QR (algorithm=1)
    if algorithm is None:
      algo_int = 0
    else:
      algo_int = 2 if algorithm == EighImplementation.JACOBI else 1
    kwargs = {"lower": lower, "algorithm": np.uint8(algo_int)}
  info_aval = ShapedArray(batch_dims, np.int32)
  avals_out = [v_aval, w_aval, info_aval]
  # The kernel works in place: the operand buffer is aliased to the output.
  rule = _linalg_ffi_lowering(target_name, avals_out=avals_out,
                              operand_output_aliases={0: 0})
  v, w, info = rule(ctx, operand, **kwargs)
  # info == 0 signals success; failed batch members are replaced with NaNs.
  zeros = mlir.full_like_aval(ctx, 0, info_aval)
  ok = mlir.compare_hlo(info, zeros, "EQ", "SIGNED")
  v = _replace_not_ok_with_nan(ctx, batch_dims, ok, v, v_aval)
  w = _replace_not_ok_with_nan(ctx, batch_dims, ok, w, w_aval)
  return [v, w]
def _eigh_jvp_rule(
    primals, tangents, *, lower, sort_eigenvalues, subset_by_index, algorithm
):
  """JVP of eigh, valid only for inputs with distinct eigenvalues."""
  (a,) = primals
  n = a.shape[-1]
  if not (subset_by_index is None or subset_by_index == (0, n)):
    raise NotImplementedError(
        "Derivatives not defined for partial eigen decomposition."
    )
  # Derivative for eigh in the simplest case of distinct eigenvalues.
  # This is classic nondegenerate perturbation theory, but also see
  # https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf
  # The general solution treating the case of degenerate eigenvalues is
  # considerably more complicated. Ambitious readers may refer to the general
  # methods below or refer to degenerate perturbation theory in physics.
  # https://www.win.tue.nl/analysis/reports/rana06-33.pdf and
  # https://people.orie.cornell.edu/aslewis/publications/99-clarke.pdf
  a_dot, = tangents
  v, w_real = eigh_p.bind(
      symmetrize(a),
      lower=lower,
      sort_eigenvalues=sort_eigenvalues,
      subset_by_index=subset_by_index,
      algorithm=algorithm,
  )
  # for complex numbers we need eigenvalues to be full dtype of v, a:
  w = w_real.astype(a.dtype)
  eye_n = lax._eye(a.dtype, (n, n))
  # carefully build reciprocal delta-eigenvalue matrix, avoiding NaNs.
  # Fmat[i, j] = 1 / (w[j] - w[i]) off the diagonal, and 0 on the diagonal.
  with config.numpy_rank_promotion("allow"):
    Fmat = lax.integer_pow(eye_n + w[..., np.newaxis, :] - w[..., np.newaxis], -1) - eye_n
  # eigh impl doesn't support batch dims, but future-proof the grad.
  dot = partial(lax.dot if a.ndim == 2 else lax.batch_matmul,
                precision=lax.Precision.HIGHEST)
  vdag_adot_v = dot(dot(_H(v), a_dot), v)
  dv = dot(v, Fmat * vdag_adot_v)
  # Eigenvalue tangents are the diagonal of V^H A' V (real for Hermitian A).
  dw = _extract_diagonal(vdag_adot_v.real)
  return (v, w_real), (dv, dw)
# Define the eigh primitive with its dtype/shape rules, JVP, and
# CPU/GPU lowerings.
eigh_p = linalg_primitive(
    _eigh_dtype_rule, (_float | _complex,), (2,), _eigh_shape_rule, "eigh",
    multiple_results=True)
ad.primitive_jvps[eigh_p] = _eigh_jvp_rule
register_cpu_gpu_lowering(eigh_p, _eigh_cpu_gpu_lowering)
# Hessenberg reduction
def _hessenberg_shape_rule(shape, **_):
if shape[0] != shape[-1]:
raise ValueError(
"Argument to Hessenberg reduction must have shape [..., n, n], "
f"got shape {shape}"
)
return shape, shape[:-2] + (shape[-1] - 1,)
def _hessenberg_dtype_rule(dtype, **_):
return dtype, dtype
def _hessenberg_cpu_lowering(ctx, a):
  """Lowers the Hessenberg reduction to LAPACK's gehrd on CPU.

  Returns the packed reduction and the Householder scalars, both replaced by
  NaNs for any batch element whose LAPACK `info` status is nonzero.
  """
  a_aval, = ctx.avals_in
  batch_dims = a_aval.shape[:-2]
  n = a_aval.shape[-1]
  if not core.is_constant_dim(n):
    # Bug fix: `a` is an MLIR value with no `.shape` attribute, so formatting
    # `a.shape` would raise AttributeError; report the aval's shape instead.
    raise ValueError("hessenberg requires the last dimension of a to be "
                     f"constant, got a.shape of {a_aval.shape}.")
  target_name = lapack.prepare_lapack_call("gehrd_ffi", a_aval.dtype)
  # The kernel also returns a per-batch int32 `info` status.
  avals_out = [*ctx.avals_out, ShapedArray(batch_dims, np.int32)]
  rule = _linalg_ffi_lowering(target_name, avals_out=avals_out,
                              operand_output_aliases={0: 0})
  a, taus, info = rule(ctx, a, low=np.int32(1), high=np.int32(n))
  # info == 0 signals success; propagate failures as NaNs.
  ok = mlir.compare_hlo(
      info, mlir.full_like_aval(ctx, 0, ShapedArray(batch_dims, np.dtype(np.int32))),
      "EQ", "SIGNED")
  return [
      _replace_not_ok_with_nan(ctx, batch_dims, ok, a, ctx.avals_out[0]),
      _replace_not_ok_with_nan(ctx, batch_dims, ok, taus, ctx.avals_out[1]),
  ]
# Hessenberg reduction only has a CPU (LAPACK) lowering.
hessenberg_p = linalg_primitive(
    _hessenberg_dtype_rule, (_float | _complex,), (2,), _hessenberg_shape_rule,
    "hessenberg", multiple_results=True)
mlir.register_lowering(hessenberg_p, _hessenberg_cpu_lowering, platform="cpu")
# Householder product
def _householder_product_shape_rule(a_shape, taus_shape, **_):
  """Validates (a, taus) shapes; the product has a's shape."""
  m, n = a_shape
  if m < n:
    raise ValueError(
        "The first argument to householder_product must have at least as many "
        f"rows as columns, got shape {a_shape}")
  num_taus = taus_shape[0]
  # There can be at most min(m, n) elementary reflectors.
  if num_taus > core.min_dim(m, n):
    raise ValueError(
        "The second argument to householder_product must not have more rows "
        "than the minimum of the first argument's rows and columns.")
  return a_shape
def _householder_product_lowering(ctx, a, taus):
  """Default lowering of householder_product to an XLA custom call."""
  aval_out, = ctx.avals_out
  if not is_constant_shape(aval_out.shape):
    # Dynamic output shapes must be passed to the custom call explicitly.
    result_shapes = [
        mlir.eval_dynamic_shape_as_tensor(ctx, aval_out.shape)]
  else:
    result_shapes = None
  op = mlir.custom_call(
      "ProductOfElementaryHouseholderReflectors",
      result_types=[mlir.aval_to_ir_type(aval_out)],
      operands=[a, taus],
      api_version=1,
      result_shapes=result_shapes)
  return [op.result]
def _householder_product_cpu_gpu_lowering(ctx, a, taus, *,
                                          target_name_prefix: str):
  """Lowers householder_product to LAPACK ?orgqr/?ungqr (CPU) or the GPU orgqr kernel."""
  a_aval, _ = ctx.avals_in
  if target_name_prefix == "cpu":
    # LAPACK naming: "un" (unitary) for complex, "or" (orthogonal) for real.
    kind = "un" if dtypes.issubdtype(a_aval.dtype, np.complexfloating) else "or"
    target_name = lapack.prepare_lapack_call(f"{kind}gqr_ffi", a_aval.dtype)
  else:
    target_name = f"{target_name_prefix}solver_orgqr_ffi"
  lowering = _linalg_ffi_lowering(target_name, operand_output_aliases={0: 0})
  return lowering(ctx, a, taus)
# householder_product has a generic custom-call lowering plus CPU/GPU
# FFI lowerings.
householder_product_p = standard_linalg_primitive(
    (_float | _complex, _float | _complex), (2, 1),
    _householder_product_shape_rule, "householder_product")
mlir.register_lowering(householder_product_p, _householder_product_lowering)
register_cpu_gpu_lowering(
    householder_product_p, _householder_product_cpu_gpu_lowering)
# LU decomposition
# Computes a pivoted LU decomposition such that
# PA = LU
# In the style of LAPACK, LU are stored in the same matrix.
def _lu_unblocked(a):
  """Unblocked LU decomposition, as a rolled loop.

  Returns (pivot, perm, a) where `a` holds the packed L and U factors,
  `pivot` is the row chosen at each step, and `perm` is the accumulated
  row permutation.
  """
  m, n = a.shape
  def body(k, state):
    pivot, perm, a = state
    m_idx = lax.iota('int32', m)
    n_idx = lax.iota('int32', n)
    if dtypes.issubdtype(a.dtype, np.complexfloating):
      t = a[:, k]
      # |re| + |im| is a cheap magnitude proxy for complex pivot selection.
      magnitude = abs(t.real) + abs(t.imag)
    else:
      magnitude = abs(a[:, k])
    # Partial pivoting: largest magnitude on or below the diagonal in col k.
    i = lax.argmax(lax.select(m_idx >= k, magnitude, lax.full_like(magnitude, -np.inf)),
                   axis=0, index_dtype=pivot.dtype)
    pivot = pivot.at[k].set(i)
    # Swap rows k and i and record the swap in the permutation.
    a = a.at[[k, i],].set(a[[i, k],])
    perm = perm.at[[i, k],].set(perm[[k, i],])
    # a[k+1:, k] /= a[k, k], adapted for loop-invariant shapes
    x = a[k, k]
    a = a.at[:, k].set(lax.select((m_idx > k) & (x != 0), a[:, k] / x, a[:, k]))
    # a[k+1:, k+1:] -= jnp.outer(a[k+1:, k], a[k, k+1:])
    a_outer = a[:, k, None] * a[k, None]
    a = a - lax.select((m_idx[:, None] > k) & (n_idx[None, :] > k),
                       a_outer, lax._zeros(a_outer))
    return pivot, perm, a
  pivot = lax.full((min(m, n),), 0, dtype=np.int32)
  perm = lax.iota('int32', m)
  if m == 0 and n == 0:
    # If the array is empty, the loop body never executes but tracing it to a
    # jaxpr fails because the indexing cannot succeed.
    return (pivot, perm, a)
  return control_flow.fori_loop(0, min(m, n), body, (pivot, perm, a))
def _lu_blocked(a, block_size=128):
  """Blocked LU decomposition, as an unrolled loop.

  Factors `block_size`-wide column panels with `_lu_unblocked`, then updates
  the trailing submatrix with a triangular solve and a matmul.
  """
  m, n = a.shape
  r = min(m, n)
  pivot = lax.full((r,), 0, dtype=np.int32)
  perm = lax.iota('int32', m)
  for k in range(0, r, block_size):
    b = min(r - k, block_size)
    # Factor the current panel and fold its local pivots/permutation into
    # the global ones (offset by k).
    block_pivot, block_perm, lu_block = _lu_unblocked(a[k:, k:k+b])
    pivot = pivot.at[k:k+b].set(block_pivot + k)
    perm = perm.at[k:].set(perm[block_perm + k])
    a = a.at[k:, :].set(a[block_perm + k, :])
    a = a.at[k:, k:k+b].set(lu_block)
    if k + b < n:
      # Update the rows to the right of the panel, then the trailing block.
      a = a.at[k:k+b, k+b:].set(
          triangular_solve(a[k:k+b, k:k+b], a[k:k+b, k+b:], left_side=True,
                           lower=True, unit_diagonal=True))
      a = a.at[k+b:, k+b:].add(-lax.dot(a[k+b:, k:k+b], a[k:k+b, k+b:],
                                        precision=lax.Precision.HIGHEST))
  return a, pivot, perm
def _lu_python(x):
  """Default LU decomposition in Python, where no better version exists."""
  # Map the unbatched blocked kernel over any leading batch dimensions.
  lu_fn = _lu_blocked
  for _ in x.shape[:-2]:
    lu_fn = api.vmap(lu_fn)
  return lu_fn(x)
def _lu_shape_rule(shape):
  """Outputs: packed LU factors, min(m, n) pivots, and an m-row permutation."""
  m, n = shape
  k = core.min_dim(m, n)
  return shape, (k,), (m,)
def _lu_dtype_rule(dtype, **_):
  """LU factors keep the input dtype; pivots and permutation are int32."""
  int_dtype = dtypes.dtype(np.int32)
  return dtype, int_dtype, int_dtype
def _lu_jvp_inner(lu, a_dot, permutation):
  """Rank-2 JVP of LU: maps the tangent of A to the tangent of the packed LU."""
  # Differentiation of Matrix Functionals Using Triangular Factorization
  # F. R. De Hoog, R. S. Anderssen, and M. A. Lukas
  #
  #     LU = A
  # ==> L'U + LU' = A'
  # ==> inv(L) . L' + U' . inv(U) = inv(L) A' inv(U)
  # ==> L' = L . tril(inv(L) . A' . inv(U), -1)
  #     U' = triu(inv(L) . A' . inv(U)) . U
  a_shape = np.shape(a_dot)
  assert len(a_shape) == 2
  m, n = a_shape
  dtype = lax.dtype(a_dot)
  k = min(m, n)
  # Reassemble a unit-diagonal L (m x m) and a U (n x n) from the packed LU.
  l_padding = [(0, 0, 0)] * 2
  l_padding[-1] = (0, m - k, 0)
  zero = lax._const(lu, 0)
  l = lax.pad(_tril(lu[:, :k], -1), zero, l_padding)
  l = l + lax._eye(dtype, (m, m))
  u_eye = lax.pad(lax._eye(dtype, (n - k, n - k)), zero,
                  ((k, 0, 0), (k, 0, 0)))
  u_padding = [(0, 0, 0)] * 2
  u_padding[-2] = (0, n - k, 0)
  u = lax.pad(_triu(lu[:k, :]), zero, u_padding) + u_eye
  # lau = inv(L) . A' . inv(U), computed with two triangular solves.
  la = triangular_solve(l, a_dot[permutation], left_side=True,
                        transpose_a=False, lower=True, unit_diagonal=True)
  lau = triangular_solve(u, la, left_side=False, transpose_a=False,
                         lower=False)
  with config.default_matmul_precision("highest"):
    l_dot = l @ _tril(lau, -1)
    u_dot = _triu(lau) @ u
  # The tangent of the packed representation is L' + U'.
  return l_dot + u_dot
def _lu_jvp_rule(primals, tangents):
  """JVP of LU: differentiate the factors; pivots/permutation get no tangent."""
  a, = primals
  a_dot, = tangents
  lu, pivots, permutation = lu_p.bind(a)
  # vmap the rank-2 JVP kernel over any leading batch dimensions.
  jvp_fn = _lu_jvp_inner
  for _ in np.shape(a)[:-2]:
    jvp_fn = api.vmap(jvp_fn)
  lu_dot = jvp_fn(lu, a_dot, permutation)
  # Integer outputs are not differentiable; their tangents are symbolic zeros.
  int_tangents = (ad_util.Zero.from_primal_value(pivots),
                  ad_util.Zero.from_primal_value(permutation))
  return (lu, pivots, permutation), (lu_dot,) + int_tangents
def _lu_cpu_gpu_lowering(ctx, operand, *, target_name_prefix: str):
  """Lowers LU to LAPACK getrf (CPU) or the GPU solver's getrf kernel."""
  operand_aval, = ctx.avals_in
  out_aval, pivot_aval, perm_aval = ctx.avals_out
  batch_dims = operand_aval.shape[:-2]
  info_aval = ShapedArray(batch_dims, np.dtype(np.int32))
  m = operand_aval.shape[-2]
  if target_name_prefix == "cpu":
    target_name = lapack.prepare_lapack_call("getrf_ffi", operand_aval.dtype)
  else:
    target_name = f"{target_name_prefix}solver_getrf_ffi"
  rule = _linalg_ffi_lowering(target_name,
                              avals_out=[out_aval, pivot_aval, info_aval],
                              operand_output_aliases={0: 0})
  lu, pivot, info = rule(ctx, operand)
  # Subtract 1 from the pivot to get 0-based indices.
  pivot = hlo.subtract(pivot, mlir.full_like_aval(ctx, 1, pivot_aval))
  # NOTE(review): the success check is "GE" rather than "EQ", presumably
  # because a positive info (singular factor) still yields valid factors
  # while negative info is an argument error — confirm against getrf docs.
  ok = mlir.compare_hlo(info, mlir.full_like_aval(ctx, 0, info_aval),
                        "GE", "SIGNED")
  lu = _replace_not_ok_with_nan(ctx, batch_dims, ok, lu, out_aval)
  # Expand the pivot (row-swap) vector into a full permutation.
  sub_ctx = ctx.replace(primitive=None, avals_in=[pivot_aval],
                        avals_out=[perm_aval])
  perm_fn = mlir.lower_fun(lambda x: lu_pivots_to_permutation(x, m),
                           multiple_results=False)
  perm, = perm_fn(sub_ctx, pivot)
  return [lu, pivot, perm]
def _lu_tpu_lowering_rule(ctx, operand):
  """Lowers LU to the XLA:TPU "LuDecomposition" custom call."""
  out_avals = ctx.avals_out
  result_types = [mlir.aval_to_ir_type(aval) for aval in out_avals]
  if all(is_constant_shape(aval.shape) for aval in out_avals):
    result_shapes = None
  else:
    # Dynamic output shapes must be passed to the custom call explicitly.
    result_shapes = [mlir.eval_dynamic_shape_as_tensor(ctx, aval.shape)
                     for aval in out_avals]
  op = mlir.custom_call(
      "LuDecomposition",
      result_types=result_types,
      operands=[operand],
      result_shapes=result_shapes)
  return op.results
# Define the lu primitive. The pure-JAX implementation is the default
# lowering; TPU/CPU/GPU override it with native kernels.
lu_p = linalg_primitive(
    _lu_dtype_rule, (_float | _complex,), (2,), _lu_shape_rule, "lu",
    multiple_results=True)
ad.primitive_jvps[lu_p] = _lu_jvp_rule
mlir.register_lowering(lu_p, mlir.lower_fun(_lu_python, multiple_results=True))
mlir.register_lowering(lu_p, _lu_tpu_lowering_rule, platform='tpu')
register_cpu_gpu_lowering(lu_p, _lu_cpu_gpu_lowering)
def lu_solve(lu: ArrayLike, permutation: ArrayLike, b: ArrayLike,
             trans: int = 0) -> Array:
  """LU solve with broadcasting.

  Args:
    lu: the packed LU factors, as produced by the ``lu`` primitive.
    permutation: the row permutation, as produced by the ``lu`` primitive.
    b: the right-hand side(s) to solve for.
    trans: 0 to solve with the matrix itself, 1 with its transpose, or 2
      with its conjugate transpose.

  Returns:
    The solution of the linear system.
  """
  return _lu_solve(lu, permutation, b, trans)
def _lu_solve_core(lu: Array, permutation: Array, b: Array, trans: int) -> Array:
  """Unbatched LU solve: two triangular solves plus the row permutation."""
  m = lu.shape[0]
  # Flatten trailing dimensions of b into a single RHS-column dimension.
  x = lax.reshape(b, (m, math.prod(b.shape[1:])))
  if trans == 0:
    # Solve A x = b: permute rows first, then L then U.
    x = x[permutation, :]
    x = triangular_solve(lu, x, left_side=True, lower=True, unit_diagonal=True)
    x = triangular_solve(lu, x, left_side=True, lower=False)
  elif trans == 1 or trans == 2:
    # Solve A^T x = b (trans=1) or A^H x = b (trans=2): factors in reverse
    # order, transposed, then undo the permutation at the end.
    conj = trans == 2
    x = triangular_solve(lu, x, left_side=True, lower=False, transpose_a=True,
                         conjugate_a=conj)
    x = triangular_solve(lu, x, left_side=True, lower=True, unit_diagonal=True,
                         transpose_a=True, conjugate_a=conj)
    # Invert the permutation by sorting it alongside an iota.
    _, ind = lax.sort_key_val(permutation, lax.iota('int32', permutation.shape[0]))
    x = x[ind, :]
  else:
    raise ValueError(f"'trans' value must be 0, 1, or 2, got {trans}")
  return lax.reshape(x, b.shape)
@api.jit(static_argnums=(3,))
def _lu_solve(lu: Array, permutation: Array, b: Array, trans: int) -> Array:
  """Shape-checks, broadcasts, and vmaps `_lu_solve_core` over batch dims."""
  if len(lu.shape) < 2 or lu.shape[-1] != lu.shape[-2]:
    raise ValueError("last two dimensions of LU decomposition must be equal, "
                     "got shape {}".format(lu.shape))
  if len(b.shape) < 1:
    raise ValueError("b matrix must have rank >= 1, got shape {}"
                     .format(b.shape))
  # Broadcasting follows NumPy's convention for linalg.solve: the RHS is
  # treated as a (batched) vector if the number of dimensions differ by 1.
  # Otherwise, broadcasting rules apply.
  rhs_vector = lu.ndim == b.ndim + 1
  if rhs_vector:
    if b.shape[-1] != lu.shape[-1]:
      raise ValueError("When LU decomposition matrix and b have the same "
                       "number of dimensions, last axis of LU decomposition "
                       "matrix (shape {}) and b array (shape {}) must match"
                       .format(lu.shape, b.shape))
    # Treat the vector RHS as a single-column matrix; strip the column below.
    b = b[..., np.newaxis]
  else:
    if b.shape[-2] != lu.shape[-1]:
      raise ValueError("When LU decomposition matrix and b different "
                       "numbers of dimensions, last axis of LU decomposition "
                       "matrix (shape {}) and second to last axis of b array "
                       "(shape {}) must match"
                       .format(lu.shape, b.shape))
  # Broadcast all operands to a common batch shape, then vmap the core solve
  # once per batch dimension.
  batch_shape = lax.broadcast_shapes(lu.shape[:-2], permutation.shape[:-1], b.shape[:-2])
  lu = _broadcast_to(lu, (*batch_shape, *lu.shape[-2:]))
  permutation = _broadcast_to(permutation, (*batch_shape, permutation.shape[-1]))
  b = _broadcast_to(b, (*batch_shape, *b.shape[-2:]))
  fn = _lu_solve_core
  for _ in batch_shape:
    fn = api.vmap(fn, in_axes=(0, 0, 0, None))
  x = fn(lu, permutation, b, trans)
  return x[..., 0] if rhs_vector else x
# Support operation for LU decomposition: Transformation of the pivots returned
# by LU decomposition into permutations.
# Define this outside lu_pivots_to_permutation to ensure fori_loop cache hits
def _lu_pivots_body_fn_inner(i, permutation, swaps):
  """Applies the i-th row swap to an (unbatched) permutation vector."""
  j = swaps[i]
  # Exchange entries i and j of the permutation.
  pi, pj = permutation[i], permutation[j]
  permutation = permutation.at[i].set(pj)
  return permutation.at[j].set(pi)
def _lu_pivots_body_fn(i, permutation_and_swaps):
  """fori_loop body: apply the i-th swap across every batch element."""
  permutation, swaps = permutation_and_swaps
  # vmap the scalar kernel over the leading batch dims; the loop index is
  # shared across the batch (in_axes=None).
  swap_fn = _lu_pivots_body_fn_inner
  for _ in swaps.shape[:-1]:
    swap_fn = api.vmap(swap_fn, in_axes=(None, 0, 0), out_axes=0)
  return swap_fn(i, permutation, swaps), swaps
def _generic_lu_pivots_to_permutation(swaps, permutation_size):
  """Converts the pivots (row swaps) returned by LU to a permutation.

  We build a permutation rather than applying `swaps` directly to the rows
  of a matrix because lax loops aren't differentiable.

  Args:
    swaps: an array of shape (..., k) of row swaps to perform
    permutation_size: the size of the output permutation. Should be >= k.

  Returns:
    An int32 array of shape (..., m).
  """
  assert len(swaps.shape) >= 1
  batch_dims = swaps.shape[:-1]
  swaps_sharding = core.typeof(swaps).sharding
  batch_spec = swaps_sharding.spec[:-1]
  # Idiom fix: compare against None with `is not`, not `!=` (PEP 8 E711).
  if swaps_sharding.spec[-1] is not None:
    raise ValueError(
        "The last dim of swaps should be unsharded but got:"
        f" {swaps_sharding.spec[-1]} for type {core.typeof(swaps)}")
  # The result keeps the batch sharding and is replicated along its
  # trailing (permutation) dimension.
  permutation_sharding = swaps_sharding.update(spec=batch_spec + (None,))
  k = swaps.shape[-1]
  m = permutation_size
  # Start from the identity permutation for each batch element.
  permutation = lax.broadcasted_iota(
      np.int32, batch_dims + (m,), len(batch_dims),
      out_sharding=permutation_sharding)
  if m == 0 or k == 0:
    return permutation
  upper = np.array(k, np.int32) if is_constant_dim(k) else k
  permutation, swaps = core.standard_insert_pvary(permutation, swaps)
  result, _ = control_flow.fori_loop(np.array(0, np.int32), upper,
                                     _lu_pivots_body_fn, (permutation, swaps))
  return result
def _lu_pivots_to_permutation_shape_rule(shape, *, permutation_size):
pivots_size, = shape
if not permutation_size >= pivots_size:
raise ValueError(
f"Output permutation size {permutation_size} has to exceed the "
f"trailing dimension of the pivots. Got pivots size {pivots_size}")
return (permutation_size,)
def _lu_pivots_to_permutation_gpu_lowering(ctx, pivots, *,
                                           permutation_size,
                                           target_name_prefix):
  """Lowers lu_pivots_to_permutation to the GPU FFI kernel (row-major, rank-1)."""
  del permutation_size  # unused
  target = f"{target_name_prefix}_lu_pivots_to_permutation"
  lowering = _linalg_ffi_lowering(target, num_non_batch_dims=1,
                                  column_major=False)
  return lowering(ctx, pivots)
# The generic JAX implementation is the default lowering; CUDA/ROCm use a
# dedicated kernel.
lu_pivots_to_permutation_p = standard_linalg_primitive(
    ({np.int32},), (1,), _lu_pivots_to_permutation_shape_rule,
    "lu_pivots_to_permutation")
mlir.register_lowering(
    lu_pivots_to_permutation_p,
    mlir.lower_fun(_generic_lu_pivots_to_permutation, multiple_results=False))
register_cpu_gpu_lowering(
    lu_pivots_to_permutation_p, _lu_pivots_to_permutation_gpu_lowering,
    ("cuda", "rocm"))
# QR decomposition
# QR decomposition is implemented as a composition of two lower-level primitives
# geqrf and orgqr. The names, while cryptic Fortran alphabet soup, are LAPACK's
# names for the primitives, and we stick with them for consistency.
def geqrf(a: ArrayLike) -> tuple[Array, Array]:
  """Computes the QR decomposition of a matrix.

  Args:
    a: an ``[..., m, n]`` batch of matrices, with floating-point or complex type.

  Returns:
    An ``(a, taus)`` pair where ``r`` is in the upper triangle of ``a``,
    ``q`` is represented in the lower triangle of ``a`` and in ``taus`` as
    elementary Householder reflectors.
  """
  return geqrf_p.bind(a)
def _geqrf_shape_rule(shape):
  """Outputs: the packed QR factors (same shape) and min(m, n) tau scalars."""
  m, n = shape
  return shape, (core.min_dim(m, n),)
def _geqrf_dtype_rule(dtype):
return dtype, dtype
def _geqrf_lowering_rule(ctx, operand):
  """Default lowering of geqrf to the XLA "Qr" custom call."""
  ts_type = mlir.aval_to_ir_type(ctx.avals_out[0])
  r_type = mlir.aval_to_ir_type(ctx.avals_out[1])
  result_types = [ts_type, r_type]
  if any(not is_constant_shape(aval_out.shape)
         for aval_out in ctx.avals_out):
    # Dynamic output shapes must be passed to the custom call explicitly.
    result_shapes = [
        mlir.eval_dynamic_shape_as_tensor(ctx, aval_out.shape)
        for aval_out in ctx.avals_out
    ]
  else:
    result_shapes = None
  op = mlir.custom_call(
      "Qr",
      result_types=result_types,
      operands=[operand],
      api_version=1,
      result_shapes=result_shapes
  )
  return op.results
def _geqrf_cpu_gpu_lowering(ctx, a, *, target_name_prefix: str):
  """Lowers geqrf to LAPACK geqrf (CPU) or the GPU solver's geqrf kernel."""
  a_aval, = ctx.avals_in
  if target_name_prefix == "cpu":
    target_name = lapack.prepare_lapack_call("geqrf_ffi", a_aval.dtype)
  else:
    target_name = f"{target_name_prefix}solver_geqrf_ffi"
  # The factorization overwrites the input buffer in place.
  lowering = _linalg_ffi_lowering(target_name, operand_output_aliases={0: 0})
  return lowering(ctx, a)
# geqrf: generic custom-call lowering plus CPU/GPU FFI lowerings.
geqrf_p = linalg_primitive(
    _geqrf_dtype_rule, (_float | _complex,), (2,), _geqrf_shape_rule, "geqrf",
    multiple_results=True)
mlir.register_lowering(geqrf_p, _geqrf_lowering_rule)
register_cpu_gpu_lowering(geqrf_p, _geqrf_cpu_gpu_lowering)
def geqp3(a: ArrayLike, jpvt: ArrayLike, *,
          use_magma: bool | None = None) -> tuple[Array, Array, Array]:
  """Computes the column-pivoted QR decomposition of a matrix.

  Args:
    a: a ``[..., m, n]`` batch of matrices, with floating-point or complex type.
    jpvt: a ``[..., n]`` batch of column-pivot index vectors with integer type,
    use_magma: Locally override the ``jax_use_magma`` flag. If ``True``, the
      `geqp3` is computed using MAGMA. If ``False``, the computation is done using
      LAPACK on to the host CPU. If ``None`` (default), the behavior is controlled
      by the ``jax_use_magma`` flag. This argument is only used on GPU.

  Returns:
    A ``(a, jpvt, taus)`` triple, where ``r`` is in the upper triangle of ``a``,
    ``q`` is represented in the lower triangle of ``a`` and in ``taus`` as
    elementary Householder reflectors, and ``jpvt`` is the column-pivot indices
    such that ``a[:, jpvt] = q @ r``.
  """
  # Align the pvary (varying-axes) metadata of both operands before binding.
  a, jpvt = core.standard_insert_pvary(a, jpvt)
  return geqp3_p.bind(a, jpvt, use_magma=use_magma)
def _geqp3_shape_rule(a_shape, jpvt_shape, **_):
  """Outputs: packed factors, pivot vector, and min(m, n) tau scalars."""
  m, n = a_shape
  return a_shape, jpvt_shape, (core.min_dim(m, n),)
def _geqp3_dtype_rule(dtype, jpvt_dtype, *_, **__):
return dtype, jpvt_dtype, dtype
def _geqp3_cpu_gpu_lowering(ctx, a, jpvt, *, use_magma, target_name_prefix):
  """Lowers geqp3 to LAPACK (CPU) or the hybrid MAGMA/solver kernel (GPU)."""
  a_aval, _ = ctx.avals_in
  if target_name_prefix == "cpu":
    target_name = lapack.prepare_lapack_call("geqp3_ffi", a_aval.dtype)
    params = {}
  else:
    gpu_solver.initialize_hybrid_kernels()
    magma = config.gpu_use_magma.value
    target_name = f"{target_name_prefix}hybrid_geqp3"
    if use_magma is not None:
      # A per-call use_magma overrides the global jax_use_magma flag.
      magma = "on" if use_magma else "off"
    params = {"magma": magma}
  # Both the matrix and pivot buffers are updated in place.
  rule = _linalg_ffi_lowering(target_name, operand_output_aliases={0: 0, 1: 1})
  return rule(ctx, a, jpvt, **params)
# geqp3 takes operands of different dtypes (matrix + int pivots), hence
# require_same=False.
geqp3_p = linalg_primitive(
    _geqp3_dtype_rule, (_float | _complex, _int), (2, 1),
    _geqp3_shape_rule, "geqp3", multiple_results=True, require_same=False)
register_cpu_gpu_lowering(geqp3_p, _geqp3_cpu_gpu_lowering)
def _qr_shape_rule(shape, *, pivoting, full_matrices, **_):
  """Shapes of (q, r[, p]) for an m x n input."""
  m, n = shape
  k = m if full_matrices else core.min_dim(m, n)
  out = ((m, k), (k, n))
  return out + ((n,),) if pivoting else out
def _qr_dtype_rule(dtype, *, pivoting, **_):
  """q and r share the input dtype; the optional pivot vector is int32."""
  if pivoting:
    return dtype, dtype, dtypes.dtype(np.int32)
  return dtype, dtype
def qr_jvp_rule(primals, tangents, *, pivoting, full_matrices, use_magma):
  """JVP of QR; only defined for m >= n with full_matrices=False (or m == n)."""
  # See j-towns.github.io/papers/qr-derivative.pdf for a terse derivation.
  x, = primals
  dx, = tangents
  q, r, *p = qr_p.bind(x, pivoting=pivoting, full_matrices=False, use_magma=use_magma)
  *_, m, n = x.shape
  if m < n or (full_matrices and m != n):
    raise NotImplementedError(
        "Unimplemented case of QR decomposition derivative")
  if pivoting:
    # Permute the tangent columns the same way the primal was permuted.
    dx = dx[..., p[0]]
  dx_rinv = triangular_solve(r, dx)  # Right side solve by default
  qt_dx_rinv = _H(q) @ dx_rinv
  qt_dx_rinv_lower = _tril(qt_dx_rinv, -1)
  do = qt_dx_rinv_lower - _H(qt_dx_rinv_lower)  # This is skew-symmetric
  # The following correction is necessary for complex inputs
  I = lax.expand_dims(lax._eye(do.dtype, (n, n)), range(qt_dx_rinv.ndim - 2))
  do = do + I * (qt_dx_rinv - qt_dx_rinv.real.astype(qt_dx_rinv.dtype))
  dq = q @ (do - qt_dx_rinv) + dx_rinv
  dr = (qt_dx_rinv - do) @ r
  if pivoting:
    # Pivot indices are integer outputs; their tangent is a symbolic zero.
    dp = ad_util.Zero.from_primal_value(p[0])
    return (q, r, p[0]), (dq, dr, dp)
  return (q, r), (dq, dr)
def _qr_lowering(a, *, pivoting, full_matrices, use_magma):
  """Implements QR via geqrf/geqp3 followed by householder_product."""
  *batch_dims, m, n = a.shape
  if m == 0 or n == 0:
    # Degenerate case: q is a (possibly rectangular) identity, r is zeros.
    k = m if full_matrices else core.min_dim(m, n)
    q = lax.broadcast_in_dim(lax._eye(a.dtype, (m, k)),
                             (*batch_dims, m, k),
                             (len(batch_dims), len(batch_dims) + 1))
    r = lax.full((*batch_dims, k, n), 0, dtype=a.dtype)
    if pivoting:
      p = lax.full((*batch_dims, n), 0, dtype=np.dtype(np.int32))
      return q, r, p
    return q, r
  if pivoting:
    jpvt = lax.full((*batch_dims, n), 0, dtype=np.dtype(np.int32))
    r, p, taus = geqp3(a, jpvt, use_magma=use_magma)
    p -= 1  # Convert geqp3's 1-based indices to 0-based indices by subtracting 1.
  else:
    r, taus = geqrf(a)
  if m < n:
    # Wide case: q is built from the leading m x m block of the factors.
    q = householder_product(r[..., :m, :m], taus)
  elif full_matrices:
    # Pad the packed factors to m x m before materializing the full q.
    pads = [(0, 0, 0)] * (len(batch_dims) + 1) + [(0, m - n, 0)]
    q = lax.pad(r, lax._zero(r), pads)
    q = householder_product(q, taus)
  else:
    q = householder_product(r, taus)
    r = r[..., :n, :n]
  # r is the upper triangle of the packed representation.
  r = _triu(r)
  if pivoting:
    return q, r, p
  return q, r
# qr is lowered entirely in terms of the geqrf/geqp3/householder_product
# primitives above.
qr_p = linalg_primitive(
    _qr_dtype_rule, (_float | _complex,), (2,), _qr_shape_rule, "qr",
    multiple_results=True)
ad.primitive_jvps[qr_p] = qr_jvp_rule
mlir.register_lowering(qr_p, mlir.lower_fun(_qr_lowering))
# Schur Decomposition
def _schur_shape_rule(shape, *, compute_schur_vectors, **_):
if shape[0] != shape[1]:
raise ValueError(
f"The input to schur must be a square matrix. Got shape {shape}.")
return (shape, shape) if compute_schur_vectors else (shape,)
def _schur_dtype_rule(dtype, *, compute_schur_vectors, **_):
return (dtype, dtype) if compute_schur_vectors else (dtype,)
def _schur_cpu_lowering(ctx, operand, *, compute_schur_vectors, sort_eig_vals,
                        select_callable):
  """Lowers the Schur decomposition to LAPACK's gees routine on CPU."""
  del select_callable  # unused
  if sort_eig_vals:
    raise NotImplementedError(
        "The sort feature of LAPACK's gees routine is not implemented.")
  operand_aval, = ctx.avals_in
  batch_dims = operand_aval.shape[:-2]
  real = operand_aval.dtype == np.float32 or operand_aval.dtype == np.float64
  target_name = lapack.prepare_lapack_call("gees_ffi", operand_aval.dtype)
  info_aval = ShapedArray(batch_dims, np.dtype(np.int32))
  eigvals_aval = ShapedArray(operand_aval.shape[:-1], operand_aval.dtype)
  # The real routine returns separate real/imaginary eigenvalue arrays;
  # the complex routine returns a single complex array.
  if real:
    avals_out = [operand_aval, operand_aval, eigvals_aval, eigvals_aval,
                 info_aval, info_aval]
  else:
    avals_out = [operand_aval, operand_aval, eigvals_aval, info_aval, info_aval]
  mode = (
      lapack.schur.ComputationMode.kComputeSchurVectors
      if compute_schur_vectors
      else lapack.schur.ComputationMode.kNoComputeSchurVectors
  )
  rule = _linalg_ffi_lowering(target_name, avals_out=avals_out,
                              operand_output_aliases={0: 0})
  schur_form, schur_vectors, *_, info = rule(
      ctx, operand, mode=_enum_attr(mode),
      sort=_enum_attr(lapack.schur.Sort.kNoSortEigenvalues))
  # info == 0 signals success; propagate failures as NaNs.
  ok = mlir.compare_hlo(
      info, mlir.full_like_aval(ctx, 0, ShapedArray(batch_dims, np.dtype(np.int32))),
      "EQ", "SIGNED")
  schur_form = _replace_not_ok_with_nan(ctx, batch_dims, ok, schur_form,
                                        ctx.avals_out[0])
  output = [schur_form]
  if compute_schur_vectors:
    schur_vectors = _replace_not_ok_with_nan(ctx, batch_dims, ok, schur_vectors,
                                             ctx.avals_out[1])
    output.append(schur_vectors)
  return output
# Schur decomposition is only available on CPU (via LAPACK).
schur_p = linalg_primitive(
    _schur_dtype_rule, (_float | _complex,), (2,), _schur_shape_rule, "schur",
    multiple_results=True)
mlir.register_lowering(schur_p, _schur_cpu_lowering, platform="cpu")
# Singular value decomposition
def _svd_shape_rule(shape, *, full_matrices, compute_uv, subset_by_index, **_):
  """Shapes of (s[, u, vt]) for an m x n input."""
  m, n = shape
  rank = core.min_dim(m, n)
  if subset_by_index is not None:
    if full_matrices and subset_by_index != (0, rank):
      raise ValueError("full_matrices and subset_by_index cannot both be set")
    # A subset request can only shrink the number of singular values.
    rank = core.min_dim(rank, subset_by_index[1] - subset_by_index[0])
  if not compute_uv:
    return (rank,),
  u_cols = m if full_matrices else rank
  vt_rows = n if full_matrices else rank
  return (rank,), (m, u_cols), (vt_rows, n)
def _svd_dtype_rule(dtype, *, compute_uv, **_):
  """Singular values are always real; u and vt keep the input dtype."""
  real_dtype = lax._complex_basetype(dtype)
  return (real_dtype, dtype, dtype) if compute_uv else (real_dtype,)
@config.default_matmul_precision("float32")
def _svd_jvp_rule(
    primals, tangents, *, full_matrices, compute_uv, subset_by_index,
    algorithm=None,
):
  """JVP of SVD, valid for distinct, nonzero singular values."""
  A, = primals
  dA, = tangents
  s, U, Vt = svd_p.bind(
      A, full_matrices=False, compute_uv=True, subset_by_index=subset_by_index,
      algorithm=algorithm,
  )
  if (
      compute_uv
      and full_matrices
      and not core.definitely_equal(A.shape[-2], A.shape[-1])
  ):
    # TODO: implement full matrices case, documented here: https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf
    raise NotImplementedError(
        "Singular value decomposition JVP not implemented for full matrices")
  Ut, V = _H(U), _H(Vt)
  s_dim = s[..., None, :]
  dS = Ut @ dA @ V
  # Singular-value tangents are the real diagonal of U^H dA V.
  ds = _extract_diagonal(dS.real)
  if not compute_uv:
    return (s,), (ds,)
  # F[i, j] = 1 / (s[j]^2 - s[i]^2) off the diagonal and 0 on it, computed
  # with a zero-avoiding reciprocal.
  s_diffs = (s_dim + _T(s_dim)) * (s_dim - _T(s_dim))
  s_diffs_zeros = lax._eye(s.dtype, (s.shape[-1], s.shape[-1]))  # jnp.ones((), dtype=A.dtype) * (s_diffs == 0.) # is 1. where s_diffs is 0. and is 0. everywhere else
  s_diffs_zeros = lax.expand_dims(s_diffs_zeros, range(s_diffs.ndim - 2))
  F = 1 / (s_diffs + s_diffs_zeros) - s_diffs_zeros
  dSS = s_dim.astype(A.dtype) * dS  # dS.dot(jnp.diag(s))
  SdS = _T(s_dim.astype(A.dtype)) * dS  # jnp.diag(s).dot(dS)
  # Zero-avoiding reciprocal of the singular values.
  s_zeros = (s == 0).astype(s.dtype)
  s_inv = 1 / (s + s_zeros) - s_zeros
  s_inv_mat = _construct_diagonal(s_inv)
  dUdV_diag = .5 * (dS - _H(dS)) * s_inv_mat.astype(A.dtype)
  dU = U @ (F.astype(A.dtype) * (dSS + _H(dSS)) + dUdV_diag)
  dV = V @ (F.astype(A.dtype) * (SdS + _H(SdS)))
  # Rectangular corrections: the components of dU (resp. dV) outside the
  # span of U (resp. V).
  m, n = A.shape[-2:]
  if m > n:
    dAV = dA @ V
    dU = dU + (dAV - U @ (Ut @ dAV)) / s_dim.astype(A.dtype)
  if n > m:
    dAHU = _H(dA) @ U
    dV = dV + (dAHU - V @ (Vt @ dAHU)) / s_dim.astype(A.dtype)
  return (s, U, Vt), (ds, dU, _H(dV))
def _empty_svd(a, *, full_matrices, compute_uv):
  """SVD of a matrix with an empty (zero-sized) trailing dimension."""
  batch_shape = a.shape[:-2]
  m, n = a.shape[-2:]
  # There are no singular values; s is an empty vector.
  s = lax.full(batch_shape + (0,), 0, dtype=lax._complex_basetype(a.dtype))
  if not compute_uv:
    return (s,)
  if full_matrices:
    size = max(m, n)
    u = lax.broadcast_in_dim(lax._eye(a.dtype, (size, size)),
                             (*batch_shape, size, size),
                             (len(batch_shape), len(batch_shape) + 1))
  else:
    u = lax.full(batch_shape + (m, n), 0, dtype=a.dtype)
  v = lax.full(batch_shape + (0, 0), 0, dtype=a.dtype)
  if m < n:
    # The nontrivial factor belongs on the vt side when m < n.
    u, v = v, u
  return s, u, v
def _svd_computation_attr(compute_uv, full_matrices):
  """Maps (compute_uv, full_matrices) to the SVD job-character attribute."""
  if full_matrices is None:
    full_matrices = True
  if not compute_uv:
    mode = "N"  # singular values only
  elif full_matrices:
    mode = "A"  # full u and vt
  else:
    mode = "S"  # thin u and vt
  return _char_attr(mode)
def _svd_cpu_gpu_lowering(
    ctx,
    operand,
    *,
    full_matrices,
    compute_uv,
    subset_by_index,
    target_name_prefix: str,
    algorithm=None,
):
  """Lowers SVD to LAPACK gesdd/gesvd (CPU) or the GPU solver kernels."""
  operand_aval, = ctx.avals_in
  s_aval = ctx.avals_out[0]
  m, n = operand_aval.shape[-2:]
  batch_dims = operand_aval.shape[:-2]
  if not (subset_by_index is None or subset_by_index == (0, min(m, n))):
    raise NotImplementedError("subset_by_index not implemented for CPU and GPU")
  if m == 0 or n == 0:
    # Zero-sized matrices are handled entirely in JAX.
    return mlir.lower_fun(_empty_svd, multiple_results=True)(
        ctx,
        operand,
        full_matrices=full_matrices,
        compute_uv=compute_uv,
    )
  if target_name_prefix == "cpu":
    if algorithm is None or algorithm == SvdAlgorithm.DEFAULT:
      # gesdd is the divide-and-conquer driver; gesvd is the QR-based one.
      target_name = lapack.prepare_lapack_call("gesdd_ffi", operand_aval.dtype)
    elif algorithm == SvdAlgorithm.QR:
      target_name = lapack.prepare_lapack_call("gesvd_ffi", operand_aval.dtype)
    else:
      raise NotImplementedError(
          "The SVD Jacobi and Polar algorithms are not implemented on CPU.")
    mode = _svd_computation_attr(compute_uv, full_matrices)
    info_aval = ShapedArray(batch_dims, np.dtype(np.int32))
    if compute_uv:
      s_aval, u_aval, vt_aval = ctx.avals_out
    else:
      s_aval, = ctx.avals_out
      # TODO(danfm): It should be possible to skip instantiating these arrays
      # when they are not used.
      u_aval = ShapedArray((*batch_dims, m,
                            m if full_matrices else core.min_dim(m, n)),
                           operand_aval.dtype)
      vt_aval = ShapedArray((*batch_dims,
                             n if full_matrices else core.min_dim(m, n), n),
                            operand_aval.dtype)
    avals_out = [operand_aval, s_aval, u_aval, vt_aval, info_aval]
    rule = _linalg_ffi_lowering(target_name, avals_out=avals_out,
                                operand_output_aliases={0: 0})
    _, s, u, vt, info = rule(ctx, operand, mode=mode)
  else:
    s, u, vt, info = _svd_gpu_sub_lowering(ctx, operand,
                                           full_matrices=full_matrices,
                                           compute_uv=compute_uv,
                                           target_name_prefix=target_name_prefix,
                                           algorithm=algorithm)
  # info == 0 signals success; propagate failures as NaNs.
  zeros = mlir.full_like_aval(ctx, 0, ShapedArray(batch_dims, np.dtype(np.int32)))
  ok = mlir.compare_hlo(info, zeros, "EQ", "SIGNED")
  s = _replace_not_ok_with_nan(ctx, batch_dims, ok, s, s_aval)
  result = [s]
  if compute_uv:
    u_aval, vt_aval = ctx.avals_out[1:]
    u = _replace_not_ok_with_nan(ctx, batch_dims, ok, u, u_aval)
    vt = _replace_not_ok_with_nan(ctx, batch_dims, ok, vt, vt_aval)
    result += [u, vt]
  return result
def _svd_gpu_sub_lowering(ctx, operand, *, full_matrices, compute_uv,
                          target_name_prefix, algorithm):
  """GPU-side SVD lowering: chooses between gesvd/gesvdj/gesvdp kernels.

  Returns (s, u, vt, info) in the layout expected by `_svd_cpu_gpu_lowering`.
  """
  operand_aval, = ctx.avals_in
  if compute_uv:
    s_aval, u_aval, vt_aval = ctx.avals_out
  else:
    s_aval, = ctx.avals_out
    u_aval = vt_aval = ShapedArray((), operand_aval.dtype)
  batch_dims = operand_aval.shape[:-2]
  info_aval = ShapedArray(batch_dims, np.dtype(np.int32))
  nb = len(batch_dims)
  m, n = operand_aval.shape[-2:]
  k = core.min_dim(m, n)
  transposed = False
  kwargs = {}
  # The Jacobi algorithm appears to outperform the default QR algorithm for
  # small to medium sized matrices. See:
  # https://developer.download.nvidia.com/video/gputechconf/gtc/2019/presentation/s9226-fast-singular-value-decomposition-on-gpus-v2.pdf
  # slide 5. With this in mind, we default to using the Jacobi algorithm for
  # matrices smaller than 1024x1024.
  #
  # Note that the Jacobi algorithm is only used by default for matrices with
  # concrete matrix dimensions. When using dynamic shapes, we always use the
  # default QR algorithm, but users can (in principle) override this behavior
  # by passing `use_jacobi=True`.
  #
  # TODO(danfm): Since this was originally implemented, hipSolver appears to
  # have added support for the Jacobi algorithm, so we should investigate
  # removing this condition.
  # TODO(phawkins): Consider making polar decomposition the default.
  use_jacobi = False
  use_polar = False
  if algorithm is None or algorithm == SvdAlgorithm.DEFAULT:
    try:
      use_jacobi = target_name_prefix == "cu" and m <= 1024 and n <= 1024
    except core.InconclusiveDimensionOperation:
      # Symbolic dims cannot be compared; fall back to the QR algorithm.
      use_jacobi = False
  elif algorithm == SvdAlgorithm.JACOBI:
    use_jacobi = True
  elif algorithm == SvdAlgorithm.POLAR:
    use_polar = True
  column_major = True
  if use_jacobi:
    target_name = f"{target_name_prefix}solver_gesvdj_ffi"
    # The gesvdjbatched kernel doesn't support "econ" mode, but it also only
    # supports matrices up to 32x32, so it's always worth using the batched
    # version and then slicing afterwards when the matrix is small enough.
    try:
      econ = not full_matrices and m > 32 and n > 32
    except core.InconclusiveDimensionOperation:
      econ = False
  elif use_polar:
    target_name = f"{target_name_prefix}solver_gesvdp_ffi"
    econ = not full_matrices
  else:
    target_name = f"{target_name_prefix}solver_gesvd_ffi"
    econ = not full_matrices
    # Because the base gesvd kernel only supports matrices where m >= n, we
    # conceptually transpose the matrix if m < n.
    transposed = m < n
    kwargs = {"transposed": transposed}
    if transposed:
      column_major = False
  if use_jacobi or use_polar:
    # When using the Jacobi or polar algorithms, the U and V matrices must
    # always be allocated even if compute_uv is False.
    u_aval = ShapedArray((*batch_dims, m, k if econ else m), u_aval.dtype)
    v_aval = ShapedArray((*batch_dims, n, k if econ else n), vt_aval.dtype)
    avals_out = [operand_aval, s_aval, u_aval, v_aval, info_aval]
  elif transposed:
    # The transposed solve swaps the roles of the u and vt outputs.
    avals_out = [operand_aval, s_aval, vt_aval, u_aval, info_aval]
  else:
    avals_out = [operand_aval, s_aval, u_aval, vt_aval, info_aval]
  rule = _linalg_ffi_lowering(target_name, avals_out=avals_out,
                              operand_output_aliases={0: 0},
                              column_major=column_major)
  _, s, u, vt, info = rule(ctx, operand, full_matrices=not econ,
                           compute_uv=compute_uv, **kwargs)
  if (use_jacobi or use_polar) and compute_uv:
    # These kernels return V rather than V^T (or V^H): transpose, and
    # conjugate for complex inputs.
    vt = hlo.transpose(
        vt,
        mlir.dense_int_array(np.array(tuple(range(nb)) + (nb + 1, nb))))
    if np.issubdtype(operand_aval.dtype, np.complexfloating):
      vt = hlo.complex(hlo.real(vt), hlo.negate(hlo.imag(vt)))
    if not full_matrices and not econ:
      # The non-econ kernel produced full factors; slice down to thin ones.
      nd = len(operand_aval.shape)
      u = mlir.slice_op(ctx, u, ctx.avals_out[1],
                        start_indices=np.zeros([nd], np.int64),
                        limit_indices=batch_dims + (m, k),
                        strides=np.ones([nd], np.int64))
      vt = mlir.slice_op(ctx, vt, ctx.avals_out[2],
                         start_indices=np.zeros([nd], np.int64),
                         limit_indices=batch_dims + (k, n),
                         strides=np.ones([nd], np.int64))
  if transposed:
    return s, vt, u, info
  else:
    return s, u, vt, info
# Define the svd primitive with its JVP and CPU/GPU lowerings.
svd_p = linalg_primitive(
    _svd_dtype_rule, (_float | _complex,), (2,), _svd_shape_rule, "svd",
    multiple_results=True)
ad.primitive_jvps[svd_p] = _svd_jvp_rule
register_cpu_gpu_lowering(svd_p, _svd_cpu_gpu_lowering)
# Symmetric product
def _symmetric_product_shape_rule(a_shape, c_shape, **_):
if a_shape[0] != c_shape[1] or c_shape[0] != c_shape[1]:
raise ValueError(
"symmetric_update expects a rectangular matrix of shape (m, n) and a "
f"square matrix of shape (n, n). Got shapes {a_shape} and {c_shape}.")
return c_shape
def _symmetric_product_jax_fn(a, c, *, alpha, beta):
  """Reference implementation of symmetric_product: alpha * (a @ a^T) + beta * c."""
  # Swap the two trailing axes of ``a`` to form its (batched) transpose.
  perm = (*range(a.ndim - 2), a.ndim - 1, a.ndim - 2)
  gram = lax.batch_matmul(a, lax.transpose(a, perm),
                          precision=lax.Precision.HIGHEST)
  return alpha * gram + beta * c
def _symmetric_product_gpu_lowering(
    platform, ctx, a_tensor, c_tensor, alpha, beta):
  """GPU lowering of symmetric_product via the cuSOLVER/hipSOLVER syrk FFI.

  ``alpha`` and ``beta`` are trace-time scalars; they are materialized as
  rank-0 arrays because the FFI kernel consumes them as operands.
  """
  a_aval, c_aval = ctx.avals_in[:2]
  dtype = a_aval.dtype
  alpha_aval = beta_aval = ShapedArray((), dtype)
  alpha_array = mlir.full_like_aval(ctx, alpha, alpha_aval)
  beta_array = mlir.full_like_aval(ctx, beta, beta_aval)
  # The kernel overwrites the ``c`` buffer (operand 1 aliases output 0).
  rule = ffi.ffi_lowering(f"{platform}solver_syrk_ffi",
                          operand_output_aliases={1: 0})
  # Extend avals_in to account for the two scalar operands added above.
  ctx = ctx.replace(avals_in=[a_aval, c_aval, alpha_aval, beta_aval])
  return rule(ctx, a_tensor, c_tensor, alpha_array, beta_array, transpose=False)
# symmetric_product primitive: syrk FFI kernel on CUDA, generic JAX
# implementation on all other platforms.
symmetric_product_p = standard_linalg_primitive(
    (_float, _float), (2, 2), _symmetric_product_shape_rule,
    "symmetric_product")
mlir.register_lowering(
    symmetric_product_p,
    partial(_symmetric_product_gpu_lowering, "cu"), platform="cuda")
# Default (platform-agnostic) lowering: evaluate the reference JAX function.
mlir.register_lowering(
    symmetric_product_p,
    mlir.lower_fun(_symmetric_product_jax_fn, multiple_results=False))
# Triangular solve
def _triangular_solve_shape_rule(a_shape, b_shape, *, left_side=False, **_):
if a_shape[0] != a_shape[1]:
raise ValueError(
"The first input to triangular_solve must be a square matrix. Got "
f"shape {a_shape}.")
common_dim = -2 if left_side else -1
if a_shape[-1] != b_shape[common_dim]:
raise ValueError(
f"Incompatible shapes for arguments to triangular_solve: {a_shape} and "
f"{b_shape}.")
return b_shape
def _triangular_solve_dtype_rule(dtype, *_, **__):
return dtype
def _triangular_solve_jvp_rule_a(
    g_a, ans, a, b, *, left_side, lower, transpose_a, conjugate_a,
    unit_diagonal):
  """JVP of triangular_solve with respect to the triangular operand ``a``.

  For the left-side solve X = A^{-1} B the tangent is
  dX = -A^{-1} dA X; the code builds -dA (masked to the active triangle)
  and then associates the two remaining products in the cheaper order.
  """
  m, n = b.shape[-2:]
  # With a unit diagonal the diagonal entries are fixed, so the tangent mask
  # excludes the diagonal as well as the inactive triangle.
  k = 1 if unit_diagonal else 0
  g_a = _tril(g_a, k=-k) if lower else _triu(g_a, k=k)
  g_a = lax.neg(g_a)
  g_a = _T(g_a) if transpose_a else g_a
  g_a = g_a.conj() if conjugate_a else g_a
  dot = partial(lax.dot if g_a.ndim == 2 else lax.batch_matmul,
                precision=lax.Precision.HIGHEST)
  def a_inverse(rhs):
    return triangular_solve(a, rhs, left_side=left_side, lower=lower,
                            transpose_a=transpose_a, conjugate_a=conjugate_a,
                            unit_diagonal=unit_diagonal)
  # triangular_solve is about the same cost as matrix multiplication (~n^2 FLOPs
  # for matrix/vector inputs). Order these operations in whichever order is
  # cheaper.
  if left_side:
    assert g_a.shape[-2:] == a.shape[-2:] == (m, m) and ans.shape[-2:] == (m, n)
    if m > n:
      return a_inverse(dot(g_a, ans))  # A^{-1} (∂A X)
    else:
      return dot(a_inverse(g_a), ans)  # (A^{-1} ∂A) X
  else:
    assert g_a.shape[-2:] == a.shape[-2:] == (n, n) and ans.shape[-2:] == (m, n)
    if m < n:
      return a_inverse(dot(ans, g_a))  # (X ∂A) A^{-1}
    else:
      return dot(ans, a_inverse(g_a))  # X (∂A A^{-1})
def _triangular_solve_transpose_rule(
    cotangent, a, b, *, left_side, lower, transpose_a, conjugate_a,
    unit_diagonal):
  """Transpose rule for triangular_solve.

  The primitive is nonlinear in ``a`` and linear in ``b`` (analogous to
  `div` with the roles swapped), so only ``b`` may be an undefined primal;
  its cotangent is a solve against the transposed triangular matrix.
  """
  assert not ad.is_undefined_primal(a) and ad.is_undefined_primal(b)
  if type(cotangent) is ad_util.Zero:
    ct_b = ad_util.Zero(b.aval)
  else:
    # Flipping transpose_a turns the solve into the transposed solve.
    ct_b = triangular_solve(a, cotangent, left_side=left_side,
                            lower=lower, transpose_a=not transpose_a,
                            conjugate_a=conjugate_a,
                            unit_diagonal=unit_diagonal)
  return [None, ct_b]
def _triangular_solve_batching_rule(batched_args, batch_dims, *, left_side,
                                    lower, transpose_a, conjugate_a,
                                    unit_diagonal):
  """Batching rule for triangular_solve.

  If only ``b`` is batched, the batch is folded into the free dimension of
  ``b`` (columns for a left solve, rows for a right solve) so a single solve
  is issued. Otherwise both operands are batched along a leading axis.
  """
  x, y = batched_args
  bx, by = batch_dims
  if bx is batching.not_mapped:
    if left_side:
      # A X = B: stack the batch into extra columns of B.
      y = batching.moveaxis(y, by, -1)
      y_flat = y.reshape(y.shape[:-2] + (y.shape[-2] * y.shape[-1],))
      bdim_out = y.ndim - 1
    else:
      # X A = B: stack the batch into extra rows of B.
      y = batching.moveaxis(y, by, -2)
      y_flat = y.reshape(y.shape[:-3] + (y.shape[-3] * y.shape[-2], y.shape[-1]))
      bdim_out = y.ndim - 2
    out_flat = triangular_solve(
        x, y_flat, left_side=left_side, lower=lower,
        transpose_a=transpose_a, conjugate_a=conjugate_a,
        unit_diagonal=unit_diagonal)
    return out_flat.reshape(y.shape), bdim_out
  else:
    # Both operands batched: align the batch axes at the front.
    size = next(t.shape[i] for t, i in zip(batched_args, batch_dims)
                if i is not None)
    x = batching.bdim_at_front(x, bx, size)
    y = batching.bdim_at_front(y, by, size)
    return triangular_solve(x, y, left_side=left_side, lower=lower,
                            transpose_a=transpose_a, conjugate_a=conjugate_a,
                            unit_diagonal=unit_diagonal), 0
def _triangular_solve_lowering(
    ctx, a, b, *, left_side, lower, transpose_a, conjugate_a, unit_diagonal):
  """Default (HLO) lowering for triangular_solve.

  HLO's TriangularSolve has no "conjugate without transpose" mode, so that
  case is handled by conjugating ``a`` up front.
  """
  out_aval, = ctx.avals_out
  if conjugate_a and not transpose_a:
    # Fix: use the chlo.conj value builder (returns an SSA value) rather than
    # the raw ConjOp op constructor; this also matches the sibling
    # _triangular_solve_cpu_lower.
    a = chlo.conj(a)
    conjugate_a = False
  if not transpose_a:
    transpose = "NO_TRANSPOSE"
  else:
    transpose = "ADJOINT" if conjugate_a else "TRANSPOSE"
  out = hlo.triangular_solve(a, b, ir.BoolAttr.get(left_side),
                             ir.BoolAttr.get(lower),
                             ir.BoolAttr.get(unit_diagonal),
                             hlo.TransposeAttr.get(transpose))
  return [mlir.lower_with_sharding_in_types(ctx, out, out_aval)]
# Dtypes with registered LAPACK kernels; anything else takes the generic HLO
# fallback path in _triangular_solve_cpu_lower below.
_cpu_lapack_types = {np.dtype(np.float32), np.dtype(np.float64),
                     np.dtype(np.complex64), np.dtype(np.complex128)}
def _triangular_solve_cpu_lower(
    ctx, a, b, *, left_side, lower, transpose_a,
    conjugate_a, unit_diagonal):
  """CPU lowering: LAPACK trsm FFI kernel for supported dtypes, HLO otherwise."""
  a_aval, b_aval = ctx.avals_in
  if conjugate_a and not transpose_a:
    # Neither backend has a "conjugate only" mode, so conjugate eagerly.
    a = chlo.conj(a)
    conjugate_a = False
  if np.dtype(a_aval.dtype) in _cpu_lapack_types:
    target_name = lapack.prepare_lapack_call("trsm_ffi", a_aval.dtype)
    alpha, alpha_aval, batch_partitionable = (), (), True
    # The rhs ``b`` is solved in place (operand 1 aliases output 0).
    rule = _linalg_ffi_lowering(target_name,
                                [a_aval, b_aval, *alpha_aval],
                                operand_output_aliases={1: 0},
                                batch_partitionable=batch_partitionable)
    # Mode flags are passed as single-character (u8) attributes, BLAS-style.
    return rule(ctx, a, b, *alpha,
                side=_matrix_side_attr(left_side),
                uplo=_matrix_uplo_attr(lower),
                trans_x=_matrix_transpose_attr(transpose_a, conjugate_a),
                diag=_matrix_diagonal_attr(unit_diagonal))
  else:
    # Fall back to the HLO implementation for unsupported types or batching.
    # TODO: Consider swapping XLA for LAPACK in batched case
    if transpose_a:
      transpose = "ADJOINT" if conjugate_a else "TRANSPOSE"
    else:
      transpose = "NO_TRANSPOSE"
    return [hlo.triangular_solve(a, b, ir.BoolAttr.get(left_side),
                                 ir.BoolAttr.get(lower),
                                 ir.BoolAttr.get(unit_diagonal),
                                 hlo.TransposeAttr.get(transpose))]
# triangular_solve primitive and rules. The JVP w.r.t. ``b`` is itself a
# triangular solve; the JVP w.r.t. ``a`` uses the dedicated rule above.
triangular_solve_p = linalg_primitive(
    _triangular_solve_dtype_rule, (_float | _complex, _float | _complex),
    (2, 2), _triangular_solve_shape_rule, "triangular_solve")
ad.defjvp2(triangular_solve_p,
           _triangular_solve_jvp_rule_a,
           lambda g_b, _, a, b, **kws: triangular_solve(a, g_b, **kws))
ad.primitive_transposes[triangular_solve_p] = _triangular_solve_transpose_rule
batching.primitive_batchers[triangular_solve_p] = _triangular_solve_batching_rule
# Generic HLO lowering everywhere; LAPACK-backed lowering on CPU.
mlir.register_lowering(triangular_solve_p, _triangular_solve_lowering)
mlir.register_lowering(triangular_solve_p, _triangular_solve_cpu_lower,
                       platform="cpu")
# tridiagonal: Upper Hessenberg reduction
def _tridiagonal_shape_rule(shape, **_):
if shape[0] != shape[1] or shape[1] == 0:
raise ValueError(
f"The input to tridiagonal must be a square matrix. Got shape {shape}.")
n, _ = shape
return shape, (n,), (n - 1,), (n - 1,)
def _tridiagonal_dtype_rule(dtype, **_):
  """Output dtypes (A, d, e, taus); the two diagonals are always real-valued."""
  real_dtype = lax._complex_basetype(dtype)
  return dtype, real_dtype, real_dtype, dtype
def _tridiagonal_cpu_gpu_lowering(ctx, a, *, lower, target_name_prefix):
  """Lower tridiagonal reduction to LAPACK sytrd/hetrd (CPU) or solver sytrd (GPU).

  Batch elements whose LAPACK ``info`` is nonzero are replaced with NaNs.
  """
  a_aval, = ctx.avals_in
  arr_aval, d_aval, e_aval, taus_aval = ctx.avals_out
  batch_dims = a_aval.shape[:-2]
  if target_name_prefix == "cpu":
    # LAPACK names the routine by dtype family: sytrd (real) / hetrd (complex).
    real = a_aval.dtype == np.float32 or a_aval.dtype == np.float64
    prefix = "sy" if real else "he"
    target_name = lapack.prepare_lapack_call(f"{prefix}trd_ffi", a_aval.dtype)
    params = {"uplo": _matrix_uplo_attr(lower)}
  else:
    target_name = f"{target_name_prefix}solver_sytrd_ffi"
    params = {"lower": lower}
  # One int32 status value per batch element.
  info_aval = ShapedArray(batch_dims, np.int32)
  rule = _linalg_ffi_lowering(
      target_name, avals_out=(*ctx.avals_out, info_aval),
      operand_output_aliases={0: 0})
  arr, d, e, taus, info = rule(ctx, a, **params)
  # info == 0 indicates success for the corresponding batch element.
  zeros = mlir.full_like_aval(ctx, 0, info_aval)
  ok = mlir.compare_hlo(info, zeros, "EQ", "SIGNED")
  arr = _replace_not_ok_with_nan(ctx, batch_dims, ok, arr, arr_aval)
  d = _replace_not_ok_with_nan(ctx, batch_dims, ok, d, d_aval)
  e = _replace_not_ok_with_nan(ctx, batch_dims, ok, e, e_aval)
  taus = _replace_not_ok_with_nan(ctx, batch_dims, ok, taus, taus_aval)
  return arr, d, e, taus
# tridiagonal primitive (reduction of a Hermitian/symmetric matrix to
# tridiagonal form).
tridiagonal_p = linalg_primitive(
    _tridiagonal_dtype_rule, (_float | _complex,), (2,),
    _tridiagonal_shape_rule, "tridiagonal", multiple_results=True)
register_cpu_gpu_lowering(tridiagonal_p, _tridiagonal_cpu_gpu_lowering)
# Tridiagonal solve
def _tridiagonal_solve_shape_rule(dl_shape, d_shape, du_shape, b_shape, **_):
if dl_shape != d_shape or dl_shape != du_shape:
raise TypeError(
"tridiagonal_solve requires that all diagonal arguments have the same "
"shape.")
if dl_shape != b_shape[:-1]:
raise TypeError(
"tridiagonal_solve requires that the leading ndim-1 dimensions of b "
"equal the dimensions of the diagonal arguments.")
return b_shape
def _tridiagonal_solve_gpu_lowering(ctx, dl, d, du, b, *, target_name_prefix):
  """Lower tridiagonal_solve to the cuSPARSE/hipSPARSE gtsv2 FFI kernel."""
  ffi_target = f"{target_name_prefix}sparse_gtsv2_ffi"
  # The rhs ``b`` is overwritten with the solution (operand 3 aliases output 0).
  lowering = _linalg_ffi_lowering(ffi_target, operand_output_aliases={3: 0})
  return lowering(ctx, dl, d, du, b)
def _tridiagonal_solve_cpu_lowering(ctx, dl, d, du, b, **kwargs):
  """CPU lowering via LAPACK gtsv; failing batch elements become NaNs."""
  del kwargs  # unused
  b_aval = ctx.avals_in[-1]
  batch_dims = b_aval.shape[:-2]
  target_name = lapack.prepare_lapack_call("gtsv_ffi", b_aval.dtype)
  # One int32 status value per batch element.
  info_aval = ShapedArray(batch_dims, np.int32)
  # gtsv overwrites all four inputs; alias each operand to its output slot.
  rule = _linalg_ffi_lowering(target_name,
                              avals_out=[*ctx.avals_in, info_aval],
                              operand_output_aliases={0: 0, 1: 1, 2: 2, 3: 3})
  *_, b_out, info = rule(ctx, dl, d, du, b)
  # info == 0 indicates success; otherwise the solution is replaced by NaNs.
  zeros = mlir.full_like_aval(ctx, 0, info_aval)
  ok = mlir.compare_hlo(info, zeros, "EQ", "SIGNED")
  return [_replace_not_ok_with_nan(ctx, batch_dims, ok, b_out, b_aval)]
def _tridiagonal_product(dl, d, du, b):
  """Multiply the tridiagonal matrix given by (dl, d, du) with ``b``."""
  # Main diagonal contribution (broadcast d over b's trailing column axis).
  y = lax.reshape(d, d.shape + (1,)) * b
  # Sub- and super-diagonal contributions. Note dl's first entry and du's
  # last entry are never read here.
  y = y.at[..., 1:, :].add(dl[..., 1:, None] * b[..., :-1, :])
  y = y.at[..., :-1, :].add(du[..., :-1, None] * b[..., 1:, :])
  return y
def _tridiagonal_solve_jvp_rule(primals, tangents):
  """JVP: the tangent dx solves A dx = db - dA x, reusing the same solver."""
  *diags, _ = primals
  *diags_dot, b_dot = tangents
  ans = tridiagonal_solve_p.bind(*primals)
  if all(type(p) is ad_util.Zero for p in diags_dot):
    # The matrix is constant: the tangent system has the same matrix, rhs db.
    rhs = b_dot
  else:
    # rhs = db - dA @ x, where dA is applied via the tridiagonal product.
    matvec_dot = _tridiagonal_product(*map(ad.instantiate_zeros, diags_dot), ans)
    rhs = ad.add_tangents(b_dot, -matvec_dot)
  ans_dot = tridiagonal_solve_p.bind(*diags, rhs)
  return ans, ans_dot
def _tridiagonal_solve_transpose_rule(cotangent, dl, d, du, b):
  """Transpose rule: solve against A^T by shifting/swapping the off-diagonals."""
  # Tridiagonal solve is nonlinear in the tridiagonal arguments and linear
  # otherwise.
  assert not (ad.is_undefined_primal(dl) or ad.is_undefined_primal(d) or
              ad.is_undefined_primal(du)) and ad.is_undefined_primal(b)
  if type(cotangent) is ad_util.Zero:
    cotangent_b = ad_util.Zero(b.aval)
  else:
    # A^T's sub-diagonal is A's super-diagonal shifted one slot (and vice
    # versa); the vacated first/last slots are zero-filled.
    dl_trans = lax.concatenate((lax.zeros_like_array(du[..., -1:]), du[..., :-1]),
                               du.ndim-1)
    du_trans = lax.concatenate((dl[..., 1:], lax.zeros_like_array(dl[..., :1])),
                               dl.ndim-1)
    cotangent_b = tridiagonal_solve(dl_trans, d, du_trans, cotangent)
  return [None, None, None, cotangent_b]
def _tridiagonal_solve_batching_rule(batched_args, batch_dims):
  """Batching rule for tridiagonal_solve.

  When only ``b`` is batched, the batch is folded into b's trailing column
  dimension so a single solve is issued; otherwise every operand is batched
  along a leading axis.
  """
  dl, d, du, b = batched_args
  bdl, bd, bdu, bb = batch_dims
  if (bdl is batching.not_mapped and
      bd is batching.not_mapped and
      bdu is batching.not_mapped):
    # Fold the batch of right-hand sides into extra columns.
    b = batching.moveaxis(b, bb, -2)
    b_flat = b.reshape(b.shape[:-3] + (b.shape[-3], b.shape[-2] * b.shape[-1]))
    bdim_out = b.ndim - 2
    out_flat = tridiagonal_solve(dl, d, du, b_flat)
    return out_flat.reshape(b.shape), bdim_out
  else:
    # At least one diagonal is batched: align all batch axes at the front.
    size = next(t.shape[i] for t, i in zip(batched_args, batch_dims)
                if i is not None)
    dl = batching.bdim_at_front(dl, bdl, size)
    d = batching.bdim_at_front(d, bd, size)
    du = batching.bdim_at_front(du, bdu, size)
    b = batching.bdim_at_front(b, bb, size)
    return tridiagonal_solve(dl, d, du, b), 0
def _tridiagonal_solve_jax_impl(dl, d, du, b):
  """Single-system tridiagonal solve (forward elimination + back substitution),
  expressed with lax.scan so it stages as two sequential loops."""
  def fwd(carry, args):
    # Forward sweep: cp/dp are the modified super-diagonal and rhs.
    cp, dp = carry
    a, b, c, d = args
    cp_next = c / (b - a * cp)
    dp_next = (d - a * dp) / (b - a * cp)
    return (cp_next, dp_next), (cp, dp)
  (_, final), (cp, dp) = control_flow.scan(
      fwd, (du[0] / d[0], b[0] / d[0]), (dl[1:], d[1:], du[1:], b[1:, :]),
      unroll=32)
  def bwd(xn, args):
    # Backward sweep: substitute the previously computed solution element.
    cp, dp = args
    x = dp - cp * xn
    return x, xn
  end, ans = control_flow.scan(bwd, final, (cp, dp), unroll=32, reverse=True)
  # Prepend the final carry (the first solution row) to the scanned outputs.
  return lax.concatenate((end[None], ans), 0)
def _tridiagonal_solve_jax(dl, d, du, b, **_):
  """Pure-JAX fallback: vmap the single-system solver over all batch dims."""
  solve = _tridiagonal_solve_jax_impl
  # The diagonals are rank-1 per system, so every leading dim of ``dl``
  # beyond the last is a batch dimension.
  for _axis in range(dl.ndim - 1):
    solve = api.vmap(solve)
  return solve(dl, d, du, b)
# tridiagonal_solve primitive and rules: LAPACK gtsv on CPU, cuSPARSE /
# hipSPARSE gtsv2 on GPU, and the pure-JAX scan implementation as the
# platform-agnostic fallback.
tridiagonal_solve_p = standard_linalg_primitive(
    (_float | _complex, _float | _complex, _float | _complex, _float | _complex),
    (1, 1, 1, 2), _tridiagonal_solve_shape_rule, "tridiagonal_solve")
ad.primitive_jvps[tridiagonal_solve_p] = _tridiagonal_solve_jvp_rule
ad.primitive_transposes[tridiagonal_solve_p] = _tridiagonal_solve_transpose_rule
batching.primitive_batchers[tridiagonal_solve_p] = _tridiagonal_solve_batching_rule
mlir.register_lowering(
    tridiagonal_solve_p,
    _tridiagonal_solve_cpu_lowering,
    platform='cpu')
mlir.register_lowering(
    tridiagonal_solve_p,
    partial(_tridiagonal_solve_gpu_lowering, target_name_prefix='cu'),
    platform='cuda')
mlir.register_lowering(
    tridiagonal_solve_p,
    partial(_tridiagonal_solve_gpu_lowering, target_name_prefix='hip'),
    platform='rocm')
mlir.register_lowering(tridiagonal_solve_p, mlir.lower_fun(
    _tridiagonal_solve_jax, multiple_results=False))
# Utilities
def _broadcasted_matvec(a: Array, b: Array) -> Array:
  # This is a broadcasted dot_general with signature (...,n,m),(...,m)->(...,n)
  assert a.ndim >= 2
  assert b.ndim >= 1
  batch_shape = lax.broadcast_shapes(a.shape[:-2], b.shape[:-1])
  n_batch = len(batch_shape)
  # Materialize the broadcast so every leading dim can be a dot_general
  # batch dimension.
  a = _broadcast_to(a, (*batch_shape, *a.shape[-2:]))
  b = _broadcast_to(b, (*batch_shape, b.shape[-1]))
  # Contract a's trailing dim against b's trailing dim; batch the rest.
  dimension_numbers = (([a.ndim - 1], [b.ndim - 1]), (list(range(n_batch)), list(range(n_batch))))
  return lax.dot_general(a, b, dimension_numbers=dimension_numbers, precision=lax.Precision.HIGHEST)
def _check_solve_shapes(a: Array, b: Array):
if not (a.ndim >= 2 and b.ndim in [a.ndim, a.ndim - 1] and
a.shape[-1] == a.shape[-2] == b.shape[a.ndim - 2]):
raise ValueError(
"The arguments to solve must have shapes a=[..., m, m] and "
f"b=[..., m, k] or b=[..., m]; got a={a.shape} and b={b.shape}")
def _solve(a: Array, b: Array) -> Array:
  """Solve a @ x = b via LU, wrapped in custom_linear_solve for efficient AD."""
  _check_solve_shapes(a, b)
  # Broadcast leading dimensions of b to the shape of a, as is required by
  # custom_linear_solve.
  out_shape = tuple(d_a if d_b == 1 else d_b
                    for d_a, d_b in zip(a.shape[:-1] + (1,), b.shape))
  b = lax.broadcast_in_dim(b, out_shape, range(b.ndim))
  # With custom_linear_solve, we can reuse the same factorization when
  # computing sensitivities. This is considerably faster.
  lu_, _, permutation = lu(lax.stop_gradient(a))
  custom_solve = partial(
      control_flow.custom_linear_solve,
      lambda x: _broadcasted_matvec(a, x),
      solve=lambda _, x: lu_solve(lu_, permutation, x, trans=0),
      transpose_solve=lambda _, x: lu_solve(lu_, permutation, x, trans=1))
  if a.ndim == b.ndim + 1:
    # b.shape == [..., m]
    return custom_solve(b)
  else:
    # b.shape == [..., m, k]: vmap the vector solve over b's trailing axis.
    return api.vmap(custom_solve, b.ndim - 1, max(a.ndim, b.ndim) - 1)(b)
def _T(x: Array) -> Array:
  """Transpose the last two dimensions of ``x``."""
  perm = (*range(x.ndim - 2), x.ndim - 1, x.ndim - 2)
  return lax.transpose(x, perm)
def _H(x: Array) -> Array:
  # Conjugate (Hermitian) transpose of the last two dimensions.
  return _T(x).conj()
# Project x onto its Hermitian part: (x + x^H) / 2.
def symmetrize(x: Array) -> Array: return (x + _H(x)) / 2
def _tril(m: Array, k:int = 0) -> Array:
  """Zero out entries above the k-th diagonal of the last two dims of ``m``."""
  *_, N, M = m.shape
  mask = lax._tri(bool, (N, M), k)
  # Keep masked (lower-triangular) entries; replace the rest with zeros.
  return lax.select(lax.broadcast(mask, m.shape[:-2]), m, lax.zeros_like_array(m))
def _triu(m: Array, k:int = 0) -> Array:
  """Zero out entries below the k-th diagonal of the last two dims of ``m``."""
  *_, N, M = m.shape
  # Reuse the lower-triangular mask with k-1 and invert the select arms.
  mask = lax._tri(bool, (N, M), k - 1)
  return lax.select(lax.broadcast(mask, m.shape[:-2]), lax.zeros_like_array(m), m)
def _construct_diagonal(s: Array) -> Array:
  """Construct a (batched) diagonal matrix"""
  i = lax.iota('int32', s.shape[-1])
  # Scatter s onto the (i, i) positions of a zero-filled square matrix.
  return lax.full((*s.shape, s.shape[-1]), 0, s.dtype).at[..., i, i].set(s)
def _extract_diagonal(s: Array) -> Array:
  """Extract the diagonal from a batched matrix"""
  # min() handles rectangular matrices: the diagonal has min(rows, cols) entries.
  i = lax.iota('int32', min(s.shape[-2], s.shape[-1]))
  return s[..., i, i]
def _broadcast_to(x: Array, shape: tuple[int, ...]) -> Array:
  """Broadcast ``x`` to ``shape``, aligning x's dims with the trailing dims."""
  assert x.ndim <= len(shape)
  return lax.broadcast_in_dim(x, shape, range(len(shape) - x.ndim, len(shape)))
def _nan_like_hlo(ctx: mlir.LoweringRuleContext, aval) -> ir.Value:
  """Return a constant of ``aval``'s shape and dtype filled with NaN."""
  is_complex = dtypes.issubdtype(aval.dtype, np.complexfloating)
  # Complex dtypes get NaN in both the real and imaginary components.
  fill_value = np.nan + np.nan * 1j if is_complex else np.nan
  return mlir.full_like_aval(ctx, fill_value, aval)
def _broadcasting_select_hlo(ctx, which, which_aval, x, x_aval, y, y_aval) -> ir.Value:
  """Wrapper around XLA `Select` that broadcasts its arguments."""
  out_shapes = list(lax.broadcast_shapes(
      tuple(which_aval.shape), tuple(x_aval.shape), tuple(y_aval.shape)))
  out_sharding = lax.broadcast_shardings(which_aval, x_aval, y_aval)
  # Broadcast all three operands to the common shape before selecting.
  which, x, y = mlir.multi_broadcast_in_dim(ctx, (which, x, y),
                                            (which_aval, x_aval, y_aval),
                                            out_shapes, out_sharding)
  return hlo.select(which, x, y)
def _replace_not_ok_with_nan(ctx, batch_dims, ok, x, x_aval):
  """Replace batch elements of ``x`` whose ``ok`` predicate is false with NaNs."""
  # ``ok`` has one value per batch element; append size-1 dims so the
  # predicate broadcasts against the per-element dims of ``x``.
  num_bcast_dims = len(x_aval.shape) - len(batch_dims)
  select_aval = ShapedArray(batch_dims + (1,) * num_bcast_dims, np.bool_)
  return _broadcasting_select_hlo(
      ctx,
      mlir.broadcast_in_dim(ctx, ok, select_aval,
                            broadcast_dimensions=range(len(batch_dims))),
      select_aval,
      x, x_aval, _nan_like_hlo(ctx, x_aval), x_aval)
def _enum_attr(e):
  # Encode an enum's integer value as an unsigned-8-bit MLIR attribute.
  return ir.IntegerAttr.get(ir.IntegerType.get_unsigned(8), e.value)
def _char_attr(c):
  # Encode a single character (e.g. a BLAS/LAPACK mode flag) as a u8 attribute.
  return ir.IntegerAttr.get(ir.IntegerType.get_unsigned(8), ord(c))
def _matrix_side_attr(left_side):
  # BLAS "side" flag: 'L'eft or 'R'ight multiplication.
  return _char_attr("L" if left_side else "R")
def _matrix_uplo_attr(lower):
  # BLAS "uplo" flag: 'L'ower or 'U'pper triangle.
  return _char_attr("L" if lower else "U")
def _matrix_transpose_attr(transpose: bool, conjugate: bool):
  # BLAS "trans" flag: 'N'o transpose, 'T'ranspose, or 'C'onjugate transpose.
  return _char_attr(("C" if conjugate else "T") if transpose else "N")
def _matrix_diagonal_attr(unit_diag: bool):
  # BLAS "diag" flag: 'U'nit or 'N'on-unit diagonal.
  return _char_attr("U" if unit_diag else "N")
def _column_major_matrix_layout(dim: int) -> tuple[int, ...]:
# The layout for a batch of matrices with Fortran order.
return (dim - 2, dim - 1) + tuple(range(dim - 3, -1, -1))
def _sdy_rule_for_aval(letters, num_batch_dims, aval):
d = len(aval.shape) - num_batch_dims
prefix = "... " if num_batch_dims and d >= 0 else ""
return prefix + " ".join(next(letters) for _ in range(d))
def _build_sdy_sharding_rule(num_batch_dims, avals_in, avals_out):
  """Build the Shardy (sdy) sharding-rule attribute for an FFI call.

  Each non-batch dimension of every operand/result gets a distinct factor
  letter; the leading batch dimensions are written as an ellipsis.
  """
  letters = iter(string.ascii_letters)
  lhs = ", ".join(
      _sdy_rule_for_aval(letters, num_batch_dims, a) for a in avals_in)
  rhs = ", ".join(
      _sdy_rule_for_aval(letters, num_batch_dims, a) for a in avals_out)
  sdy_sharding_rule = str_to_sdy_sharding_rule(f"{lhs} -> {rhs}")
  return sdy_sharding_rule_to_mlir(
      sdy_sharding_rule,
      [mlir.aval_to_ir_type(a) for a in avals_in],
      [mlir.aval_to_ir_type(a) for a in avals_out])
def _linalg_ffi_lowering(target_name, avals_in=None, avals_out=None,
                         operand_output_aliases=None, column_major=True,
                         num_non_batch_dims=2, batch_partitionable=True):
  # A lightweight wrapper around ffi.ffi_lowering that can automatically set
  # the layouts appropriately for column-major matrices, which most handlers
  # used here will expect.
  def rule(ctx, *args, **kwargs):
    """Lowering rule; avals default to the call-site ctx when not overridden."""
    avals_in_ = ctx.avals_in if avals_in is None else avals_in
    avals_out_ = ctx.avals_out if avals_out is None else avals_out
    # TODO(danfm): Add support for shape polymorphism and batch partitioning.
    has_dynamic_shape = any(
        not is_constant_shape(aval.shape) for aval in (*avals_in_, *avals_out_))
    batch_partitionable_ = batch_partitionable and not has_dynamic_shape
    max_num_dims = max(len(v.shape) for v in avals_in_)
    ctx = ctx.replace(avals_in=avals_in_, avals_out=avals_out_)
    # Only full-rank operands/results (those carrying the batch dims) get the
    # Fortran-order layout; lower-rank ones keep the default layout (None).
    operand_layouts = [
        _column_major_matrix_layout(len(aval.shape))
        if column_major and len(aval.shape) == max_num_dims else None
        for aval in avals_in_]
    result_layouts = [
        _column_major_matrix_layout(len(aval.shape))
        if column_major and len(aval.shape) == max_num_dims else None
        for aval in avals_out_]
    num_batch_dims = max_num_dims - num_non_batch_dims
    # Advertise the batch-dim count so the call can be partitioned over them.
    frontend_attrs = mlir.ir_attribute({"num_batch_dims": str(num_batch_dims)})
    if batch_partitionable_:
      extra_attributes = {"mhlo.frontend_attributes": frontend_attrs}
      if config.use_shardy_partitioner.value:
        extra_attributes["sdy.sharding_rule"] = _build_sdy_sharding_rule(
            num_batch_dims, avals_in_, avals_out_)
    else:
      extra_attributes = None
    rule = ffi.ffi_lowering(target_name, operand_layouts=operand_layouts,
                            result_layouts=result_layouts,
                            operand_output_aliases=operand_output_aliases,
                            extra_attributes=extra_attributes)
    return rule(ctx, *args, **kwargs)
  return rule
| SvdAlgorithm |
python | dagster-io__dagster | python_modules/dagster/dagster/_config/pythonic_config/resource.py | {
"start": 27379,
"end": 30324
} | class ____(
MakeConfigCacheable,
Generic[TResValue],
):
data: dict[str, Any]
resource_cls: type[Any]
def __init__(
self,
resource_cls: type[ConfigurableResourceFactory[TResValue]],
data: dict[str, Any],
):
resource_pointers, _data_without_resources = separate_resource_params(resource_cls, data)
super().__init__(data=data, resource_cls=resource_cls) # type: ignore # extends BaseModel, takes kwargs
def resource_fn(context: InitResourceContext):
to_populate = resource_cls._get_non_default_public_field_values_cls( # noqa: SLF001
{**data, **context.resource_config}
)
instantiated = resource_cls(
**to_populate
) # So that collisions are resolved in favor of the latest provided run config
return instantiated._get_initialize_and_run_fn()(context) # noqa: SLF001
self._state__internal__ = PartialResourceState(
# We keep track of any resources we depend on which are not fully configured
# so that we can retrieve them at runtime
nested_partial_resources={
k: v for k, v in resource_pointers.items() if (not _is_fully_configured(v))
},
config_schema=infer_schema_from_config_class(
resource_cls, fields_to_omit=set(resource_pointers.keys())
),
resource_fn=resource_fn,
description=resource_cls.__doc__,
nested_resources={k: v for k, v in resource_pointers.items()},
)
# to make AllowDelayedDependencies work
@property
def _nested_partial_resources(
self,
) -> Mapping[str, Any]:
return self._state__internal__.nested_partial_resources
@property
def nested_resources(
self,
) -> Mapping[str, Any]:
return self._state__internal__.nested_resources
@cached_method # resource resolution depends on always resolving to the same ResourceDefinition instance
def get_resource_definition(self) -> ConfigurableResourceFactoryResourceDefinition:
return ConfigurableResourceFactoryResourceDefinition(
self.resource_cls,
resource_fn=self._state__internal__.resource_fn,
config_schema=self._state__internal__.config_schema,
description=self._state__internal__.description,
nested_resources=self.nested_resources,
nested_partial_resources=self._nested_partial_resources,
dagster_maintained=self.resource_cls._is_dagster_maintained(), # noqa: SLF001
)
ResourceOrPartial: TypeAlias = Union[
ConfigurableResourceFactory[TResValue], PartialResource[TResValue]
]
ResourceOrPartialOrValue: TypeAlias = Union[
ConfigurableResourceFactory[TResValue],
PartialResource[TResValue],
ResourceDefinition,
TResValue,
]
V = TypeVar("V")
| PartialResource |
python | scipy__scipy | benchmarks/benchmarks/interpolate.py | {
"start": 8483,
"end": 9860
} | class ____(Benchmark):
"""
Benchmark RegularGridInterpolator with method="linear".
"""
param_names = ['ndim', 'max_coord_size', 'n_samples', 'flipped']
params = [
[2, 3, 4],
[10, 40, 200],
[10, 100, 1000, 10000],
[1, -1]
]
def setup(self, ndim, max_coord_size, n_samples, flipped):
rng = np.random.default_rng(314159)
# coordinates halve in size over the dimensions
coord_sizes = [max_coord_size // 2**i for i in range(ndim)]
self.points = [np.sort(rng.random(size=s))[::flipped]
for s in coord_sizes]
self.values = rng.random(size=coord_sizes)
# choose in-bounds sample points xi
bounds = [(p.min(), p.max()) for p in self.points]
xi = [rng.uniform(low, high, size=n_samples)
for low, high in bounds]
self.xi = np.array(xi).T
self.interp = interpolate.RegularGridInterpolator(
self.points,
self.values,
)
def time_rgi_setup_interpolator(self, ndim, max_coord_size,
n_samples, flipped):
self.interp = interpolate.RegularGridInterpolator(
self.points,
self.values,
)
def time_rgi(self, ndim, max_coord_size, n_samples, flipped):
self.interp(self.xi)
| RegularGridInterpolator |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dlp.py | {
"start": 80186,
"end": 83631
} | class ____(GoogleCloudBaseOperator):
"""
Returns a list of the sensitive information types that the DLP API supports.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudDLPListInfoTypesOperator`
:param language_code: (Optional) Optional BCP-47 language code for localized infoType
friendly names. If omitted, or if localized strings are not available, en-US
strings will be returned.
:param results_filter: (Optional) Filter used to specify a subset of results.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"language_code",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudDLPPossibleInfoTypesListLink(),)
def __init__(
self,
*,
project_id: str = PROVIDE_PROJECT_ID,
language_code: str | None = None,
results_filter: str | None = None,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.language_code = language_code
self.results_filter = results_filter
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudDLPHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
response = hook.list_info_types(
language_code=self.language_code,
results_filter=self.results_filter,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
project_id = self.project_id or hook.project_id
if project_id:
CloudDLPPossibleInfoTypesListLink.persist(
context=context,
project_id=project_id,
)
return ListInfoTypesResponse.to_dict(response)
| CloudDLPListInfoTypesOperator |
python | langchain-ai__langchain | libs/core/langchain_core/messages/content.py | {
"start": 4933,
"end": 6778
} | class ____(TypedDict):
"""Annotation for citing data from a document.
!!! note
`start`/`end` indices refer to the **response text**,
not the source text. This means that the indices are relative to the model's
response, not the original document (as specified in the `url`).
!!! note "Factory function"
`create_citation` may also be used as a factory to create a `Citation`.
Benefits include:
* Automatic ID generation (when not provided)
* Required arguments strictly validated at creation time
"""
type: Literal["citation"]
"""Type of the content block. Used for discrimination."""
id: NotRequired[str]
"""Content block identifier.
Either:
- Generated by the provider (e.g., OpenAI's file ID)
- Generated by LangChain upon creation (`UUID4` prefixed with `'lc_'`))
"""
url: NotRequired[str]
"""URL of the document source."""
title: NotRequired[str]
"""Source document title.
For example, the page title for a web page or the title of a paper.
"""
start_index: NotRequired[int]
"""Start index of the **response text** (`TextContentBlock.text`)."""
end_index: NotRequired[int]
"""End index of the **response text** (`TextContentBlock.text`)"""
cited_text: NotRequired[str]
"""Excerpt of source text being cited."""
# NOTE: not including spans for the raw document text (such as `text_start_index`
# and `text_end_index`) as this is not currently supported by any provider. The
# thinking is that the `cited_text` should be sufficient for most use cases, and it
# is difficult to reliably extract spans from the raw document text across file
# formats or encoding schemes.
extras: NotRequired[dict[str, Any]]
"""Provider-specific metadata."""
| Citation |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/metadata.py | {
"start": 2319,
"end": 2528
} | class ____(graphene.ObjectType):
md_str = graphene.NonNull(graphene.String)
class Meta:
interfaces = (GrapheneMetadataEntry,)
name = "MarkdownMetadataEntry"
| GrapheneMarkdownMetadataEntry |
python | django-extensions__django-extensions | tests/collisions/models.py | {
"start": 777,
"end": 1043
} | class ____(models.Model):
# no conflicts but FK to conflicting models.
name = models.ForeignKey(Name, on_delete=models.CASCADE)
group = models.ForeignKey(Group, on_delete=models.CASCADE)
global_id = models.CharField(unique=True, max_length=32)
| SystemUser |
python | pdm-project__pdm | src/pdm/formats/flit.py | {
"start": 2128,
"end": 5824
} | class ____(MetaConverter):
def warn_against_dynamic_version_or_docstring(self, source: Path, version: str, description: str) -> None:
if not self._ui:
return
dynamic_fields = []
if not version:
dynamic_fields.append("version")
if not description:
dynamic_fields.append("description")
if not dynamic_fields:
return
fields = " and ".join(dynamic_fields)
message = (
f"Can't retrieve {fields} from pyproject.toml or parsing {source}. "
"They are probably imported from other files which is not supported by PDM."
" You may need to supply their values in pyproject.toml manually."
)
self._ui.warn(message)
@convert_from("metadata")
def name(self, metadata: dict[str, Any]) -> str:
# name
module = metadata.pop("module")
self._data["name"] = metadata.pop("dist-name", module)
# version and description
if (Path(module) / "__init__.py").exists():
source = Path(module) / "__init__.py"
else:
source = Path(f"{module}.py")
version = self._data.get("version")
description = self._data.get("description")
description_in_ast, version_in_ast = get_docstring_and_version_via_ast(source)
self._data["version"] = version or version_in_ast or ""
self._data["description"] = description or description_in_ast or ""
self.warn_against_dynamic_version_or_docstring(source, self._data["version"], self._data["description"])
# author and maintainer
if "author" in metadata:
self._data["authors"] = _get_author(metadata)
if "maintainer" in metadata:
self._data["maintainers"] = _get_author(metadata, "maintainer")
if "license" in metadata:
self._data["license"] = make_inline_table({"text": metadata.pop("license")})
self._data["dynamic"] = ["classifiers"]
if "urls" in metadata:
self._data["urls"] = metadata.pop("urls")
if "home-page" in metadata:
self._data.setdefault("urls", {})["homepage"] = metadata.pop("home-page")
if "description-file" in metadata:
self._data["readme"] = metadata.pop("description-file")
if "requires-python" in metadata:
self._data["requires-python"] = metadata.pop("requires-python")
self._data["dynamic"] = ["classifiers"]
# requirements
self._data["dependencies"] = make_array(metadata.pop("requires", []), True)
self._data["optional-dependencies"] = metadata.pop("requires-extra", {})
# Add remaining metadata as the same key
self._data.update(metadata)
return self._data["name"]
@convert_from("entrypoints", name="entry-points")
def entry_points(self, value: dict[str, dict[str, str]]) -> dict[str, dict[str, str]]:
return value
@convert_from("sdist")
def includes(self, value: dict[str, list[str]]) -> None:
self.settings.setdefault("build", {}).update(
{"excludes": value.get("exclude"), "includes": value.get("include")}
)
raise Unset()
def convert(project: Project | None, filename: PathLike, options: Namespace | None) -> tuple[Mapping, Mapping]:
with open(filename, "rb") as fp, cd(os.path.dirname(os.path.abspath(filename))):
converter = FlitMetaConverter(tomllib.load(fp)["tool"]["flit"], project.core.ui if project else None)
return converter.convert()
def export(project: Project, candidates: list, options: Namespace | None) -> None:
raise NotImplementedError()
| FlitMetaConverter |
python | huggingface__transformers | src/transformers/models/llava/modeling_llava.py | {
"start": 13395,
"end": 19953
} | class ____(LlavaPreTrainedModel, GenerationMixin):
_checkpoint_conversion_mapping = {
r"^language_model.model": "model.language_model",
r"^vision_tower": "model.vision_tower",
r"^multi_modal_projector": "model.multi_modal_projector",
r"^language_model.lm_head": "lm_head",
}
_tied_weights_keys = {"lm_head.weight": "model.language_model.embed_tokens.weight"}
def __init__(self, config: LlavaConfig):
super().__init__(config)
self.model = LlavaModel(config)
self.lm_head = nn.Linear(config.text_config.hidden_size, config.text_config.vocab_size, bias=False)
self.post_init()
def get_input_embeddings(self):
return self.model.get_input_embeddings()
def set_input_embeddings(self, value):
self.model.set_input_embeddings(value)
def get_output_embeddings(self) -> nn.Module:
return self.lm_head
def get_image_features(
self,
pixel_values: torch.FloatTensor,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
**kwargs,
):
return self.model.get_image_features(
pixel_values=pixel_values,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
**kwargs,
)
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
vision_feature_layer: Optional[Union[int, list[int]]] = None,
vision_feature_select_strategy: Optional[str] = None,
labels: Optional[torch.LongTensor] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
image_sizes: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> Union[tuple, LlavaCausalLMOutputWithPast]:
r"""
labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
(masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
Example:
```python
>>> from PIL import Image
>>> import requests
>>> from transformers import AutoProcessor, LlavaForConditionalGeneration
>>> model = LlavaForConditionalGeneration.from_pretrained("llava-hf/llava-1.5-7b-hf")
>>> processor = AutoProcessor.from_pretrained("llava-hf/llava-1.5-7b-hf")
>>> prompt = "USER: <image>\nWhat's the content of the image? ASSISTANT:"
>>> url = "https://www.ilankelman.org/stopsigns/australia.jpg"
>>> image = Image.open(requests.get(url, stream=True).raw)
>>> inputs = processor(images=image, text=prompt, return_tensors="pt")
>>> # Generate
>>> generate_ids = model.generate(**inputs, max_new_tokens=15)
>>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
"USER: \nWhat's the content of the image? ASSISTANT: The image features a busy city street with a stop sign prominently displayed"
```"""
vision_feature_layer = (
vision_feature_layer if vision_feature_layer is not None else self.config.vision_feature_layer
)
vision_feature_select_strategy = (
vision_feature_select_strategy
if vision_feature_select_strategy is not None
else self.config.vision_feature_select_strategy
)
outputs = self.model(
input_ids=input_ids,
pixel_values=pixel_values,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
vision_feature_layer=vision_feature_layer,
vision_feature_select_strategy=vision_feature_select_strategy,
cache_position=cache_position,
image_sizes=image_sizes,
**kwargs,
)
hidden_states = outputs[0]
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs
)
return LlavaCausalLMOutputWithPast(
loss=loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=outputs.image_hidden_states,
)
def prepare_inputs_for_generation(
self,
input_ids,
past_key_values=None,
inputs_embeds=None,
pixel_values=None,
attention_mask=None,
cache_position=None,
logits_to_keep=None,
**kwargs,
):
# Overwritten -- in specific circumstances we don't want to forward image inputs to the model
model_inputs = super().prepare_inputs_for_generation(
input_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
logits_to_keep=logits_to_keep,
**kwargs,
)
if cache_position[0] == 0:
# If we're in cached decoding stage, pixel values should be None because input ids do not contain special image token anymore
# Otherwise we need pixel values to be passed to model
model_inputs["pixel_values"] = pixel_values
return model_inputs
__all__ = ["LlavaForConditionalGeneration", "LlavaPreTrainedModel", "LlavaModel"]
| LlavaForConditionalGeneration |
python | python-visualization__folium | folium/features.py | {
"start": 34470,
"end": 40869
} | class ____(JSCSSMixin, Layer):
"""
Creates a TopoJson object for plotting into a Map.
Parameters
----------
data: file, dict or str.
The TopoJSON data you want to plot.
* If file, then data will be read in the file and fully
embedded in Leaflet's JavaScript.
* If dict, then data will be converted to JSON and embedded
in the JavaScript.
* If str, then data will be passed to the JavaScript as-is.
object_path: str
The path of the desired object into the TopoJson structure.
Ex: 'objects.myobject'.
style_function: function, default None
A function mapping a TopoJson geometry to a style dict.
name : string, default None
The name of the Layer, as it will appear in LayerControls
overlay : bool, default False
Adds the layer as an optional overlay (True) or the base layer (False).
control : bool, default True
Whether the Layer will be included in LayerControls.
show: bool, default True
Whether the layer will be shown on opening.
smooth_factor: float, default None
How much to simplify the polyline on each zoom level. More means
better performance and smoother look, and less means more accurate
representation. Leaflet defaults to 1.0.
tooltip: GeoJsonTooltip, Tooltip or str, default None
Display a text when hovering over the object. Can utilize the data,
see folium.GeoJsonTooltip for info on how to do that.
Examples
--------
>>> # Providing file that shall be embedded.
>>> TopoJson(open("foo.json"), "object.myobject")
>>> # Providing filename that shall not be embedded.
>>> TopoJson("foo.json", "object.myobject")
>>> # Providing dict.
>>> TopoJson(json.load(open("foo.json")), "object.myobject")
>>> # Providing string.
>>> TopoJson(open("foo.json").read(), "object.myobject")
>>> # Provide a style_function that color all states green but Alabama.
>>> style_function = lambda x: {
... "fillColor": (
... "#0000ff" if x["properties"]["name"] == "Alabama" else "#00ff00"
... )
... }
>>> TopoJson(topo_json, "object.myobject", style_function=style_function)
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }}_data = {{ this.data|tojson }};
var {{ this.get_name() }} = L.geoJson(
topojson.feature(
{{ this.get_name() }}_data,
{{ this.get_name() }}_data{{ this._safe_object_path }}
),
{
{%- if this.smooth_factor is not none %}
smoothFactor: {{ this.smooth_factor|tojson }},
{%- endif %}
}
).addTo({{ this._parent.get_name() }});
{{ this.get_name() }}.setStyle(function(feature) {
return feature.properties.style;
});
{% endmacro %}
"""
) # noqa
default_js = [
(
"topojson",
"https://cdnjs.cloudflare.com/ajax/libs/topojson/1.6.9/topojson.min.js",
),
]
def __init__(
self,
data: Any,
object_path: str,
style_function: Optional[Callable] = None,
name: Optional[str] = None,
overlay: bool = True,
control: bool = True,
show: bool = True,
smooth_factor: Optional[float] = None,
tooltip: Union[str, Tooltip, None] = None,
):
super().__init__(name=name, overlay=overlay, control=control, show=show)
self._name = "TopoJson"
if "read" in dir(data):
self.embed = True
self.data = json.load(data)
elif type(data) is dict:
self.embed = True
self.data = data
else:
self.embed = False
self.data = data
self.object_path = object_path
self._safe_object_path = javascript_identifier_path_to_array_notation(
object_path
)
self.style_function = style_function or (lambda x: {})
self.smooth_factor = smooth_factor
if isinstance(tooltip, (GeoJsonTooltip, Tooltip)):
self.add_child(tooltip)
elif tooltip is not None:
self.add_child(Tooltip(tooltip))
def style_data(self) -> None:
"""Applies self.style_function to each feature of self.data."""
def recursive_get(data, keys):
if len(keys):
return recursive_get(data.get(keys[0]), keys[1:])
else:
return data
geometries = recursive_get(self.data, self.object_path.split("."))[
"geometries"
] # noqa
for feature in geometries:
feature.setdefault("properties", {}).setdefault("style", {}).update(
self.style_function(feature)
) # noqa
def render(self, **kwargs):
"""Renders the HTML representation of the element."""
self.style_data()
super().render(**kwargs)
def get_bounds(self) -> TypeBoundsReturn:
"""
Computes the bounds of the object itself (not including it's children)
in the form [[lat_min, lon_min], [lat_max, lon_max]]
"""
if not self.embed:
raise ValueError("Cannot compute bounds of non-embedded TopoJSON.")
xmin, xmax, ymin, ymax = None, None, None, None
for arc in self.data["arcs"]:
x, y = 0, 0
for dx, dy in arc:
x += dx
y += dy
xmin = none_min(x, xmin)
xmax = none_max(x, xmax)
ymin = none_min(y, ymin)
ymax = none_max(y, ymax)
return [
[
self.data["transform"]["translate"][1]
+ self.data["transform"]["scale"][1] * ymin, # noqa
self.data["transform"]["translate"][0]
+ self.data["transform"]["scale"][0] * xmin, # noqa
],
[
self.data["transform"]["translate"][1]
+ self.data["transform"]["scale"][1] * ymax, # noqa
self.data["transform"]["translate"][0]
+ self.data["transform"]["scale"][0] * xmax, # noqa
],
]
| TopoJson |
python | python-pillow__Pillow | src/PIL/FitsImagePlugin.py | {
"start": 3699,
"end": 4644
} | class ____(ImageFile.PyDecoder):
_pulls_fd = True
def decode(self, buffer: bytes | Image.SupportsArrayInterface) -> tuple[int, int]:
assert self.fd is not None
value = gzip.decompress(self.fd.read())
rows = []
offset = 0
number_of_bits = min(self.args[0] // 8, 4)
for y in range(self.state.ysize):
row = bytearray()
for x in range(self.state.xsize):
row += value[offset + (4 - number_of_bits) : offset + 4]
offset += 4
rows.append(row)
self.set_as_raw(bytes([pixel for row in rows[::-1] for pixel in row]))
return -1, 0
# --------------------------------------------------------------------
# Registry
Image.register_open(FitsImageFile.format, FitsImageFile, _accept)
Image.register_decoder("fits_gzip", FitsGzipDecoder)
Image.register_extensions(FitsImageFile.format, [".fit", ".fits"])
| FitsGzipDecoder |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 261662,
"end": 262005
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
node = sgqlc.types.Field("CreatedIssueContribution", graphql_name="node")
| CreatedIssueContributionEdge |
python | spack__spack | lib/spack/spack/cmd/common/arguments.py | {
"start": 5869,
"end": 7703
} | class ____(argparse.Action):
"""Pick the currently configured config scopes."""
def __init__(self, *args, **kwargs) -> None:
kwargs.setdefault("metavar", spack.config.SCOPES_METAVAR)
super().__init__(*args, **kwargs)
@property
def default(self):
return self._default() if callable(self._default) else self._default
@default.setter
def default(self, value):
self._default = value
@property
def choices(self):
return spack.config.scopes().keys()
@choices.setter
def choices(self, value):
pass
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
def _cdash_reporter(namespace):
"""Helper function to create a CDash reporter. This function gets an early reference to the
argparse namespace under construction, so it can later use it to create the object.
"""
def _factory():
def installed_specs(args):
packages = []
if getattr(args, "spec", ""):
packages = args.spec
elif getattr(args, "specs", ""):
packages = args.specs
elif getattr(args, "package", ""):
# Ensure CI 'spack test run' can output CDash results
packages = args.package
return [str(spack.spec.Spec(s)) for s in packages]
configuration = spack.reporters.CDashConfiguration(
upload_url=namespace.cdash_upload_url,
packages=installed_specs(namespace),
build=namespace.cdash_build,
site=namespace.cdash_site,
buildstamp=namespace.cdash_buildstamp,
track=namespace.cdash_track,
)
return spack.reporters.CDash(configuration=configuration)
return _factory
| ConfigScope |
python | django__django | tests/test_utils/tests.py | {
"start": 68364,
"end": 69109
} | class ____(SimpleTestCase):
def test_setup_test_environment_calling_more_than_once(self):
with self.assertRaisesMessage(
RuntimeError, "setup_test_environment() was already called"
):
setup_test_environment()
def test_allowed_hosts(self):
for type_ in (list, tuple):
with self.subTest(type_=type_):
allowed_hosts = type_("*")
with mock.patch("django.test.utils._TestState") as x:
del x.saved_data
with self.settings(ALLOWED_HOSTS=allowed_hosts):
setup_test_environment()
self.assertEqual(settings.ALLOWED_HOSTS, ["*", "testserver"])
| SetupTestEnvironmentTests |
python | PrefectHQ__prefect | src/prefect/utilities/schema_tools/hydration.py | {
"start": 2559,
"end": 2833
} | class ____(HydrationError):
@property
def message(self) -> str:
return f"Missing '{self.key}' key in __prefect object"
@property
@abstractmethod
def key(self) -> str:
raise NotImplementedError("Must be implemented by subclass")
| KeyNotFound |
python | PrefectHQ__prefect | src/prefect/client/schemas/actions.py | {
"start": 27631,
"end": 28598
} | class ____(ActionBaseModel):
"""Data used by the Prefect REST API to create a work pool."""
name: NonEmptyishName = Field(
description="The name of the work pool.",
)
description: Optional[str] = Field(default=None)
type: str = Field(
description="The work pool type.", default="prefect-agent"
) # TODO: change default
base_job_template: dict[str, Any] = Field(
default_factory=dict,
description="The base job template for the work pool.",
)
is_paused: bool = Field(
default=False,
description="Whether the work pool is paused.",
)
concurrency_limit: Optional[NonNegativeInteger] = Field(
default=None, description="A concurrency limit for the work pool."
)
storage_configuration: WorkPoolStorageConfiguration = Field(
default_factory=WorkPoolStorageConfiguration,
description="A storage configuration for the work pool.",
)
| WorkPoolCreate |
python | PyCQA__pylint | tests/functional/i/invalid/invalid_name/invalid_name_module_level.py | {
"start": 252,
"end": 1286
} | class ____:
pass
ClassB = ClassA
def A(): # [invalid-name]
return 1, 2, 3
CONSTA, CONSTB, CONSTC = A()
CONSTD = A()
CONST = "12 34 ".rstrip().split()
ASSIGNMENT_THAT_CRASHED_PYLINT = type(float.__new__.__code__)
# Exclusive assignment: uses const regex
if CONST:
OTHER_CONST = 1
elif CONSTA:
OTHER_CONST = 2
else:
OTHER_CONST = 3
# Lists, sets, and objects can pass against the variable OR const regexes.
if CONST:
other_const = [1]
elif CONSTA:
other_const = [2]
else:
other_const = [3]
if CONST:
ANOTHER_CONST = A()
else:
ANOTHER_CONST = 5
from importlib.metadata import PackageNotFoundError
from importlib.metadata import version
try:
VERSION = version("ty") # uninferable
except PackageNotFoundError:
VERSION = "0.0.0"
from typing import Annotated
IntWithAnnotation = Annotated[int, "anything"]
# Regression test for #10719: module-level constants should not be incorrectly
# classified as variables when a class-level attribute with the same name exists.
| ClassA |
python | kamyu104__LeetCode-Solutions | Python/min-cost-to-connect-all-points.py | {
"start": 1618,
"end": 2205
} | class ____(object):
def minCostConnectPoints(self, points):
"""
:type points: List[List[int]]
:rtype: int
"""
edges = []
for u in xrange(len(points)):
for v in xrange(u+1, len(points)):
edges.append((u, v, abs(points[v][0]-points[u][0]) + abs(points[v][1]-points[u][1])))
edges.sort(key=lambda x: x[2])
result = 0
union_find = UnionFind(len(points))
for u, v, val in edges:
if union_find.union_set(u, v):
result += val
return result
| Solution2 |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 154188,
"end": 162176
} | class ____:
if has_zarr_v3:
methods = [
"get",
"set",
"list_dir",
"list_prefix",
]
else:
methods = [
"__iter__",
"__contains__",
"__setitem__",
"__getitem__",
"listdir",
"list_prefix",
]
@contextlib.contextmanager
def create_zarr_target(self):
if Version(zarr.__version__) < Version("2.18.0"):
pytest.skip("Instrumented tests only work on latest Zarr.")
if has_zarr_v3:
kwargs = {"read_only": False}
else:
kwargs = {} # type: ignore[arg-type,unused-ignore]
store = KVStore({}, **kwargs) # type: ignore[arg-type,unused-ignore]
yield store
def make_patches(self, store):
from unittest.mock import MagicMock
return {
method: MagicMock(
f"KVStore.{method}",
side_effect=getattr(store, method),
autospec=True,
)
for method in self.methods
}
def summarize(self, patches):
summary = {}
for name, patch_ in patches.items():
count = 0
for call in patch_.mock_calls:
if "zarr.json" not in call.args:
count += 1
summary[name.strip("_")] = count
return summary
def check_requests(self, expected, patches):
summary = self.summarize(patches)
for k in summary:
assert summary[k] <= expected[k], (k, summary)
def test_append(self) -> None:
original = Dataset({"foo": ("x", [1])}, coords={"x": [0]})
modified = Dataset({"foo": ("x", [2])}, coords={"x": [1]})
with self.create_zarr_target() as store:
if has_zarr_v3:
# TODO: verify these
expected = {
"set": 5,
"get": 4,
"list_dir": 2,
"list_prefix": 1,
}
else:
expected = {
"iter": 1,
"contains": 18,
"setitem": 10,
"getitem": 13,
"listdir": 0,
"list_prefix": 3,
}
patches = self.make_patches(store)
with patch.multiple(KVStore, **patches):
original.to_zarr(store)
self.check_requests(expected, patches)
patches = self.make_patches(store)
# v2024.03.0: {'iter': 6, 'contains': 2, 'setitem': 5, 'getitem': 10, 'listdir': 6, 'list_prefix': 0}
# 6057128b: {'iter': 5, 'contains': 2, 'setitem': 5, 'getitem': 10, "listdir": 5, "list_prefix": 0}
if has_zarr_v3:
expected = {
"set": 4,
"get": 9, # TODO: fixme upstream (should be 8)
"list_dir": 2, # TODO: fixme upstream (should be 2)
"list_prefix": 0,
}
else:
expected = {
"iter": 1,
"contains": 11,
"setitem": 6,
"getitem": 15,
"listdir": 0,
"list_prefix": 1,
}
with patch.multiple(KVStore, **patches):
modified.to_zarr(store, mode="a", append_dim="x")
self.check_requests(expected, patches)
patches = self.make_patches(store)
if has_zarr_v3:
expected = {
"set": 4,
"get": 9, # TODO: fixme upstream (should be 8)
"list_dir": 2, # TODO: fixme upstream (should be 2)
"list_prefix": 0,
}
else:
expected = {
"iter": 1,
"contains": 11,
"setitem": 6,
"getitem": 15,
"listdir": 0,
"list_prefix": 1,
}
with patch.multiple(KVStore, **patches):
modified.to_zarr(store, mode="a-", append_dim="x")
self.check_requests(expected, patches)
with open_dataset(store, engine="zarr") as actual:
assert_identical(
actual, xr.concat([original, modified, modified], dim="x")
)
@requires_dask
def test_region_write(self) -> None:
ds = Dataset({"foo": ("x", [1, 2, 3])}, coords={"x": [1, 2, 3]}).chunk()
with self.create_zarr_target() as store:
if has_zarr_v3:
expected = {
"set": 5,
"get": 2,
"list_dir": 2,
"list_prefix": 4,
}
else:
expected = {
"iter": 1,
"contains": 16,
"setitem": 9,
"getitem": 13,
"listdir": 0,
"list_prefix": 5,
}
patches = self.make_patches(store)
with patch.multiple(KVStore, **patches):
ds.to_zarr(store, mode="w", compute=False)
self.check_requests(expected, patches)
# v2024.03.0: {'iter': 5, 'contains': 2, 'setitem': 1, 'getitem': 6, 'listdir': 5, 'list_prefix': 0}
# 6057128b: {'iter': 4, 'contains': 2, 'setitem': 1, 'getitem': 5, 'listdir': 4, 'list_prefix': 0}
if has_zarr_v3:
expected = {
"set": 1,
"get": 3,
"list_dir": 0,
"list_prefix": 0,
}
else:
expected = {
"iter": 1,
"contains": 6,
"setitem": 1,
"getitem": 7,
"listdir": 0,
"list_prefix": 0,
}
patches = self.make_patches(store)
with patch.multiple(KVStore, **patches):
ds.to_zarr(store, region={"x": slice(None)})
self.check_requests(expected, patches)
# v2024.03.0: {'iter': 6, 'contains': 4, 'setitem': 1, 'getitem': 11, 'listdir': 6, 'list_prefix': 0}
# 6057128b: {'iter': 4, 'contains': 2, 'setitem': 1, 'getitem': 7, 'listdir': 4, 'list_prefix': 0}
if has_zarr_v3:
expected = {
"set": 1,
"get": 4,
"list_dir": 0,
"list_prefix": 0,
}
else:
expected = {
"iter": 1,
"contains": 6,
"setitem": 1,
"getitem": 8,
"listdir": 0,
"list_prefix": 0,
}
patches = self.make_patches(store)
with patch.multiple(KVStore, **patches):
ds.to_zarr(store, region="auto")
self.check_requests(expected, patches)
if has_zarr_v3:
expected = {
"set": 0,
"get": 5,
"list_dir": 0,
"list_prefix": 0,
}
else:
expected = {
"iter": 1,
"contains": 6,
"setitem": 0,
"getitem": 8,
"listdir": 0,
"list_prefix": 0,
}
patches = self.make_patches(store)
with patch.multiple(KVStore, **patches):
with open_dataset(store, engine="zarr") as actual:
assert_identical(actual, ds)
self.check_requests(expected, patches)
@requires_zarr
| TestInstrumentedZarrStore |
python | huggingface__transformers | tests/models/deepseek_vl_hybrid/test_image_processing_deepseek_vl_hybrid.py | {
"start": 1259,
"end": 3740
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
high_res_size=None,
do_normalize=True,
image_mean=[0.5, 0.5, 0.5],
image_std=[0.5, 0.5, 0.5],
high_res_image_mean=[0.5, 0.5, 0.5],
high_res_image_std=[0.5, 0.5, 0.5],
):
size = size if size is not None else {"height": 18, "width": 18}
high_res_size = high_res_size if high_res_size is not None else {"height": 36, "width": 36}
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size
self.high_res_size = high_res_size
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.high_res_image_mean = high_res_image_mean
self.high_res_image_std = high_res_image_std
def prepare_image_processor_dict(self):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"high_res_image_mean": self.high_res_image_mean,
"high_res_image_std": self.high_res_image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"high_res_size": self.high_res_size,
}
def expected_output_image_shape(self, images):
max_size = max(self.size["height"], self.size["width"])
return self.num_channels, max_size, max_size
def expected_output_high_res_image_shape(self, images):
max_size = max(self.high_res_size["height"], self.high_res_size["width"])
return self.num_channels, max_size, max_size
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| DeepseekVLHybridImageProcessingTester |
python | PyCQA__bandit | bandit/core/context.py | {
"start": 145,
"end": 10667
} | class ____:
def __init__(self, context_object=None):
"""Initialize the class with a context, empty dict otherwise
:param context_object: The context object to create class from
:return: -
"""
if context_object is not None:
self._context = context_object
else:
self._context = dict()
def __repr__(self):
"""Generate representation of object for printing / interactive use
Most likely only interested in non-default properties, so we return
the string version of _context.
Example string returned:
<Context {'node': <_ast.Call object at 0x110252510>, 'function': None,
'name': 'socket', 'imports': set(['socket']), 'module': None,
'filename': 'examples/binding.py',
'call': <_ast.Call object at 0x110252510>, 'lineno': 3,
'import_aliases': {}, 'qualname': 'socket.socket'}>
:return: A string representation of the object
"""
return f"<Context {self._context}>"
@property
def call_args(self):
"""Get a list of function args
:return: A list of function args
"""
args = []
if "call" in self._context and hasattr(self._context["call"], "args"):
for arg in self._context["call"].args:
if hasattr(arg, "attr"):
args.append(arg.attr)
else:
args.append(self._get_literal_value(arg))
return args
@property
def call_args_count(self):
"""Get the number of args a function call has
:return: The number of args a function call has or None
"""
if "call" in self._context and hasattr(self._context["call"], "args"):
return len(self._context["call"].args)
else:
return None
@property
def call_function_name(self):
"""Get the name (not FQ) of a function call
:return: The name (not FQ) of a function call
"""
return self._context.get("name")
@property
def call_function_name_qual(self):
"""Get the FQ name of a function call
:return: The FQ name of a function call
"""
return self._context.get("qualname")
@property
def call_keywords(self):
"""Get a dictionary of keyword parameters
:return: A dictionary of keyword parameters for a call as strings
"""
if "call" in self._context and hasattr(
self._context["call"], "keywords"
):
return_dict = {}
for li in self._context["call"].keywords:
if hasattr(li.value, "attr"):
return_dict[li.arg] = li.value.attr
else:
return_dict[li.arg] = self._get_literal_value(li.value)
return return_dict
else:
return None
@property
def node(self):
"""Get the raw AST node associated with the context
:return: The raw AST node associated with the context
"""
return self._context.get("node")
@property
def string_val(self):
"""Get the value of a standalone unicode or string object
:return: value of a standalone unicode or string object
"""
return self._context.get("str")
@property
def bytes_val(self):
"""Get the value of a standalone bytes object (py3 only)
:return: value of a standalone bytes object
"""
return self._context.get("bytes")
@property
def string_val_as_escaped_bytes(self):
"""Get escaped value of the object.
Turn the value of a string or bytes object into byte sequence with
unknown, control, and \\ characters escaped.
This function should be used when looking for a known sequence in a
potentially badly encoded string in the code.
:return: sequence of printable ascii bytes representing original string
"""
val = self.string_val
if val is not None:
# it's any of str or unicode in py2, or str in py3
return val.encode("unicode_escape")
val = self.bytes_val
if val is not None:
return utils.escaped_bytes_representation(val)
return None
@property
def statement(self):
"""Get the raw AST for the current statement
:return: The raw AST for the current statement
"""
return self._context.get("statement")
@property
def function_def_defaults_qual(self):
"""Get a list of fully qualified default values in a function def
:return: List of defaults
"""
defaults = []
if (
"node" in self._context
and hasattr(self._context["node"], "args")
and hasattr(self._context["node"].args, "defaults")
):
for default in self._context["node"].args.defaults:
defaults.append(
utils.get_qual_attr(
default, self._context["import_aliases"]
)
)
return defaults
def _get_literal_value(self, literal):
"""Utility function to turn AST literals into native Python types
:param literal: The AST literal to convert
:return: The value of the AST literal
"""
if isinstance(literal, ast.Constant):
if isinstance(literal.value, bool):
literal_value = str(literal.value)
elif literal.value is None:
literal_value = str(literal.value)
else:
literal_value = literal.value
elif isinstance(literal, ast.List):
return_list = list()
for li in literal.elts:
return_list.append(self._get_literal_value(li))
literal_value = return_list
elif isinstance(literal, ast.Tuple):
return_tuple = tuple()
for ti in literal.elts:
return_tuple += (self._get_literal_value(ti),)
literal_value = return_tuple
elif isinstance(literal, ast.Set):
return_set = set()
for si in literal.elts:
return_set.add(self._get_literal_value(si))
literal_value = return_set
elif isinstance(literal, ast.Dict):
literal_value = dict(zip(literal.keys, literal.values))
elif isinstance(literal, ast.Name):
literal_value = literal.id
else:
literal_value = None
return literal_value
def get_call_arg_value(self, argument_name):
"""Gets the value of a named argument in a function call.
:return: named argument value
"""
kwd_values = self.call_keywords
if kwd_values is not None and argument_name in kwd_values:
return kwd_values[argument_name]
def check_call_arg_value(self, argument_name, argument_values=None):
"""Checks for a value of a named argument in a function call.
Returns none if the specified argument is not found.
:param argument_name: A string - name of the argument to look for
:param argument_values: the value, or list of values to test against
:return: Boolean True if argument found and matched, False if
found and not matched, None if argument not found at all
"""
arg_value = self.get_call_arg_value(argument_name)
if arg_value is not None:
if not isinstance(argument_values, list):
# if passed a single value, or a tuple, convert to a list
argument_values = list((argument_values,))
for val in argument_values:
if arg_value == val:
return True
return False
else:
# argument name not found, return None to allow testing for this
# eventuality
return None
def get_lineno_for_call_arg(self, argument_name):
"""Get the line number for a specific named argument
In case the call is split over multiple lines, get the correct one for
the argument.
:param argument_name: A string - name of the argument to look for
:return: Integer - the line number of the found argument, or -1
"""
if hasattr(self.node, "keywords"):
for key in self.node.keywords:
if key.arg == argument_name:
return key.value.lineno
def get_call_arg_at_position(self, position_num):
"""Returns positional argument at the specified position (if it exists)
:param position_num: The index of the argument to return the value for
:return: Value of the argument at the specified position if it exists
"""
max_args = self.call_args_count
if max_args and position_num < max_args:
arg = self._context["call"].args[position_num]
return getattr(arg, "attr", None) or self._get_literal_value(arg)
else:
return None
def is_module_being_imported(self, module):
"""Check for the specified module is currently being imported
:param module: The module name to look for
:return: True if the module is found, False otherwise
"""
return self._context.get("module") == module
def is_module_imported_exact(self, module):
"""Check if a specified module has been imported; only exact matches.
:param module: The module name to look for
:return: True if the module is found, False otherwise
"""
return module in self._context.get("imports", [])
def is_module_imported_like(self, module):
"""Check if a specified module has been imported
Check if a specified module has been imported; specified module exists
as part of any import statement.
:param module: The module name to look for
:return: True if the module is found, False otherwise
"""
if "imports" in self._context:
for imp in self._context["imports"]:
if module in imp:
return True
return False
@property
def filename(self):
return self._context.get("filename")
@property
def file_data(self):
return self._context.get("file_data")
@property
def import_aliases(self):
return self._context.get("import_aliases")
| Context |
python | sympy__sympy | sympy/polys/matrices/exceptions.py | {
"start": 398,
"end": 492
} | class ____(DMError):
"""list of lists is inconsistent with shape"""
pass
| DMBadInputError |
python | sympy__sympy | sympy/codegen/cfunctions.py | {
"start": 10829,
"end": 11874
} | class ____(Function):
"""
Represents the hypotenuse function.
Explanation
===========
The hypotenuse function is provided by e.g. the math library
in the C99 standard, hence one may want to represent the function
symbolically when doing code-generation.
Examples
========
>>> from sympy.abc import x, y
>>> from sympy.codegen.cfunctions import hypot
>>> hypot(3, 4).evalf() == 5.0
True
>>> hypot(x, y)
hypot(x, y)
>>> hypot(x, y).diff(x)
x/hypot(x, y)
"""
nargs = 2
def fdiff(self, argindex=1):
"""
Returns the first derivative of this function.
"""
if argindex in (1, 2):
return 2*self.args[argindex-1]/(_Two*self.func(*self.args))
else:
raise ArgumentIndexError(self, argindex)
def _eval_expand_func(self, **hints):
return _hypot(*self.args)
def _eval_rewrite_as_Pow(self, arg, **kwargs):
return _hypot(arg)
_eval_rewrite_as_tractable = _eval_rewrite_as_Pow
| hypot |
python | streamlit__streamlit | lib/streamlit/elements/widgets/select_slider.py | {
"start": 2310,
"end": 3428
} | class ____(Generic[T]):
options: Sequence[T]
value: list[int]
is_range_value: bool
def serialize(self, v: object) -> list[int]:
return self._as_index_list(v)
def deserialize(self, ui_value: list[int] | None) -> T | tuple[T, T]:
if not ui_value:
# Widget has not been used; fallback to the original value,
ui_value = self.value
# The widget always returns floats, so convert to ints before indexing
return_value: tuple[T, T] = cast(
"tuple[T, T]",
tuple(self.options[int(x)] for x in ui_value),
)
# If the original value was a list/tuple, so will be the output (and vice versa)
return return_value if self.is_range_value else return_value[0]
def _as_index_list(self, v: Any) -> list[int]:
if _is_range_value(v):
slider_value = [index_(self.options, val) for val in v]
start, end = slider_value
if start > end:
slider_value = [end, start]
return slider_value
return [index_(self.options, v)]
| SelectSliderSerde |
python | huggingface__transformers | src/transformers/models/phi4_multimodal/configuration_phi4_multimodal.py | {
"start": 5238,
"end": 11733
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`Phi4MultimodalAudioModel`]. It is used to instantiate a
Phi4Multimodal audio encoder according to the specified arguments, defining the model architecture. Instantiating a
configuration with the defaults will yield a similar configuration to that of the audio encoder of
[microsoft/Phi-4-multimodal-instruct](https://huggingface.co/microsoft/Phi-4-multimodal-instruct) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 1024):
Dimensionality of the encoder layers.
intermediate_size (`int`, *optional*, defaults to 1536):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
num_blocks (`int`, *optional*, defaults to 24):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
activation (`str`, *optional*, defaults to `"swish"`):
The non-linear activation function in the MLPs.
chunk_size (`int`, *optional*, defaults to -1):
The chunk size to create the masks.
left_chunk (`int`, *optional*, defaults to 18):
The left chunk to create the masks.
dropout_rate (`float`, *optional*, defaults to 0.0):
The dropout ratio.
ext_pw_out_channel (`int`, *optional*, defaults to 1024):
Number of out channels in the point-wise conv modules.
depthwise_separable_out_channel (`int`, *optional*, defaults to 1024):
Number of out channels in the depth-wise separable conv modules.
depthwise_multiplier (`int`, *optional*, defaults to 1):
Input size multiplier for the depth-wise separable conv modules.
kernel_size (`int`, *optional*, defaults to 3):
Kernel size for the depth-wise separable conv modules.
conv_activation (`str`, *optional*, defaults to `"swish"`):
The non-linear activation function in the conv modules.
input_size (`int`, *optional*, defaults to 80):
Input size for the audio model.
conv_glu_type (`str`, *optional*, defaults to `"swish"`):
The non-linear activation function in the point-wise conv modules.
time_reduction (`int`, *optional*, defaults to 8):
Time reduction (subsampling factor).
bias_max_distance (`int`, *optional*, defaults to 1000):
Max distance for the relative attention bias module.
bias_symmetric (`bool`, *optional*, defaults to `False`):
Whether the relative attention bias should be symmetric or not.
nemo_activation (`str`, *optional*, defaults to `"relu"`):
The non-linear activation function in the nemo conv modules.
nemo_conv_channels (`int`, *optional*, defaults to 1024):
Number of channels in the nemo conv modules.
downsample_rate (`int`, *optional*, defaults to 1):
Downsample rate for the audio feature extractor.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
audio_token_id (`int`, *optional*, defaults to 200011):
The audio token id.
feature_layer (`int`, *optional*, defaults to -2):
The index of the layer of the encoder from which to extract audio features.
Example:
```python
>>> from transformers import Phi4MultimodalAudioConfig
>>> # Initializing a Phi4MultimodalAudioConfig with microsoft/Phi-4-multimodal-instruct style configuration
>>> configuration = Phi4MultimodalAudioConfig()
```"""
model_type = "phi4_multimodal_audio"
def __init__(
self,
hidden_size: int = 1024,
intermediate_size: int = 1536,
num_blocks: int = 24,
num_attention_heads: int = 16,
activation: str = "swish",
chunk_size: int = -1,
left_chunk: int = 18,
dropout_rate: float = 0.0,
ext_pw_out_channel: int = 1024,
depthwise_separable_out_channel: int = 1024,
depthwise_multiplier: int = 1,
kernel_size: int = 3,
conv_activation: str = "swish",
input_size: int = 80,
conv_glu_type: str = "swish",
time_reduction: int = 8,
bias_max_distance: int = 1000,
bias_symmetric: bool = False,
nemo_activation: str = "relu",
nemo_conv_channels: int = 1024,
downsample_rate: int = 1,
initializer_range: float = 0.02,
audio_token_id: int = 200011,
feature_layer: int = -2,
**kwargs,
):
super().__init__(**kwargs)
self.hidden_size = hidden_size
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.activation = activation
self.chunk_size = chunk_size
self.left_chunk = left_chunk
self.num_blocks = num_blocks
self.dropout_rate = dropout_rate
self.ext_pw_out_channel = ext_pw_out_channel
self.depthwise_separable_out_channel = depthwise_separable_out_channel
self.depthwise_multiplier = depthwise_multiplier
self.kernel_size = kernel_size
self.conv_activation = conv_activation
self.input_size = input_size
self.conv_glu_type = conv_glu_type
self.time_reduction = time_reduction
self.bias_max_distance = bias_max_distance
self.bias_symmetric = bias_symmetric
self.nemo_activation = nemo_activation
self.nemo_conv_channels = nemo_conv_channels
self.downsample_rate = downsample_rate
self.audio_token_id = audio_token_id
self.initializer_range = initializer_range
self.feature_layer = feature_layer
if time_reduction % 2 != 0:
raise ValueError("`time_reduction` should be a multiple of 2!")
length = input_size
for _ in range(int(math.log2(time_reduction))):
length = math.floor((length - 1) / 2 + 1)
self.nemo_final_size = length
| Phi4MultimodalAudioConfig |
python | sqlalchemy__sqlalchemy | test/sql/test_metadata.py | {
"start": 157703,
"end": 162139
} | class ____(fixtures.TestBase):
"""test assignment of default fixures to columns"""
def _fixture(self, *arg, **kw):
return Column("x", Integer, *arg, **kw)
def test_server_default_positional(self):
target = schema.DefaultClause("y")
c = self._fixture(target)
assert c.server_default is target
assert target.column is c
def test_onupdate_default_not_server_default_one(self):
target1 = schema.DefaultClause("y")
target2 = schema.DefaultClause("z")
c = self._fixture(server_default=target1, server_onupdate=target2)
eq_(c.server_default.arg, "y")
eq_(c.server_onupdate.arg, "z")
def test_onupdate_default_not_server_default_two(self):
target1 = schema.DefaultClause("y", for_update=True)
target2 = schema.DefaultClause("z", for_update=True)
c = self._fixture(server_default=target1, server_onupdate=target2)
eq_(c.server_default.arg, "y")
eq_(c.server_onupdate.arg, "z")
def test_onupdate_default_not_server_default_three(self):
target1 = schema.DefaultClause("y", for_update=False)
target2 = schema.DefaultClause("z", for_update=True)
c = self._fixture(target1, target2)
eq_(c.server_default.arg, "y")
eq_(c.server_onupdate.arg, "z")
def test_onupdate_default_not_server_default_four(self):
target1 = schema.DefaultClause("y", for_update=False)
c = self._fixture(server_onupdate=target1)
is_(c.server_default, None)
eq_(c.server_onupdate.arg, "y")
def test_server_default_keyword_as_schemaitem(self):
target = schema.DefaultClause("y")
c = self._fixture(server_default=target)
assert c.server_default is target
assert target.column is c
def test_server_default_keyword_as_clause(self):
target = "y"
c = self._fixture(server_default=target)
assert c.server_default.arg == target
assert c.server_default.column is c
def test_server_default_onupdate_positional(self):
target = schema.DefaultClause("y", for_update=True)
c = self._fixture(target)
assert c.server_onupdate is target
assert target.column is c
def test_server_default_onupdate_keyword_as_schemaitem(self):
target = schema.DefaultClause("y", for_update=True)
c = self._fixture(server_onupdate=target)
assert c.server_onupdate is target
assert target.column is c
def test_server_default_onupdate_keyword_as_clause(self):
target = "y"
c = self._fixture(server_onupdate=target)
assert c.server_onupdate.arg == target
assert c.server_onupdate.column is c
def test_column_default_positional(self):
target = schema.ColumnDefault("y")
c = self._fixture(target)
assert c.default is target
assert target.column is c
def test_column_default_keyword_as_schemaitem(self):
target = schema.ColumnDefault("y")
c = self._fixture(default=target)
assert c.default is target
assert target.column is c
def test_column_default_keyword_as_clause(self):
target = "y"
c = self._fixture(default=target)
assert c.default.arg == target
assert c.default.column is c
def test_column_default_onupdate_positional(self):
target = schema.ColumnDefault("y", for_update=True)
c = self._fixture(target)
assert c.onupdate is target
assert target.column is c
def test_column_default_onupdate_keyword_as_schemaitem(self):
target = schema.ColumnDefault("y", for_update=True)
c = self._fixture(onupdate=target)
assert c.onupdate is target
assert target.column is c
def test_column_default_onupdate_keyword_as_clause(self):
target = "y"
c = self._fixture(onupdate=target)
assert c.onupdate.arg == target
assert c.onupdate.column is c
def test_column_insert_default(self):
c = self._fixture(insert_default="y")
assert c.default.arg == "y"
def test_column_insert_default_mututally_exclusive(self):
with expect_raises_message(
exc.ArgumentError,
"The 'default' and 'insert_default' parameters of "
"Column are mutually exclusive",
):
self._fixture(insert_default="x", default="y")
| ColumnDefaultsTest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictReadOnly2.py | {
"start": 1052,
"end": 1147
} | class ____(TypedDict, total=True):
a: str
b: NotRequired[str]
c: NotRequired[str]
| TD3 |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 128869,
"end": 132316
} | class ____(GroupedElement, SelectBase, Generic[_SB]):
"""Represent a grouping of a :class:`_expression.SelectBase`.
This differs from :class:`.Subquery` in that we are still
an "inner" SELECT statement, this is strictly for grouping inside of
compound selects.
"""
__visit_name__ = "select_statement_grouping"
_traverse_internals: _TraverseInternalsType = [
("element", InternalTraversal.dp_clauseelement)
] + SupportsCloneAnnotations._clone_annotations_traverse_internals
_is_select_container = True
element: _SB
def __init__(self, element: _SB) -> None:
self.element = cast(
_SB, coercions.expect(roles.SelectStatementRole, element)
)
def _ensure_disambiguated_names(self) -> SelectStatementGrouping[_SB]:
new_element = self.element._ensure_disambiguated_names()
if new_element is not self.element:
return SelectStatementGrouping(new_element)
else:
return self
def get_label_style(self) -> SelectLabelStyle:
return self.element.get_label_style()
def set_label_style(
self, label_style: SelectLabelStyle
) -> SelectStatementGrouping[_SB]:
return SelectStatementGrouping(
self.element.set_label_style(label_style)
)
@property
def select_statement(self) -> _SB:
return self.element
def self_group(self, against: Optional[OperatorType] = None) -> Self:
return self
if TYPE_CHECKING:
def _ungroup(self) -> _SB: ...
# def _generate_columns_plus_names(
# self, anon_for_dupe_key: bool
# ) -> List[Tuple[str, str, str, ColumnElement[Any], bool]]:
# return self.element._generate_columns_plus_names(anon_for_dupe_key)
def _generate_fromclause_column_proxies(
self,
subquery: FromClause,
columns: ColumnCollection[str, KeyedColumnElement[Any]],
primary_key: ColumnSet,
foreign_keys: Set[KeyedColumnElement[Any]],
*,
proxy_compound_columns: Optional[
Iterable[Sequence[ColumnElement[Any]]]
] = None,
) -> None:
self.element._generate_fromclause_column_proxies(
subquery,
columns,
proxy_compound_columns=proxy_compound_columns,
primary_key=primary_key,
foreign_keys=foreign_keys,
)
@util.ro_non_memoized_property
def _all_selected_columns(self) -> _SelectIterable:
return self.element._all_selected_columns
@util.ro_non_memoized_property
def selected_columns(self) -> ColumnCollection[str, ColumnElement[Any]]:
"""A :class:`_expression.ColumnCollection`
representing the columns that
the embedded SELECT statement returns in its result set, not including
:class:`_sql.TextClause` constructs.
.. versionadded:: 1.4
.. seealso::
:attr:`_sql.Select.selected_columns`
"""
return self.element.selected_columns
@util.ro_non_memoized_property
def _from_objects(self) -> List[FromClause]:
return self.element._from_objects
def _scalar_type(self) -> TypeEngine[Any]:
return self.element._scalar_type()
def add_cte(self, *ctes: CTE, nest_here: bool = False) -> Self:
# SelectStatementGrouping not generative: has no attribute '_generate'
raise NotImplementedError
| SelectStatementGrouping |
python | encode__django-rest-framework | tests/test_relations_hyperlink.py | {
"start": 1214,
"end": 1383
} | class ____(serializers.HyperlinkedModelSerializer):
class Meta:
model = ManyToManyTarget
fields = ('url', 'name', 'sources')
| ManyToManyTargetSerializer |
python | pypa__warehouse | warehouse/manage/forms.py | {
"start": 12481,
"end": 12956
} | class ____:
role_name = wtforms.SelectField(
"Select role",
choices=[
("", "Select role"),
("Member", "Member"),
("Manager", "Manager"),
("Owner", "Owner"),
("Billing Manager", "Billing Manager"),
],
coerce=lambda string: OrganizationRoleType(string) if string else None,
validators=[wtforms.validators.InputRequired(message="Select role")],
)
| OrganizationRoleNameMixin |
python | falconry__falcon | falcon/testing/helpers.py | {
"start": 10142,
"end": 14081
} | class ____:
"""Collects and validates ASGI events returned by an app.
Raises:
TypeError: An event field emitted by the app was of an unexpected type.
ValueError: Invalid event name or field value.
"""
_LIFESPAN_EVENT_TYPES = frozenset(
[
'lifespan.startup.complete',
'lifespan.startup.failed',
'lifespan.shutdown.complete',
'lifespan.shutdown.failed',
]
)
_HEADER_NAME_RE = re.compile(rb'^[a-zA-Z][a-zA-Z0-9\-_]*$')
_BAD_HEADER_VALUE_RE = re.compile(rb'[\000-\037]')
events: list[AsgiEvent]
"""An iterable of events that were emitted by the app,
collected as-is from the app.
"""
headers: list[tuple[str, str]]
"""An iterable of (str, str) tuples representing the ISO-8859-1 decoded
headers emitted by the app in the body of the ``'http.response.start'`` event.
"""
status: ResponseStatus | None
"""HTTP status code emitted by the app in the body of the
``'http.response.start'`` event.
"""
body_chunks: list[bytes]
"""An iterable of ``bytes`` objects emitted by the app via
``'http.response.body'`` events.
"""
more_body: bool | None
"""Whether or not the app expects to emit more body chunks.
Will be ``None`` if unknown (i.e., the app has not yet emitted
any ``'http.response.body'`` events.)
"""
def __init__(self) -> None:
self.events = []
self.headers = []
self.status = None
self.body_chunks = []
self.more_body = None
async def collect(self, event: AsgiEvent) -> None:
if self.more_body is False:
# NOTE(kgriffs): According to the ASGI spec, once we get a
# message setting more_body to False, any further messages
# on the channel are ignored.
return
self.events.append(event)
event_type = event['type']
if not isinstance(event_type, str):
raise TypeError('ASGI event type must be a Unicode string')
if event_type == EventType.HTTP_RESPONSE_START:
for name, value in event.get('headers', []):
if not isinstance(name, bytes):
raise TypeError('ASGI header names must be byte strings')
if not isinstance(value, bytes):
raise TypeError('ASGI header names must be byte strings')
# NOTE(vytas): Ported basic validation from wsgiref.validate.
if not self._HEADER_NAME_RE.match(name):
raise ValueError('Bad header name: {!r}'.format(name))
if self._BAD_HEADER_VALUE_RE.search(value):
raise ValueError('Bad header value: {!r}'.format(value))
# NOTE(vytas): After the name validation above, the name is
# guaranteed to only contain a subset of ASCII.
name_decoded = name.decode()
if not name_decoded.islower():
raise ValueError('ASGI header names must be lowercase')
self.headers.append((name_decoded, value.decode('latin1')))
self.status = event['status']
if not isinstance(self.status, int):
raise TypeError('ASGI status must be an int')
elif event_type == EventType.HTTP_RESPONSE_BODY:
chunk = event.get('body', b'')
if not isinstance(chunk, bytes):
raise TypeError('ASGI body content must be a byte string')
self.body_chunks.append(chunk)
self.more_body = event.get('more_body', False)
if not isinstance(self.more_body, bool):
raise TypeError('ASGI more_body flag must be a bool')
elif event_type not in self._LIFESPAN_EVENT_TYPES:
raise ValueError('Invalid ASGI event type: ' + event_type)
__call__ = collect
| ASGIResponseEventCollector |
python | airbytehq__airbyte | airbyte-integrations/connectors/destination-chroma/destination_chroma/config.py | {
"start": 2793,
"end": 3292
} | class ____(VectorDBConfigModel):
indexing: ChromaIndexingConfigModel
embedding: Union[
AzureOpenAIEmbeddingConfigModel,
OpenAIEmbeddingConfigModel,
CohereEmbeddingConfigModel,
FromFieldEmbeddingConfigModel,
FakeEmbeddingConfigModel,
OpenAICompatibleEmbeddingConfigModel,
NoEmbeddingConfigModel,
] = Field(..., title="Embedding", description="Embedding configuration", discriminator="mode", group="embedding", type="object")
| ConfigModel |
python | doocs__leetcode | solution/2200-2299/2262.Total Appeal of A String/Solution.py | {
"start": 0,
"end": 259
} | class ____:
def appealSum(self, s: str) -> int:
ans = t = 0
pos = [-1] * 26
for i, c in enumerate(s):
c = ord(c) - ord('a')
t += i - pos[c]
ans += t
pos[c] = i
return ans
| Solution |
python | Textualize__textual | tests/test_binding_inheritance.py | {
"start": 1288,
"end": 2146
} | class ____(App[None]):
"""An app with zero bindings."""
async def test_just_app_no_bindings() -> None:
"""An app with no bindings should have no bindings, other than the app's hard-coded ones."""
async with NoBindings().run_test() as pilot:
assert list(pilot.app._bindings.key_to_bindings.keys()) == [
"ctrl+q",
"ctrl+c",
"ctrl+p",
]
assert pilot.app._bindings.get_bindings_for_key("ctrl+q")[0].priority is True
##############################################################################
# An application with a single alpha binding.
#
# Sticking with just an app and the default screen: this configuration has a
# BINDINGS on the app itself, and simply binds the letter a. The result
# should be that we see the letter a, the app's default bindings, and
# nothing else.
| NoBindings |
python | walkccc__LeetCode | solutions/3300. Minimum Element After Replacement With Digit Sum/3300.py | {
"start": 0,
"end": 120
} | class ____:
def minElement(self, nums: list[int]) -> int:
return min(sum(map(int, str(num))) for num in nums)
| Solution |
python | getsentry__sentry | src/sentry/plugins/sentry_useragents/models.py | {
"start": 2099,
"end": 2397
} | class ____(UserAgentPlugin):
"""
Automatically adds the 'device' tag from events containing interface data
from ``request``.
"""
slug = "device"
title = "Auto Tag: Device"
tag = "device"
def get_tag_from_ua(self, ua):
return ua["device"]["family"]
| DevicePlugin |
python | conda__conda | tests/plugins/test_manager.py | {
"start": 1247,
"end": 1442
} | class ____:
@plugins.hookimpl
def conda_solvers(*args):
yield VerboseCondaSolver
DummyVirtualPackage = plugins.CondaVirtualPackage("dummy", "version", "build")
| VerboseSolverPlugin |
python | pypa__hatch | backend/src/hatchling/metadata/core.py | {
"start": 58289,
"end": 62187
} | class ____(Generic[PluginManagerBound]):
def __init__(self, root: str, config: dict[str, Any], plugin_manager: PluginManagerBound) -> None:
self.root = root
self.config = config
self.plugin_manager = plugin_manager
self._allow_direct_references: bool | None = None
self._allow_ambiguous_features: bool | None = None
self._hook_config: dict[str, Any] | None = None
self._hooks: dict[str, MetadataHookInterface] | None = None
@property
def allow_direct_references(self) -> bool:
if self._allow_direct_references is None:
allow_direct_references: bool = self.config.get("allow-direct-references", False)
if not isinstance(allow_direct_references, bool):
message = "Field `tool.hatch.metadata.allow-direct-references` must be a boolean"
raise TypeError(message)
self._allow_direct_references = allow_direct_references
return self._allow_direct_references
@property
def allow_ambiguous_features(self) -> bool:
# TODO: remove in the first minor release after Jan 1, 2024
if self._allow_ambiguous_features is None:
allow_ambiguous_features: bool = self.config.get("allow-ambiguous-features", False)
if not isinstance(allow_ambiguous_features, bool):
message = "Field `tool.hatch.metadata.allow-ambiguous-features` must be a boolean"
raise TypeError(message)
self._allow_ambiguous_features = allow_ambiguous_features
return self._allow_ambiguous_features
@property
def hook_config(self) -> dict[str, Any]:
if self._hook_config is None:
hook_config: dict[str, Any] = self.config.get("hooks", {})
if not isinstance(hook_config, dict):
message = "Field `tool.hatch.metadata.hooks` must be a table"
raise TypeError(message)
self._hook_config = hook_config
return self._hook_config
@property
def hooks(self) -> dict[str, MetadataHookInterface]:
if self._hooks is None:
hook_config = self.hook_config
configured_hooks = {}
for hook_name, config in hook_config.items():
metadata_hook = self.plugin_manager.metadata_hook.get(hook_name)
if metadata_hook is None:
from hatchling.plugin.exceptions import UnknownPluginError
message = f"Unknown metadata hook: {hook_name}"
raise UnknownPluginError(message)
configured_hooks[hook_name] = metadata_hook(self.root, config)
self._hooks = configured_hooks
return self._hooks
def _resolve_optional_dependencies(
optional_dependencies_complex, dependent_option, inherited_options, visited, resolved
):
if dependent_option in resolved:
return
if dependent_option in visited:
message = f"Field `project.optional-dependencies` defines a circular dependency group: {dependent_option}"
raise ValueError(message)
visited.add(dependent_option)
if dependent_option in inherited_options:
for selected_option in inherited_options[dependent_option]:
_resolve_optional_dependencies(
optional_dependencies_complex, selected_option, inherited_options, visited, resolved
)
if selected_option not in optional_dependencies_complex:
message = (
f"Unknown recursive dependency group in field `project.optional-dependencies`: {selected_option}"
)
raise ValueError(message)
optional_dependencies_complex[dependent_option].update(optional_dependencies_complex[selected_option])
resolved.add(dependent_option)
visited.remove(dependent_option)
| HatchMetadataSettings |
python | marshmallow-code__marshmallow | tests/base.py | {
"start": 6914,
"end": 7039
} | class ____(BlogSchema):
user = fields.Nested(UserSchema, only=("name",), exclude=("name", "species"))
| BlogSchemaOnlyExclude |
python | great-expectations__great_expectations | tests/metrics/test_metric.py | {
"start": 711,
"end": 768
} | class ____(MetricResult[bool]): ...
| ColumnValuesAboveResult |
python | fsspec__filesystem_spec | fsspec/implementations/reference.py | {
"start": 20376,
"end": 47349
} | class ____(AsyncFileSystem):
"""View byte ranges of some other file as a file system
Initial version: single file system target, which must support
async, and must allow start and end args in _cat_file. Later versions
may allow multiple arbitrary URLs for the targets.
This FileSystem is read-only. It is designed to be used with async
targets (for now). We do not get original file details from the target FS.
Configuration is by passing a dict of references at init, or a URL to
a JSON file containing the same; this dict
can also contain concrete data for some set of paths.
Reference dict format:
{path0: bytes_data, path1: (target_url, offset, size)}
https://github.com/fsspec/kerchunk/blob/main/README.md
"""
protocol = "reference"
cachable = False
def __init__(
self,
fo,
target=None,
ref_storage_args=None,
target_protocol=None,
target_options=None,
remote_protocol=None,
remote_options=None,
fs=None,
template_overrides=None,
simple_templates=True,
max_gap=64_000,
max_block=256_000_000,
cache_size=128,
**kwargs,
):
"""
Parameters
----------
fo : dict or str
The set of references to use for this instance, with a structure as above.
If str referencing a JSON file, will use fsspec.open, in conjunction
with target_options and target_protocol to open and parse JSON at this
location. If a directory, then assume references are a set of parquet
files to be loaded lazily.
target : str
For any references having target_url as None, this is the default file
target to use
ref_storage_args : dict
If references is a str, use these kwargs for loading the JSON file.
Deprecated: use target_options instead.
target_protocol : str
Used for loading the reference file, if it is a path. If None, protocol
will be derived from the given path
target_options : dict
Extra FS options for loading the reference file ``fo``, if given as a path
remote_protocol : str
The protocol of the filesystem on which the references will be evaluated
(unless fs is provided). If not given, will be derived from the first
URL that has a protocol in the templates or in the references, in that
order.
remote_options : dict
kwargs to go with remote_protocol
fs : AbstractFileSystem | dict(str, (AbstractFileSystem | dict))
Directly provide a file system(s):
- a single filesystem instance
- a dict of protocol:filesystem, where each value is either a filesystem
instance, or a dict of kwargs that can be used to create in
instance for the given protocol
If this is given, remote_options and remote_protocol are ignored.
template_overrides : dict
Swap out any templates in the references file with these - useful for
testing.
simple_templates: bool
Whether templates can be processed with simple replace (True) or if
jinja is needed (False, much slower). All reference sets produced by
``kerchunk`` are simple in this sense, but the spec allows for complex.
max_gap, max_block: int
For merging multiple concurrent requests to the same remote file.
Neighboring byte ranges will only be merged when their
inter-range gap is <= ``max_gap``. Default is 64KB. Set to 0
to only merge when it requires no extra bytes. Pass a negative
number to disable merging, appropriate for local target files.
Neighboring byte ranges will only be merged when the size of
the aggregated range is <= ``max_block``. Default is 256MB.
cache_size : int
Maximum size of LRU cache, where cache_size*record_size denotes
the total number of references that can be loaded in memory at once.
Only used for lazily loaded references.
kwargs : passed to parent class
"""
super().__init__(**kwargs)
self.target = target
self.template_overrides = template_overrides
self.simple_templates = simple_templates
self.templates = {}
self.fss = {}
self._dircache = {}
self.max_gap = max_gap
self.max_block = max_block
if isinstance(fo, str):
dic = dict(
**(ref_storage_args or target_options or {}), protocol=target_protocol
)
ref_fs, fo2 = fsspec.core.url_to_fs(fo, **dic)
if ".json" not in fo2 and (
fo.endswith(("parq", "parquet", "/")) or ref_fs.isdir(fo2)
):
# Lazy parquet refs
logger.info("Open lazy reference dict from URL %s", fo)
self.references = LazyReferenceMapper(
fo2,
fs=ref_fs,
cache_size=cache_size,
)
else:
# text JSON
with fsspec.open(fo, "rb", **dic) as f:
logger.info("Read reference from URL %s", fo)
text = json.load(f)
self._process_references(text, template_overrides)
else:
# dictionaries
self._process_references(fo, template_overrides)
if isinstance(fs, dict):
self.fss = {
k: (
fsspec.filesystem(k.split(":", 1)[0], **opts)
if isinstance(opts, dict)
else opts
)
for k, opts in fs.items()
}
if None not in self.fss:
self.fss[None] = filesystem("file")
return
if fs is not None:
# single remote FS
remote_protocol = (
fs.protocol[0] if isinstance(fs.protocol, tuple) else fs.protocol
)
self.fss[remote_protocol] = fs
if remote_protocol is None:
# get single protocol from any templates
for ref in self.templates.values():
if callable(ref):
ref = ref()
protocol, _ = fsspec.core.split_protocol(ref)
if protocol and protocol not in self.fss:
fs = filesystem(protocol, **(remote_options or {}))
self.fss[protocol] = fs
if remote_protocol is None:
# get single protocol from references
# TODO: warning here, since this can be very expensive?
for ref in self.references.values():
if callable(ref):
ref = ref()
if isinstance(ref, list) and ref[0]:
protocol, _ = fsspec.core.split_protocol(ref[0])
if protocol not in self.fss:
fs = filesystem(protocol, **(remote_options or {}))
self.fss[protocol] = fs
# only use first remote URL
break
if remote_protocol and remote_protocol not in self.fss:
fs = filesystem(remote_protocol, **(remote_options or {}))
self.fss[remote_protocol] = fs
self.fss[None] = fs or filesystem("file") # default one
# Wrap any non-async filesystems to ensure async methods are available below
for k, f in self.fss.items():
if not f.async_impl:
self.fss[k] = AsyncFileSystemWrapper(f, asynchronous=self.asynchronous)
elif self.asynchronous ^ f.asynchronous:
raise ValueError(
"Reference-FS's target filesystem must have same value "
"of asynchronous"
)
def _cat_common(self, path, start=None, end=None):
path = self._strip_protocol(path)
logger.debug(f"cat: {path}")
try:
part = self.references[path]
except KeyError as exc:
raise FileNotFoundError(path) from exc
if isinstance(part, str):
part = part.encode()
if hasattr(part, "to_bytes"):
part = part.to_bytes()
if isinstance(part, bytes):
logger.debug(f"Reference: {path}, type bytes")
if part.startswith(b"base64:"):
part = base64.b64decode(part[7:])
return part, None, None
if len(part) == 1:
logger.debug(f"Reference: {path}, whole file => {part}")
url = part[0]
start1, end1 = start, end
else:
url, start0, size = part
logger.debug(f"Reference: {path} => {url}, offset {start0}, size {size}")
end0 = start0 + size
if start is not None:
if start >= 0:
start1 = start0 + start
else:
start1 = end0 + start
else:
start1 = start0
if end is not None:
if end >= 0:
end1 = start0 + end
else:
end1 = end0 + end
else:
end1 = end0
if url is None:
url = self.target
return url, start1, end1
async def _cat_file(self, path, start=None, end=None, **kwargs):
part_or_url, start0, end0 = self._cat_common(path, start=start, end=end)
if isinstance(part_or_url, bytes):
return part_or_url[start:end]
protocol, _ = split_protocol(part_or_url)
try:
return await self.fss[protocol]._cat_file(
part_or_url, start=start0, end=end0
)
except Exception as e:
raise ReferenceNotReachable(path, part_or_url) from e
def cat_file(self, path, start=None, end=None, **kwargs):
part_or_url, start0, end0 = self._cat_common(path, start=start, end=end)
if isinstance(part_or_url, bytes):
return part_or_url[start:end]
protocol, _ = split_protocol(part_or_url)
try:
return self.fss[protocol].cat_file(part_or_url, start=start0, end=end0)
except Exception as e:
raise ReferenceNotReachable(path, part_or_url) from e
def pipe_file(self, path, value, **_):
"""Temporarily add binary data or reference as a file"""
self.references[path] = value
async def _get_file(self, rpath, lpath, **kwargs):
if self.isdir(rpath):
return os.makedirs(lpath, exist_ok=True)
data = await self._cat_file(rpath)
with open(lpath, "wb") as f:
f.write(data)
def get_file(self, rpath, lpath, callback=DEFAULT_CALLBACK, **kwargs):
if self.isdir(rpath):
return os.makedirs(lpath, exist_ok=True)
data = self.cat_file(rpath, **kwargs)
callback.set_size(len(data))
if isfilelike(lpath):
lpath.write(data)
else:
with open(lpath, "wb") as f:
f.write(data)
callback.absolute_update(len(data))
def get(self, rpath, lpath, recursive=False, **kwargs):
if recursive:
# trigger directory build
self.ls("")
rpath = self.expand_path(rpath, recursive=recursive)
fs = fsspec.filesystem("file", auto_mkdir=True)
targets = other_paths(rpath, lpath)
if recursive:
data = self.cat([r for r in rpath if not self.isdir(r)])
else:
data = self.cat(rpath)
for remote, local in zip(rpath, targets):
if remote in data:
fs.pipe_file(local, data[remote])
def cat(self, path, recursive=False, on_error="raise", **kwargs):
if isinstance(path, str) and recursive:
raise NotImplementedError
if isinstance(path, list) and (recursive or any("*" in p for p in path)):
raise NotImplementedError
# TODO: if references is lazy, pre-fetch all paths in batch before access
proto_dict = _protocol_groups(path, self.references)
out = {}
for proto, paths in proto_dict.items():
fs = self.fss[proto]
urls, starts, ends, valid_paths = [], [], [], []
for p in paths:
# find references or label not-found. Early exit if any not
# found and on_error is "raise"
try:
u, s, e = self._cat_common(p)
if not isinstance(u, (bytes, str)):
# nan/None from parquet
continue
except FileNotFoundError as err:
if on_error == "raise":
raise
if on_error != "omit":
out[p] = err
else:
urls.append(u)
starts.append(s)
ends.append(e)
valid_paths.append(p)
# process references into form for merging
urls2 = []
starts2 = []
ends2 = []
paths2 = []
whole_files = set()
for u, s, e, p in zip(urls, starts, ends, valid_paths):
if isinstance(u, bytes):
# data
out[p] = u
elif s is None:
# whole file - limits are None, None, but no further
# entries take for this file
whole_files.add(u)
urls2.append(u)
starts2.append(s)
ends2.append(e)
paths2.append(p)
for u, s, e, p in zip(urls, starts, ends, valid_paths):
# second run to account for files that are to be loaded whole
if s is not None and u not in whole_files:
urls2.append(u)
starts2.append(s)
ends2.append(e)
paths2.append(p)
# merge and fetch consolidated ranges
new_paths, new_starts, new_ends = merge_offset_ranges(
list(urls2),
list(starts2),
list(ends2),
sort=True,
max_gap=self.max_gap,
max_block=self.max_block,
)
bytes_out = fs.cat_ranges(new_paths, new_starts, new_ends)
# unbundle from merged bytes - simple approach
for u, s, e, p in zip(urls, starts, ends, valid_paths):
if p in out:
continue # was bytes, already handled
for np, ns, ne, b in zip(new_paths, new_starts, new_ends, bytes_out):
if np == u and (ns is None or ne is None):
if isinstance(b, Exception):
out[p] = b
else:
out[p] = b[s:e]
elif np == u and s >= ns and e <= ne:
if isinstance(b, Exception):
out[p] = b
else:
out[p] = b[s - ns : (e - ne) or None]
for k, v in out.copy().items():
# these were valid references, but fetch failed, so transform exc
if isinstance(v, Exception) and k in self.references:
ex = out[k]
new_ex = ReferenceNotReachable(k, self.references[k])
new_ex.__cause__ = ex
if on_error == "raise":
raise new_ex
elif on_error != "omit":
out[k] = new_ex
if len(out) == 1 and isinstance(path, str) and "*" not in path:
return _first(out)
return out
def _process_references(self, references, template_overrides=None):
vers = references.get("version", None)
if vers is None:
self._process_references0(references)
elif vers == 1:
self._process_references1(references, template_overrides=template_overrides)
else:
raise ValueError(f"Unknown reference spec version: {vers}")
# TODO: we make dircache by iterating over all entries, but for Spec >= 1,
# can replace with programmatic. Is it even needed for mapper interface?
def _process_references0(self, references):
"""Make reference dict for Spec Version 0"""
if isinstance(references, dict):
# do not do this for lazy/parquet backend, which will not make dicts,
# but must remain writable in the original object
references = {
key: json.dumps(val) if isinstance(val, dict) else val
for key, val in references.items()
}
self.references = references
def _process_references1(self, references, template_overrides=None):
if not self.simple_templates or self.templates:
import jinja2
self.references = {}
self._process_templates(references.get("templates", {}))
@lru_cache(1000)
def _render_jinja(u):
return jinja2.Template(u).render(**self.templates)
for k, v in references.get("refs", {}).items():
if isinstance(v, str):
if v.startswith("base64:"):
self.references[k] = base64.b64decode(v[7:])
self.references[k] = v
elif isinstance(v, dict):
self.references[k] = json.dumps(v)
elif self.templates:
u = v[0]
if "{{" in u:
if self.simple_templates:
u = (
u.replace("{{", "{")
.replace("}}", "}")
.format(**self.templates)
)
else:
u = _render_jinja(u)
self.references[k] = [u] if len(v) == 1 else [u, v[1], v[2]]
else:
self.references[k] = v
self.references.update(self._process_gen(references.get("gen", [])))
def _process_templates(self, tmp):
self.templates = {}
if self.template_overrides is not None:
tmp.update(self.template_overrides)
for k, v in tmp.items():
if "{{" in v:
import jinja2
self.templates[k] = lambda temp=v, **kwargs: jinja2.Template(
temp
).render(**kwargs)
else:
self.templates[k] = v
def _process_gen(self, gens):
out = {}
for gen in gens:
dimension = {
k: (
v
if isinstance(v, list)
else range(v.get("start", 0), v["stop"], v.get("step", 1))
)
for k, v in gen["dimensions"].items()
}
products = (
dict(zip(dimension.keys(), values))
for values in itertools.product(*dimension.values())
)
for pr in products:
import jinja2
key = jinja2.Template(gen["key"]).render(**pr, **self.templates)
url = jinja2.Template(gen["url"]).render(**pr, **self.templates)
if ("offset" in gen) and ("length" in gen):
offset = int(
jinja2.Template(gen["offset"]).render(**pr, **self.templates)
)
length = int(
jinja2.Template(gen["length"]).render(**pr, **self.templates)
)
out[key] = [url, offset, length]
elif ("offset" in gen) ^ ("length" in gen):
raise ValueError(
"Both 'offset' and 'length' are required for a "
"reference generator entry if either is provided."
)
else:
out[key] = [url]
return out
def _dircache_from_items(self):
self.dircache = {"": []}
it = self.references.items()
for path, part in it:
if isinstance(part, (bytes, str)) or hasattr(part, "to_bytes"):
size = len(part)
elif len(part) == 1:
size = None
else:
_, _, size = part
par = path.rsplit("/", 1)[0] if "/" in path else ""
par0 = par
subdirs = [par0]
while par0 and par0 not in self.dircache:
# collect parent directories
par0 = self._parent(par0)
subdirs.append(par0)
subdirs.reverse()
for parent, child in zip(subdirs, subdirs[1:]):
# register newly discovered directories
assert child not in self.dircache
assert parent in self.dircache
self.dircache[parent].append(
{"name": child, "type": "directory", "size": 0}
)
self.dircache[child] = []
self.dircache[par].append({"name": path, "type": "file", "size": size})
def _open(self, path, mode="rb", block_size=None, cache_options=None, **kwargs):
part_or_url, start0, end0 = self._cat_common(path)
# This logic is kept outside `ReferenceFile` to avoid unnecessary redirection.
# That does mean `_cat_common` gets called twice if it eventually reaches `ReferenceFile`.
if isinstance(part_or_url, bytes):
return io.BytesIO(part_or_url[start0:end0])
protocol, _ = split_protocol(part_or_url)
if start0 is None and end0 is None:
return self.fss[protocol]._open(
part_or_url,
mode,
block_size=block_size,
cache_options=cache_options,
**kwargs,
)
return ReferenceFile(
self,
path,
mode,
block_size=block_size,
cache_options=cache_options,
**kwargs,
)
def ls(self, path, detail=True, **kwargs):
logger.debug("list %s", path)
path = self._strip_protocol(path)
if isinstance(self.references, LazyReferenceMapper):
try:
return self.references.ls(path, detail)
except KeyError:
pass
raise FileNotFoundError(f"'{path}' is not a known key")
if not self.dircache:
self._dircache_from_items()
out = self._ls_from_cache(path)
if out is None:
raise FileNotFoundError(path)
if detail:
return out
return [o["name"] for o in out]
def exists(self, path, **kwargs): # overwrite auto-sync version
return self.isdir(path) or self.isfile(path)
def isdir(self, path): # overwrite auto-sync version
if self.dircache:
return path in self.dircache
elif isinstance(self.references, LazyReferenceMapper):
return path in self.references.listdir()
else:
# this may be faster than building dircache for single calls, but
# by looping will be slow for many calls; could cache it?
return any(_.startswith(f"{path}/") for _ in self.references)
def isfile(self, path): # overwrite auto-sync version
return path in self.references
async def _ls(self, path, detail=True, **kwargs): # calls fast sync code
return self.ls(path, detail, **kwargs)
def find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs):
if withdirs:
return super().find(
path, maxdepth=maxdepth, withdirs=withdirs, detail=detail, **kwargs
)
if path:
path = self._strip_protocol(path)
r = sorted(k for k in self.references if k.startswith(path))
else:
r = sorted(self.references)
if detail:
if not self.dircache:
self._dircache_from_items()
return {k: self._ls_from_cache(k)[0] for k in r}
else:
return r
def info(self, path, **kwargs):
out = self.references.get(path)
if out is not None:
if isinstance(out, (str, bytes)):
# decode base64 here
return {"name": path, "type": "file", "size": len(out)}
elif len(out) > 1:
return {"name": path, "type": "file", "size": out[2]}
else:
out0 = [{"name": path, "type": "file", "size": None}]
else:
out = self.ls(path, True)
out0 = [o for o in out if o["name"] == path]
if not out0:
return {"name": path, "type": "directory", "size": 0}
if out0[0]["size"] is None:
# if this is a whole remote file, update size using remote FS
prot, _ = split_protocol(self.references[path][0])
out0[0]["size"] = self.fss[prot].size(self.references[path][0])
return out0[0]
async def _info(self, path, **kwargs): # calls fast sync code
return self.info(path)
async def _rm_file(self, path, **kwargs):
self.references.pop(
path, None
) # ignores FileNotFound, just as well for directories
self.dircache.clear() # this is a bit heavy handed
async def _pipe_file(self, path, data, mode="overwrite", **kwargs):
if mode == "create" and self.exists(path):
raise FileExistsError
# can be str or bytes
self.references[path] = data
self.dircache.clear() # this is a bit heavy handed
async def _put_file(self, lpath, rpath, mode="overwrite", **kwargs):
# puts binary
if mode == "create" and self.exists(rpath):
raise FileExistsError
with open(lpath, "rb") as f:
self.references[rpath] = f.read()
self.dircache.clear() # this is a bit heavy handed
def save_json(self, url, **storage_options):
"""Write modified references into new location"""
out = {}
for k, v in self.references.items():
if isinstance(v, bytes):
try:
out[k] = v.decode("ascii")
except UnicodeDecodeError:
out[k] = (b"base64:" + base64.b64encode(v)).decode()
else:
out[k] = v
with fsspec.open(url, "wb", **storage_options) as f:
f.write(json.dumps({"version": 1, "refs": out}).encode())
| ReferenceFileSystem |
python | getsentry__sentry | src/sentry/utils/math.py | {
"start": 725,
"end": 872
} | class ____(ABC):
@abstractmethod
def update(self, n: int, avg: float, value: float) -> float:
raise NotImplementedError
| MovingAverage |
python | openai__openai-python | src/openai/types/beta/assistant_update_params.py | {
"start": 610,
"end": 6289
} | class ____(TypedDict, total=False):
description: Optional[str]
"""The description of the assistant. The maximum length is 512 characters."""
instructions: Optional[str]
"""The system instructions that the assistant uses.
The maximum length is 256,000 characters.
"""
metadata: Optional[Metadata]
"""Set of 16 key-value pairs that can be attached to an object.
This can be useful for storing additional information about the object in a
structured format, and querying for objects via API or the dashboard.
Keys are strings with a maximum length of 64 characters. Values are strings with
a maximum length of 512 characters.
"""
model: Union[
str,
Literal[
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
"gpt-5-2025-08-07",
"gpt-5-mini-2025-08-07",
"gpt-5-nano-2025-08-07",
"gpt-4.1",
"gpt-4.1-mini",
"gpt-4.1-nano",
"gpt-4.1-2025-04-14",
"gpt-4.1-mini-2025-04-14",
"gpt-4.1-nano-2025-04-14",
"o3-mini",
"o3-mini-2025-01-31",
"o1",
"o1-2024-12-17",
"gpt-4o",
"gpt-4o-2024-11-20",
"gpt-4o-2024-08-06",
"gpt-4o-2024-05-13",
"gpt-4o-mini",
"gpt-4o-mini-2024-07-18",
"gpt-4.5-preview",
"gpt-4.5-preview-2025-02-27",
"gpt-4-turbo",
"gpt-4-turbo-2024-04-09",
"gpt-4-0125-preview",
"gpt-4-turbo-preview",
"gpt-4-1106-preview",
"gpt-4-vision-preview",
"gpt-4",
"gpt-4-0314",
"gpt-4-0613",
"gpt-4-32k",
"gpt-4-32k-0314",
"gpt-4-32k-0613",
"gpt-3.5-turbo",
"gpt-3.5-turbo-16k",
"gpt-3.5-turbo-0613",
"gpt-3.5-turbo-1106",
"gpt-3.5-turbo-0125",
"gpt-3.5-turbo-16k-0613",
],
]
"""ID of the model to use.
You can use the
[List models](https://platform.openai.com/docs/api-reference/models/list) API to
see all of your available models, or see our
[Model overview](https://platform.openai.com/docs/models) for descriptions of
them.
"""
name: Optional[str]
"""The name of the assistant. The maximum length is 256 characters."""
reasoning_effort: Optional[ReasoningEffort]
"""
Constrains effort on reasoning for
[reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently
supported values are `none`, `minimal`, `low`, `medium`, and `high`. Reducing
reasoning effort can result in faster responses and fewer tokens used on
reasoning in a response.
- `gpt-5.1` defaults to `none`, which does not perform reasoning. The supported
reasoning values for `gpt-5.1` are `none`, `low`, `medium`, and `high`. Tool
calls are supported for all reasoning values in gpt-5.1.
- All models before `gpt-5.1` default to `medium` reasoning effort, and do not
support `none`.
- The `gpt-5-pro` model defaults to (and only supports) `high` reasoning effort.
"""
response_format: Optional[AssistantResponseFormatOptionParam]
"""Specifies the format that the model must output.
Compatible with [GPT-4o](https://platform.openai.com/docs/models#gpt-4o),
[GPT-4 Turbo](https://platform.openai.com/docs/models#gpt-4-turbo-and-gpt-4),
and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`.
Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured
Outputs which ensures the model will match your supplied JSON schema. Learn more
in the
[Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs).
Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the
message the model generates is valid JSON.
**Important:** when using JSON mode, you **must** also instruct the model to
produce JSON yourself via a system or user message. Without this, the model may
generate an unending stream of whitespace until the generation reaches the token
limit, resulting in a long-running and seemingly "stuck" request. Also note that
the message content may be partially cut off if `finish_reason="length"`, which
indicates the generation exceeded `max_tokens` or the conversation exceeded the
max context length.
"""
temperature: Optional[float]
"""What sampling temperature to use, between 0 and 2.
Higher values like 0.8 will make the output more random, while lower values like
0.2 will make it more focused and deterministic.
"""
tool_resources: Optional[ToolResources]
"""A set of resources that are used by the assistant's tools.
The resources are specific to the type of tool. For example, the
`code_interpreter` tool requires a list of file IDs, while the `file_search`
tool requires a list of vector store IDs.
"""
tools: Iterable[AssistantToolParam]
"""A list of tool enabled on the assistant.
There can be a maximum of 128 tools per assistant. Tools can be of types
`code_interpreter`, `file_search`, or `function`.
"""
top_p: Optional[float]
"""
An alternative to sampling with temperature, called nucleus sampling, where the
model considers the results of the tokens with top_p probability mass. So 0.1
means only the tokens comprising the top 10% probability mass are considered.
We generally recommend altering this or temperature but not both.
"""
| AssistantUpdateParams |
python | pytorch__pytorch | torch/_dynamo/code_context.py | {
"start": 1084,
"end": 1818
} | class ____:
def __init__(self) -> None:
self.code_context: ExactWeakKeyDictionary = ExactWeakKeyDictionary()
def has_context(self, code: types.CodeType) -> bool:
return code in self.code_context
def get_context(self, code: types.CodeType) -> dict[str, Any]:
ctx = self.code_context.get(code)
if ctx is None:
ctx = {}
self.code_context[code] = ctx
return ctx
def pop_context(self, code: types.CodeType) -> dict[str, Any]:
ctx = self.get_context(code)
self.code_context._remove_id(id(code))
return ctx
def clear(self) -> None:
self.code_context.clear()
code_context: CodeContextDict = CodeContextDict()
| CodeContextDict |
python | facebookresearch__faiss | benchs/bench_hybrid_cpu_gpu.py | {
"start": 2507,
"end": 4180
} | class ____:
"""
Separately manage the coarse quantizer and the IVF index.
"""
def __init__(self, quantizer, index, bs=-1, seq_tiling=False):
self.index = index
self.index_ivf = extract_index_ivf(index)
if isinstance(self.index_ivf, faiss.IndexIVF):
self.index_ivf.parallel_mode
self.index_ivf.parallel_mode = 3
self.quantizer = quantizer
assert self.quantizer.d == self.index_ivf.d
# populate quantizer if it was not done before
if quantizer.ntotal > 0:
assert quantizer.ntotal == self.index_ivf.nlist
else:
centroids = self.index_ivf.quantizer.reconstruct_n()
print(f"adding centroids size {centroids.shape} to quantizer")
quantizer.train(centroids)
quantizer.add(centroids)
self.bs = bs
self.seq_tiling = seq_tiling
def search(self, xq, k):
# perform coarse quantization
if isinstance(self.index, faiss.IndexPreTransform):
# print("applying pre-transform")
assert self.index.chain.size() == 1
xq = self.index.chain.at(0).apply(xq)
if self.bs <= 0:
# non batched
nprobe = self.index_ivf.nprobe
Dq, Iq = self.quantizer.search(xq, nprobe)
return self.index_ivf.search_preassigned(xq, k, Iq, Dq)
if self.seq_tiling:
return search_preassigned(
xq, k, self.index_ivf, self.quantizer, self.bs)
else:
return tiled_search_preassigned(
xq, k, self.index_ivf, self.quantizer, self.bs)
| SeparateCoarseQuantizationIndex |
python | pytorch__pytorch | torch/distributed/tensor/experimental/_context_parallel/_attention.py | {
"start": 44094,
"end": 59932
} | class ____(ParallelStyle):
class AttentionType(Enum):
FLEX = "flex_attention"
SDPA = "scaled_dot_product_attention"
def __init__(
self,
seq_dim: int,
attention_type: AttentionType,
) -> None:
super().__init__()
self.seq_dim = seq_dim
self.attention_type = attention_type
def _apply(self, module: nn.Module, mesh: DeviceMesh) -> nn.Module:
if self.attention_type == self.AttentionType.FLEX:
module.register_forward_pre_hook(
partial(self.flex_input_fn, mesh=mesh), with_kwargs=True
)
return module
elif self.attention_type == self.AttentionType.SDPA:
module.register_forward_pre_hook(
partial(self.sdpa_input_fn, mesh=mesh), with_kwargs=True
)
module.register_forward_hook(partial(self.sdpa_output_fn, mesh=mesh))
return module
else:
raise ValueError(f"Unknown attention type: {self.attention_type}")
def flex_input_fn(
self, module: nn.Module | None, args: Any, kwargs: Any, mesh: DeviceMesh
) -> Any:
args_list = list(args)
for idx, name in enumerate(
("query", "key", "value", "score_mod", "block_mask")
):
if idx >= len(args):
args_list.append(kwargs.pop(name, None))
query, key, value, score_mod, block_mask = args_list[:5]
assert isinstance(query, torch.Tensor)
assert isinstance(key, torch.Tensor)
assert isinstance(value, torch.Tensor)
assert isinstance(block_mask, BlockMask | tuple)
key = key.contiguous()
value = value.contiguous()
global_key, global_value = flex_cp_allgather(
key, value, self.seq_dim, c10d._get_process_group_name(mesh.get_group())
)
args_list[1] = global_key
args_list[2] = global_value
return tuple(args_list), kwargs
def sdpa_input_fn(
self,
module: nn.Module | None,
args: tuple[Any, ...],
kwargs: dict[str, Any],
mesh: DeviceMesh,
) -> tuple[tuple[Any, ...], dict[str, Any]]:
placement = [Shard(self.seq_dim)]
all_args = []
for arg in itertools.chain(args, kwargs.values()):
if isinstance(arg, torch.Tensor):
if isinstance(arg, DTensor):
assert arg._spec.placements == placement
else:
arg = DTensor.from_local(arg, mesh, placement, run_check=False)
all_args.append(arg)
new_args = tuple(all_args[0 : len(args)])
new_kwargs = dict(zip(kwargs.keys(), all_args[len(args) :]))
return new_args, new_kwargs
def sdpa_output_fn(
self, module: nn.Module | None, inputs: Any, outputs: Any, mesh: DeviceMesh
) -> Any:
new_outputs = []
for output in [outputs] if isinstance(outputs, torch.Tensor) else outputs:
output = output.to_local() if isinstance(output, DTensor) else output
new_outputs.append(output)
if isinstance(outputs, torch.Tensor):
return new_outputs[0]
return tuple(new_outputs)
CPBuffer: TypeAlias = torch.Tensor | BlockMask
CPBufferContainer: TypeAlias = Sequence[CPBuffer] | Mapping[str, CPBuffer]
CPBufferSeqDims: TypeAlias = Sequence[int] | Mapping[str, int]
def _context_parallel_shard(
mesh: DeviceMesh,
buffers: CPBufferContainer,
seq_dims: CPBufferSeqDims,
load_balancer: _LoadBalancer | None = None,
) -> list[torch.Tensor | BlockMask]:
"""
Shard the buffers along the specified sequence dimensions (`seq_dims`), so that each
rank retains only its corresponding shard according to the provided `mesh`. If a
`load_balancer` is provided, the buffers will be rearranged by the load balancer
before sharding to improve load balance. Buffers can be either tensors or `BlockMask`
objects. If a buffer is a `BlockMask`, its sharding dimension is determined by the
`BlockMask` implementation, and the corresponding `seq_dim` is ignored.
Note:
For `_context_parallel_shard`, a non-None `load_balancer` must be explicitly passed
if load balancing is required.
Args:
mesh (DeviceMesh): The device mesh used for context parallelism.
buffers (List[torch.Tensor | BlockMask]): Buffers whose usage depends on the sequence
dimension. Examples include input batches, labels, and positional embedding buffers.
These buffers must be sharded along the sequence dimension to ensure correctness.
seq_dims (List[int]): The sequence dimensions for each buffer in `buffers`. Must have
the same length as `buffers`.
load_balancer (Optional[_LoadBalancer]): An optional load balancer object. If provided,
it rearranges the buffers before sharding to achieve better load balance. If not
provided, no rearrangement is performed.
Returns:
List[torch.Tensor | BlockMask]: The sharded buffers, each corresponding to the local
shard for the current rank.
"""
# TODO: these global variables are going to bite us someday.
# We will have to remove them soon.
# For the new API, we only support the module wrapper mode.
global _dispatch_mode
_dispatch_mode = _DispatchMode.MODULE_WRAPPER
global _cp_options
if load_balancer is not None:
_cp_options.enable_load_balance = True
else:
_cp_options.enable_load_balance = False
if len(buffers) != len(seq_dims):
raise ValueError(
"`seq_dims` must have the same number of elements as `buffers`."
)
flat_buffers, spec = tree_flatten(buffers)
flat_seq_dims, _ = tree_flatten(seq_dims)
if len(flat_buffers) != len(flat_seq_dims):
raise ValueError("`seq_dims` must have the pytree structure as `buffers`.")
if isinstance(flat_buffers[0], torch.Tensor):
device = flat_buffers[0].device
else:
device = flat_buffers[0].kv_num_blocks.device
for buffer in flat_buffers:
if isinstance(buffer, torch.Tensor):
assert device == buffer.device, "All buffers must be on the same device"
else:
assert device == buffer.kv_num_blocks.device, (
"All buffers must be on the same device"
)
flat_sharded_buffers = _context_parallel_buffers(
mesh, flat_buffers, flat_seq_dims, load_balancer
)
return tree_unflatten(flat_sharded_buffers, spec)
def _enable_context_parallel_dispatcher() -> None:
"""
Enable the context parallel dispatcher. This API is experimental and subject to change.
"""
_enable_cp_dtensor_dispatcher()
def _disable_context_parallel_dispatcher() -> None:
"""
Disable the context parallel dispatcher. This API is experimental and subject to change.
"""
_disable_cp_dtensor_dispatcher()
#####################################################
# Current public APIs, but are also subject to change
#####################################################
@contextlib.contextmanager
@torch.no_grad()
def context_parallel(
mesh: DeviceMesh,
*,
buffers: list[torch.Tensor] | None = None,
buffer_seq_dims: list[int] | None = None,
no_restore_buffers: set[torch.Tensor] | None = None,
) -> Generator[None, None, None]:
"""
``context_parallel`` is an experimental API to enable context
parallelism (CP). This API performs two actions: 1) patch the SDPA
(``torch.nn.functional.scaled_dot_product_attention``) with the CP-enabled
one, 2) shard ``buffers`` along the sequence dimension and each rank will
preserve the corresponding shard according ``mesh``.
Args:
mesh (:class:`DeviceMesh`): the device mesh for the context parallelism.
buffers (Optional[List[torch.Tensor]]): buffers that the usage depend
on the sequence dimension. Examples are input batch, labels and
positional embedding buffers. These buffers must be sharded along
the sequence dimension to ensure the accuracy. The sharding will
happen in-place, the buffer's shape will change within the context.
The buffers will be restored after the context finishes.
``no_restore_buffers`` can be used to specify which buffers don't
need to be restored. Note that ``buffers`` should not contain any
nn.Parameter.
buffer_seq_dims (Optional[List[int]]): the sequence dimensions of ``buffers``.
no_restore_buffers (Optional[Set[torch.Tensor]]): buffers in these set
won't be restored after the context exits. This set must be a subset
of ``buffers``. If the buffers won't be used after the context exits,
these buffers can be put in this list to avoid extra restore time.
.. warning::
`torch.distributed.tensor.experimental.context_parallel` is a
prototype feature in PyTorch. The API is subject to change.
"""
# For the legacy API, we only support the monkey-patch mode.
# We will deprecate this API once the new API is widely used.
global _dispatch_mode
_dispatch_mode = _DispatchMode.MONKEY_PATCH
buffers = [] if buffers is None else buffers
buffer_seq_dims = [] if buffer_seq_dims is None else buffer_seq_dims
no_restore_buffers = set() if no_restore_buffers is None else no_restore_buffers
if len(buffers) != len(buffer_seq_dims):
raise ValueError(
"`seq_dims` must have the same number of elements as `buffers`."
)
for buffer in no_restore_buffers:
# Cannot use `if not buffer in buffers` which will incur tensor comparison.
if not any(b is buffer for b in buffers):
raise ValueError("`no_restore_buffers` must be a subset of `buffers`.")
original_buffers = [None if b in no_restore_buffers else b.clone() for b in buffers]
device = buffers[0].device
seq_length = buffers[0].shape[buffer_seq_dims[0]]
cp_world_size = mesh.size()
# If `enable_load_balance` is True, the default Head-tail load balancer
# (:class:`_HeadTailLoadBalancer`) is used to rearrange the buffers before
# sharding. Otherwise, we don't do any load-balance rearrange by passing
# `None` to `_context_parallel_shard()`.
load_balancer = _create_default_load_balancer(seq_length, cp_world_size, device)
shards = _context_parallel_buffers(
mesh,
cast(list[torch.Tensor | BlockMask], buffers),
buffer_seq_dims,
load_balancer,
)
for buffer, shard in zip(buffers, shards):
assert isinstance(shard, torch.Tensor), "ContextParallel only supports Tensor"
shard = shard.clone()
buffer.resize_(shard.shape)
buffer.copy_(shard)
_enable_context_parallel_dispatcher_impl(seq_dim=2, mesh=mesh)
yield
_disable_context_parallel_dispatcher_impl()
for buffer, original_buffer in zip(buffers, original_buffers):
if original_buffer is not None:
buffer.resize_(original_buffer.shape)
buffer.copy_(original_buffer)
@torch.no_grad()
def context_parallel_unshard(
mesh: DeviceMesh,
buffers: list[torch.Tensor],
seq_dims: list[int],
load_balancer: _LoadBalancer | None = None,
) -> list[torch.Tensor]:
"""
Unshard the tensors (e.g., output) that are sharded due to context parallelism.
Args:
mesh (:class:`DeviceMesh`): the device mesh for the context parallelism.
buffers (List[torch.Tensor]): the buffers to be unsharded.
seq_dims (List[int]): the sequence dimensions of ``buffers``. This list
must have the same length as ``buffers``.
load_balancer (Optional[:class:`_Loadbalancer`]): an optional `_LoadBalancer`
object. If this argument is `None`, it means the `buffers` were not
rearranged when being sharded and there's no need to put it back to order
after unsharding. If this argument is a `_LoadBalancer` object, call
its `_generate_indices(restore=True)` to generate the restore indices such
that `unsharded[restore_idx]` is the original buffer.
Returns:
List[torch.Tensor]: the unsharded buffers.
Note:
For `context_parallel_unshard` we require not-None `load_balancer` object be
explicitly passed if flex_attention() is to be used and load-balancing is needed.
This is different from the case of SDPA though we strongly suggest users follow
the same convention.
"""
device = buffers[0].device
cp_world_size = mesh.size()
seq_length = buffers[0].shape[seq_dims[0]] * cp_world_size
# If users don't pass in a `load_balancer`:
# - if `enable_load_balance` is True, we use the default round-robin
# load balancer.
# - if `enable_load_balance` is False, we don't do any load balancing
# by passing in `None` as `restore_indices`.
load_balancer = load_balancer or _create_default_load_balancer(
seq_length, cp_world_size, device
)
restore_indices = (
load_balancer._generate_indices(restore=True) if load_balancer else None
)
assert restore_indices is None or restore_indices.ndim == 2, (
"load balance restore index expects shape (1, seq_len) or (B, seq_len) "
f"but got {restore_indices.shape}."
)
unsharded_buffers = []
for b, dim in zip(buffers, seq_dims):
b = b.contiguous()
unsharded_b = _maybe_wait(ft_c.all_gather_tensor(b, dim, mesh))
if restore_indices is not None:
# NOTE: assuming batch dim is 0
idx_batch_size = restore_indices.size(0)
data_batch_size = unsharded_b.size(0)
if idx_batch_size != 1 and idx_batch_size != data_batch_size:
raise ValueError(
"Cannot restore buffer: "
f"restore_indices has shape {restore_indices.shape}, "
f"but unsharded_b has shape {unsharded_b.shape}."
)
for i in range(data_batch_size):
index = (
restore_indices[0] # identical load-balance in batch
if idx_batch_size == 1
else restore_indices[i]
)
unsharded_b_batch_i = torch.index_select(
unsharded_b[i], dim=dim - 1, index=index
)
unsharded_b[i] = unsharded_b_batch_i
unsharded_buffers.append(unsharded_b)
return unsharded_buffers
def set_rotate_method(rotate_method: str) -> None:
    """Select how Context Parallel SDPA rotates kv shards across ranks.

    "alltoall" exchanges the kv shards via an all-to-all collective, while
    "allgather" collects them with a single all-gather after the first
    sub-SDPA computation. If this function is never called, "allgather"
    is the active default.

    Args:
        rotate_method (str): the rotate method to use. Currently only
            "allgather" and "alltoall" are supported; any other string
            raises an error.

    Returns:
        None

    Raises:
        NotImplementedError: if ``rotate_method`` is neither "allgather"
            nor "alltoall".
    """
    logger.info("Note that FlexAttention CP doesn't support alltoall yet.")
    # Map the user-facing string onto the internal enum via a lookup table.
    method_by_name = {
        "allgather": _RotateMethod.ALL_GATHER,
        "alltoall": _RotateMethod.ALL_TO_ALL,
    }
    if rotate_method not in method_by_name:
        raise NotImplementedError(
            "Context Parallel does not support "
            f"using {rotate_method} for kv shards rotation"
        )
    _cp_options.rotate_method = method_by_name[rotate_method]
| _ContextParallel |
python | walkccc__LeetCode | solutions/2785. Sort Vowels in a String/2785.py | {
"start": 0,
"end": 315
} | class ____:
def sortVowels(self, s: str) -> str:
    """Return `s` with its vowels rearranged into ascending ASCII order.

    Consonants keep their positions; the vowels (both cases) are sorted
    by code point and written back into the original vowel slots
    left-to-right.
    """
    vowel_set = set('aeiouAEIOU')
    # Collect every vowel in sorted order, then stream them back into
    # the string wherever a vowel originally appeared.
    ordered = iter(sorted(c for c in s if c in vowel_set))
    return ''.join(next(ordered) if c in vowel_set else c for c in s)
| Solution |
python | spack__spack | lib/spack/spack/solver/core.py | {
"start": 7129,
"end": 9057
} | class ____(NamedTuple):
flag_type: str
flag: str
flag_group: str
source: str
def intermediate_repr(sym):
    """Returns an intermediate representation of clingo models for Spack's spec builder.

    Currently, transforms symbols from clingo models either to strings or to
    NodeArgument/NodeFlag objects.

    Returns:
        This will turn a ``clingo.Symbol`` into a string or NodeArgument, or a
        sequence of ``clingo.Symbol`` objects into a tuple of those objects.
    """
    # TODO: simplify this when we no longer have to support older clingo versions.
    if isinstance(sym, (list, tuple)):
        # Recurse element-wise so nested argument sequences are converted too.
        return tuple(intermediate_repr(a) for a in sym)
    try:
        # Known compound predicates get turned into their typed namedtuples.
        if sym.name == "node":
            return NodeArgument(
                id=intermediate_repr(sym.arguments[0]), pkg=intermediate_repr(sym.arguments[1])
            )
        elif sym.name == "node_flag":
            return NodeFlag(
                flag_type=intermediate_repr(sym.arguments[0]),
                flag=intermediate_repr(sym.arguments[1]),
                flag_group=intermediate_repr(sym.arguments[2]),
                source=intermediate_repr(sym.arguments[3]),
            )
    except RuntimeError:
        # This happens when using clingo w/ CFFI and trying to access ".name" for symbols
        # that are not functions
        pass
    if clingo_cffi():
        # Clingo w/ CFFI will throw an exception on failure
        try:
            return sym.string
        except RuntimeError:
            # Non-string symbol: fall back to its textual form.
            return str(sym)
    else:
        # Non-CFFI clingo returns None/"" for .string on non-string symbols,
        # so the falsy fallback below handles both cases.
        return sym.string or str(sym)
def extract_args(model, predicate_name):
    """Extract the arguments to predicates with the provided name from a model.

    Pull out all the predicates with name ``predicate_name`` from the model,
    and return their intermediate representation.
    """
    results = []
    for symbol in model:
        if symbol.name == predicate_name:
            results.append(intermediate_repr(symbol.arguments))
    return results
| NodeFlag |
python | FactoryBoy__factory_boy | tests/test_transformer.py | {
"start": 433,
"end": 572
} | class ____(factory.Factory):
name = factory.Transformer("value", transform=transform)
class Meta:
model = Upper
| UpperFactory |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.