language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | getsentry__sentry | tests/sentry/integrations/slack/utils/test_channel.py | {
"start": 10673,
"end": 15762
} | class ____(TestCase):
def setUp(self) -> None:
self.integration = self.create_integration(
organization=self.organization,
external_id="sentry-workspace",
provider="slack",
metadata={"access_token": "abc-123"},
)
self.input_id = "U12345678"
self.slack_user = {
"id": self.input_id,
"name": "carmen.sandiego",
"profile": {
"display_name": "Carmen Sandiego 🔍",
"display_name_normalized": "Carmen Sandiego",
},
}
@patch(
"slack_sdk.web.client.WebClient.users_info",
side_effect=create_user_error(error="user_not_found"),
)
def test_invalid_user(self, mock_client_call: MagicMock) -> None:
with pytest.raises(ValidationError, match="User not found. Invalid ID provided."):
validate_user_id(
input_name="waldo", input_user_id=self.input_id, integration_id=self.integration.id
)
assert mock_client_call.call_count == 1
@patch(
"slack_sdk.web.client.WebClient.users_info",
side_effect=create_user_error(error="user_not_visible"),
)
def test_user_not_visible(self, mock_client_call: MagicMock) -> None:
with pytest.raises(
ValidationError, match="User not visible, you may need to modify your Slack settings."
):
validate_user_id(
input_name="waldo", input_user_id=self.input_id, integration_id=self.integration.id
)
assert mock_client_call.call_count == 1
@patch(
"slack_sdk.web.client.WebClient.users_info",
side_effect=create_user_error(error="ratelimited", status_code=429),
)
def test_rate_limited(self, mock_client_call: MagicMock) -> None:
with pytest.raises(ValidationError, match="Rate limited"):
validate_user_id(
input_name="waldo", input_user_id=self.input_id, integration_id=self.integration.id
)
assert mock_client_call.call_count == 1
@patch(
"slack_sdk.web.client.WebClient.users_info",
side_effect=create_user_error(error="some-unknown-error", status_code=500),
)
def test_unknown_error(self, mock_client_call: MagicMock) -> None:
with pytest.raises(ValidationError, match="Could not retrieve Slack user information."):
validate_user_id(
input_name="waldo", input_user_id=self.input_id, integration_id=self.integration.id
)
assert mock_client_call.call_count == 1
@patch(
"slack_sdk.web.client.WebClient.users_info",
return_value=SlackResponse(
client=None,
http_verb="POST",
api_url="https://slack.com/api/users.info",
req_args={},
data="<!doctype html><html><body>why is this html</body></html>", # type: ignore[arg-type]
headers={},
status_code=200,
),
)
def test_bad_slack_response(self, mock_client_call: MagicMock) -> None:
with pytest.raises(IntegrationError, match="Bad slack user list response."):
validate_user_id(
input_name="waldo",
input_user_id=self.input_id,
integration_id=self.integration.id,
)
assert mock_client_call.call_count == 1
def test_no_names_from_slack(self) -> None:
with patch(
"slack_sdk.web.client.WebClient.users_info",
return_value=create_user_response(user={"id": self.input_id}),
) as mock_client_call:
with pytest.raises(ValidationError, match="Did not receive user name from API results"):
validate_user_id(
input_name="waldo",
input_user_id=self.input_id,
integration_id=self.integration.id,
)
assert mock_client_call.call_count == 1
def test_no_matches_from_slack(self) -> None:
with patch(
"slack_sdk.web.client.WebClient.users_info",
return_value=create_user_response(user=self.slack_user),
) as mock_client_call:
with pytest.raises(
ValidationError, match="Slack username from ID does not match input username."
):
validate_user_id(
input_name="waldo",
input_user_id=self.input_id,
integration_id=self.integration.id,
)
assert mock_client_call.call_count == 1
def test_happy_path_does_not_raise(self) -> None:
with patch(
"slack_sdk.web.client.WebClient.users_info",
return_value=create_user_response(user=self.slack_user),
) as mock_client_call:
validate_user_id(
input_name="Carmen Sandiego",
input_user_id=self.input_id,
integration_id=self.integration.id,
)
assert mock_client_call.call_count == 1
| ValidateUserIdTest |
python | mahmoud__boltons | boltons/socketutils.py | {
"start": 25066,
"end": 25631
} | class ____(Error):
"""Raised from :meth:`BufferedSocket.recv_until` and
:meth:`BufferedSocket.recv_closed` when more than *maxsize* bytes are
read without encountering the delimiter or a closed connection,
respectively.
"""
def __init__(self, bytes_read=None, delimiter=None):
msg = 'message exceeded maximum size'
if bytes_read is not None:
msg += f'. {bytes_read} bytes read'
if delimiter is not None:
msg += f'. Delimiter not found: {delimiter!r}'
super().__init__(msg)
| MessageTooLong |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 11602,
"end": 11874
} | class ____:
def setup(self):
N = 18
self.df = DataFrame({"g": ["a", "b"] * 9, "v": list(range(N))})
def time_defaults(self):
self.df.groupby("g").shift()
def time_fill_value(self):
self.df.groupby("g").shift(fill_value=99)
| Shift |
python | kubernetes-client__python | kubernetes/client/models/v1_daemon_set_list.py | {
"start": 383,
"end": 6856
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1DaemonSet]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1DaemonSetList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1DaemonSetList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1DaemonSetList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1DaemonSetList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1DaemonSetList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1DaemonSetList. # noqa: E501
A list of daemon sets. # noqa: E501
:return: The items of this V1DaemonSetList. # noqa: E501
:rtype: list[V1DaemonSet]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1DaemonSetList.
A list of daemon sets. # noqa: E501
:param items: The items of this V1DaemonSetList. # noqa: E501
:type: list[V1DaemonSet]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1DaemonSetList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1DaemonSetList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1DaemonSetList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1DaemonSetList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1DaemonSetList. # noqa: E501
:return: The metadata of this V1DaemonSetList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1DaemonSetList.
:param metadata: The metadata of this V1DaemonSetList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1DaemonSetList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1DaemonSetList):
return True
return self.to_dict() != other.to_dict()
| V1DaemonSetList |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1526056,
"end": 1528593
} | class ____(Transform):
"""
RegressionTransform schema wrapper.
Parameters
----------
on : str, :class:`FieldName`
The data field of the independent variable to use a predictor.
regression : str, :class:`FieldName`
The data field of the dependent variable to predict.
extent : Sequence[float]
A [min, max] domain over the independent (x) field for the starting and ending
points of the generated trend line.
groupby : Sequence[str, :class:`FieldName`]
The data fields to group by. If not specified, a single group containing all data
objects will be used.
method : Literal['linear', 'log', 'exp', 'pow', 'quad', 'poly']
The functional form of the regression model. One of ``"linear"``, ``"log"``,
``"exp"``, ``"pow"``, ``"quad"``, or ``"poly"``.
**Default value:** ``"linear"``
order : float
The polynomial order (number of coefficients) for the 'poly' method.
**Default value:** ``3``
params : bool
A boolean flag indicating if the transform should return the regression model
parameters (one object per group), rather than trend line points. The resulting
objects include a ``coef`` array of fitted coefficient values (starting with the
intercept term and then including terms of increasing order) and an ``rSquared``
value (indicating the total variance explained by the model).
**Default value:** ``false``
as : Sequence[str, :class:`FieldName`]
The output field names for the smoothed points generated by the regression
transform.
**Default value:** The field names of the input x and y values.
"""
_schema = {"$ref": "#/definitions/RegressionTransform"}
def __init__(
self,
on: Optional[str | SchemaBase] = Undefined,
regression: Optional[str | SchemaBase] = Undefined,
extent: Optional[Sequence[float]] = Undefined,
groupby: Optional[Sequence[str | SchemaBase]] = Undefined,
method: Optional[
Literal["linear", "log", "exp", "pow", "quad", "poly"]
] = Undefined,
order: Optional[float] = Undefined,
params: Optional[bool] = Undefined,
**kwds,
):
super().__init__(
on=on,
regression=regression,
extent=extent,
groupby=groupby,
method=method,
order=order,
params=params,
**kwds,
)
| RegressionTransform |
python | scipy__scipy | scipy/io/tests/test_idl.py | {
"start": 1186,
"end": 3534
} | class ____:
# Test that scalar values are read in with the correct value and type
def test_byte(self):
s = readsav(path.join(DATA_PATH, 'scalar_byte.sav'), verbose=False)
assert_identical(s.i8u, np.uint8(234))
def test_int16(self):
s = readsav(path.join(DATA_PATH, 'scalar_int16.sav'), verbose=False)
assert_identical(s.i16s, np.int16(-23456))
def test_int32(self):
s = readsav(path.join(DATA_PATH, 'scalar_int32.sav'), verbose=False)
assert_identical(s.i32s, np.int32(-1234567890))
def test_float32(self):
s = readsav(path.join(DATA_PATH, 'scalar_float32.sav'), verbose=False)
assert_identical(s.f32, np.float32(-3.1234567e+37))
def test_float64(self):
s = readsav(path.join(DATA_PATH, 'scalar_float64.sav'), verbose=False)
assert_identical(s.f64, np.float64(-1.1976931348623157e+307))
def test_complex32(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex32.sav'), verbose=False)
assert_identical(s.c32, np.complex64(3.124442e13-2.312442e31j))
def test_bytes(self):
s = readsav(path.join(DATA_PATH, 'scalar_string.sav'), verbose=False)
msg = "The quick brown fox jumps over the lazy python"
assert_identical(s.s, np.bytes_(msg))
def test_structure(self):
pass
def test_complex64(self):
s = readsav(path.join(DATA_PATH, 'scalar_complex64.sav'), verbose=False)
assert_identical(
s.c64,
np.complex128(1.1987253647623157e+112-5.1987258887729157e+307j)
)
def test_heap_pointer(self):
pass
def test_object_reference(self):
pass
def test_uint16(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint16.sav'), verbose=False)
assert_identical(s.i16u, np.uint16(65511))
def test_uint32(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint32.sav'), verbose=False)
assert_identical(s.i32u, np.uint32(4294967233))
def test_int64(self):
s = readsav(path.join(DATA_PATH, 'scalar_int64.sav'), verbose=False)
assert_identical(s.i64s, np.int64(-9223372036854774567))
def test_uint64(self):
s = readsav(path.join(DATA_PATH, 'scalar_uint64.sav'), verbose=False)
assert_identical(s.i64u, np.uint64(18446744073709529285))
| TestScalars |
python | getsentry__sentry | tests/sentry/utils/test_snuba.py | {
"start": 20635,
"end": 25107
} | class ____(TestCase):
def setUp(self) -> None:
mock_request = Request(
dataset="events",
app_id="test",
query=Query(
match=Entity("events"),
select=[Function("count", parameters=[], alias="count")],
where=[
Condition(Column("project_id"), Op.EQ, self.project.id),
Condition(Column("timestamp"), Op.GTE, datetime.now() - timedelta(hours=1)),
Condition(Column("timestamp"), Op.LT, datetime.now()),
],
),
)
self.snuba_request = SnubaRequest(
request=mock_request,
referrer="test_referrer",
forward=lambda x: x,
reverse=lambda x: x,
)
@mock.patch("sentry.utils.snuba._snuba_query")
def test_rate_limit_error_handling(self, mock_snuba_query) -> None:
"""
Test error handling for rate limit errors creates a RateLimitExceeded exception
with the correct quota used and rejection threshold
"""
mock_response = mock.Mock(spec=HTTPResponse)
mock_response.status = 429
mock_response.data = json.dumps(
{
"error": {
"message": "Query on could not be run due to allocation policies, info: ...",
},
"quota_allowance": {
"summary": {
"rejected_by": {
"policy": "ConcurrentRateLimitAllocationPolicy",
"quota_used": 1000,
"rejection_threshold": 100,
"quota_unit": "no_units",
"storage_key": "test_storage_key",
},
"throttled_by": {},
}
},
}
).encode()
mock_snuba_query.return_value = ("test_referrer", mock_response, lambda x: x, lambda x: x)
with pytest.raises(RateLimitExceeded) as exc_info:
_bulk_snuba_query([self.snuba_request])
assert exc_info.value.quota_used == 1000
assert exc_info.value.rejection_threshold == 100
assert (
str(exc_info.value) == "Query on could not be run due to allocation policies, info: ..."
)
@mock.patch("sentry.utils.snuba._snuba_query")
def test_rate_limit_error_handling_without_quota_details(self, mock_snuba_query) -> None:
"""
Test that error handling gracefully handles malformed message
"""
mock_response = mock.Mock(spec=HTTPResponse)
mock_response.status = 429
mock_response.data = json.dumps(
{
"error": {
"message": "Query on could not be run due to allocation policies, info: ...",
}
}
).encode()
mock_snuba_query.return_value = ("test_referrer", mock_response, lambda x: x, lambda x: x)
with pytest.raises(RateLimitExceeded) as exc_info:
_bulk_snuba_query([self.snuba_request])
assert exc_info.value.quota_used is None
assert exc_info.value.rejection_threshold is None
assert (
str(exc_info.value) == "Query on could not be run due to allocation policies, info: ..."
)
@mock.patch("sentry.utils.snuba._snuba_query")
def test_rate_limit_error_handling_with_stats_but_no_quota_details(
self, mock_snuba_query
) -> None:
"""
Test that error handling gracefully handles empty quota_allowance
"""
mock_response = mock.Mock(spec=HTTPResponse)
mock_response.status = 429
mock_response.data = json.dumps(
{
"error": {
"message": "Query on could not be run due to allocation policies, info: ...",
},
"quota_allowance": {},
}
).encode()
mock_snuba_query.return_value = ("test_referrer", mock_response, lambda x: x, lambda x: x)
with pytest.raises(RateLimitExceeded) as exc_info:
_bulk_snuba_query([self.snuba_request])
assert exc_info.value.quota_used is None
assert exc_info.value.rejection_threshold is None
assert (
str(exc_info.value) == "Query on could not be run due to allocation policies, info: ..."
)
| SnubaQueryRateLimitTest |
python | django__django | tests/indexes/tests.py | {
"start": 493,
"end": 4098
} | class ____(TestCase):
"""
Test index handling by the db.backends.schema infrastructure.
"""
def test_index_name_hash(self):
"""
Index names should be deterministic.
"""
editor = connection.schema_editor()
index_name = editor._create_index_name(
table_name=Article._meta.db_table,
column_names=("c1",),
suffix="123",
)
self.assertEqual(index_name, "indexes_article_c1_a52bd80b123")
def test_index_name(self):
"""
Index names on the built-in database backends::
* Are truncated as needed.
* Include all the column names.
* Include a deterministic hash.
"""
long_name = "l%sng" % ("o" * 100)
editor = connection.schema_editor()
index_name = editor._create_index_name(
table_name=Article._meta.db_table,
column_names=("c1", "c2", long_name),
suffix="ix",
)
expected = {
"mysql": "indexes_article_c1_c2_looooooooooooooooooo_255179b2ix",
"oracle": "indexes_a_c1_c2_loo_255179b2ix",
"postgresql": "indexes_article_c1_c2_loooooooooooooooooo_255179b2ix",
"sqlite": "indexes_article_c1_c2_l%sng_255179b2ix" % ("o" * 100),
}
if connection.vendor not in expected:
self.skipTest(
"This test is only supported on the built-in database backends."
)
self.assertEqual(index_name, expected[connection.vendor])
def test_quoted_index_name(self):
editor = connection.schema_editor()
index_sql = [str(statement) for statement in editor._model_indexes_sql(Article)]
self.assertEqual(len(index_sql), 1)
# Ensure the index name is properly quoted.
self.assertIn(
connection.ops.quote_name(Article._meta.indexes[0].name),
index_sql[0],
)
def test_columns_list_sql(self):
index = Index(fields=["headline"], name="whitespace_idx")
editor = connection.schema_editor()
self.assertIn(
"(%s)" % editor.quote_name("headline"),
str(index.create_sql(Article, editor)),
)
@skipUnlessDBFeature("supports_index_column_ordering")
def test_descending_columns_list_sql(self):
index = Index(fields=["-headline"], name="whitespace_idx")
editor = connection.schema_editor()
self.assertIn(
"(%s DESC)" % editor.quote_name("headline"),
str(index.create_sql(Article, editor)),
)
@skipUnlessDBFeature("can_create_inline_fk", "can_rollback_ddl")
def test_alter_field_unique_false_removes_deferred_sql(self):
field_added = CharField(max_length=127, unique=True)
field_added.set_attributes_from_name("charfield_added")
field_to_alter = CharField(max_length=127, unique=True)
field_to_alter.set_attributes_from_name("charfield_altered")
altered_field = CharField(max_length=127, unique=False)
altered_field.set_attributes_from_name("charfield_altered")
with connection.schema_editor() as editor:
editor.add_field(ArticleTranslation, field_added)
editor.add_field(ArticleTranslation, field_to_alter)
self.assertEqual(len(editor.deferred_sql), 2)
editor.alter_field(ArticleTranslation, field_to_alter, altered_field)
self.assertEqual(len(editor.deferred_sql), 1)
self.assertIn("charfield_added", str(editor.deferred_sql[0].parts["name"]))
| SchemaIndexesTests |
python | getsentry__sentry | src/sentry/testutils/silo.py | {
"start": 4043,
"end": 5899
} | class ____:
"""Decorate a test case that is expected to work in a given silo mode.
A test marked with a single silo mode runs only in that mode by default. An
`include_monolith_run=True` will add a secondary run in monolith mode.
If a test is marked with both control and region modes, then the primary run will
be in monolith mode and a secondary run will be generated in each silo mode.
When testing on more than one mode, if the decorator is on a test case class,
an additional class is dynamically generated and added to the module for Pytest
to pick up. For example, if you write
```
@control_silo_test(include_monolith_run=True)
class MyTest(TestCase):
def setUp(self): ...
def test_stuff(self): ...
```
then your result set should include test runs for both `MyTest` (in control mode)
and `MyTest__InMonolithMode`.
"""
def __init__(self, *silo_modes: SiloMode) -> None:
self.silo_modes = frozenset(silo_modes)
@overload
def __call__[T: (type[Any], Callable[..., Any])](self, decorated_obj: T) -> T: ...
@overload
def __call__[T: (
type[Any],
Callable[..., Any],
)](self, *, regions: Sequence[Region] = (), include_monolith_run: bool = False) -> Callable[
[T], T
]: ...
def __call__(
self,
decorated_obj: Any = None,
*,
regions: Sequence[Region] = (),
include_monolith_run: bool = False,
) -> Any:
silo_modes = self.silo_modes
if include_monolith_run:
silo_modes |= frozenset([SiloMode.MONOLITH])
mod = _SiloModeTestModification(silo_modes=silo_modes, regions=tuple(regions))
return mod.apply if decorated_obj is None else mod.apply(decorated_obj)
@dataclass(frozen=True)
| SiloModeTestDecorator |
python | astropy__astropy | astropy/table/tests/test_table.py | {
"start": 15084,
"end": 15821
} | class ____:
def test_1(self, table_types):
t = table_types.Table()
with pytest.raises(KeyError):
t["a"]
def test_2(self, table_types):
t = table_types.Table()
t.add_column(table_types.Column(name="a", data=[1, 2, 3]))
assert np.all(t["a"] == np.array([1, 2, 3]))
with pytest.raises(KeyError):
t["b"] # column does not exist
def test_itercols(self, table_types):
names = ["a", "b", "c"]
t = table_types.Table([[1], [2], [3]], names=names)
for name, col in zip(names, t.itercols()):
assert name == col.name
assert isinstance(col, table_types.Column)
@pytest.mark.usefixtures("table_types")
| TestColumnAccess |
python | getsentry__sentry | tests/apidocs/endpoints/organizations/test_org_stats_v2.py | {
"start": 325,
"end": 3905
} | class ____(APIDocsTestCase, OutcomesSnubaTest):
def setUp(self) -> None:
super().setUp()
self.now = datetime(2021, 3, 14, 12, 27, 28, tzinfo=timezone.utc)
self.login_as(user=self.user)
self.store_outcomes(
{
"org_id": self.organization.id,
"timestamp": self.now - timedelta(hours=1),
"project_id": self.project.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.ERROR,
"quantity": 1,
},
5,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"timestamp": self.now - timedelta(hours=1),
"project_id": self.project.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.PROFILE_DURATION,
"quantity": 1000, # Duration in milliseconds
},
3,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"timestamp": self.now - timedelta(hours=1),
"project_id": self.project.id,
"outcome": Outcome.ACCEPTED,
"reason": "none",
"category": DataCategory.PROFILE_DURATION_UI,
"quantity": 2000, # Duration in milliseconds
},
2,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"timestamp": self.now - timedelta(hours=1),
"project_id": self.project.id,
"outcome": Outcome.RATE_LIMITED,
"reason": "none",
"category": DataCategory.PROFILE_CHUNK,
"quantity": 420,
},
1,
)
self.store_outcomes(
{
"org_id": self.organization.id,
"timestamp": self.now - timedelta(hours=1),
"project_id": self.project.id,
"outcome": Outcome.RATE_LIMITED,
"reason": "none",
"category": DataCategory.PROFILE_CHUNK_UI,
"quantity": 69,
},
1,
)
self.url = reverse(
"sentry-api-0-organization-stats-v2",
kwargs={"organization_id_or_slug": self.organization.slug},
)
def test_get(self) -> None:
"""
Test that the organization stats endpoint returns valid schema.
This test verifies that the endpoint correctly handles basic queries with interval, field and groupBy parameters.
"""
query = {"interval": "1d", "field": "sum(quantity)", "groupBy": "category"}
response = self.client.get(self.url, query, format="json")
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
def test_continuous_profiling_categories(self) -> None:
for category in [
"profile_duration",
"profile_duration_ui",
"profile_chunk",
"profile_chunk_ui",
]:
query = {
"interval": "1d",
"field": "sum(quantity)",
"groupBy": "category",
"category": category,
}
response = self.client.get(self.url, query, format="json")
request = RequestFactory().get(self.url)
self.validate_schema(request, response)
| OrganizationStatsDocs |
python | pytorch__pytorch | test/distributed/test_cupy_as_tensor.py | {
"start": 618,
"end": 1204
} | class ____:
data_ptr: int
size_in_bytes: int
@property
def __cuda_array_interface__(self):
return {
"shape": (self.size_in_bytes,),
"typestr": "|u1",
"data": (self.data_ptr, False),
"version": 3,
}
def from_buffer(
data_ptr: int, size_in_bytes: int, device: str, dtype: torch.dtype
) -> torch.Tensor:
data = torch.as_tensor(CupyWrapper(data_ptr, size_in_bytes), device=device).view(
dtype
)
assert data.data_ptr() == data_ptr
return data
@requires_cuda_p2p_access()
| CupyWrapper |
python | pandas-dev__pandas | asv_bench/benchmarks/indexing.py | {
"start": 12872,
"end": 13059
} | class ____:
def setup_cache(self):
s = Series()
return s
def time_lookup_iloc(self, s):
s.iloc
def time_lookup_loc(self, s):
s.loc
| MethodLookup |
python | pytorch__pytorch | test/inductor/test_async_compile.py | {
"start": 778,
"end": 5594
} | class ____(TestCase):
@requires_gpu()
@requires_triton()
@parametrize("method", ("subprocess", "fork", "spawn"))
def test_pool(self, method):
def fn(x, y):
return x + y
x = torch.rand(10).to(GPU_TYPE)
y = torch.rand(10).to(GPU_TYPE)
with config.patch("worker_start_method", method):
shutdown_compile_workers()
AsyncCompile.wait_pool_ready()
with fresh_cache():
compiled_fn = torch.compile(fn)
self.assertEqual(fn(x, y), compiled_fn(x, y))
@requires_gpu()
@requires_triton()
def test_bad_kernel(self):
shutdown_compile_workers()
with config.patch(worker_start_method="subprocess", compile_threads=8):
async_compile = AsyncCompile()
AsyncCompile.wait_pool_ready()
with self.assertRaises(SubprocException):
async_compile.triton(
"fake_kernel_name", source_code="This definitely doesn't exist"
).result()
@requires_gpu()
@requires_triton()
def test_wait_pool_ready(self):
shutdown_compile_workers()
with config.patch(worker_start_method="subprocess", compile_threads=8):
AsyncCompile.wait_pool_ready()
self.assertTrue(AsyncCompile._ready_future.done())
self.assertTrue(AsyncCompile.use_process_pool())
@requires_gpu()
@requires_triton()
@patch("torch._inductor.runtime.coordinate_descent_tuner.CoordescTuner.autotune")
@parametrize("method", ("subprocess", "fork", "spawn"))
def test_autotune_lookup_table(self, mock_autotune, method):
def f(a, b):
return (a @ b).to(torch.float32).sum(dim=1)
# Fake name to make sure the lookup table is name agnostic
# When codegen/triton.py is changed, func_def must be updated
loop_header = (
"for r0_offset in tl.range(0, r0_numel, R0_BLOCK, num_stages = 2):"
if torch.version.hip
else "for r0_offset in tl.range(0, r0_numel, R0_BLOCK):"
)
func_def = f"""
def triton_fused_fake_name(in_ptr0, out_ptr0, xnumel, r0_numel, XBLOCK : tl.constexpr, R0_BLOCK : tl.constexpr):
xnumel = 1024
r0_numel = 11776
rnumel = r0_numel
RBLOCK: tl.constexpr = R0_BLOCK
xoffset = tl.program_id(0) * XBLOCK
xindex = xoffset + tl.arange(0, XBLOCK)[:, None]
xmask = xindex < xnumel
r0_base = tl.arange(0, R0_BLOCK)[None, :]
rbase = r0_base
x0 = xindex
_tmp3 = tl.full([XBLOCK, R0_BLOCK], 0, tl.float32)
{loop_header}
r0_index = r0_offset + r0_base
r0_mask = r0_index < r0_numel
roffset = r0_offset
rindex = r0_index
r0_1 = r0_index
tmp0 = tl.load(in_ptr0 + (r0_1 + 11776*x0), r0_mask & xmask, eviction_policy='evict_first', other=0.0).to(tl.float32)
tmp1 = tmp0.to(tl.float32)
tmp2 = tl.broadcast_to(tmp1, [XBLOCK, R0_BLOCK])
tmp4 = _tmp3 + tmp2
_tmp3 = tl.where(r0_mask & xmask, tmp4, _tmp3)
tmp3 = tl.sum(_tmp3, 1)[:, None]
tl.store(out_ptr0 + (x0), tmp3, xmask)
"""
fn_hash = generate_lookup_hash_from_source_code(
str({"x": 1024, "r0_": 16384}), func_def
)
block_configs = {
"XBLOCK": 1,
"R0_BLOCK": 128,
}
num_warps = 16
num_stages = 1
autotune_lookup_table = {
fn_hash: {**block_configs, "num_warps": num_warps, "num_stages": num_stages}
}
autotune_config = Config(
block_configs, num_warps=num_warps, num_stages=num_stages
)
mock_autotune.return_value = autotune_config
a = torch.randn(1152, 1024, device=GPU_TYPE, dtype=torch.float16).T
b = torch.randn(1152, 11776, device=GPU_TYPE, dtype=torch.float16)
compiled_f = torch.compile(f)
with config.patch(
{
"autotune_lookup_table": autotune_lookup_table,
"coordinate_descent_tuning": True,
"worker_start_method": method,
}
):
shutdown_compile_workers()
AsyncCompile.wait_pool_ready()
with fresh_cache():
compiled_f(a, b)
# Check that the input to coordinate descent (the resulting chosen config)
# is the same as the one in the lookup table
mock_autotune.assert_called_once()
args, _ = mock_autotune.call_args
self.assertTrue(isinstance(args[1], Config))
self.assertEqual(args[1].kwargs, autotune_config.kwargs)
self.assertEqual(args[1].num_warps, autotune_config.num_warps)
self.assertEqual(args[1].num_stages, autotune_config.num_stages)
if __name__ == "__main__":
run_tests()
| TestAsyncCompile |
python | dagster-io__dagster | python_modules/libraries/dagster-docker/dagster_docker/container_context.py | {
"start": 1734,
"end": 6485
} | class ____(
NamedTuple(
"_DockerContainerContext",
[
("registry", Optional[Mapping[str, str]]),
("env_vars", Sequence[str]),
("networks", Sequence[str]),
("container_kwargs", Mapping[str, Any]),
],
)
):
"""Encapsulates the configuration that can be applied to a Docker container running
Dagster code. Can be set at the instance level (via config in the `DockerRunLauncher`),
repository location level, and at the individual step level (for runs using the
`docker_executor` to run each op in its own container). Config at each of these lower levels is
merged in with any config set at a higher level, following the policy laid out in the
merge() method below.
"""
def __new__(
cls,
registry: Optional[Mapping[str, str]] = None,
env_vars: Optional[Sequence[str]] = None,
networks: Optional[Sequence[str]] = None,
container_kwargs: Optional[Mapping[str, Any]] = None,
):
return super().__new__(
cls,
registry=check.opt_nullable_mapping_param(registry, "registry"),
env_vars=check.opt_sequence_param(env_vars, "env_vars", of_type=str),
networks=check.opt_sequence_param(networks, "networks", of_type=str),
container_kwargs=check.opt_mapping_param(container_kwargs, "container_kwargs"),
)
def merge(self, other: "DockerContainerContext"):
# Combines config set at a higher level with overrides/additions that are set at a lower
# level. For example, a certain set of config set in the `DockerRunLauncher`` can be
# combined with config set at the step level in the `docker_executor`.
# Lists of env vars and secrets are appended, the registry is replaced, and the
# `container_kwargs` field does a shallow merge so that different kwargs can be combined
# or replaced without replacing the full set of arguments.
return DockerContainerContext(
registry=other.registry if other.registry is not None else self.registry,
env_vars=[*self.env_vars, *other.env_vars],
networks=[*self.networks, *other.networks],
container_kwargs={**self.container_kwargs, **other.container_kwargs},
)
@staticmethod
def create_for_run(dagster_run: DagsterRun, run_launcher: Optional["DockerRunLauncher"]):
context = DockerContainerContext()
# First apply the instance / run_launcher-level context
if run_launcher:
context = context.merge(
DockerContainerContext(
registry=run_launcher.registry,
env_vars=run_launcher.env_vars,
networks=run_launcher.networks,
container_kwargs=run_launcher.container_kwargs,
)
)
run_container_context = (
dagster_run.job_code_origin.repository_origin.container_context
if dagster_run.job_code_origin
else None
)
if not run_container_context:
return context
return context.merge(DockerContainerContext.create_from_config(run_container_context))
@staticmethod
def create_from_config(run_container_context):
processed_shared_container_context = process_shared_container_context_config(
run_container_context or {}
)
shared_container_context = DockerContainerContext(
env_vars=processed_shared_container_context.get("env_vars", [])
)
run_docker_container_context = (
run_container_context.get("docker", {}) if run_container_context else {}
)
if not run_docker_container_context:
return shared_container_context
processed_container_context = process_config(
DOCKER_CONTAINER_CONTEXT_SCHEMA, run_docker_container_context
)
if not processed_container_context.success:
raise DagsterInvalidConfigError(
"Errors while parsing Docker container context",
processed_container_context.errors,
run_docker_container_context,
)
processed_context_value = cast("Mapping[str, Any]", processed_container_context.value)
return shared_container_context.merge(
DockerContainerContext(
registry=processed_context_value.get("registry"),
env_vars=processed_context_value.get("env_vars", []),
networks=processed_context_value.get("networks", []),
container_kwargs=processed_context_value.get("container_kwargs"),
)
)
| DockerContainerContext |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 10225,
"end": 10278
} | class ____(ShopifyStream):
data_field = "shop"
| Shop |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/d.py | {
"start": 631,
"end": 719
} | class ____(Task.Task):
color = 'BLUE'
run_str = '${D} ${D_HEADER} ${SRC}'
| d_header |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-bedrock-converse/tests/test_bedrock_converse_utils.py | {
"start": 811,
"end": 1944
} | class ____:
def __init__(self) -> None:
self.exceptions = MockExceptions()
async def __aenter__(self) -> "AsyncMockClient":
return self
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
pass
async def converse(self, *args, **kwargs):
return {"output": {"message": {"content": [{"text": EXP_RESPONSE}]}}}
async def converse_stream(self, *args, **kwargs):
async def stream_generator():
for element in EXP_STREAM_RESPONSE:
yield {
"contentBlockDelta": {
"delta": {"text": element},
"contentBlockIndex": 0,
}
}
# Add messageStop and metadata events for token usage testing
yield {"messageStop": {"stopReason": "end_turn"}}
yield {
"metadata": {
"usage": {"inputTokens": 15, "outputTokens": 26, "totalTokens": 41},
"metrics": {"latencyMs": 886},
}
}
return {"stream": stream_generator()}
| AsyncMockClient |
python | simonw__datasette | datasette/events.py | {
"start": 4173,
"end": 4611
} | class ____(Event):
"""
Event name: ``update-row``
A row was updated in a table.
:ivar database: The name of the database where the row was updated.
:type database: str
:ivar table: The name of the table where the row was updated.
:type table: str
:ivar pks: The primary key values of the updated row.
"""
name = "update-row"
database: str
table: str
pks: list
@dataclass
| UpdateRowEvent |
python | doocs__leetcode | solution/2100-2199/2127.Maximum Employees to Be Invited to a Meeting/Solution.py | {
"start": 0,
"end": 1269
} | class ____:
def maximumInvitations(self, favorite: List[int]) -> int:
def max_cycle(fa: List[int]) -> int:
n = len(fa)
vis = [False] * n
ans = 0
for i in range(n):
if vis[i]:
continue
cycle = []
j = i
while not vis[j]:
cycle.append(j)
vis[j] = True
j = fa[j]
for k, v in enumerate(cycle):
if v == j:
ans = max(ans, len(cycle) - k)
break
return ans
def topological_sort(fa: List[int]) -> int:
n = len(fa)
indeg = [0] * n
dist = [1] * n
for v in fa:
indeg[v] += 1
q = deque(i for i, v in enumerate(indeg) if v == 0)
while q:
i = q.popleft()
dist[fa[i]] = max(dist[fa[i]], dist[i] + 1)
indeg[fa[i]] -= 1
if indeg[fa[i]] == 0:
q.append(fa[i])
return sum(dist[i] for i, v in enumerate(fa) if i == fa[fa[i]])
return max(max_cycle(favorite), topological_sort(favorite))
| Solution |
python | numba__numba | numba/core/caching.py | {
"start": 21262,
"end": 25899
} | class ____(_Cache):
"""
A per-function compilation cache. The cache saves data in separate
data files and maintains information in an index file.
There is one index file per function and Python version
("function_name-<lineno>.pyXY.nbi") which contains a mapping of
signatures and architectures to data files.
It is prefixed by a versioning key and a timestamp of the Python source
file containing the function.
There is one data file ("function_name-<lineno>.pyXY.<number>.nbc")
per function, function signature, target architecture and Python version.
Separate index and data files per Python version avoid pickle
compatibility problems.
Note:
This contains the driver logic only. The core logic is provided
by a subclass of ``CacheImpl`` specified as *_impl_class* in the subclass.
"""
# The following class variables must be overridden by subclass.
_impl_class = None
def __init__(self, py_func):
self._name = repr(py_func)
self._py_func = py_func
self._impl = self._impl_class(py_func)
self._cache_path = self._impl.locator.get_cache_path()
# This may be a bit strict but avoids us maintaining a magic number
source_stamp = self._impl.locator.get_source_stamp()
filename_base = self._impl.filename_base
self._cache_file = IndexDataCacheFile(cache_path=self._cache_path,
filename_base=filename_base,
source_stamp=source_stamp)
self.enable()
def __repr__(self):
return "<%s py_func=%r>" % (self.__class__.__name__, self._name)
@property
def cache_path(self):
return self._cache_path
def enable(self):
self._enabled = True
def disable(self):
self._enabled = False
def flush(self):
self._cache_file.flush()
def load_overload(self, sig, target_context):
"""
Load and recreate the cached object for the given signature,
using the *target_context*.
"""
# Refresh the context to ensure it is initialized
target_context.refresh()
with self._guard_against_spurious_io_errors():
return self._load_overload(sig, target_context)
# None returned if the `with` block swallows an exception
def _load_overload(self, sig, target_context):
if not self._enabled:
return
key = self._index_key(sig, target_context.codegen())
data = self._cache_file.load(key)
if data is not None:
data = self._impl.rebuild(target_context, data)
return data
def save_overload(self, sig, data):
"""
Save the data for the given signature in the cache.
"""
with self._guard_against_spurious_io_errors():
self._save_overload(sig, data)
def _save_overload(self, sig, data):
if not self._enabled:
return
if not self._impl.check_cachable(data):
return
self._impl.locator.ensure_cache_path()
key = self._index_key(sig, data.codegen)
data = self._impl.reduce(data)
self._cache_file.save(key, data)
@contextlib.contextmanager
def _guard_against_spurious_io_errors(self):
if os.name == 'nt':
# Guard against permission errors due to accessing the file
# from several processes (see #2028)
try:
yield
except OSError as e:
if e.errno != errno.EACCES:
raise
else:
# No such conditions under non-Windows OSes
yield
def _index_key(self, sig, codegen):
"""
Compute index key for the given signature and codegen.
It includes a description of the OS, target architecture and hashes of
the bytecode for the function and, if the function has a __closure__,
a hash of the cell_contents.
"""
codebytes = self._py_func.__code__.co_code
if self._py_func.__closure__ is not None:
cvars = tuple([x.cell_contents for x in self._py_func.__closure__])
# Note: cloudpickle serializes a function differently depending
# on how the process is launched; e.g. multiprocessing.Process
cvarbytes = dumps(cvars)
else:
cvarbytes = b''
hasher = lambda x: hashlib.sha256(x).hexdigest()
return (sig, codegen.magic_tuple(), (hasher(codebytes),
hasher(cvarbytes),))
| Cache |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_reflection.py | {
"start": 14057,
"end": 16747
} | class ____:
@logged
def __init__(self, i: int):
pass
@given(st.builds(Bar))
def test_issue_2495_regression(_):
"""See https://github.com/HypothesisWorks/hypothesis/issues/2495"""
@pytest.mark.skipif(
sys.version_info[:2] >= (3, 11),
reason="handled upstream in https://github.com/python/cpython/pull/92065",
)
def test_error_on_keyword_parameter_name():
def f(source):
pass
f.__signature__ = Signature(
parameters=[Parameter("from", Parameter.KEYWORD_ONLY)],
return_annotation=Parameter.empty,
)
with pytest.raises(ValueError, match="SyntaxError because `from` is a keyword"):
get_signature(f)
def test_param_is_called_within_func():
def f(any_name):
any_name()
assert is_first_param_referenced_in_function(f)
def test_param_is_called_within_subfunc():
def f(any_name):
def f2():
any_name()
assert is_first_param_referenced_in_function(f)
def test_param_is_not_called_within_func():
def f(any_name):
pass
assert not is_first_param_referenced_in_function(f)
def test_param_called_within_defaults_on_error():
# Create a function object for which we cannot retrieve the source.
f = compile("lambda: ...", "_.py", "eval")
assert is_first_param_referenced_in_function(f)
def _prep_source(*pairs):
return [
pytest.param(
dedent(x).strip(), dedent(y).strip().encode(), id=f"case-{i}", marks=marks
)
for i, (x, y, *marks) in enumerate(pairs)
]
@pytest.mark.parametrize(
"src, clean",
_prep_source(
("", ""),
("def test(): pass", "def test(): pass"),
("def invalid syntax", "def invalid syntax"),
("def also invalid(", "def also invalid("),
(
"""
@example(1)
@given(st.integers())
def test(x):
# line comment
assert x # end-of-line comment
"Had some blank lines above"
""",
"""
def test(x):
assert x
"Had some blank lines above"
""",
),
(
"""
@dec
async def f():
pass
""",
"""
async def f():
pass
""",
),
),
)
def test_clean_source(src, clean):
assert reflection._clean_source(src).splitlines() == clean.splitlines()
def test_overlong_repr_warns():
with pytest.warns(HypothesisWarning, match="overly large"):
repr(LazyStrategy(st.one_of, [st.none()] * 10000, {}))
def identity(x):
return x
| Bar |
python | pytorch__pytorch | torch/_dynamo/package.py | {
"start": 2738,
"end": 4108
} | class ____:
"""
Contains the serializable information associated with a single compilation in dynamo.
To restore an execution of compiled code, we will need to serialize the following data:
- Dynamo bytecode for mapping Python inputs/outputs.
- Dynamo guards.
"""
guards_state: bytes
dynamo_code: SerializedCode
def load_guards_state(guards_state: bytes) -> Any:
try:
import torch.distributed.fsdp._fully_shard._fully_shard as _fully_shard
ctx = _fully_shard.disable_fsdp_module_new_init()
except ImportError:
ctx = nullcontext() # type: ignore[assignment]
with ctx:
return pickle.loads(guards_state)
def load_guard_manager(
guards_state: "GuardsState",
target_code: types.CodeType,
runtime_global_scope: Any,
) -> "GuardManagerWrapper":
from .output_graph import OutputGraphCommon
return torch._dynamo.guards.CheckFunctionManager(
target_code,
OutputGraphCommon(guards_state.output_graph),
shape_code_parts=guards_state.shape_code_parts,
runtime_global_scope=runtime_global_scope,
source_get_cache=guards_state.source_get_cache,
).guard_manager
_BackendId = NewType("_BackendId", str) # __compiled_fn
_FunctionId = NewType("_FunctionId", str) # __resume_at
@dataclasses.dataclass(frozen=True)
| _GuardedCodeCacheEntry |
python | doocs__leetcode | solution/1800-1899/1888.Minimum Number of Flips to Make the Binary String Alternating/Solution.py | {
"start": 0,
"end": 366
} | class ____:
def minFlips(self, s: str) -> int:
n = len(s)
target = "01"
cnt = sum(c != target[i & 1] for i, c in enumerate(s))
ans = min(cnt, n - cnt)
for i in range(n):
cnt -= s[i] != target[i & 1]
cnt += s[i] != target[(i + n) & 1]
ans = min(ans, cnt, n - cnt)
return ans
| Solution |
python | pydata__xarray | asv_bench/benchmarks/repr.py | {
"start": 663,
"end": 1158
} | class ____:
def setup(self):
# construct a datatree with 500 nodes
number_of_files = 20
number_of_groups = 25
tree_dict = {}
for f in range(number_of_files):
for g in range(number_of_groups):
tree_dict[f"file_{f}/group_{g}"] = xr.Dataset({"g": f * g})
self.dt = xr.DataTree.from_dict(tree_dict)
def time_repr(self):
repr(self.dt)
def time_repr_html(self):
self.dt._repr_html_()
| ReprDataTree |
python | getsentry__sentry | src/sentry/sentry_apps/api/endpoints/installation_external_issue_actions.py | {
"start": 1284,
"end": 1593
} | class ____(serializers.Serializer):
groupId = serializers.CharField(required=True, allow_null=False)
action = serializers.CharField(required=True, allow_null=False)
uri = serializers.CharField(required=True, allow_null=False)
@region_silo_endpoint
| SentryAppInstallationExternalIssueActionsSerializer |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/call9.py | {
"start": 420,
"end": 550
} | class ____:
def __getitem__(self, __key: str) -> str: ...
def keys(self) -> KeysView[str]: ...
T = TypeVar("T")
| StrRecord |
python | spack__spack | .github/workflows/bin/format-rst.py | {
"start": 3523,
"end": 9771
} | class ____:
lineno: int
end_lineno: int
src: str
lines: List[str]
def __init__(self, line: int, src: str) -> None:
self.lineno = line
self.src = src
self.lines = src.splitlines()
self.end_lineno = line + len(self.lines) - 1
def _is_node_in_table(node: nodes.Node) -> bool:
"""Check if a node is inside a table by walking up the parent chain."""
while node.parent:
node = node.parent
if isinstance(node, nodes.table):
return True
return False
def _validate_schema(data: object) -> None:
if not isinstance(data, dict):
return
for section, schema in SECTION_AND_SCHEMA:
if section in data:
jsonschema.validate(data, schema)
def _format_code_blocks(document: nodes.document, path: str) -> List[Warning]:
"""Try to parse and format Python, YAML, and JSON code blocks. This does *not* update the
sources, but collects issues for later reporting. Returns a list of warnings."""
issues: List[Warning] = []
for code_block in document.findall(nodes.literal_block):
language = code_block.attributes.get("language", "")
if language not in ("python", "yaml", "json"):
continue
original = code_block.astext()
line = code_block.line if code_block.line else 0
possible_config_data = None
try:
if language == "python":
formatted = black.format_str(original, mode=black.FileMode(line_length=99))
elif language == "yaml":
yaml = YAML(pure=True)
yaml.width = 10000 # do not wrap lines
yaml.preserve_quotes = True # do not force particular quotes
buf = io.BytesIO()
possible_config_data = yaml.load(original)
yaml.dump(possible_config_data, buf)
formatted = buf.getvalue().decode("utf-8")
elif language == "json":
formatted = json.dumps(json.loads(original), indent=2)
else:
assert False
except Exception as e:
issues.append(Warning(path, line, f"formatting failed: {e}: {original!r}"))
continue
try:
_validate_schema(possible_config_data)
except jsonschema.ValidationError as e:
issues.append(ValidationWarning(path, line, f"schema validation failed: {e.message}"))
if formatted == original:
continue
diff = "\n".join(
difflib.unified_diff(
original.splitlines(),
formatted.splitlines(),
lineterm="",
fromfile=f"{path}:{line} (original)",
tofile=f"{path}:{line} (suggested, NOT required)",
)
)
# ignore suggestions to quote double colons like this:
#
# - build_stage::
# + 'build_stage:':
#
if diff and not DOUBLE_COLON_WARNING.search(diff):
issues.append(CodeBlockWarning(path, line, "formatting suggested:", diff))
return issues
def _format_paragraphs(document: nodes.document, path: str, src_lines: List[str]) -> bool:
"""Format paragraphs in the document. Returns True if ``src_lines`` was modified."""
paragraphs = [
ParagraphInfo(line=p.line, src=p.rawsource)
for p in document.findall(nodes.paragraph)
if p.line is not None and p.rawsource and not _is_node_in_table(p)
]
# Work from bottom to top to avoid messing up line numbers
paragraphs.sort(key=lambda p: p.lineno, reverse=True)
modified = False
for p in paragraphs:
# docutils does not give us the column offset, so we'll find it ourselves.
col_offset = src_lines[p.lineno - 1].rfind(p.lines[0])
assert col_offset >= 0, f"{path}:{p.lineno}: rst parsing error."
prefix = lambda i: " " * col_offset if i > 0 else src_lines[p.lineno - 1][:col_offset]
# Defensive check to ensure the source paragraph matches the docutils paragraph
for i, line in enumerate(p.lines):
line_lhs = f"{prefix(i)}{line}"
line_rhs = src_lines[p.lineno - 1 + i].rstrip() # docutils trims trailing whitespace
assert line_lhs == line_rhs, f"{path}:{p.lineno + i}: rst parsing error."
# Replace current newlines with whitespace, and then split sentences.
new_paragraph_src = END_OF_SENTENCE.sub(r"\1\n", p.src.replace("\n", " "))
new_paragraph_lines = [
f"{prefix(i)}{line.lstrip()}" for i, line in enumerate(new_paragraph_src.splitlines())
]
if new_paragraph_lines != src_lines[p.lineno - 1 : p.end_lineno]:
modified = True
src_lines[p.lineno - 1 : p.end_lineno] = new_paragraph_lines
return modified
def reformat_rst_file(path: str, warnings: List[Warning]) -> bool:
"""Reformat a reStructuredText file "in-place". Returns True if modified, False otherwise."""
with open(path, "r", encoding="utf-8") as f:
src = f.read()
src_lines = src.splitlines()
document: nodes.document = publish_doctree(src, settings_overrides=DOCUTILS_SETTING)
warnings.extend(_format_code_blocks(document, path))
if not _format_paragraphs(document, path, src_lines):
return False
with open(f"{path}.tmp", "w", encoding="utf-8") as f:
f.write("\n".join(src_lines))
f.write("\n")
os.rename(f"{path}.tmp", path)
print(f"Fixed reStructuredText formatting: {path}", flush=True)
return True
def main(*files: str) -> None:
modified = False
warnings: List[Warning] = []
for f in files:
modified |= reformat_rst_file(f, warnings)
if modified:
subprocess.run(["git", "--no-pager", "diff", "--color=always", "--", *files])
for warning in sorted(warnings, key=lambda w: isinstance(w, ValidationWarning)):
print(warning, flush=True, file=sys.stderr)
if warnings:
print(
_warning(f"completed with {len(warnings)} potential issues"),
flush=True,
file=sys.stderr,
)
sys.exit(1 if modified else 0)
if __name__ == "__main__":
main(*sys.argv[1:])
| ParagraphInfo |
python | falconry__falcon | tests/asgi/test_request_context_asgi.py | {
"start": 82,
"end": 1652
} | class ____:
def test_default_request_context(
self,
):
req = testing.create_asgi_req()
req.context.hello = 'World'
assert req.context.hello == 'World'
assert req.context['hello'] == 'World'
req.context['note'] = 'Default Request.context_type used to be dict.'
assert 'note' in req.context
assert hasattr(req.context, 'note')
assert req.context.get('note') == req.context['note']
def test_custom_request_context(self):
# Define a Request-alike with a custom context type
class MyCustomContextType:
pass
class MyCustomRequest(Request):
context_type = MyCustomContextType
req = testing.create_asgi_req(req_type=MyCustomRequest)
assert isinstance(req.context, MyCustomContextType)
def test_custom_request_context_failure(self):
# Define a Request-alike with a non-callable custom context type
class MyCustomRequest(Request):
context_type = False
with pytest.raises(TypeError):
testing.create_asgi_req(req_type=MyCustomRequest)
def test_custom_request_context_request_access(self):
def create_context(req):
return {'uri': req.uri}
# Define a Request-alike with a custom context type
class MyCustomRequest(Request):
context_type = create_context
req = testing.create_asgi_req(req_type=MyCustomRequest)
assert isinstance(req.context, dict)
assert req.context['uri'] == req.uri
| TestRequestContext |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/base.py | {
"start": 137267,
"end": 138965
} | class ____:
"""Return unicode-decoded values based on type inspection.
Smooth over data type issues (esp. with alpha driver versions) and
normalize strings as Unicode regardless of user-configured driver
encoding settings.
"""
# Some MySQL-python versions can return some columns as
# sets.Set(['value']) (seriously) but thankfully that doesn't
# seem to come up in DDL queries.
_encoding_compat: dict[str, str] = {
"koi8r": "koi8_r",
"koi8u": "koi8_u",
"utf16": "utf-16-be", # MySQL's uft16 is always bigendian
"utf8mb4": "utf8", # real utf8
"utf8mb3": "utf8", # real utf8; saw this happen on CI but I cannot
# reproduce, possibly mariadb10.6 related
"eucjpms": "ujis",
}
def __init__(self, rowproxy: Row[Unpack[_Ts]], charset: Optional[str]):
self.rowproxy = rowproxy
self.charset = (
self._encoding_compat.get(charset, charset)
if charset is not None
else None
)
def __getitem__(self, index: int) -> Any:
item = self.rowproxy[index]
if self.charset and isinstance(item, bytes):
return item.decode(self.charset)
else:
return item
def __getattr__(self, attr: str) -> Any:
item = getattr(self.rowproxy, attr)
if self.charset and isinstance(item, bytes):
return item.decode(self.charset)
else:
return item
_info_columns = sql.table(
"columns",
sql.column("table_schema", VARCHAR(64)),
sql.column("table_name", VARCHAR(64)),
sql.column("column_name", VARCHAR(64)),
schema="information_schema",
)
| _DecodingRow |
python | django__django | tests/fixtures_regress/models.py | {
"start": 4696,
"end": 4901
} | class ____(models.Model):
name = models.CharField(max_length=255, unique=True)
def natural_key(self):
return (self.name,)
natural_key.dependencies = ["fixtures_regress.circle2"]
| Circle1 |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec32.py | {
"start": 894,
"end": 1430
} | class ____(Generic[P, T1, T2]):
def __init__(
self, fn: Callable[Concatenate[T1, P], T2], *args: P.args, **kwargs: P.kwargs
) -> None:
self.fn = fn
self.args = args
self.kwargs = kwargs
def __call__(self, value: T1) -> T2:
return self.fn(value, *self.args, **self.kwargs)
# This should generate an error because argument x is missing.
Class2(add_k)
# This should generate an error because arguments x has the wrong type.
Class2(add_k, "3")
Class2(add_k, 2)
Class2(add_k, k=2)
| Class2 |
python | mkdocs__mkdocs | mkdocs/plugins.py | {
"start": 16117,
"end": 17263
} | class ____(Generic[P, T]):
"""
A descriptor that allows defining multiple event handlers and declaring them under one event's name.
Usage example:
```python
@plugins.event_priority(100)
def _on_page_markdown_1(self, markdown: str, **kwargs):
...
@plugins.event_priority(-50)
def _on_page_markdown_2(self, markdown: str, **kwargs):
...
on_page_markdown = plugins.CombinedEvent(_on_page_markdown_1, _on_page_markdown_2)
```
NOTE: The names of the sub-methods **can't** start with `on_`;
instead they can start with `_on_` like in the the above example, or anything else.
"""
def __init__(self, *methods: Callable[Concatenate[Any, P], T]):
self.methods = methods
# This is only for mypy, so CombinedEvent can be a valid override of the methods in BasePlugin
def __call__(self, instance: BasePlugin, *args: P.args, **kwargs: P.kwargs) -> T:
raise TypeError(f"{type(self).__name__!r} object is not callable")
def __get__(self, instance, owner=None):
return CombinedEvent(*(f.__get__(instance, owner) for f in self.methods))
| CombinedEvent |
python | keon__algorithms | tests/test_strings.py | {
"start": 4825,
"end": 5939
} | class ____(unittest.TestCase):
"""[summary]
Test for the file is_palindrome.py
Arguments:
unittest {[type]} -- [description]
"""
def test_is_palindrome(self):
# 'Otto' is a old german name.
self.assertTrue(is_palindrome("Otto"))
self.assertFalse(is_palindrome("house"))
def test_is_palindrome_reverse(self):
# 'Otto' is a old german name.
self.assertTrue(is_palindrome_reverse("Otto"))
self.assertFalse(is_palindrome_reverse("house"))
def test_is_palindrome_two_pointer(self):
# 'Otto' is a old german name.
self.assertTrue(is_palindrome_two_pointer("Otto"))
self.assertFalse(is_palindrome_two_pointer("house"))
def test_is_palindrome_stack(self):
# 'Otto' is a old german name.
self.assertTrue(is_palindrome_stack("Otto"))
self.assertFalse(is_palindrome_stack("house"))
def test_is_palindrome_deque(self):
# 'Otto' is a old german name.
self.assertTrue(is_palindrome_deque("Otto"))
self.assertFalse(is_palindrome_deque("house"))
| TestIsPalindrome |
python | pytorch__pytorch | torch/utils/_strobelight/cli_function_profiler.py | {
"start": 1331,
"end": 11360
} | class ____:
"""
Note: this is a meta only tool.
StrobelightCLIFunctionProfiler can be used to profile a python function and
generate a strobelight link with the results. It works on meta servers but
does not requires an fbcode target.
When stop_at_error is false(default), error during profiling does not prevent
the work function from running.
Check function_profiler_example.py for an example.
"""
# This lock is used to make sure only one thread is running the profiler at any point.
_lock = Lock()
def __init__(
self,
*,
stop_at_error: bool = False,
max_profile_duration_sec: int = 60 * 10,
sample_each: float = 1e7, # sample each sample_each cycles.
run_user_name: str = "pytorch-strobelight-ondemand",
timeout_wait_for_running_sec: int = 60,
timeout_wait_for_finished_sec: int = 60,
recorded_env_variables: list[str] | None = None,
sample_tags: list[str] | None = None,
stack_max_len: int = 127,
async_stack_max_len: int = 127,
) -> None:
self.stop_at_error = stop_at_error
self.max_profile_duration_sec = max_profile_duration_sec
self.sample_each = sample_each
self.run_user_name = run_user_name
self.timeout_wait_for_running_sec = timeout_wait_for_running_sec
self.timeout_wait_for_finished_sec = timeout_wait_for_finished_sec
# Results of the most recent run.
# Tracks the strobelight run id of the most recent run
self.current_run_id: int | None = None
self.sample_tags = sample_tags
def _run_async(self) -> None:
processId = os.getpid()
namespace = _pid_namespace(processId)
command = [
"strobeclient",
"run",
"--profiler",
"pyperf",
"--event",
"cycles",
"--async",
"--sample-interval",
f"{int(self.sample_each)}",
"--duration-ms",
f"{int(self.max_profile_duration_sec * 1000)}",
"--pid",
f"{namespace}:{processId}",
]
if self.sample_tags:
command.append("--sample-tags")
command.append(",".join(self.sample_tags))
logger.debug("running command: %s", _command_to_string(command))
result = subprocess.run(command, capture_output=True)
output = result.stderr.decode("utf-8")
logger.debug("output:\n{%s}", output)
if result.returncode != 0:
raise StrobelightCLIProfilerError(
f"failed to start strobelight profiling, error in run_async:{output}"
)
if match := re.search(r"INFO Run Id: (-?\d+)", output):
self.current_run_id = int(match.group(1))
return
raise StrobelightCLIProfilerError(
f"failed to start strobelight profiling, unexpected result {output}"
)
def _wait_for_running(self, counter: int = 0) -> None:
if counter > 20:
raise StrobelightCLIProfilerError(
"wait_for_running called more than 20 times"
)
command = ["strobeclient", "getRunStatus", "--run-id", f"{self.current_run_id}"]
logger.debug("running command: %s", _command_to_string(command))
result = subprocess.run(command, capture_output=True)
output = result.stderr.decode("utf-8")
logger.debug("output:\n{%s}", output)
if result.returncode != 0:
raise StrobelightCLIProfilerError(
f"failed to start strobelight profiling, error in wait_for_running:{output}"
)
if match := re.search("Profile run status: (.*)", output):
current_status = match.group(1)
if current_status == "RUNNING":
return
elif current_status == "PREPARING":
time.sleep(10)
self._wait_for_running(counter + 1)
return
else:
raise StrobelightCLIProfilerError(f"unexpected {current_status} phase")
raise StrobelightCLIProfilerError(f"unexpected output\n: {output} ")
def _stop_run(self) -> None:
command = ["strobeclient", "stopRun", "--run-id", str(self.current_run_id)]
logger.debug("running command: %s", _command_to_string(command))
result = subprocess.run(command, capture_output=True)
output = result.stderr.decode("utf-8")
logger.debug("output:\n{%s}", output)
if result.returncode != 0:
raise StrobelightCLIProfilerError(
f"failed to stop strobelight profiling, return code is not 0 :{output}"
)
if match := re.search("INFO ::1:(.*)", output):
current_status = match.group(1)
if current_status.__contains__("Success!"):
return
else:
raise StrobelightCLIProfilerError(
f"failed to stop strobelight profiling, got {current_status} result"
)
raise StrobelightCLIProfilerError(f"unexpected output\n: {output} ")
def _get_results(self) -> None:
command = ["strobeclient", "getRunStatus", "--run-id", str(self.current_run_id)]
logger.debug("running command: %s", _command_to_string(command))
result = subprocess.run(command, capture_output=True)
output = result.stderr.decode("utf-8")
logger.debug("output:\n{%s}", output)
if result.returncode != 0:
raise StrobelightCLIProfilerError(
f"failed to extract profiling results, return code is not 0 : {output}"
)
if match := re.search("INFO ::1:(.*)", output):
current_status = match.group(1)
if current_status.__contains__("Profile run status: PROCESSING"):
time.sleep(10)
self._get_results()
return
elif not current_status.__contains__("Profile run finished with SUCCESS"):
raise StrobelightCLIProfilerError(
f"failed to extract profiling results, unexpected response {output}"
)
for item in re.findall(
r"(Total samples(.*)|GraphProfiler(.*)|Icicle view \(python stack\)(.*))",
output,
):
logger.info(item[0])
def _stop_strobelight_no_throw(
self,
collect_results: bool,
) -> None:
try:
# call stop run
self._stop_run()
logger.info("strobelight profiling stopped")
logger.debug("collection stopped")
if not collect_results:
return
self._get_results()
except Exception:
logger.warning("error during stop_strobelight", exc_info=True)
# Return true if strobelight started and is running. Never throw.
def _start_strobelight(self) -> bool:
strobelight_started = False
try:
self._run_async()
strobelight_started = True
logger.info("strobelight run id is: %s", self.current_run_id)
self._wait_for_running()
logger.info("strobelight profiling running")
return True
except Exception:
logger.warning("error during start_strobelight:", exc_info=True)
if strobelight_started:
self._stop_strobelight_no_throw(collect_results=False)
return False
def profile(
self, work_function: Callable[_P, _R], *args: _P.args, **kwargs: _P.kwargs
) -> _R | None:
self.current_run_id = None
if locked := StrobelightCLIFunctionProfiler._lock.acquire(False):
if not locked:
if self.stop_at_error:
raise StrobelightCLIProfilerError("concurrent runs not supported")
logger.warning("concurrent runs not supported")
return work_function(*args, **kwargs)
started = self._start_strobelight()
if not started:
if self.stop_at_error:
StrobelightCLIFunctionProfiler._lock.release()
raise StrobelightCLIProfilerError(
"failed to start strobelight profiling"
)
result = work_function(*args, **kwargs)
StrobelightCLIFunctionProfiler._lock.release()
return result
try:
logger.debug("collection started")
result = work_function(*args, **kwargs)
self._stop_strobelight_no_throw(collect_results=True)
StrobelightCLIFunctionProfiler._lock.release()
return result
except Exception as error:
logger.warning("work function throw exception", exc_info=True)
self._stop_strobelight_no_throw(collect_results=False)
StrobelightCLIFunctionProfiler._lock.release()
raise error
return None
# A function decorator that wraps profile, if no profiler is provided one with
# default args is created. A function can be annotated as:
# @strobelight()
# @strobelight(profiler = StrobelightFunctionProfiler(stop_at_error=True,..))
# @strobelight(stop_at_error=True,...)
def strobelight(
profiler: StrobelightCLIFunctionProfiler | None = None, **kwargs: Any
) -> Callable[[Callable[_P, _R]], Callable[_P, _R | None]]:
if not profiler:
profiler = StrobelightCLIFunctionProfiler(**kwargs)
def strobelight_inner(
work_function: Callable[_P, _R],
) -> Callable[_P, _R | None]:
@functools.wraps(work_function)
def wrapper_function(*args: _P.args, **kwargs: _P.kwargs) -> _R | None:
# pyrefly: ignore [bad-argument-type]
return profiler.profile(work_function, *args, **kwargs)
return wrapper_function
return strobelight_inner
| StrobelightCLIFunctionProfiler |
python | cython__cython | Cython/Compiler/Optimize.py | {
"start": 3410,
"end": 4850
} | class ____(Visitor.TreeVisitor):
"""
YieldExprNode finder for generator expressions.
"""
def __init__(self):
Visitor.TreeVisitor.__init__(self)
self.yield_stat_nodes = {}
self.yield_nodes = []
visit_Node = Visitor.TreeVisitor.visitchildren
def visit_YieldExprNode(self, node):
self.yield_nodes.append(node)
self.visitchildren(node)
def visit_ExprStatNode(self, node):
self.visitchildren(node)
if node.expr in self.yield_nodes:
self.yield_stat_nodes[node.expr] = node
# everything below these nodes is out of scope:
def visit_GeneratorExpressionNode(self, node):
pass
def visit_LambdaNode(self, node):
pass
def visit_FuncDefNode(self, node):
pass
def _find_single_yield_expression(node):
yield_statements = _find_yield_statements(node)
if len(yield_statements) != 1:
return None, None
return yield_statements[0]
def _find_yield_statements(node):
collector = _YieldNodeCollector()
collector.visitchildren(node)
try:
yield_statements = [
(yield_node.arg, collector.yield_stat_nodes[yield_node])
for yield_node in collector.yield_nodes
]
except KeyError:
# found YieldExprNode without ExprStatNode (i.e. a non-statement usage of 'yield')
yield_statements = []
return yield_statements
| _YieldNodeCollector |
python | google__jax | jax/experimental/sparse/bcoo.py | {
"start": 5339,
"end": 117078
} | class ____(Protocol):
@property
def shape(self) -> Shape: ...
@property
def dtype(self) -> Any: ...
def _validate_bcoo(data: Buffer, indices: Buffer, shape: Sequence[int]) -> BCOOProperties:
props = _validate_bcoo_indices(indices, shape)
n_batch, n_sparse, n_dense, nse = props
shape = tuple(shape)
if any(s1 not in (1, s2) for s1, s2 in safe_zip(data.shape[:n_batch], shape[:n_batch])):
raise ValueError(f"data batch dimensions not compatible for {data.shape=}, {shape=}")
if data.shape[n_batch:] != (nse,) + shape[n_batch + n_sparse:]:
raise ValueError(f"Invalid {data.shape=} for {nse=}, {n_batch=}, {n_dense=}")
return props
def _validate_bcoo_indices(indices: Buffer, shape: Sequence[int]) -> BCOOProperties:
assert jnp.issubdtype(indices.dtype, jnp.integer)
shape = tuple(shape)
nse, n_sparse = indices.shape[-2:]
n_batch = len(indices.shape) - 2
n_dense = len(shape) - n_batch - n_sparse
assert n_dense >= 0
if any(s1 not in (1, s2) for s1, s2 in safe_zip(indices.shape[:n_batch], shape[:n_batch])):
raise ValueError(f"indices batch dimensions not compatible for {indices.shape=}, {shape=}")
if indices.shape[n_batch:] != (nse, n_sparse):
raise ValueError(f"Invalid ={indices.shape=} for {nse=}, {n_batch=}, {n_dense=}")
return BCOOProperties(n_batch=n_batch, n_sparse=n_sparse, n_dense=n_dense, nse=nse)
#----------------------------------------------------------------------
# bcoo_todense
bcoo_todense_p = core.Primitive('bcoo_todense')
def bcoo_todense(mat: BCOO) -> Array:
"""Convert batched sparse matrix to a dense matrix.
Args:
mat: BCOO matrix.
Returns:
mat_dense: dense version of ``mat``.
"""
return _bcoo_todense(mat.data, mat.indices, spinfo=mat._info)
def _bcoo_todense(data: Array, indices: Array, *, spinfo: SparseInfo
) -> Array:
"""Convert batched sparse matrix to a dense matrix.
Args:
data : array of shape ``batch_dims + (nse,) + block_dims``.
indices : array of shape ``batch_dims + (n_sparse, nse)``
spinfo : SparseInfo. In particular, this includes the shape
of the matrix, which is equal to ``batch_dims + sparse_dims + block_dims``
where ``len(sparse_dims) == n_sparse``
Returns:
mat : array with specified shape and dtype matching ``data``
"""
return bcoo_todense_p.bind(jnp.asarray(data), jnp.asarray(indices), spinfo=spinfo)
@bcoo_todense_p.def_impl
def _bcoo_todense_impl(data, indices, *, spinfo):
shape = spinfo.shape
n_batch, n_sparse, _, _ = _validate_bcoo(data, indices, shape)
ind_slices = tuple(np.zeros(s, int) if i_s == 1 else np.arange(s)
for s, i_s in zip(shape[:n_batch], indices.shape[:n_batch]))
grid = tuple(np.meshgrid(*ind_slices, indexing='ij', sparse=True))
sparse_ind = tuple(indices[grid + (slice(None), i)] for i in range(n_sparse))
batch_slices = tuple(np.arange(s) for s in shape[:n_batch])
grid = np.meshgrid(*batch_slices, np.arange(1), indexing='ij', sparse=True)
batch_ind = tuple(grid)[:-1]
if not sparse_ind:
data = data.sum(n_batch, keepdims=bool(batch_ind), dtype=data.dtype)
return jnp.zeros(shape, data.dtype).at[batch_ind + sparse_ind].add(data)
@bcoo_todense_p.def_abstract_eval
def _bcoo_todense_abstract_eval(data, indices, *, spinfo):
shape = spinfo.shape
_validate_bcoo(data, indices, shape)
return core.ShapedArray(shape, data.dtype)
def _bcoo_todense_jvp(data_dot, data, indices, *, spinfo):
return _bcoo_todense(data_dot, indices, spinfo=spinfo)
def _bcoo_todense_transpose(ct, data, indices, *, spinfo):
shape = spinfo.shape
assert ad.is_undefined_primal(data)
if ad.is_undefined_primal(indices):
raise ValueError("Cannot transpose with respect to sparse indices")
assert ct.shape == shape
assert ct.dtype == data.aval.dtype
return _bcoo_extract(indices, ct), indices
def _bcoo_todense_batching_rule(batched_args, batch_dims, *, spinfo):
data, indices, spinfo = _bcoo_batch_dims_to_front(batched_args, batch_dims, spinfo)
return _bcoo_todense(data, indices, spinfo=spinfo), 0
ad.defjvp(bcoo_todense_p, _bcoo_todense_jvp, None)
ad.primitive_transposes[bcoo_todense_p] = _bcoo_todense_transpose
batching.primitive_batchers[bcoo_todense_p] = _bcoo_todense_batching_rule
mlir.register_lowering(bcoo_todense_p, mlir.lower_fun(
_bcoo_todense_impl, multiple_results=False))
#--------------------------------------------------------------------
# bcoo_fromdense
bcoo_fromdense_p = core.Primitive('bcoo_fromdense')
bcoo_fromdense_p.multiple_results = True
_TRACED_NSE_ERROR = """
The error arose for the nse argument of bcoo_fromdense. In order for
BCOO.fromdense() to be used in traced/compiled code, you must pass a concrete
value to the nse (number of stored elements) argument.
"""
def bcoo_fromdense(mat: Array, *, nse: int | None = None, n_batch: int = 0,
n_dense: int = 0, index_dtype: DTypeLike = jnp.int32) -> BCOO:
"""Create BCOO-format sparse matrix from a dense matrix.
Args:
mat : array to be converted to BCOO.
nse : number of specified elements in each batch
n_batch : number of batch dimensions (default: 0)
n_dense : number of block_dimensions (default: 0)
index_dtype : dtype of sparse indices (default: int32)
Returns:
mat_bcoo: BCOO representation of the matrix.
"""
mat = jnp.asarray(mat)
nse_arr: int | Array | None = nse
if nse_arr is None:
nse_arr = _count_stored_elements(mat, n_batch, n_dense)
nse_int = core.concrete_or_error(operator.index, nse_arr, _TRACED_NSE_ERROR)
return BCOO(_bcoo_fromdense(mat, nse=nse_int, n_batch=n_batch, n_dense=n_dense,
index_dtype=index_dtype),
shape=mat.shape, indices_sorted=True, unique_indices=True)
def _bcoo_fromdense(mat: Array, *, nse: int, n_batch: int = 0, n_dense: int = 0,
index_dtype: DTypeLike = jnp.int32) -> tuple[Array, Array]:
"""Create BCOO-format sparse matrix from a dense matrix.
Args:
mat : array to be converted to BCOO, with ``ndim = n_batch + n_sparse + n_dense``.
nse : number of specified elements in each batch
n_batch : number of batch dimensions (default: 0)
n_dense : number of block_dimensions (default: 0)
index_dtype : dtype of sparse indices (default: int32)
Returns:
data : array of shape ``mat.shape[:n_batch] + (nse,) + mat.shape[mat.ndim - n_dense:]``
and dtype ``mat.dtype``
indices : array of shape ``mat.shape[:n_batch] + (n_sparse, nse)``
"""
mat = jnp.asarray(mat)
nse = core.concrete_or_error(operator.index, nse, _TRACED_NSE_ERROR)
return bcoo_fromdense_p.bind(mat, nse=nse, n_batch=n_batch, n_dense=n_dense,
index_dtype=index_dtype)
@bcoo_fromdense_p.def_impl
def _bcoo_fromdense_impl(mat, *, nse, n_batch, n_dense, index_dtype):
mat = jnp.asarray(mat)
n_sparse = mat.ndim - n_dense - n_batch
mask = (mat != 0)
if n_dense > 0:
mask = mask.any([-(i + 1) for i in range(n_dense)])
@partial(nfold_vmap, N=n_batch, broadcasted=False)
def _nonzero(a):
if a.ndim:
return jnp.nonzero(a, size=nse, fill_value=a.shape[:n_sparse])
return ()
indices = _nonzero(mask)
if not indices:
indices = jnp.zeros(mask.shape[:n_batch] + (nse, 0), index_dtype)
else:
indices = jnp.moveaxis(jnp.array(indices, index_dtype), 0, n_batch + 1)
data = _bcoo_extract(indices, mat)
true_nse = mask.sum(list(range(n_batch, mask.ndim)))[..., None]
true_nonzeros = lax.broadcasted_iota(true_nse.dtype, (1,) * n_batch + (nse,), n_batch) < true_nse
true_nonzeros = true_nonzeros[(n_batch + 1) * (slice(None),) + n_dense * (None,)]
data = jnp.where(true_nonzeros, data, 0)
return data, indices
@bcoo_fromdense_p.def_abstract_eval
def _bcoo_fromdense_abstract_eval(mat, *, nse, n_batch, n_dense, index_dtype):
n_sparse = mat.ndim - n_batch - n_dense
data_shape = mat.shape[:n_batch] + (nse,) + mat.shape[n_batch + n_sparse:]
index_shape = mat.shape[:n_batch] + (nse, n_sparse)
return core.ShapedArray(data_shape, mat.dtype), core.ShapedArray(index_shape, index_dtype)
def _bcoo_fromdense_jvp(primals, tangents, *, nse, n_batch, n_dense, index_dtype):
M, = primals
Mdot, = tangents
primals_out = _bcoo_fromdense(M, nse=nse, n_batch=n_batch, n_dense=n_dense, index_dtype=index_dtype)
data, indices = primals_out
if type(Mdot) is ad.Zero:
data_dot = ad.Zero.from_primal_value(data)
else:
data_dot = _bcoo_extract(indices, Mdot)
tangents_out = (data_dot, ad.Zero.from_primal_value(indices))
return primals_out, tangents_out
def _bcoo_fromdense_transpose(ct, M, *, nse, n_batch, n_dense, index_dtype):
data, indices = ct
n_sparse = M.ndim - n_batch - n_dense
assert data.shape == M.shape[:n_batch] + (nse,) + M.shape[n_batch + n_sparse:]
assert indices.shape == M.shape[:n_batch] + (n_sparse, nse)
assert indices.dtype == index_dtype
if isinstance(indices, ad.Zero):
raise ValueError("Cannot transpose with respect to sparse indices")
assert ad.is_undefined_primal(M)
return _bcoo_todense(data, indices, spinfo=SparseInfo(M.aval.shape))
def _bcoo_fromdense_batching_rule(batched_args, batch_dims, *, nse, n_batch, n_dense, index_dtype):
M, = batched_args
bdim, = batch_dims
if not (0 <= bdim <= n_batch):
raise ValueError(f"Expected 0 < bdim <= n_batch; got {bdim=}, {n_batch=}")
return _bcoo_fromdense(M, nse=nse, n_batch=n_batch + 1, n_dense=n_dense, index_dtype=index_dtype), (bdim, bdim)
ad.primitive_jvps[bcoo_fromdense_p] = _bcoo_fromdense_jvp
ad.primitive_transposes[bcoo_fromdense_p] = _bcoo_fromdense_transpose
batching.primitive_batchers[bcoo_fromdense_p] = _bcoo_fromdense_batching_rule
mlir.register_lowering(bcoo_fromdense_p, mlir.lower_fun(
_bcoo_fromdense_impl, multiple_results=True))
#----------------------------------------------------------------------
# bcoo_extract
bcoo_extract_p = core.Primitive('bcoo_extract')
def bcoo_extract(sparr: BCOO, arr: ArrayLike, *, assume_unique: bool | None = None) -> BCOO:
"""Extract values from a dense array according to the sparse array's indices.
Args:
sparr : BCOO array whose indices will be used for the output.
arr : ArrayLike with shape equal to self.shape
assume_unique : bool, defaults to sparr.unique_indices
If True, extract values for every index, even if index contains duplicates.
If False, duplicate indices will have their values summed and returned in
the position of the first index.
Returns:
extracted : a BCOO array with the same sparsity pattern as self.
"""
if not isinstance(sparr, BCOO):
raise TypeError(f"First argument to bcoo_extract should be a BCOO array. Got {type(sparr)=}")
a = jnp.asarray(arr)
if a.shape != sparr.shape:
raise ValueError(f"shape mismatch: {sparr.shape=} {a.shape=}")
if assume_unique is None:
assume_unique = sparr.unique_indices
data = _bcoo_extract(sparr.indices, a, assume_unique=assume_unique)
return BCOO((data, sparr.indices), **sparr._info._asdict())
def _bcoo_extract(indices: Array, arr: Array, *, assume_unique=True) -> Array:
"""Extract BCOO data values from a dense array at given BCOO indices.
Args:
indices: An ndarray; see BCOO indices.
arr: A dense array.
assume_unique: bool, default=True
If True, then indices will be assumed unique and a value will be extracted
from arr for each index. Otherwise, extra work will be done to de-duplicate
indices to zero-out duplicate extracted values.
Returns:
An ndarray; see BCOO data.
"""
return bcoo_extract_p.bind(indices, arr, assume_unique=assume_unique)
@bcoo_extract_p.def_impl
def _bcoo_extract_impl(indices, arr, *, assume_unique):
arr = jnp.asarray(arr)
props = _validate_bcoo_indices(indices, arr.shape)
if not assume_unique:
indices, sort_ind = _unique_indices(indices, shape=arr.shape, return_index=True)
original_props = props
props = _validate_bcoo_indices(indices, arr.shape)
ind_slices = tuple(np.zeros(s, int) if i_s == 1 else np.arange(s)
for s, i_s in zip(arr.shape[:props.n_batch], indices.shape[:props.n_batch]))
grid = tuple(np.meshgrid(*ind_slices, indexing='ij', sparse=True))
sparse_ind = tuple(indices[grid + (slice(None), i)] for i in range(props.n_sparse))
batch_slices = tuple(np.arange(s) for s in arr.shape[:props.n_batch])
grid = np.meshgrid(*batch_slices, np.arange(1), indexing='ij', sparse=True)
batch_ind = tuple(grid)[:-1]
if not sparse_ind + batch_ind:
result = arr[None]
else:
result = arr.at[batch_ind + sparse_ind].get(mode='fill', fill_value=0)
if props.n_sparse == 0 and props.nse != 1:
if assume_unique:
result = lax.broadcast_in_dim(
result, _tuple_replace(result.shape, props.n_batch, props.nse), range(result.ndim))
else:
out_shape = _tuple_replace(result.shape, props.n_batch, original_props.nse)
ind = props.n_batch * (slice(None),) + (slice(1),)
result = jnp.zeros_like(result, shape=out_shape).at[ind].set(result)
if not assume_unique:
unbatched_out_shape = (original_props.nse, *result.shape[props.n_batch + 1:])
def f(r, i):
return jnp.zeros_like(r, shape=unbatched_out_shape).at[i].add(r)
for _ in range(props.n_batch):
f = vmap(f)
result = f(result, sort_ind)
return result
@bcoo_extract_p.def_abstract_eval
def _bcoo_extract_abstract_eval(indices, arr, *, assume_unique):
_ = bool(assume_unique)
n_batch, _, n_dense, nse = _validate_bcoo_indices(indices, arr.shape)
out_shape = arr.shape[:n_batch] + (nse,) + arr.shape[arr.ndim - n_dense:]
return core.ShapedArray(out_shape, arr.dtype)
def _bcoo_extract_jvp(arr_dot, indices, arr, *, assume_unique):
assert arr_dot.shape == arr.shape
return _bcoo_extract(indices, arr_dot, assume_unique=assume_unique)
def _bcoo_extract_transpose(ct, indices, arr, *, assume_unique):
if not assume_unique:
raise NotImplementedError("transpose of bcoo_extract with assume_unique=False")
assert ad.is_undefined_primal(arr)
if ad.is_undefined_primal(indices):
raise ValueError("Cannot transpose with respect to sparse indices")
assert ct.dtype == arr.aval.dtype
return indices, _bcoo_todense(ct, indices, spinfo=SparseInfo(arr.aval.shape))
def _bcoo_extract_batching_rule(batched_args, batch_dims, *, assume_unique):
indices, arr = batched_args
assert any(b is not None for b in batch_dims)
if batch_dims[0] is None:
bdim = batch_dims[1]
indices = lax.expand_dims(indices, (bdim,))
elif batch_dims[1] is None:
# TODO(jakevdp) can we handle this case without explicit broadcasting?
bdim = batch_dims[0]
result_shape = list(arr.shape)
result_shape.insert(bdim, indices.shape[bdim])
arr = lax.broadcast_in_dim(arr, result_shape, (bdim,))
else:
if batch_dims[0] != batch_dims[1]:
raise NotImplementedError("bcoo_extract with unequal batch dimensions.")
bdim = batch_dims[0]
n_batch = indices.ndim - 2
if bdim >= n_batch:
raise ValueError(f"{batch_dims=} out of range for indices with {n_batch=}")
return _bcoo_extract(indices, arr, assume_unique=assume_unique), bdim
ad.defjvp(bcoo_extract_p, None, _bcoo_extract_jvp)
ad.primitive_transposes[bcoo_extract_p] = _bcoo_extract_transpose
batching.primitive_batchers[bcoo_extract_p] = _bcoo_extract_batching_rule
mlir.register_lowering(bcoo_extract_p, mlir.lower_fun(
_bcoo_extract_impl, multiple_results=False))
#----------------------------------------------------------------------
# bcoo_transpose
# transpose of a BCOO array
bcoo_transpose_p = core.Primitive('bcoo_transpose')
bcoo_transpose_p.multiple_results = True
def bcoo_transpose(mat: BCOO, *, permutation: Sequence[int]) -> BCOO:
"""Transpose a BCOO-format array.
Args:
mat: A BCOO-format array.
permutation: A tuple or list or ndarray which contains a permutation of
[0,1,..,N-1] where N is the number of axes of ``mat`` in the order of
batch, sparse, and dense dimensions. The i’th axis of the returned array
corresponds to the axis numbered permutation[i] of ``mat``. Transpose
permutation currently does not support permuting batch axes with non-batch
axes nor permuting dense axes with non-dense axes.
Returns:
A BCOO-format array.
"""
buffers = _bcoo_transpose(mat.data, mat.indices, permutation=permutation, spinfo=mat._info)
out_shape = tuple(mat.shape[p] for p in permutation)
return BCOO(buffers, shape=out_shape, unique_indices=mat.unique_indices)
def _bcoo_transpose(data: Array, indices: Array, *,
permutation: Sequence[int], spinfo: SparseInfo) -> tuple[Array, Array]:
permutation = tuple(permutation)
if permutation == tuple(range(len(spinfo.shape))):
return data, indices
else:
return bcoo_transpose_p.bind(data, indices, permutation=permutation,
spinfo=spinfo)
def _validate_permutation(data, indices, permutation, shape):
if not isinstance(permutation, (tuple, list, np.ndarray)):
raise TypeError(f"transpose permutation must be a tuple/list/ndarray, got {type(permutation)}.")
if tuple(sorted(permutation)) != tuple(range(len(shape))):
raise TypeError("transpose permutation isn't a permutation of operand dimensions, "
f"got permutation {permutation} for shape {shape}.")
n_batch, n_sparse, n_dense, _ = _validate_bcoo(data, indices, shape)
batch_perm = permutation[:n_batch]
sparse_perm = [p - n_batch for p in permutation[n_batch: n_batch + n_sparse]]
dense_perm = [p - n_sparse - n_batch for p in permutation[n_batch + n_sparse:]]
if n_batch and tuple(sorted(batch_perm)) != tuple(range(n_batch)):
raise NotImplementedError("transpose permutation cannot permute batch axes with non-batch axes; "
f"got permutation {permutation}, with {n_batch=}.")
if n_dense and tuple(sorted(dense_perm)) != tuple(range(n_dense)):
raise NotImplementedError("transpose permutation cannot permute dense axes with non-dense axes; "
f"got permutation {permutation}, with {n_dense=}.")
return batch_perm, sparse_perm, dense_perm
@bcoo_transpose_p.def_impl
def _bcoo_transpose_impl(data, indices, *, permutation: Sequence[int], spinfo: SparseInfo):
batch_perm, sparse_perm, dense_perm = _validate_permutation(data, indices, permutation, spinfo.shape)
n_batch = len(batch_perm)
indices = indices[..., sparse_perm].transpose(*batch_perm, n_batch, n_batch + 1)
data = data.transpose(*batch_perm, n_batch, *(d + n_batch + 1 for d in dense_perm))
return data, indices
@bcoo_transpose_p.def_abstract_eval
def _bcoo_transpose_abstract_eval(data, indices, *, permutation: Sequence[int], spinfo: SparseInfo):
batch_perm, _, dense_perm = _validate_permutation(data, indices, permutation, spinfo.shape)
n_batch = len(batch_perm)
indices_shape = np.array(indices.shape)[[*batch_perm, n_batch, n_batch + 1]]
data_shape = np.array(data.shape)[[*batch_perm, n_batch, *(d + n_batch + 1 for d in dense_perm)]]
return core.ShapedArray(data_shape, data.dtype), core.ShapedArray(indices_shape, indices.dtype)
def _bcoo_transpose_jvp(primals, tangents, *, permutation: Sequence[int], spinfo: SparseInfo):
data, indices = primals
data_dot, _ = tangents
primals_out = _bcoo_transpose(data, indices, permutation=permutation, spinfo=spinfo)
data_dot_out, _ = _bcoo_transpose(data_dot, indices, permutation=permutation, spinfo=spinfo)
return primals_out, (data_dot_out, ad.Zero.from_primal_value(indices))
def _bcoo_transpose_transpose(ct, data, indices, *, permutation: Sequence[int], spinfo: SparseInfo):
data_ct, indices_ct = ct
assert isinstance(indices_ct, ad.Zero)
if ad.is_undefined_primal(indices):
raise ValueError("Cannot transpose with respect to sparse indices")
assert data_ct.dtype == data.aval.dtype
ct_spinfo = SparseInfo(tuple(spinfo.shape[p] for p in permutation))
rev_permutation = list(map(int, np.argsort(permutation)))
# TODO(jakevdp) avoid dummy indices?
dummy_indices = jnp.zeros([1 for i in range(indices.ndim - 2)] + list(indices.shape[-2:]), dtype=int)
data_trans, _ = _bcoo_transpose(data_ct, dummy_indices, permutation=rev_permutation, spinfo=ct_spinfo)
return data_trans, indices_ct
def _bcoo_transpose_batch_rule(batched_args, batch_dims, *, permutation: Sequence[int], spinfo: SparseInfo):
data, indices, spinfo = _bcoo_batch_dims_to_front(batched_args, batch_dims, spinfo)
batched_permutation = (0, *(p + 1 for p in permutation))
data, indices = _bcoo_transpose(data, indices, permutation=batched_permutation, spinfo=spinfo)
batch_dims_out = [None if bdim is None else 0 for bdim in batch_dims]
args_out = [lax.squeeze(arg, [0]) if bdim is None else arg
for arg, bdim in zip((data, indices), batch_dims_out)]
return args_out, batch_dims_out
ad.primitive_jvps[bcoo_transpose_p] = _bcoo_transpose_jvp
ad.primitive_transposes[bcoo_transpose_p] = _bcoo_transpose_transpose
batching.primitive_batchers[bcoo_transpose_p] = _bcoo_transpose_batch_rule
mlir.register_lowering(bcoo_transpose_p, mlir.lower_fun(
_bcoo_transpose_impl, multiple_results=True))
#----------------------------------------------------------------------
# bcoo_dot_general
# (batched) general dot product of a BCOO sparse ND array and a dense ND array,
# returning a dense ND array.
bcoo_dot_general_p = core.Primitive('bcoo_dot_general')
def bcoo_dot_general(lhs: BCOO | Array, rhs: BCOO | Array, *,
dimension_numbers: DotDimensionNumbers,
precision: None = None,
preferred_element_type: None = None,
out_sharding=None) -> BCOO | Array:
"""A general contraction operation.
Args:
lhs: An ndarray or BCOO-format sparse array.
rhs: An ndarray or BCOO-format sparse array..
dimension_numbers: a tuple of tuples of the form
`((lhs_contracting_dims, rhs_contracting_dims),
(lhs_batch_dims, rhs_batch_dims))`.
precision: unused
preferred_element_type: unused
Returns:
An ndarray or BCOO-format sparse array containing the result. If both inputs
are sparse, the result will be sparse, of type BCOO. If either input is dense,
the result will be dense, of type ndarray.
"""
# TODO(jakevdp) make use of these?
del precision, out_sharding # unused
if isinstance(lhs, BCOO) and isinstance(rhs, BCOO):
shape = _dot_general_validated_shape(lhs.shape, rhs.shape,
dimension_numbers)
bufs = _bcoo_spdot_general(lhs.data, lhs.indices, rhs.data, rhs.indices,
lhs_spinfo=lhs._info, rhs_spinfo=rhs._info,
dimension_numbers=dimension_numbers,
preferred_element_type=preferred_element_type)
return BCOO(bufs, shape=shape)
elif isinstance(lhs, BCOO):
return _bcoo_dot_general(lhs.data, lhs.indices, rhs, dimension_numbers=dimension_numbers, # type: ignore[arg-type]
preferred_element_type=preferred_element_type,
lhs_spinfo=lhs._info)
elif isinstance(rhs, BCOO):
return _bcoo_rdot_general(lhs, rhs.data, rhs.indices, dimension_numbers=dimension_numbers,
preferred_element_type=preferred_element_type,
rhs_spinfo=rhs._info)
else:
return lax.dot_general(lhs, rhs, dimension_numbers=dimension_numbers,
preferred_element_type=preferred_element_type)
def _bcoo_dot_general(lhs_data: Array, lhs_indices: Array, rhs: Array, *,
dimension_numbers: DotDimensionNumbers,
preferred_element_type: Any,
lhs_spinfo: SparseInfo) -> Array:
(lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
cdims = (api_util._ensure_index_tuple(lhs_contract),
api_util._ensure_index_tuple(rhs_contract))
bdims = (api_util._ensure_index_tuple(lhs_batch),
api_util._ensure_index_tuple(rhs_batch))
if preferred_element_type is not None:
preferred_element_type = np.dtype(preferred_element_type)
return bcoo_dot_general_p.bind(jnp.asarray(lhs_data), jnp.asarray(lhs_indices), jnp.asarray(rhs),
dimension_numbers=(cdims, bdims),
preferred_element_type=preferred_element_type,
lhs_spinfo=lhs_spinfo)
def _bcoo_rdot_general(lhs: Array, rhs_data: Array, rhs_indices: Array, *,
dimension_numbers: DotDimensionNumbers,
preferred_element_type: Any, rhs_spinfo: SparseInfo) -> Array:
# TODO(jakevdp): perhaps this should be part of the bcoo_dot_general primitive?
dimension_numbers_reversed: DotDimensionNumbers = tuple(d[::-1] for d in dimension_numbers) # type: ignore[assignment]
result = _bcoo_dot_general(rhs_data, rhs_indices, lhs, lhs_spinfo=rhs_spinfo,
dimension_numbers=dimension_numbers_reversed,
preferred_element_type=preferred_element_type)
n_contract, n_batch = (len(d[0]) for d in dimension_numbers)
n_swap = len(rhs_spinfo.shape) - n_contract
permutation = (*range(n_batch), *range(n_swap, result.ndim), *range(n_batch, n_swap))
return lax.transpose(result, permutation)
def _bcoo_dot_general_impl(lhs_data, lhs_indices, rhs, *, dimension_numbers,
preferred_element_type, lhs_spinfo: SparseInfo):
lhs_data = jnp.asarray(lhs_data)
lhs_indices = jnp.asarray(lhs_indices)
rhs = jnp.asarray(rhs)
# Validate all inputs via abstract_eval
out_aval = _bcoo_dot_general_abstract_eval(lhs_data.aval, lhs_indices.aval, rhs.aval,
dimension_numbers=dimension_numbers,
preferred_element_type=preferred_element_type,
lhs_spinfo=lhs_spinfo)
n_sparse = lhs_indices.shape[-1]
n_batch = lhs_indices.ndim - 2
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
lhs_contracting_b, rhs_contracting_b = unzip2([
(l, r) for l, r in safe_zip(lhs_contracting, rhs_contracting) if l < n_batch])
lhs_contracting_s, rhs_contracting_s = unzip2([
(l, r) for l, r in safe_zip(lhs_contracting, rhs_contracting) if l >= n_batch])
# Reorder lhs batch dimensions
if lhs_batch or lhs_contracting_b:
batch_perm = [*lhs_batch, *remaining(range(n_batch), lhs_batch, lhs_contracting_b), *lhs_contracting_b]
lhs_data = lhs_data.transpose([*batch_perm, *range(n_batch, lhs_data.ndim)])
lhs_indices = lhs_indices.transpose([*batch_perm, *range(n_batch, lhs_indices.ndim)])
# Reorder lhs sparse dimensions
if lhs_contracting_s:
lhs_contracting_s = tuple(d - n_batch for d in lhs_contracting_s)
sparse_perm = jnp.array([*lhs_contracting_s, *remaining(range(n_sparse), lhs_contracting_s)])
lhs_indices = lhs_indices[..., sparse_perm]
# Reorder rhs dimensions
rhs_perm = [*rhs_batch, *rhs_contracting_b, *rhs_contracting_s,
*remaining(range(rhs.ndim), rhs_batch, rhs_contracting)]
rhs = rhs.transpose(rhs_perm)
def result(out_array, lhs_data, lhs_indices, rhs):
idx = tuple(lhs_indices[..., i] for i in range(n_sparse))
idx_right = idx[:len(lhs_contracting_s)]
idx_out = idx[len(lhs_contracting_s):]
if idx_right and lhs_indices.ndim > 2:
idx_batch = jnp.meshgrid(
*(jnp.arange(n) for n in lhs_indices.shape[:-1]),
indexing='ij')[:lhs_indices.ndim - 2]
idx_right = (*idx_batch, *idx_right)
batch_dims = list(range(len(lhs_contracting_b) + bool(lhs_contracting_s)))
prod = lax.dot_general(lhs_data, rhs.at[idx_right].get(mode='fill', fill_value=0),
(([], []), (batch_dims, batch_dims)),
preferred_element_type=preferred_element_type)
if idx_out:
return out_array.at[idx_out].add(prod)
else:
return prod.sum(tuple(range(prod.ndim - out_array.ndim)), dtype=out_array.dtype)
result = nfold_vmap(result, n_batch - len(lhs_contracting_b))
rhs = lax.expand_dims(rhs, range(len(rhs_batch), n_batch - len(lhs_contracting_b)))
out_array = jnp.zeros(out_aval.shape, out_aval.dtype)
return result(out_array, lhs_data, lhs_indices, rhs)
@bcoo_dot_general_p.def_abstract_eval
def _bcoo_dot_general_abstract_eval(lhs_data, lhs_indices, rhs, *, dimension_numbers,
preferred_element_type, lhs_spinfo: SparseInfo):
out_aval = jax.jit(lax.dot_general, static_argnames=("dimension_numbers", "preferred_element_type")).eval_shape(
jax.ShapeDtypeStruct(lhs_spinfo.shape, lhs_data.dtype),
jax.ShapeDtypeStruct(rhs.shape, rhs.dtype),
dimension_numbers=dimension_numbers,
preferred_element_type=preferred_element_type)
(lhs_contracting, _), (lhs_batch, _) = dimension_numbers
n_batch, n_sparse, _, _ = _validate_bcoo(lhs_data, lhs_indices, lhs_spinfo.shape)
if lhs_batch and max(lhs_batch) >= n_batch:
raise NotImplementedError(
"bcoo_dot_general batch dimensions must be among the batch dimensions in the sparse representation.\n"
f"got {lhs_batch=}, {n_batch=}")
# TODO: support contraction of dense dimensions?
if any(d >= n_batch + n_sparse for d in lhs_contracting):
raise NotImplementedError("bcoo_dot_general: contracting over dense dimensions.")
return core.ShapedArray(out_aval.shape, out_aval.dtype)
_bcoo_dot_general_default_lowering = mlir.lower_fun(
_bcoo_dot_general_impl, multiple_results=False)
def _bcoo_dot_general_fallback(data, indices, spinfo):
if data.dtype not in CUSPARSE_DATA_DTYPES:
warnings.warn('bcoo_dot_general cusparse/hipsparse lowering not available '
f'for {data.dtype=}. Falling back to default implementation.',
CuSparseEfficiencyWarning)
return True
elif indices.dtype not in CUSPARSE_INDEX_DTYPES:
warnings.warn('bcoo_dot_general cusparse/hipsparse lowering not available '
f'for {indices.dtype=}. Falling back to default implementation.',
CuSparseEfficiencyWarning)
return True
elif not spinfo.indices_sorted:
warnings.warn("bcoo_dot_general GPU lowering requires matrices with "
"sorted indices. To sort the rows in your matrix, use e.g. "
"mat = mat.sort_indices(). Falling back to the default "
"implementation.", CuSparseEfficiencyWarning)
return True
else:
return False
def _bcoo_dot_general_gpu_impl(lhs_data, lhs_indices, rhs, *,
                               dimension_numbers, preferred_element_type,
                               lhs_spinfo):
  """GPU implementation of bcoo_dot_general.

  Dispatches to a cusparse/hipsparse COO matvec/matmat kernel for supported
  cases (unbatched 1-D or 2-D sparse lhs, a single contraction, supported
  dtypes, sorted indices); otherwise falls back to the reference impl.
  """
  if not config.bcoo_cusparse_lowering.value:
    # cusparse lowering disabled via config flag: use reference implementation.
    return _bcoo_dot_general_impl(lhs_data, lhs_indices, rhs,
                                  dimension_numbers=dimension_numbers,
                                  preferred_element_type=preferred_element_type,
                                  lhs_spinfo=lhs_spinfo)
  (lhs_contract, rhs_contract), (lhs_batch, _) = dimension_numbers
  n_batch, n_sparse, n_dense, _ = _validate_bcoo(
      lhs_data, lhs_indices, lhs_spinfo.shape)
  # Sparse matvec vs matmat kernel, chosen by the rank of the dense rhs.
  coo_matmul_p = coo_spmv_p if rhs.ndim == 1 else coo_spmm_p
  out_aval = _bcoo_dot_general_abstract_eval(
      lhs_data, lhs_indices, rhs,
      dimension_numbers=dimension_numbers,
      preferred_element_type=preferred_element_type,
      lhs_spinfo=lhs_spinfo)
  if out_aval.dtype not in CUSPARSE_DATA_DTYPES:
    return _bcoo_dot_general_impl(lhs_data, lhs_indices, rhs,
                                  dimension_numbers=dimension_numbers,
                                  preferred_element_type=preferred_element_type,
                                  lhs_spinfo=lhs_spinfo)
  # The COO kernels operate in the output dtype.
  lhs_data = lhs_data.astype(out_aval.dtype)
  rhs = rhs.astype(out_aval.dtype)
  # TODO(jakevdp, tianjianlu): add support for batched lowerings
  if (len(lhs_contract) == 1 and len(lhs_batch) == 0 and rhs.ndim in (1, 2)
      and (n_batch, n_sparse, n_dense) == (0, 1, 0)
      and not _bcoo_dot_general_fallback(lhs_data, lhs_indices, lhs_spinfo)):
    # 1-D sparse lhs: treat it as a single-row matrix, then take row 0.
    row, col = jnp.zeros(lhs_indices.shape[0], lhs_indices.dtype), lhs_indices.ravel()
    transpose = False
    shape = (1, *lhs_spinfo.shape)
    row, col, shape = _coo_correct_out_of_bound_indices(row, col, shape, transpose)
    out = coo_matmul_p.bind(lhs_data, row, col,
                            rhs.T if rhs_contract[0] == 1 else rhs,
                            transpose=transpose, shape=shape)
    return out[0]
  elif (len(lhs_contract) == 1 and len(lhs_batch) == 0 and rhs.ndim in (1, 2)
        and (n_batch, n_sparse, n_dense) == (0, 2, 0)
        and not _bcoo_dot_general_fallback(lhs_data, lhs_indices, lhs_spinfo)):
    # 2-D sparse lhs: contracting over dim 0 is expressed as a transpose.
    row, col = lhs_indices[:, 0], lhs_indices[:, 1]
    transpose = (lhs_contract[0] == 0)
    shape = lhs_spinfo.shape
    row, col, shape = _coo_correct_out_of_bound_indices(row, col, shape, transpose)
    out = coo_matmul_p.bind(lhs_data, row, col,
                            rhs.T if rhs_contract[0] == 1 else rhs,
                            transpose=transpose, shape=shape)
    # Slice off the extra output row that holds padded (out-of-bound) entries.
    return out[:-1]
  else:
    return _bcoo_dot_general_impl(lhs_data, lhs_indices, rhs,
                                  dimension_numbers=dimension_numbers, lhs_spinfo=lhs_spinfo,
                                  preferred_element_type=preferred_element_type)
# GPU lowering: dispatches to cusparse-compatible paths when possible.
_bcoo_dot_general_gpu_lowering = mlir.lower_fun(
    _bcoo_dot_general_gpu_impl, multiple_results=False)
def _bcoo_dot_general_jvp_lhs(lhs_data_dot, lhs_data, lhs_indices, rhs, *, dimension_numbers,
                              preferred_element_type, lhs_spinfo: SparseInfo):
  """JVP wrt the sparse lhs data: the product is linear in lhs_data."""
  del lhs_data  # unused: linearity in the data buffer.
  return _bcoo_dot_general(lhs_data_dot, lhs_indices, rhs,
                           lhs_spinfo=lhs_spinfo,
                           dimension_numbers=dimension_numbers,
                           preferred_element_type=preferred_element_type)
def _bcoo_dot_general_jvp_rhs(rhs_dot, lhs_data, lhs_indices, rhs, *, dimension_numbers,
                              preferred_element_type, lhs_spinfo: SparseInfo):
  """JVP wrt the dense rhs: the product is linear in rhs."""
  del rhs  # unused: linearity in rhs.
  return _bcoo_dot_general(lhs_data, lhs_indices, rhs_dot,
                           lhs_spinfo=lhs_spinfo,
                           dimension_numbers=dimension_numbers,
                           preferred_element_type=preferred_element_type)
def _bcoo_dot_general_transpose(ct, lhs_data, lhs_indices, rhs, *, dimension_numbers,
                                preferred_element_type, lhs_spinfo: SparseInfo):
  """Transpose rule for bcoo_dot_general.

  Exactly one of lhs_data / rhs is an undefined primal; lhs_indices is never
  differentiated and is returned unchanged.
  """
  assert not ad.is_undefined_primal(lhs_indices)
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  lhs_ndim = len(lhs_spinfo.shape)
  rhs_ndim = rhs.aval.ndim if ad.is_undefined_primal(rhs) else rhs.ndim
  lhs_kept = remaining(range(lhs_ndim), lhs_contract, lhs_batch)
  rhs_kept = remaining(range(rhs_ndim), rhs_contract, rhs_batch)
  ans_batch, ans_lhs, ans_rhs = map(list, ranges_like(lhs_batch, lhs_kept, rhs_kept))
  if ad.is_undefined_primal(lhs_data):
    # Cotangent wrt the sparse lhs data: only values at lhs_indices matter.
    dims: DotDimensionNumbers = ((ans_rhs, rhs_kept), (ans_batch, rhs_batch))
    lhs_contract_sorted_by_rhs = list(np.take(lhs_contract, np.argsort(rhs_contract)))
    permutation = list(lhs_batch) + lhs_kept + lhs_contract_sorted_by_rhs
    out_axes = list(map(int, np.argsort(permutation)))
    # Determine whether efficient approach is possible:
    placeholder_data = jnp.empty((lhs_indices.ndim - 2) * (1,) + (lhs_indices.shape[-2],))
    placeholder_shape = tuple(lhs_indices.shape[:-2]) + lhs_indices.shape[-1] * (1,)
    try:
      _validate_permutation(placeholder_data, lhs_indices, permutation, placeholder_shape)
    except NotImplementedError:
      indices_can_be_untransposed = False
    else:
      indices_can_be_untransposed = True
    # TODO(jakevdp): explore implementing the efficient approach without actually un-transposing
    # the indices. Could this be done by un-permuting ct, rhs, and dims?
    if indices_can_be_untransposed:
      # Efficient approach: (1) un-transpose indices, (2) compute SDDMM, (3) re-transpose result.
      _, lhs_indices_T = _bcoo_transpose(placeholder_data, lhs_indices, permutation=permutation,
                                         spinfo=SparseInfo(placeholder_shape))
      result_T_shape = tuple(placeholder_shape[i] for i in permutation)
      result_T = bcoo_dot_general_sampled(ct, rhs, lhs_indices_T, dimension_numbers=dims)
      result, _ = _bcoo_transpose(result_T, lhs_indices_T, permutation=out_axes,
                                  spinfo=SparseInfo(result_T_shape))
    else:
      # Fallback to direct approach when above is not possible.
      out_dense_T = lax.dot_general(ct, rhs, dimension_numbers=dims)
      out_dense = lax.transpose(out_dense_T, out_axes)
      result = _bcoo_extract(lhs_indices, out_dense)
    return result, lhs_indices, rhs
  else:
    # Cotangent wrt the dense rhs: a sparse-lhs dot with the cotangent.
    dims = ((lhs_kept, ans_lhs), (lhs_batch, ans_batch))
    rhs_contract_sorted_by_lhs = list(np.take(rhs_contract, np.argsort(lhs_contract)))
    out_axes = list(np.argsort(list(rhs_batch) + rhs_contract_sorted_by_lhs + rhs_kept))
    result = _bcoo_dot_general(lhs_data, lhs_indices, ct, lhs_spinfo=lhs_spinfo,
                               preferred_element_type=preferred_element_type,
                               dimension_numbers=dims)
    return lhs_data, lhs_indices, lax.transpose(result, out_axes)
def _bcoo_dot_general_batch_rule(batched_args, batch_dims, *, dimension_numbers,
                                 preferred_element_type, lhs_spinfo: SparseInfo):
  """Batching rule: move sparse lhs batch dims to the front and re-bind."""
  _, _, rhs = batched_args
  _, _, rhs_bdim = batch_dims
  new_lhs_data, new_lhs_indices, new_lhs_spinfo = _bcoo_batch_dims_to_front(
      batched_args[:2], batch_dims[:2], lhs_spinfo,
      batch_size=None if rhs_bdim is None else rhs.shape[rhs_bdim])
  # Compute updated dimension numbers with the new leading batch dimension.
  new_dimension_numbers, result_batch_dim = _dot_general_batch_dim_nums(
      (len(lhs_spinfo.shape), rhs.ndim), (0, rhs_bdim), dimension_numbers)
  batched_out = _bcoo_dot_general(new_lhs_data, new_lhs_indices, rhs, lhs_spinfo=new_lhs_spinfo,
                                  preferred_element_type=preferred_element_type,
                                  dimension_numbers=new_dimension_numbers)
  return batched_out, result_batch_dim
# Register autodiff, batching, and lowering rules for bcoo_dot_general.
ad.defjvp(bcoo_dot_general_p, _bcoo_dot_general_jvp_lhs, None, _bcoo_dot_general_jvp_rhs)
ad.primitive_transposes[bcoo_dot_general_p] = _bcoo_dot_general_transpose
batching.primitive_batchers[bcoo_dot_general_p] = _bcoo_dot_general_batch_rule
mlir.register_lowering(bcoo_dot_general_p, _bcoo_dot_general_default_lowering)
dispatch.simple_impl(bcoo_dot_general_p)
# GPU platforms get the cusparse/hipsparse-aware lowering.
mlir.register_lowering(
    bcoo_dot_general_p, _bcoo_dot_general_gpu_lowering, platform='cuda')
mlir.register_lowering(
    bcoo_dot_general_p, _bcoo_dot_general_gpu_lowering, platform='rocm')
#----------------------------------------------------------------------
# bcoo_dot_general_sampled
# (batched) general sampled dot product of two dense ND arrays, with
# output computed only at a given set of sparse indices.
# The primitive's impl/abstract-eval/autodiff rules are registered below.
bcoo_dot_general_sampled_p = core.Primitive("bcoo_dot_general_sampled")
def bcoo_dot_general_sampled(A: Array, B: Array, indices: Array, *, dimension_numbers: DotDimensionNumbers) -> Array:
  """A contraction operation with output computed at given sparse indices.

  Args:
    A: An ndarray.
    B: An ndarray.
    indices: BCOO indices.
    dimension_numbers: a tuple of tuples of the form
      `((lhs_contracting_dims, rhs_contracting_dims),
      (lhs_batch_dims, rhs_batch_dims))`.

  Returns:
    BCOO data, an ndarray containing the result.
  """
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  # Canonicalize dimension numbers to tuples of ints for the primitive bind.
  cdims = (api_util._ensure_index_tuple(lhs_contract),
           api_util._ensure_index_tuple(rhs_contract))
  bdims = (api_util._ensure_index_tuple(lhs_batch),
           api_util._ensure_index_tuple(rhs_batch))
  return bcoo_dot_general_sampled_p.bind(A, B, indices,
                                         dimension_numbers=(cdims, bdims))
def _bcoo_dot_general_sampled_slow(A, B, indices, *, dimension_numbers, precision):
  """Reference path: dense dot_general followed by extraction at `indices`."""
  dense_out = lax.dot_general(A, B, dimension_numbers=dimension_numbers, precision=precision)
  return _bcoo_extract(indices, dense_out)
def _bcoo_dot_general_sampled_simple(A, B, indices, *, dimension_numbers, precision):
  """Fast path for the sampled outer product of two vectors."""
  # This case used in transpose of sparse matvec
  # TODO(jakevdp) generalize this
  del precision # Unused here
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  # Only the pure outer-product case (no contraction, no batching) is handled.
  assert not (lhs_contract or rhs_contract or lhs_batch or rhs_batch)
  assert A.ndim == B.ndim == 1
  n_batch = indices.ndim - 2
  n_sparse = indices.shape[-1]
  nse = indices.shape[-2]
  assert n_batch + n_sparse == 2
  if n_batch == 0:
    # Fully sparse output: gather both factors at the sparse coordinates.
    # Out-of-bound (padding) indices contribute zero via fill_value.
    return (A.at[indices[:, 0]].get(mode='fill', fill_value=0)
            * B.at[indices[:, 1]].get(mode='fill', fill_value=0))
  elif n_batch == 1:
    return A[:, None] * B.at[indices[..., 0]].get(mode='fill', fill_value=0)
  elif n_batch == 2:
    # Fully batched output: dense outer product broadcast along the nse axis.
    out = A[:, None, None] * B[None, :, None]
    return lax.broadcast_in_dim(out, (len(A), len(B), nse), (0, 1, 2))
  else:
    raise ValueError("too many batch dimensions.")
def _bcoo_dot_general_sampled_simple2(A, B, indices, *, dimension_numbers, precision):
  """Fast path for the sampled product of two matrices (single contraction)."""
  # This case used in transpose of sparse matmat
  # TODO(jakevdp) generalize this
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  # Only a single contraction and no batch dimensions are handled here.
  assert not (lhs_batch or rhs_batch)
  assert len(lhs_contract) == len(rhs_contract) == 1
  assert A.ndim == B.ndim == 2
  n_batch = indices.ndim - 2
  n_sparse = indices.shape[-1]
  nse = indices.shape[-2]
  assert n_batch + n_sparse == 2
  if n_batch == 0:
    # Gather slices of A and B at the sparse coordinates, then contract
    # batched over nse; padding indices contribute zero via fill_value.
    lhs_batch = [1] if lhs_contract[0] == 0 else [0]
    rhs_batch = [1] if rhs_contract[0] == 0 else [0]
    A = A.at[_tuple_replace((slice(None), slice(None)), lhs_batch[0], indices[:, 0])].get(mode='fill', fill_value=0)
    B = B.at[_tuple_replace((slice(None), slice(None)), rhs_batch[0], indices[:, 1])].get(mode='fill', fill_value=0)
    return lax.dot_general(A, B, dimension_numbers=((lhs_contract, rhs_contract), (lhs_batch, rhs_batch)),
                           precision=precision)
  if n_batch == 1:
    lhs_batch = [1] if lhs_contract[0] == 0 else [0]
    rhs_batch = [1] if rhs_contract[0] == 0 else [0]
    B = B.at[_tuple_replace((slice(None), slice(None)), rhs_batch[0], indices[..., 0])].get(mode='fill', fill_value=0)
    if rhs_contract[0] == 1:
      # Account for the extra dimension introduced by gathering B above.
      rhs_contract = [2]
    return lax.dot_general(A, B, dimension_numbers=((lhs_contract, rhs_contract), (lhs_batch, rhs_batch)),
                           precision=precision)
  if n_batch == 2:
    # Fully batched: dense product broadcast along a trailing nse axis.
    out = lax.dot_general(A, B, dimension_numbers=((lhs_contract, rhs_contract), (lhs_batch, rhs_batch)),
                          precision=precision)
    return lax.broadcast_in_dim(lax.expand_dims(out, (2,)), (*out.shape, nse), (0, 1, 2))
  else:
    raise ValueError("too many batch dimensions.")
@bcoo_dot_general_sampled_p.def_impl
def _bcoo_dot_general_sampled_impl(A, B, indices, *, dimension_numbers):
  """Implementation of bcoo_dot_general_sampled: pick a fast path if possible."""
  A = jnp.asarray(A)
  B = jnp.asarray(B)
  indices = jnp.asarray(indices)
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  n_batch = indices.ndim - 2
  n_sparse = indices.shape[-1]
  precision = lax.Precision.HIGHEST
  # TODO(jakevdp): add fast approach for more general cases / combine the following:
  if (not (lhs_contract or rhs_contract or lhs_batch or rhs_batch)
      and A.ndim == B.ndim == 1 and n_sparse + n_batch == 2):
    # Vector outer product sampled at indices.
    return _bcoo_dot_general_sampled_simple(A, B, indices, dimension_numbers=dimension_numbers, precision=precision)
  if len(lhs_contract) == 1 and not lhs_batch and A.ndim == B.ndim == 2 and n_sparse + n_batch == 2:
    # Matrix-matrix product sampled at indices.
    return _bcoo_dot_general_sampled_simple2(A, B, indices, dimension_numbers=dimension_numbers, precision=precision)
  # General case: dense dot_general followed by extraction.
  return _bcoo_dot_general_sampled_slow(A, B, indices, dimension_numbers=dimension_numbers, precision=precision)
@bcoo_dot_general_sampled_p.def_abstract_eval
def _bcoo_dot_general_sampled_abstract_eval(A, B, indices, *, dimension_numbers):
  """Abstract eval: dense dot_general aval followed by extraction at indices."""
  dbg = api_util.debug_info("bcoo_dot_general_sampled_abstract_eval",
                            lax.dot_general, (A, B), {})
  dense_result, = pe.abstract_eval_fun(lambda *args: [lax.dot_general(*args, dimension_numbers=dimension_numbers)], A, B,
                                       debug_info=dbg)
  dbg = api_util.debug_info("bcoo_dot_general_sampled_abstract_eval",
                            _bcoo_extract, (indices, dense_result), {})
  sparse_result, = pe.abstract_eval_fun(lambda *args: [_bcoo_extract(*args)], indices, dense_result,
                                        debug_info=dbg)
  return sparse_result
def _bcoo_dot_general_sampled_transpose(ct, A, B, indices, *, dimension_numbers):
  """Transpose rule: route the cotangent through dense dot_general's transpose.

  The sampled cotangent is first scattered to a dense undefined primal via the
  extract transpose; the standard dot_general transpose then yields A and B
  cotangents.
  """
  A_shape = A.aval.shape if hasattr(A, 'aval') else A.shape
  B_shape = B.aval.shape if hasattr(B, 'aval') else B.shape
  mat_shape = _dot_general_validated_shape(A_shape, B_shape, dimension_numbers)
  mat = ad.UndefinedPrimal(core.ShapedArray(mat_shape, ct.dtype))
  indices, ct = _bcoo_extract_transpose(ct, indices, mat, assume_unique=True)
  kwds = {'dimension_numbers': dimension_numbers,
          'precision': None,
          'preferred_element_type': None,
          'out_sharding': None}
  A, B = ad.get_primitive_transpose(lax.dot_general_p)(ct, A, B, **kwds)
  return A, B, indices
def _bcoo_dot_general_sampled_jvp_A(A_dot, A, B, indices, *, dimension_numbers):
  """JVP wrt A: the sampled product is linear in A."""
  del A  # unused: linearity in A.
  return bcoo_dot_general_sampled(A_dot, B, indices, dimension_numbers=dimension_numbers)
def _bcoo_dot_general_sampled_jvp_B(B_dot, A, B, indices, *, dimension_numbers):
  """JVP wrt B: the sampled product is linear in B."""
  del B  # unused: linearity in B.
  return bcoo_dot_general_sampled(A, B_dot, indices, dimension_numbers=dimension_numbers)
def _bcoo_dot_general_sampled_batch_rule(batched_args, batch_dims, *, dimension_numbers):
  """Batching rule: vmap the implementation over the batched arguments."""
  def unbatched(A, B, indices):
    return _bcoo_dot_general_sampled_impl(A, B, indices, dimension_numbers=dimension_numbers)
  out = vmap(unbatched, in_axes=batch_dims, out_axes=0)(*batched_args)
  return out, 0
# Register autodiff, batching, and lowering rules for bcoo_dot_general_sampled.
ad.defjvp(bcoo_dot_general_sampled_p, _bcoo_dot_general_sampled_jvp_A,
          _bcoo_dot_general_sampled_jvp_B, None)
ad.primitive_transposes[bcoo_dot_general_sampled_p] = _bcoo_dot_general_sampled_transpose
batching.primitive_batchers[bcoo_dot_general_sampled_p] = _bcoo_dot_general_sampled_batch_rule
mlir.register_lowering(
    bcoo_dot_general_sampled_p,
    mlir.lower_fun(_bcoo_dot_general_sampled_impl, multiple_results=False))
#----------------------------------------------------------------------
# bcoo_spdot_general
# (batched) general dot product of two BCOO sparse arrays, returning a
# BCOO sparse result: the primitive yields (data, indices) buffers
# (see _bcoo_spdot_general_abstract_eval below).
bcoo_spdot_general_p = core.Primitive('bcoo_spdot_general')
bcoo_spdot_general_p.multiple_results = True
def _bcoo_spdot_general(lhs_data: Array, lhs_indices: Array, rhs_data: Array, rhs_indices: Array, *,
                        lhs_spinfo: SparseInfo, rhs_spinfo: SparseInfo, dimension_numbers: DotDimensionNumbers,
                        preferred_element_type: Any) -> tuple[Array, Array]:
  """Bind the sparse-sparse dot-product primitive with canonicalized dims."""
  (lhs_contract, rhs_contract), (lhs_batch, rhs_batch) = dimension_numbers
  canonical_dims = (
      (api_util._ensure_index_tuple(lhs_contract),
       api_util._ensure_index_tuple(rhs_contract)),
      (api_util._ensure_index_tuple(lhs_batch),
       api_util._ensure_index_tuple(rhs_batch)))
  return bcoo_spdot_general_p.bind(lhs_data, lhs_indices, rhs_data, rhs_indices,
                                   lhs_spinfo=lhs_spinfo, rhs_spinfo=rhs_spinfo,
                                   dimension_numbers=canonical_dims,
                                   preferred_element_type=preferred_element_type)
def _bcoo_spdot_general_unbatched(lhs_data, lhs_indices, rhs_data, rhs_indices, *, lhs_spinfo, rhs_spinfo, lhs_contracting, rhs_contracting, out_nse):
  """Sparse-sparse dot product for unbatched, no-dense-dim BCOO operands.

  Forms all nse_lhs * nse_rhs pairwise products, masks pairs whose contracted
  indices differ or are out-of-bounds padding, then sums duplicates down to
  out_nse entries.
  """
  lhs_shape = lhs_spinfo.shape
  rhs_shape = rhs_spinfo.shape
  lhs = _validate_bcoo(lhs_data, lhs_indices, lhs_shape)
  rhs = _validate_bcoo(rhs_data, rhs_indices, rhs_shape)
  assert lhs.n_batch == rhs.n_batch == 0
  assert lhs.n_dense == rhs.n_dense == 0
  assert [lhs_shape[d] for d in lhs_contracting] == [rhs_shape[d] for d in rhs_contracting]
  assert max(lhs_contracting, default=-1) < lhs.n_sparse
  assert max(rhs_contracting, default=-1) < rhs.n_sparse
  out_shape = (
    *(s for i, s in enumerate(lhs_shape) if i not in lhs_contracting),
    *(s for i, s in enumerate(rhs_shape) if i not in rhs_contracting))
  # Split the index columns into contracted (_i) and kept (_j) dimensions.
  lhs_i = lhs_indices[:, jnp.array(lhs_contracting, dtype=int)]
  rhs_i = rhs_indices[:, jnp.array(rhs_contracting, dtype=int)]
  lhs_j = lhs_indices[:, jnp.array(remaining(range(lhs.n_sparse), lhs_contracting), dtype=int)]
  rhs_j = rhs_indices[:, jnp.array(remaining(range(rhs.n_sparse), rhs_contracting), dtype=int)]
  # TODO(jakevdp): can we do this more efficiently than using an outer product? Note that
  # jnp.isin() currently doesn't help much, because it also does all() over an outer
  # comparison.
  overlap = (lhs_i[:, None] == rhs_i[None, :]).all(-1)
  # Mask out padded entries (index >= shape along any contracted dimension).
  lhs_fill_value = jnp.expand_dims(
    jnp.array([lhs_shape[d] for d in lhs_contracting], dtype=lhs_i.dtype),
    range(lhs_i.ndim - 1))
  rhs_fill_value = jnp.expand_dims(
    jnp.array([rhs_shape[d] for d in rhs_contracting], dtype=rhs_i.dtype),
    range(rhs_i.ndim - 1))
  lhs_valid = (lhs_i < lhs_fill_value).all(-1)
  rhs_valid = (rhs_i < rhs_fill_value).all(-1)
  out_data = jnp.where(overlap & lhs_valid[:, None] & rhs_valid[None, :],
                       lhs_data[:, None] * rhs_data[None, :], 0).ravel()
  # Output index for each pair is the concatenation of the kept columns.
  out_indices = jnp.empty([lhs.nse, rhs.nse, lhs_j.shape[-1] + rhs_j.shape[-1]],
                          dtype=jnp.result_type(lhs_indices, rhs_indices))
  out_indices = out_indices.at[:, :, :lhs_j.shape[-1]].set(lhs_j[:, None])
  out_indices = out_indices.at[:, :, lhs_j.shape[-1]:].set(rhs_j[None, :])
  out_indices = out_indices.reshape(len(out_data), out_indices.shape[-1])
  # Note: we do not eliminate zeros here, because it can cause issues with autodiff.
  # See https://github.com/jax-ml/jax/issues/10163.
  return _bcoo_sum_duplicates(out_data, out_indices, spinfo=SparseInfo(shape=out_shape), nse=out_nse)
@bcoo_spdot_general_p.def_impl
def _bcoo_spdot_general_impl(lhs_data, lhs_indices, rhs_data, rhs_indices, *, lhs_spinfo: SparseInfo, rhs_spinfo: SparseInfo,
                             dimension_numbers, preferred_element_type):
  """Implementation: reduce to the unbatched kernel via transposes plus vmap."""
  lhs_shape = lhs_spinfo.shape
  rhs_shape = rhs_spinfo.shape
  lhs = _validate_bcoo(lhs_data, lhs_indices, lhs_shape)
  rhs = _validate_bcoo(rhs_data, rhs_indices, rhs_shape)
  assert lhs.n_dense == rhs.n_dense == 0
  # The abstract eval determines the static output nse.
  data_aval, _ = _bcoo_spdot_general_abstract_eval(
    lhs_data.aval, lhs_indices.aval, rhs_data.aval, rhs_indices.aval,
    lhs_spinfo=lhs_spinfo, rhs_spinfo=rhs_spinfo, dimension_numbers=dimension_numbers,
    preferred_element_type=preferred_element_type)
  out_nse = data_aval.shape[-1]
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
  # Move batch dimensions to front of each array.
  lhs_batch_perm = [*lhs_batch, *remaining(range(lhs.n_batch), lhs_batch)]
  rhs_batch_perm = [*rhs_batch, *remaining(range(rhs.n_batch), rhs_batch)]
  lhs_data = lhs_data.transpose([*lhs_batch_perm, *range(lhs.n_batch, lhs_data.ndim)])
  rhs_data = rhs_data.transpose([*rhs_batch_perm, *range(rhs.n_batch, rhs_data.ndim)])
  lhs_indices = lhs_indices.transpose([*lhs_batch_perm, *range(lhs.n_batch, lhs_indices.ndim)])
  rhs_indices = rhs_indices.transpose([*rhs_batch_perm, *range(rhs.n_batch, rhs_indices.ndim)])
  # Implement batched dot product via vmap
  func = functools.partial(_bcoo_spdot_general_unbatched,
                           lhs_spinfo=SparseInfo(lhs_shape[lhs.n_batch:]),
                           rhs_spinfo=SparseInfo(rhs_shape[rhs.n_batch:]),
                           lhs_contracting=[d - lhs.n_batch for d in lhs_contracting],
                           rhs_contracting=[d - rhs.n_batch for d in rhs_contracting],
                           out_nse=out_nse)
  # vmap over unshared rhs batch dims, then unshared lhs dims, then shared dims.
  func = nfold_vmap(func, rhs.n_batch - len(rhs_batch), in_axes=(None, None, 0, 0))
  func = nfold_vmap(func, lhs.n_batch - len(lhs_batch), in_axes=(0, 0, None, None))
  func = nfold_vmap(func, len(lhs_batch))
  return func(lhs_data, lhs_indices, rhs_data, rhs_indices)
@bcoo_spdot_general_p.def_abstract_eval
def _bcoo_spdot_general_abstract_eval(lhs_data, lhs_indices, rhs_data, rhs_indices, *, lhs_spinfo: SparseInfo, rhs_spinfo: SparseInfo,
                                      dimension_numbers, preferred_element_type):
  """Abstract eval for bcoo_spdot_general: (data, indices) output avals.

  Validates that contraction is over sparse dims and batching over batch
  dims, and computes a static output nse bounded by the dense output size.
  """
  lhs_shape = lhs_spinfo.shape
  rhs_shape = rhs_spinfo.shape
  out_aval = jax.jit(lax.dot_general, static_argnames=("dimension_numbers", "preferred_element_type")).eval_shape(
      jax.ShapeDtypeStruct(lhs_shape, lhs_data.dtype),
      jax.ShapeDtypeStruct(rhs_shape, rhs_data.dtype),
      dimension_numbers=dimension_numbers,
      preferred_element_type=preferred_element_type)
  lhs = _validate_bcoo(lhs_data, lhs_indices, lhs_shape)
  rhs = _validate_bcoo(rhs_data, rhs_indices, rhs_shape)
  (lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
  if lhs.n_dense or rhs.n_dense:
    # TODO(jakevdp): handle dense dimensions
    raise NotImplementedError("bcoo_spdot_general with dense dimensions.")
  if (lhs_batch and max(lhs_batch) >= lhs.n_batch) or (rhs_batch and max(rhs_batch) >= rhs.n_batch):
    raise NotImplementedError("bcoo_spdot_general: batch_dims must correspond to batch dimensions of the sparse representation.")
  if lhs_contracting and (min(lhs_contracting) < lhs.n_batch or max(lhs_contracting) >= lhs.n_batch + lhs.n_sparse):
    raise NotImplementedError("bcoo_spdot_general only supports contraction of sparse indices.")
  if rhs_contracting and (min(rhs_contracting) < rhs.n_batch or max(rhs_contracting) >= rhs.n_batch + rhs.n_sparse):
    raise NotImplementedError("bcoo_spdot_general only supports contraction of sparse indices.")
  if rhs.n_batch > len(rhs_batch) and lhs.n_sparse > len(lhs_contracting):
    raise ValueError("bcoo_spdot_general: cannot have unused batch dims on rhs with unused sparse dims on lhs.")
  # Upper bound on the output nse: every lhs/rhs entry pair may contribute.
  out_nse = (
    (lhs.nse if lhs.n_sparse > len(lhs_contracting) else 1) *
    (rhs.nse if rhs.n_sparse > len(rhs_contracting) else 1)
  )
  # Ensure we're not storing more output elements than necessary.
  # TODO(jakevdp): should we warn here if output is effectively dense?
  out_n_batch = lhs.n_batch + rhs.n_batch - len(lhs_batch)
  out_nse = min(out_nse, math.prod(out_aval.shape[out_n_batch:]))
  # Unshared batch dims of data and indices broadcast against each other.
  lhs_batch_shape = np.broadcast_shapes(
    tuple(lhs_data.shape[dim] for dim in range(lhs.n_batch) if dim not in lhs_batch),
    tuple(lhs_indices.shape[dim] for dim in range(lhs.n_batch) if dim not in lhs_batch),
  )
  rhs_batch_shape = np.broadcast_shapes(
    tuple(rhs_data.shape[dim] for dim in range(rhs.n_batch) if dim not in rhs_batch),
    tuple(rhs_indices.shape[dim] for dim in range(rhs.n_batch) if dim not in rhs_batch),
  )
  data_shape = (
    *(lhs_shape[dim] for dim in lhs_batch),
    *lhs_batch_shape,
    *rhs_batch_shape,
    out_nse)
  indices_shape = (
    *(lhs_shape[dim] for dim in lhs_batch),
    *lhs_batch_shape,
    *rhs_batch_shape,
    out_nse, lhs.n_sparse + rhs.n_sparse - 2 * len(lhs_contracting))
  data_aval = core.ShapedArray(data_shape, out_aval.dtype)
  indices_aval = core.ShapedArray(indices_shape, lhs_indices.dtype)
  _validate_bcoo(data_aval, indices_aval, out_aval.shape) # pytype: disable=wrong-arg-types # always-use-return-annotations
  return data_aval, indices_aval
def _bcoo_spdot_general_batch_rule(batched_args, batch_dims, *, lhs_spinfo: SparseInfo, rhs_spinfo: SparseInfo,
                                   preferred_element_type, dimension_numbers):
  """Batching rule: move both operands' batch dims to the front and re-bind."""
  lhs_ndim = len(lhs_spinfo.shape)
  rhs_ndim = len(rhs_spinfo.shape)
  # Batch size comes from whichever arguments carry a batch dimension.
  batch_size = max(arg.shape[dim] for arg, dim in zip(batched_args, batch_dims) if dim is not None)
  lhs_data, lhs_indices, lhs_spinfo = _bcoo_batch_dims_to_front(
    batched_args[:2], batch_dims[:2], lhs_spinfo, batch_size=batch_size)
  rhs_data, rhs_indices, rhs_spinfo = _bcoo_batch_dims_to_front(
    batched_args[2:], batch_dims[2:], rhs_spinfo, batch_size=batch_size)
  dimension_numbers, result_batch_dim = _dot_general_batch_dim_nums(
    (lhs_ndim, rhs_ndim), (0, 0), dimension_numbers)
  batched_out = _bcoo_spdot_general(lhs_data, lhs_indices, rhs_data, rhs_indices,
                                    dimension_numbers=dimension_numbers,
                                    lhs_spinfo=lhs_spinfo, rhs_spinfo=rhs_spinfo,
                                    preferred_element_type=preferred_element_type)
  # Both outputs (data and indices) carry the batch dim in the same place.
  return batched_out, (result_batch_dim, result_batch_dim)
def _bcoo_spdot_general_jvp(primals, tangents, **kwds):
  """JVP rule: bilinear in lhs_data/rhs_data; indices are non-differentiable."""
  lhs_data, lhs_indices, rhs_data, rhs_indices = primals
  lhs_data_dot, lhs_indices_dot, rhs_data_dot, rhs_indices_dot = tangents
  primals_out = _bcoo_spdot_general(*primals, **kwds)
  assert type(lhs_indices_dot) is ad.Zero
  assert type(rhs_indices_dot) is ad.Zero
  data_dot_out = 0
  # Product rule: differentiate wrt each data buffer separately.
  if type(lhs_data_dot) is not ad.Zero:
    data_dot_out += _bcoo_spdot_general(lhs_data_dot, lhs_indices, rhs_data, rhs_indices, **kwds)[0]
  if type(rhs_data_dot) is not ad.Zero:
    data_dot_out += _bcoo_spdot_general(lhs_data, lhs_indices, rhs_data_dot, rhs_indices, **kwds)[0]
  # The output indices get a symbolic-zero tangent.
  return primals_out, [data_dot_out, ad.Zero.from_primal_value(primals_out[1])]
# Register batching, JVP, and lowering rules for bcoo_spdot_general.
# TODO(JVP): transpose rule
batching.primitive_batchers[bcoo_spdot_general_p] = _bcoo_spdot_general_batch_rule
ad.primitive_jvps[bcoo_spdot_general_p] = _bcoo_spdot_general_jvp
mlir.register_lowering(bcoo_spdot_general_p, mlir.lower_fun(
    _bcoo_spdot_general_impl, multiple_results=True))
#----------------------------------------------------------------------
# bcoo_sort_indices
# Utility to sort the indices of a BCOO representation. This primitive
# does not support deduplication or removing of zeros; see bcoo_sum_duplicates.
# The primitive returns (data, indices), hence multiple_results below.
bcoo_sort_indices_p = core.Primitive("bcoo_sort_indices")
bcoo_sort_indices_p.multiple_results = True
def bcoo_sort_indices(mat: BCOO) -> BCOO:
  """Sort indices of a BCOO array.

  Args:
    mat : BCOO array

  Returns:
    mat_out : BCOO array with sorted indices.
  """
  sorted_data, sorted_indices = bcoo_sort_indices_p.bind(*mat._bufs, spinfo=mat._info)
  return BCOO((sorted_data, sorted_indices), shape=mat.shape,
              indices_sorted=True, unique_indices=mat.unique_indices)
@bcoo_sort_indices_p.def_impl
def _bcoo_sort_indices_impl(data, indices, *, spinfo):
  """Sort indices lexicographically (per batch) and permute data to match."""
  props = _validate_bcoo(data, indices, spinfo.shape)
  if props.n_sparse == 0:
    # No sparse dimensions: nothing to sort.
    return data, indices
  f = nfold_vmap(_bcoo_sort_indices_unbatched, props.n_batch, broadcasted=False)
  indices, perm = f(indices)
  # Apply the sorting permutation to the data buffer.
  permute = nfold_vmap(lambda d, p: d[p], props.n_batch)
  data = permute(data, perm)
  return data, indices
def _bcoo_sort_indices_unbatched(indices):
# sort indices without summing duplicates
nse, N = indices.shape
idx_cols = (indices[:, i] for i in range(N))
*indices, perm = lax.sort((*idx_cols, lax.iota(indices.dtype, nse)), num_keys=N)
return jnp.column_stack(indices), perm
@bcoo_sort_indices_p.def_abstract_eval
def _bcoo_sort_indices_abstract_eval(data, indices, *, spinfo):
  """Abstract eval: indices keep their aval; data batch dims may broadcast."""
  props = _validate_bcoo(data, indices, spinfo.shape)
  if props.n_sparse == 0:
    return data, indices
  # Permuting data with a batched permutation broadcasts its batch dims
  # against those of the indices.
  data_out = core.ShapedArray(
    (*map(max, indices.shape[:props.n_batch], data.shape[:props.n_batch]),
     props.nse, *data.shape[props.n_batch + 1:]), data.dtype, weak_type=data.weak_type)
  return data_out, indices
def _bcoo_sort_indices_batching_rule(batched_args, batch_dims, *, spinfo):
  """Batching rule: move batch dims to the front and run the primitive."""
  data, indices, spinfo = _bcoo_batch_dims_to_front(batched_args, batch_dims, spinfo)
  data_out, indices_out = bcoo_sort_indices_p.bind(data, indices, spinfo=spinfo)
  # Note: if data is unbatched on input, it will be batched on output.
  # However, if indices are unbatched on input, they will be unbatched on output.
  if batch_dims[1] is not None:
    return (data_out, indices_out), (0, 0)
  return (data_out, indices_out[0]), (0, None)
def _bcoo_sort_indices_jvp(primals, tangents, *, spinfo):
  """JVP rule: sorting is a permutation, so tangents are permuted identically."""
  props = _validate_bcoo(*primals, spinfo.shape)
  if props.n_sparse == 0:
    # Nothing to sort: pass primals and tangents through unchanged.
    return primals, tangents
  data, indices = primals
  data_dot, _ = tangents
  f = nfold_vmap(_bcoo_sort_indices_unbatched, props.n_batch)
  indices_out, perm = f(indices)
  permute = nfold_vmap(lambda d, p: d[p], props.n_batch)
  data_out = permute(data, perm)
  # Indices are integer-valued, so their tangent is a symbolic zero.
  indices_dot_out = ad.Zero.from_primal_value(indices)
  data_dot_out = ad.Zero.from_primal_value(data_out) if type(data_dot) is ad.Zero else permute(data_dot, perm)
  return (data_out, indices_out), (data_dot_out, indices_dot_out)
# Register JVP, batching, and lowering rules for bcoo_sort_indices.
_bcoo_sort_indices_hlo = mlir.lower_fun(
    _bcoo_sort_indices_impl, multiple_results=True)
ad.primitive_jvps[bcoo_sort_indices_p] = _bcoo_sort_indices_jvp
batching.primitive_batchers[bcoo_sort_indices_p] = _bcoo_sort_indices_batching_rule
mlir.register_lowering(bcoo_sort_indices_p, _bcoo_sort_indices_hlo)
#----------------------------------------------------------------------
# bcoo_sum_duplicates
# Utility to sum duplicate indices in a BCOO array representation.
# The primitive returns (data, indices), hence multiple_results below.
bcoo_sum_duplicates_p = core.Primitive("bcoo_sum_duplicates")
bcoo_sum_duplicates_p.multiple_results = True
def bcoo_sum_duplicates(mat: BCOO, nse: int | None = None) -> BCOO:
  """Sums duplicate indices within a BCOO array, returning an array with sorted indices.

  Args:
    mat : BCOO array
    nse : integer (optional). Number of specified elements in the output. A
      static nse is required for compatibility with JIT and other JAX
      transformations; when omitted, the optimal nse is computed from the
      contents of the data and index arrays. Too-large values pad the output
      buffers with fill values; too-small values drop data elements.

  Returns:
    mat_out : BCOO array with sorted indices and no duplicate indices.
  """
  out_data, out_indices = _bcoo_sum_duplicates(
      mat.data, mat.indices, spinfo=mat._info, nse=nse)
  return BCOO((out_data, out_indices), shape=mat.shape,
              indices_sorted=True, unique_indices=True)
def _bcoo_sum_duplicates(data: Array, indices: Array, *, spinfo: SparseInfo, nse: int | None) -> tuple[Array, Array]:
  """Bind bcoo_sum_duplicates_p, canonicalizing `nse` to a concrete int."""
  if nse is None:
    return bcoo_sum_duplicates_p.bind(data, indices, spinfo=spinfo, nse=None)
  nse = core.concrete_or_error(operator.index, nse, "nse argument of bcoo_sum_duplicates.")
  return bcoo_sum_duplicates_p.bind(data, indices, spinfo=spinfo, nse=nse)
@bcoo_sum_duplicates_p.def_impl
def _bcoo_sum_duplicates_impl(data, indices, *, spinfo, nse):
  """Sum data over duplicate indices, producing unique sorted indices.

  When nse is None it is computed from the data (not jit-compatible).
  """
  props = _validate_bcoo(data, indices, spinfo.shape)
  indices_out, mapping, nse_batched = _unique_indices(
    indices, shape=spinfo.shape, return_inverse=True, return_true_size=True)
  if nse is None:
    nse = 1 if props.n_sparse == 0 else nse_batched.max()
  indices_out = _adjust_indices_nse(indices_out, nse=nse, shape=spinfo.shape)
  if props.n_sparse == 0:
    # Scalar sparse part: all entries collapse to the single empty index.
    data = data.sum(props.n_batch, keepdims=True)
  # Scatter-add each datum to the slot of its unique index; mode='drop'
  # ignores mapping entries that fall outside the truncated nse.
  data_out = jnp.empty((*map(max, indices.shape[:props.n_batch], data.shape[:props.n_batch]),
                        nse, *data.shape[props.n_batch + 1:]), dtype=data.dtype)
  permute = lambda d_out, m, d: d_out.at[m].add(d, mode='drop')
  permute = nfold_vmap(permute, props.n_batch)
  data_out = permute(data_out, mapping, data)
  return data_out, indices_out
def _adjust_indices_nse(indices, *, nse, shape):
  """Truncate or pad the nse dimension of `indices` to exactly `nse` entries.

  Padding rows are filled with the out-of-bounds sentinel (the sparse shape).
  """
  props = _validate_bcoo_indices(indices, shape)
  if nse <= props.nse:
    return indices[..., :nse, :]
  sentinel = jnp.array(shape[props.n_batch:props.n_batch + props.n_sparse],
                       dtype=indices.dtype)
  fill = lax.broadcast_in_dim(
      operand=sentinel,
      shape=(*indices.shape[:-2], nse - props.nse, indices.shape[-1]),
      broadcast_dimensions=(indices.ndim - 1,))
  return lax.concatenate([indices, fill], dimension=indices.ndim - 2)
def _unique_indices(indices, *, shape, return_inverse=False,
                    return_index=False, return_true_size=False):
  """Batched wrapper: vmap _unique_indices_unbatched over the batch dims."""
  props = _validate_bcoo_indices(indices, shape)
  unbatched = partial(_unique_indices_unbatched, shape=shape[props.n_batch:],
                      return_inverse=return_inverse, return_index=return_index,
                      return_true_size=return_true_size)
  return nfold_vmap(unbatched, props.n_batch, broadcasted=False)(indices)
def _unique_indices_unbatched(indices, *, shape, return_inverse=False,
                              return_index=False, return_true_size=False):
  """Deduplicate a 2D index array, padding with out-of-bounds sentinels.

  Returns the unique indices, optionally followed by index/inverse mappings
  and the true (unpadded) number of unique entries, in that order.
  """
  props = _validate_bcoo_indices(indices, shape)
  if props.n_sparse == 0:
    # Degenerate case: every entry shares the single empty index.
    nse = 1
    indices_out = jnp.zeros_like(indices, shape=(nse, 0))
    out = (indices_out,)
    if return_index:
      out = (*out, jnp.zeros(nse, dtype='int32'))
    if return_inverse:
      out = (*out, jnp.zeros(nse, dtype='int32'))
    if return_true_size:
      out = (*out, nse)
    return out[0] if len(out) == 1 else out
  # Normalize all out-of-bound entries to the canonical fill value (= shape).
  fill_value = jnp.expand_dims(jnp.array(shape[:props.n_sparse], dtype=indices.dtype), (0,))
  out_of_bounds = (indices >= fill_value).any(-1, keepdims=True)
  indices = jnp.where(out_of_bounds, fill_value, indices)
  # TODO: check if `indices_sorted` is True.
  out = _unique(indices, axis=0, return_inverse=return_inverse, return_index=return_index,
                return_true_size=return_true_size, size=props.nse, fill_value=fill_value)
  if return_inverse:
    idx = 2 if return_index else 1
    out = (*out[:idx], out[idx].ravel(), *out[idx + 1:])
  if return_true_size:
    # Don't count the fill value itself as a real unique entry.
    nse = out[-1]
    nse = nse - (indices == fill_value).any().astype(nse.dtype)
    out = (*out[:-1], nse)
  return out
def _coo_correct_out_of_bound_indices(row, col, shape, transpose):
# Since cusparse does not have any well-tested support for padded indices,
# we push them into an extra row/col of the matrix, which will then be
# sliced away in the output.
assert row.ndim == col.ndim, f"{row.ndim} != {col.ndim}"
assert len(shape) == row.ndim + 1, f"{len(shape)} != {row.ndim + 1}"
if row.ndim > 1:
f = partial(_coo_correct_out_of_bound_indices,
shape=shape[row.ndim:], transpose=transpose)
return nfold_vmap(f, row.ndim)(row, col)
mask = (row >= shape[0]) | (col >= shape[1])
if transpose:
row = jnp.where(mask, 0, row)
col = jnp.where(mask, shape[1], col)
shape = (shape[0], shape[1] + 1)
else:
row = jnp.where(mask, shape[0], row)
col = jnp.where(mask, 0, col)
shape = (shape[0] + 1, shape[1])
return row, col, shape
@bcoo_sum_duplicates_p.def_abstract_eval
def _bcoo_sum_duplicates_abstract_eval(data, indices, *, spinfo, nse):
  """Abstract evaluation rule for bcoo_sum_duplicates_p.

  Requires a static ``nse``: under abstract evaluation the output size
  cannot depend on traced values.
  """
  if nse is None:
    raise ValueError("bcoo_sum_duplicates: nse must be specified when using the function within "
                     "jit, vmap, and other transformations requiring abstract evaluation.")
  props = _validate_bcoo(data, indices, spinfo.shape)
  # Output indices keep the input indices' batch dims, with nse rows of
  # n_sparse index columns.
  indices_out = core.ShapedArray((*indices.shape[:props.n_batch], nse, props.n_sparse),
                                 dtype=indices.dtype, weak_type=indices.weak_type)
  # Output data batch dims are the elementwise max of data & indices batch
  # dims (resolving broadcasted batch dimensions), followed by nse and the
  # trailing dense dims.
  data_out = core.ShapedArray(
      (*map(max, indices.shape[:props.n_batch], data.shape[:props.n_batch]),
       nse, *data.shape[props.n_batch + 1:]), data.dtype, weak_type=data.weak_type)
  return data_out, indices_out
def _bcoo_sum_duplicates_batching_rule(batched_args, batch_dims, *, spinfo, nse):
  """Batching rule for bcoo_sum_duplicates_p.

  Moves the vmapped dimension to the front, binds the primitive, and reports
  the resulting output batch axes.
  """
  data, indices, front_spinfo = _bcoo_batch_dims_to_front(batched_args, batch_dims, spinfo)
  data_out, indices_out = bcoo_sum_duplicates_p.bind(
      data, indices, spinfo=front_spinfo, nse=nse)
  # Note: if data is unbatched on input, it will be batched on output.
  # However, if indices are unbatched on input, they will be unbatched on
  # output — drop the leading singleton dimension in that case.
  if batch_dims[1] is not None:
    return (data_out, indices_out), (0, 0)
  return (data_out, lax.squeeze(indices_out, [0])), (0, None)
def _bcoo_sum_duplicates_jvp(primals, tangents, *, spinfo, nse):
  """JVP rule for bcoo_sum_duplicates_p.

  Deduplicates (data, indices) and scatters the data tangent with the same
  old-row -> unique-row mapping as the primal data. The indices output is
  integer-valued, so its tangent is a symbolic zero.
  """
  props = _validate_bcoo(*primals, spinfo.shape)
  data, indices = primals
  data_dot, _ = tangents
  # Unique index rows, the mapping from old rows to unique rows, and the
  # per-batch count of true unique entries.
  indices_out, mapping, nse_batched = _unique_indices(
      indices, shape=spinfo.shape, return_inverse=True, return_true_size=True)
  if nse is None:
    # nse was not specified: it must be concretizable outside of tracing.
    nse = jnp.sum(nse_batched)
    try:
      nse = core.concrete_or_error(operator.index, nse, "nse argument of bcoo_sum_duplicates.")
    except core.ConcretizationTypeError:
      raise ValueError("bcoo_sum_duplicates: nse must be specified when using the function within "
                       "jit, vmap, and other transformations requiring abstract evaluation.")
  indices_out = _adjust_indices_nse(indices_out, nse=nse, shape=spinfo.shape)
  if props.n_sparse == 0:
    # No sparse dimensions: every stored entry is a duplicate of the same
    # element, so sum them (and their tangents) up front.
    data = data.sum(props.n_batch, keepdims=True)
    data_dot = data_dot.sum(props.n_batch, keepdims=True)
  data_out = jnp.empty((*map(max, indices.shape[:props.n_batch], data.shape[:props.n_batch]),
                        nse, *data.shape[props.n_batch + 1:]), dtype=data.dtype)
  data_dot_out = data_out
  # This check is because scatter-add on zero-sized arrays has poorly defined
  # semantics; see https://github.com/jax-ml/jax/issues/13656.
  if data_out.size:
    permute = lambda x, i, y: x.at[i].add(y, mode='drop')
  else:
    permute = lambda x, i, y: x
  permute = nfold_vmap(permute, props.n_batch)
  data_out = permute(data_out, mapping, data)
  indices_dot_out = ad.Zero.from_primal_value(indices_out)
  data_dot_out = ad.Zero.from_primal_value(data_out) if type(data_dot) is ad.Zero else permute(data_dot_out, mapping, data_dot)
  return (data_out, indices_out), (data_dot_out, indices_dot_out)
# Lower bcoo_sum_duplicates via its reference implementation.
_bcoo_sum_duplicates_hlo = mlir.lower_fun(
    _bcoo_sum_duplicates_impl, multiple_results=True)
# Register differentiation, batching, and lowering rules for the primitive.
ad.primitive_jvps[bcoo_sum_duplicates_p] = _bcoo_sum_duplicates_jvp
batching.primitive_batchers[bcoo_sum_duplicates_p] = _bcoo_sum_duplicates_batching_rule
# TODO(phawkins): caching this primitive seems to cause x64 context problems.
mlir.register_lowering(bcoo_sum_duplicates_p, _bcoo_sum_duplicates_hlo,
                       cacheable=False)
#----------------------------------------------------------------------
# BCOO functions that maybe should be primitives?
def bcoo_update_layout(mat: BCOO, *, n_batch: int | None = None, n_dense: int | None = None,
                       on_inefficient: str | None = 'error') -> BCOO:
  """Update the storage layout (i.e. n_batch & n_dense) of a BCOO matrix.

  In many cases this can be done without introducing undue storage overhead. However,
  increasing ``mat.n_batch`` or ``mat.n_dense`` will lead to very inefficient storage,
  with many explicitly-stored zeros, unless the new batch or dense dimensions have size
  0 or 1. In such cases, ``bcoo_update_layout`` will raise a :class:`SparseEfficiencyError`.
  This can be silenced by specifying the ``on_inefficient`` argument.

  Args:
    mat : BCOO array
    n_batch : optional(int) the number of batch dimensions in the output matrix. If None,
      then n_batch = mat.n_batch.
    n_dense : optional(int) the number of dense dimensions in the output matrix. If None,
      then n_dense = mat.n_dense.
    on_inefficient : optional(string), one of ``['error', 'warn', None]``. Specify the
      behavior in case of an inefficient reconfiguration. This is defined as a reconfiguration
      where the size of the resulting representation is much larger than the size of the
      input representation.

  Returns:
    mat_out : BCOO array
      A BCOO array representing the same sparse array as the input, with the specified
      layout. ``mat_out.todense()`` will match ``mat.todense()`` up to appropriate precision.
  """
  # TODO(jakevdp): allow specification of nse?
  # TODO(jakevdp): there is room for some improvements here:
  # - we could probably do better in the case of converting a dense dim to
  #   a batch dim or vice-versa. Worth adding that special case?
  # - we could work to preserve broadcasted batch dimensions when possible.
  # - if indices are known to be unique, we can convert them to batch/dense
  #   dimensions more efficiently.
  n_batch = mat.n_batch if n_batch is None else operator.index(n_batch)
  n_dense = mat.n_dense if n_dense is None else operator.index(n_dense)

  if (n_batch, n_dense) == (mat.n_batch, mat.n_dense):
    return mat

  n_sparse = mat.ndim - n_batch - n_dense
  if on_inefficient not in ['error', 'warn', None]:
    # f-prefix required so the invalid value is actually interpolated.
    raise ValueError(f"on_inefficient={on_inefficient!r}; expected one of ['error', 'warn', None].")
  if n_batch < 0:
    raise ValueError(f"n_batch must be non-negative; got {n_batch}")
  if n_dense < 0:
    raise ValueError(f"n_dense must be non-negative; got {n_dense}")
  if n_sparse < 0:
    raise ValueError(f"sum of {n_batch=} and {n_dense=} "
                     f"cannot be larger than mat.ndim={mat.ndim}.")

  def _maybe_err_or_warn(msg):
    # Route inefficiency diagnostics according to the on_inefficient setting.
    if on_inefficient == 'error':
      msg += (" To disable this error, set the on_inefficient argument "
              "of bcoo_update_layout to 'warn' or None.")
      raise SparseEfficiencyError(msg)
    elif on_inefficient == 'warn':
      msg += (" To disable this warning, set the on_inefficient argument "
              "of bcoo_update_layout to None.")
      warnings.warn(msg, category=SparseEfficiencyWarning)

  # TODO(jakevdp): are efficiency warnings necessary when nse is 0 or 1?
  if (n_dense > mat.n_dense and
      any(d > 1 for d in mat.shape[-n_dense:mat.ndim - mat.n_dense])):
    _maybe_err_or_warn(f"For matrix of shape {mat.shape}, increasing n_dense from "
                       f"{mat.n_dense} to {n_dense} results in inefficient storage.")
  if n_batch > mat.n_batch and any(d > 1 for d in mat.shape[mat.n_batch:n_batch]):
    _maybe_err_or_warn(f"For matrix of shape {mat.shape}, increasing n_batch from "
                       f"{mat.n_batch} to {n_batch} results in inefficient storage.")

  new_data, new_indices = mat.data, mat.indices
  shape = mat.shape
  current_n_batch = mat.n_batch
  current_n_dense = mat.n_dense

  if n_dense < current_n_dense:
    # Fold trailing dense dimensions into the nse axis.
    n = current_n_dense - n_dense
    @partial(nfold_vmap, N=current_n_batch + 1)
    def _update(d, i):
      new_d = d.reshape(math.prod(d.shape[:n]), *d.shape[n:])
      meshes = jnp.meshgrid(*(jnp.arange(s, dtype=i.dtype) for s in d.shape[:n]),
                            indexing='ij')
      new_i = jnp.column_stack([jnp.broadcast_to(i, (new_d.shape[0], i.size)),
                                *map(jnp.ravel, meshes)])
      return new_d, new_i
    new_data, new_indices = _update(new_data, new_indices)
    new_data = new_data.reshape(*new_data.shape[:current_n_batch],
                                math.prod(new_data.shape[current_n_batch:current_n_batch + 2]),
                                *new_data.shape[current_n_batch + 2:])
    new_indices = new_indices.reshape(*new_indices.shape[:current_n_batch],
                                      math.prod(new_indices.shape[current_n_batch: current_n_batch + 2]),
                                      *new_indices.shape[current_n_batch + 2:])
    current_n_dense = n_dense

  if n_batch < current_n_batch:
    # Fold trailing batch dimensions into the nse axis.
    n = current_n_batch - n_batch
    @partial(nfold_vmap, N=n_batch)
    def _update(d, i):
      nse = i.shape[-2]
      new_d = d.reshape(math.prod(d.shape[:n + 1]), *d.shape[n + 1:])
      meshes = jnp.meshgrid(*(jnp.arange(d, dtype=i.dtype) for d in (*i.shape[:n], nse)),
                            indexing='ij')
      new_i = i.reshape(math.prod(i.shape[:n + 1]), *i.shape[n + 1:])
      new_i = jnp.column_stack((*(m.ravel() for m in meshes[:-1]), new_i))
      return new_d, new_i
    new_data, new_indices = _update(new_data, new_indices)
    current_n_batch = n_batch

  if n_dense > current_n_dense:
    # Convert trailing sparse dimensions to dense dimensions.
    n = n_dense - current_n_dense
    @partial(nfold_vmap, N=current_n_batch + 1)
    def _update(d, i):
      new_d = jnp.zeros_like(d, shape=shape[-n_dense:]).at[tuple(i[-n:])].set(d)
      new_i = i[:-n]
      return new_d, new_i
    new_data, new_indices = _update(new_data, new_indices)
    current_n_dense = n_dense

  if n_batch > current_n_batch:
    # Convert leading sparse dimensions to batch dimensions.
    n = n_batch - current_n_batch
    @partial(nfold_vmap, N=current_n_batch)
    def _update(d, i):
      nse = i.shape[-2]
      idx = tuple(i[:, j] for j in range(n)) + (jnp.arange(nse),)
      new_i_shape = (*shape[current_n_batch:n_batch], nse, i.shape[-1] - n)
      new_i = jnp.broadcast_to(i[:, n:], new_i_shape)
      new_d_shape = (*shape[current_n_batch:n_batch], nse, *d.shape[d.ndim - n_dense:])
      new_d = jnp.zeros_like(d, shape=new_d_shape).at[idx].set(d)
      return new_d, new_i
    new_data, new_indices = _update(new_data, new_indices)
    current_n_batch = n_batch

  return BCOO((new_data, new_indices), shape=shape)
def bcoo_broadcast_in_dim(mat: BCOO, *, shape: Shape, broadcast_dimensions: Sequence[int],
                          sharding=None) -> BCOO:
  """Expand the size and rank of a BCOO array by duplicating the data.

  A BCOO equivalence to jax.lax.broadcast_in_dim.

  Args:
    mat: A BCOO-format array.
    shape: The shape of the target array.
    broadcast_dimensions: The dimension in the shape of the target array which
      each dimension of the operand (``mat``) shape corresponds to.

  Returns:
    A BCOO-format array containing the target array.
  """
  buffers = _bcoo_broadcast_in_dim(
      mat.data, mat.indices, spinfo=mat._info, shape=shape,
      broadcast_dimensions=broadcast_dimensions)
  return BCOO(buffers, shape=shape)
def _bcoo_broadcast_in_dim(data: Array, indices: Array, *, spinfo: SparseInfo, shape: Shape,
                           broadcast_dimensions: Sequence[int]) -> tuple[Array, Array]:
  """BCOO equivalent of lax.broadcast_in_dim, on raw (data, indices) buffers.

  Batch dims may only broadcast into batch dims, sparse into sparse, and
  dense into dense; new sparse dimensions must have size 1 (adding larger
  sparse dimensions is not implemented).
  """
  if len(spinfo.shape) != len(broadcast_dimensions):
    raise ValueError(f"{spinfo.shape=} and {broadcast_dimensions=} must have the same length")
  props = _validate_bcoo(data, indices, spinfo.shape)
  batch_dims, sparse_dims, dense_dims = split_list(broadcast_dimensions, [props.n_batch, props.n_sparse])

  if max(batch_dims, default=0) > min(sparse_dims, default=len(shape)):
    raise ValueError("Cannot mix batch and sparse dimensions during broadcast_in_dim")
  if max(sparse_dims, default=0) > min(dense_dims, default=len(shape)):
    raise ValueError("Cannot mix sparse and dense dimensions during broadcast_in_dim")

  # All new dimensions preceding a sparse or dense dimension are batch dimensions:
  new_n_batch = min(broadcast_dimensions[props.n_batch:], default=len(shape))
  # TODO(jakevdp): Should new trailing dimensions be dense by default?
  new_n_dense = props.n_dense and len(shape) - min(broadcast_dimensions[-props.n_dense:])
  new_n_sparse = len(shape) - new_n_batch - new_n_dense

  if math.prod(spinfo.shape[props.n_batch: props.n_batch + props.n_sparse]) != math.prod(shape[new_n_batch:new_n_batch + new_n_sparse]):
    raise NotImplementedError("Adding sparse dimensions with lengths != 1")
  new_data, new_indices = data, indices

  # batch & dense dimensions
  if (new_n_batch, new_n_dense) != (props.n_batch, props.n_dense):
    new_data = lax.broadcast_in_dim(new_data,
        shape=(*shape[:new_n_batch], props.nse, *shape[new_n_batch + new_n_sparse:]),
        broadcast_dimensions=(*batch_dims, new_n_batch, *(b + 1 - new_n_sparse for b in dense_dims)))
    new_indices = lax.broadcast_in_dim(new_indices,
        shape=(*shape[:new_n_batch], props.nse, props.n_sparse),
        broadcast_dimensions=(*batch_dims, new_n_batch, new_n_batch + 1))

  # sparse dimensions
  if new_n_sparse != props.n_sparse:
    # New sparse dims have size 1, so their index columns are all-zero;
    # scatter the existing index columns into their broadcast positions.
    shape = (*shape[:new_n_batch], props.nse, new_n_sparse)
    ind = jnp.array(sparse_dims, int) - new_n_batch
    new_indices = (jnp.zeros_like(new_indices, shape=shape).at[..., ind].set(new_indices))

  return new_data, new_indices
def bcoo_concatenate(operands: Sequence[BCOO], *, dimension: int) -> BCOO:
  """Sparse implementation of :func:`jax.lax.concatenate`

  Args:
    operands : Sequence of BCOO arrays to concatenate. The arrays must have equal
      shapes, except in the `dimension` axis. Additionally, the arrays must have
      have equivalent batch, sparse, and dense dimensions.
    dimension : Positive integer specifying the dimension along which to concatenate
      the arrays. The dimension must be among batch or sparse dimensions of the input;
      concatenation along dense dimensions is not supported.

  Returns:
    A BCOO array containing the concatenation of the inputs.
  """
  dimension = operator.index(dimension)
  if not all(isinstance(op, BCOO) for op in operands):
    raise ValueError("bcoo_concatenate: expected operands to be a sequence of BCOO arrays. "
                     f"Got {operands}")
  # Validate inputs using lax.concatenate abstract evaluation.
  out_aval = jax.jit(lax.concatenate, static_argnames=("dimension",)).eval_shape(
      [core.ShapedArray(op.shape, op.dtype) for op in operands],
      dimension=dimension)
  if len({op.n_dense for op in operands}) > 1:
    # This compares n_dense, so the message refers to dense dimensions.
    raise ValueError("bcoo_concatenate requires inputs to have matching dense dimensions.")

  n_batches = {op.n_batch for op in operands}
  # Correct for the common case, where op[None, :] adds a single batch dimension and we
  # need to align it in order to match the others & concatenate.
  if len(n_batches) != 1 and max(n_batches) == 1:
    if all(op.shape[0] == 1 for op in operands if op.n_batch == 0):
      operands = [bcoo_update_layout(op, n_batch=1) if op.n_batch == 0 else op for op in operands]
    elif all(op.shape[0] == 1 for op in operands if op.n_batch == 1):
      operands = [bcoo_update_layout(op, n_batch=0) if op.n_batch == 1 else op for op in operands]
    n_batches = {op.n_batch for op in operands}

  if len(n_batches) != 1:
    raise ValueError("bcoo_concatenate requires inputs to have matching batch dimensions.")

  n_batch, n_sparse = operands[0].n_batch, operands[0].n_sparse

  # Broadcasted batch buffers (data/indices smaller than the array's batch
  # shape) are not supported; ignore the concatenated axis when comparing.
  index_batches = [op.indices.shape[:n_batch] for op in operands]
  data_batches = [op.data.shape[:n_batch] for op in operands]
  if dimension < n_batch:
    index_batches = [s[:dimension] + s[dimension + 1:] for s in index_batches]
    data_batches = [s[:dimension] + s[dimension + 1:] for s in data_batches]
  if not (len(set(index_batches)) == len(set(data_batches)) == 1):
    raise NotImplementedError("concatenation of arrays with broadcasted batch indices")

  if dimension < n_batch:  # Concatenation along batch axes
    # Ensure nse of operands match.
    nses = {op.nse for op in operands}
    if len(nses) != 1:
      nse = max(nses)
      operands = [_bcoo_set_nse(op, nse) for op in operands]
    new_indices = lax.concatenate([op.indices for op in operands], dimension=dimension)
    new_data = lax.concatenate([op.data for op in operands], dimension=dimension)
  elif dimension < n_batch + n_sparse:  # Concatenation along sparse axes
    # Offset each operand's indices along the concatenated axis, then stack
    # the stored entries along the nse dimension.
    offsets = np.cumsum([0] + [op.shape[dimension] for op in operands[:-1]],
                        dtype=operands[0].indices.dtype)
    new_data = lax.concatenate([op.data for op in operands], dimension=n_batch)
    new_indices = lax.concatenate([op.indices.at[..., dimension - n_batch].add(offset)
                                   for op, offset in safe_zip(operands, offsets)],
                                  dimension=n_batch)
  else:  # Concatenation along dense axes
    # TODO(jakevdp) should we implement this? In general it results in a wasteful
    # representation because we cannot assume that the indices match.
    raise NotImplementedError("Concatenation along dense dimensions.")

  return BCOO((new_data, new_indices), shape=out_aval.shape)
def bcoo_reshape(mat: BCOO, *, new_sizes: Sequence[int],
                 dimensions: Sequence[int] | None = None,
                 sharding=None) -> BCOO:
  """Sparse implementation of :func:`jax.lax.reshape`.

  Args:
    mat: BCOO array to be reshaped.
    new_sizes: sequence of integers specifying the resulting shape. The size
      of the final array must match the size of the input. This must be specified
      such that batch, sparse, and dense dimensions do not mix.
    dimensions: optional sequence of integers specifying the permutation order of
      the input shape. If specified, the length must match ``mat.shape``.
      Additionally, dimensions must only permute among like dimensions of mat:
      batch, sparse, and dense dimensions cannot be permuted.

  Returns:
    out: reshaped array.
  """
  # Compare each buffer's batch shape against the array's batch shape
  # explicitly: a chained `a != b != c` comparison would miss the case where
  # data and indices agree with each other but both are broadcast (size 1).
  full_batch_shape = mat.shape[:mat.n_batch]
  if (mat.indices.shape[:mat.n_batch] != full_batch_shape
      or mat.data.shape[:mat.n_batch] != full_batch_shape):
    # TODO(jakevdp) implement this case via broadcast_in_dim
    raise NotImplementedError("reshape of arrays with broadcasted batch dimensions.")

  batch_shape, sparse_shape, dense_shape = split_list(mat.shape, [mat.n_batch, mat.n_sparse])
  batch_perm, sparse_perm, dense_perm = _validate_permutation(
      mat.data, mat.indices, dimensions or tuple(range(mat.ndim)), mat.shape)
  batch_size = math.prod(batch_shape)
  sparse_size = math.prod(sparse_shape)

  # The cumulative product of the new shape locates the boundaries between
  # the batch, sparse, and dense regions of the requested shape.
  cuml_shape = np.cumprod(new_sizes)
  if batch_size != 1 and batch_size not in cuml_shape:
    raise ValueError("bcoo_reshape: new shape cannot mix batch and sparse dimensions; "
                     f"got shape={mat.shape} new_shape={new_sizes} with n_batch={mat.n_batch}")
  if sparse_size != 1 and batch_size * sparse_size not in cuml_shape:
    raise ValueError("bcoo_reshape: new shape cannot mix sparse and dense dimensions; "
                     f"got shape={mat.shape} new_shape={new_sizes} with n_dense={mat.n_dense}")

  i1 = cuml_shape.searchsorted(batch_size, side='right')
  i2 = cuml_shape.searchsorted(batch_size * sparse_size, side='right')
  new_batch_shape, new_sparse_shape, new_dense_shape = split_list(new_sizes, [int(i1), int(i2)])

  # Reshape batch & dense dimensions: this is accomplished via a standard reshape.
  data = lax.reshape(
      mat.data, new_sizes=(*new_batch_shape, mat.nse, *new_dense_shape),
      dimensions=(*batch_perm, mat.n_batch, *(p + mat.n_batch + 1 for p in dense_perm)))
  indices = lax.reshape(
      mat.indices, new_sizes=(*new_batch_shape, mat.nse, mat.n_sparse),
      dimensions=(*batch_perm, mat.n_batch, mat.n_batch + 1))

  # Reshape the sparse dimensions: this is accomplished by re-indexing.
  if not new_sparse_shape:
    indices = jnp.empty_like(indices, shape=(*new_batch_shape, mat.nse, 0))
  elif sparse_shape:
    index_cols = tuple(indices[..., i] for i in sparse_perm)
    sparse_shape = [int(mat.shape[mat.n_batch + i]) for i in sparse_perm]
    flat_indices = jnp.ravel_multi_index(index_cols, dims=tuple(sparse_shape), mode='clip')
    with jax.numpy_rank_promotion('allow'):
      oob_indices = (indices >= jnp.array(mat.shape[mat.n_batch: mat.n_batch + mat.n_sparse],
                                          dtype=indices.dtype)).any(-1, keepdims=True)
    new_index_cols = jnp.unravel_index(flat_indices, new_sparse_shape)
    indices = jnp.concatenate([col[..., None] for col in new_index_cols], axis=-1)
    # Out-of-bound sentinel entries stay out-of-bound in the new shape.
    indices = jnp.where(oob_indices, jnp.array(new_sparse_shape, dtype=indices.dtype), indices)
  return BCOO((data, indices), shape=new_sizes)
def bcoo_rev(operand, dimensions):
  """Sparse implementation of :func:`jax.lax.rev`"""
  # Check validity of dimensions via original implementation.
  _ = jax.jit(lax.rev, static_argnames=("dimensions",)).eval_shape(
      jax.ShapeDtypeStruct(operand.shape, operand.dtype),
      dimensions=dimensions)
  # Partition the reversed dimensions by storage region.
  batch_dims = [d for d in dimensions if d < operand.n_batch]
  sparse_dims = [d for d in dimensions if operand.n_batch <= d < operand.n_batch + operand.n_sparse]
  dense_dims = [d for d in dimensions if d >= operand.n_batch + operand.n_sparse]
  data, indices = operand.data, operand.indices
  # Batch and dense dimensions are reversed directly in the buffers.
  if batch_dims:
    indices = lax.rev(indices, dimensions=batch_dims)
  if batch_dims or dense_dims:
    data = lax.rev(data, dimensions=batch_dims + [d + 1 - operand.n_sparse for d in dense_dims])

  if sparse_dims:
    # Sparse dimensions are reversed by remapping each index i -> size - 1 - i.
    sparse_shape = jnp.array(operand.shape[operand.n_batch: operand.n_batch + operand.n_sparse],
                             dtype=indices.dtype)
    spdims = jnp.array([d - operand.n_batch for d in sparse_dims])
    indices = indices.at[..., spdims].mul(-1)
    indices = indices.at[..., spdims].add(sparse_shape[spdims] - 1)
    # Out-of-bound (padding) entries become negative above; restore them to
    # the out-of-bound sentinel (the sparse shape).
    indices = jnp.where(indices < 0, sparse_shape, indices)

  return BCOO((data, indices), shape=operand.shape)
def bcoo_squeeze(arr: BCOO, *, dimensions: Sequence[int]) -> BCOO:
  """Sparse implementation of :func:`jax.lax.squeeze`.

  Squeeze any number of size 1 dimensions from an array.

  Args:
    arr: BCOO array to be reshaped.
    dimensions: sequence of integers specifying dimensions to squeeze.

  Returns:
    out: reshaped array.
  """
  dimensions = tuple(canonicalize_axis(dim, arr.ndim) for dim in dimensions)
  if any(arr.shape[dim] != 1 for dim in dimensions):
    raise ValueError("cannot select an axis to squeeze out which has size not equal to one, "
                     f"got shape={arr.shape} and {dimensions=}")
  # Partition the squeezed axes by storage region (batch / sparse / dense).
  squeezed_batch = tuple(d for d in dimensions if d < arr.n_batch)
  kept_sparse = np.array([i for i in range(arr.n_sparse)
                          if i + arr.n_batch not in dimensions], dtype=int)
  squeezed_dense = tuple(d - arr.n_sparse + 1 for d in dimensions
                         if d >= arr.n_batch + arr.n_sparse)
  # Batch & dense axes squeeze directly out of the data buffer; sparse axes
  # are removed by dropping the corresponding index columns.
  new_data = lax.squeeze(arr.data, squeezed_batch + squeezed_dense)
  new_indices = lax.squeeze(arr.indices[..., kept_sparse], squeezed_batch)
  result_shape = tuple(size for axis, size in enumerate(arr.shape)
                       if axis not in dimensions)
  return BCOO((new_data, new_indices), shape=result_shape,
              indices_sorted=arr.indices_sorted, unique_indices=arr.unique_indices)
def bcoo_slice(mat: BCOO, *, start_indices: Sequence[int], limit_indices: Sequence[int],
               strides: Sequence[int] | None = None) -> BCOO:
  """Sparse implementation of :func:`jax.lax.slice`.

  Args:
    mat: BCOO array to be reshaped.
    start_indices: sequence of integers of length `mat.ndim` specifying the starting
      indices of each slice.
    limit_indices: sequence of integers of length `mat.ndim` specifying the ending
      indices of each slice
    strides: (not implemented) sequence of integers of length `mat.ndim` specifying
      the stride for each slice

  Returns:
    out: BCOO array containing the slice.
  """
  if not isinstance(mat, BCOO):
    raise TypeError(f"bcoo_slice: input should be BCOO array, got type(mat)={type(mat)}")
  start_indices = [operator.index(i) for i in start_indices]
  limit_indices = [operator.index(i) for i in limit_indices]
  if strides is not None:
    strides = [operator.index(i) for i in strides]
  else:
    strides = [1] * mat.ndim
  # Validate each length against mat.ndim individually: a chained
  # `a != b != c` comparison does not check all pairs.
  if len(start_indices) != mat.ndim or len(limit_indices) != mat.ndim:
    raise ValueError(f"bcoo_slice: indices must have size mat.ndim={mat.ndim}")
  if len(strides) != mat.ndim:
    raise ValueError(f"len(strides) = {len(strides)}; expected {mat.ndim}")
  if any(s <= 0 for s in strides):
    raise ValueError(f"strides must be a sequence of positive integers; got {strides}")
  if not all(0 <= start <= end <= size
             for start, end, size in safe_zip(start_indices, limit_indices, mat.shape)):
    raise ValueError(f"bcoo_slice: invalid indices. Got {start_indices=}, "
                     f"{limit_indices=} and shape={mat.shape}")

  start_batch, start_sparse, start_dense = split_list(start_indices, [mat.n_batch, mat.n_sparse])
  end_batch, end_sparse, end_dense = split_list(limit_indices, [mat.n_batch, mat.n_sparse])
  stride_batch, stride_sparse, stride_dense = split_list(strides, [mat.n_batch, mat.n_sparse])

  # Batch dimensions are sliced directly, except where a buffer dimension is
  # broadcast (its size differs from the array shape): those are kept whole.
  data_slices = []
  index_slices = []
  for i, (start, end, stride) in enumerate(zip(start_batch, end_batch, stride_batch)):
    data_slices.append(slice(None) if mat.data.shape[i] != mat.shape[i] else slice(start, end, stride))
    index_slices.append(slice(None) if mat.indices.shape[i] != mat.shape[i] else slice(start, end, stride))
  data_slices.append(slice(None))
  index_slices.extend([slice(None), slice(None)])
  # Dense dimensions are sliced directly within the data buffer.
  for i, (start, end, stride) in enumerate(zip(start_dense, end_dense, stride_dense)):
    data_slices.append(slice(start, end, stride))
  new_data = mat.data[tuple(data_slices)]
  new_indices = mat.indices[tuple(index_slices)]
  new_shape = tuple(
    (end - start + stride - 1) // stride
    for start, end, stride in safe_zip(start_indices, limit_indices, strides))
  _, new_shape_sparse, _ = split_list(new_shape, [mat.n_batch, mat.n_sparse])

  if mat.n_sparse:
    # Sparse dimensions: keep entries within the slice, re-index them
    # relative to the slice start, and push dropped entries to the
    # out-of-bound sentinel value.
    starts = jnp.expand_dims(jnp.array(start_sparse, dtype=new_indices.dtype), range(mat.n_batch + 1))
    ends = jnp.expand_dims(jnp.array(end_sparse, dtype=new_indices.dtype), range(mat.n_batch + 1))
    strides_ = jnp.expand_dims(jnp.array(stride_sparse, dtype=new_indices.dtype), range(mat.n_batch + 1))

    keep = jnp.all((new_indices >= starts) & (new_indices < ends) &
                   ((new_indices - starts) % strides_ == 0),
                   axis=-1, keepdims=True)
    new_indices = jnp.where(keep, (new_indices - starts + strides_ - 1) // strides_,
                            (ends - starts + strides_ - 1) // strides_)

    keep_data = lax.expand_dims(keep[..., 0], range(mat.n_batch + 1, mat.n_batch + 1 + mat.n_dense))
    new_data = jnp.where(keep_data, new_data, 0)

    # The slice can hold at most as many nonzeros as it has elements.
    new_nse = math.prod(new_shape_sparse)
    if mat.nse > new_nse:
      new_data, new_indices = _bcoo_sum_duplicates(
        new_data, new_indices, spinfo=SparseInfo(shape=new_shape), nse=new_nse)

  return BCOO((new_data, new_indices), shape=new_shape)
def bcoo_dynamic_slice(mat: BCOO, start_indices: Sequence[Any], slice_sizes: Sequence[int]) -> BCOO:
  """Sparse implementation of :func:`jax.lax.dynamic_slice`.

  Args:
    mat: BCOO array to slice.
    start_indices: a list of scalar indices, one per dimension. These values
      may be dynamic.
    slice_sizes: the size of the slice. Must be a sequence of non-negative
      integers with length equal to `ndim(operand)`. Inside a JIT compiled
      function, only static values are supported (all JAX arrays inside JIT
      must have statically known size).

  Returns:
    out: BCOO array containing the slice.
  """
  # Validate the input type before touching its attributes.
  if not isinstance(mat, BCOO):
    raise TypeError(f"bcoo_dynamic_slice: input should be BCOO array, got type(mat)={type(mat)}")
  slice_sizes = tuple(operator.index(i) for i in slice_sizes)
  # Use abstract eval to validate inputs.
  jax.jit(lax.dynamic_slice, static_argnames=("slice_sizes",)).eval_shape(
      jax.ShapeDtypeStruct(mat.shape, mat.dtype), start_indices,
      slice_sizes=slice_sizes)
  start_indices = tuple(jnp.asarray(i) for i in start_indices)
  assert all(jnp.issubdtype(i.dtype, np.integer) for i in start_indices)
  assert all(i.shape == () for i in start_indices)
  # Validate each length against mat.ndim individually: a chained
  # `a != b != c` comparison does not check all pairs.
  if len(start_indices) != mat.ndim or len(slice_sizes) != mat.ndim:
    raise ValueError(f"bcoo_dynamic_slice: indices must have size mat.ndim={mat.ndim}")
  if not all(0 <= slice_size <= axis_size for slice_size, axis_size in zip(slice_sizes, mat.shape)):
    raise TypeError("slice_sizes must be less than or equal to operand shape, "
                    f"got slice_sizes {slice_sizes} for operand shape {mat.shape}")

  start_batch, start_sparse, start_dense = split_list(start_indices, [mat.n_batch, mat.n_sparse])
  size_batch, size_sparse, size_dense = split_list(slice_sizes, [mat.n_batch, mat.n_sparse])

  # Batch dimensions are sliced directly, except where a buffer dimension is
  # broadcast (size 1 differs from the array shape): those stay size 1 with a
  # zero start.
  data_start = []
  data_sizes = []
  indices_start = []
  indices_sizes = []
  zero = _const(start_indices[0] if start_indices else np.int32, 0)
  for i, (start, size) in enumerate(zip(start_batch, size_batch)):
    data_is_broadcast = mat.data.shape[i] != mat.shape[i]
    indices_is_broadcast = mat.indices.shape[i] != mat.shape[i]
    data_start.append(zero if data_is_broadcast else start)
    data_sizes.append(1 if data_is_broadcast else size)
    indices_start.append(zero if indices_is_broadcast else start)
    indices_sizes.append(1 if indices_is_broadcast else size)
  data_start.append(zero)
  data_sizes.append(mat.nse)
  indices_start.extend([zero, zero])
  indices_sizes.extend([mat.nse, mat.n_sparse])
  data_start.extend(start_dense)
  data_sizes.extend(size_dense)

  new_data = lax.dynamic_slice(mat.data, data_start, data_sizes)
  new_indices = lax.dynamic_slice(mat.indices, indices_start, indices_sizes)
  new_shape = slice_sizes

  if mat.n_sparse:
    # Sparse dimensions: clamp dynamic starts (matching lax.dynamic_slice
    # semantics), keep in-range entries re-indexed to the slice, and push the
    # rest to out-of-bound sentinels.
    starts = jnp.array(start_sparse, dtype=new_indices.dtype)
    sizes = jnp.array(size_sparse, dtype=new_indices.dtype)
    sparse_shape = jnp.array(mat.shape[mat.n_batch: mat.n_batch + mat.n_sparse], dtype=new_indices.dtype)
    starts = jnp.where(starts < 0, starts + sparse_shape, starts)
    starts = jnp.clip(starts, 0, sparse_shape - sizes)

    starts = jnp.expand_dims(starts, range(mat.n_batch + 1))
    sizes = jnp.expand_dims(sizes, range(mat.n_batch + 1))
    sparse_shape = jnp.expand_dims(sparse_shape, range(mat.n_batch + 1))

    keep = jnp.all((new_indices >= starts) & (new_indices < starts + sizes), -1, keepdims=True)
    new_indices = jnp.where(keep, new_indices - starts, sizes)

    keep_data = lax.expand_dims(keep[..., 0], range(mat.n_batch + 1, mat.n_batch + 1 + mat.n_dense))
    new_data = jnp.where(keep_data, new_data, 0)

    # The slice can hold at most as many nonzeros as it has elements.
    if mat.nse > math.prod(size_sparse):
      new_nse = math.prod(size_sparse)
      new_data, new_indices = _bcoo_sum_duplicates(
        new_data, new_indices, spinfo=SparseInfo(shape=new_shape), nse=new_nse)

  return BCOO((new_data, new_indices), shape=new_shape)
def _tuple_replace(tup, ind, val):
return tuple(val if i == ind else t for i, t in enumerate(tup))
def bcoo_reduce_sum(mat: BCOO, *, axes: Sequence[int]) -> BCOO:
  """Sum array elements over the given axes.

  Args:
    mat: A BCOO-format array.
    axes: A tuple, list, or ndarray containing the axes of ``mat`` over which
      the sum is performed.

  Returns:
    A BCOO-format array containing the result.
  """
  new_data, new_indices, new_shape = _bcoo_reduce_sum(
      mat.data, mat.indices, spinfo=mat._info, axes=axes)
  return BCOO((new_data, new_indices), shape=new_shape)
def _bcoo_reduce_sum(data: Array, indices: Array, *, spinfo: SparseInfo, axes: Sequence[int]) -> tuple[Array, Array, Shape]:
  """Implementation of bcoo_reduce_sum on raw (data, indices) buffers.

  Returns the new data, new indices, and the reduced output shape. Dense
  axes are summed within the data buffer; sparse axes are dropped from the
  indices (the sum becoming implicit in duplicated indices); batch axes are
  folded into the nse dimension.
  """
  shape = spinfo.shape
  assert all(0 <= a < len(shape) for a in axes)
  n_batch, n_sparse, _, nse = _validate_bcoo(data, indices, shape)
  axes = sorted(set(axes))

  # Sum over dense dimensions -> sum over data
  dense_axes = tuple(ax - n_sparse + 1 for ax in axes if ax >= n_batch + n_sparse)
  data = data.sum(dense_axes)
  if n_sparse:
    # zero-out data corresponding to invalid indices.
    fill_value = jnp.expand_dims(
        jnp.array(shape[n_batch: n_batch + n_sparse], dtype=indices.dtype),
        range(indices.ndim - 1))
    mask = jnp.all(indices < fill_value, -1)
    if data.ndim > mask.ndim:
      mask = lax.expand_dims(mask, tuple(range(mask.ndim, data.ndim)))
    data = jnp.where(mask, data, 0)

  # Sum over sparse dimensions -> drop index; sum is implicit
  sparse_idx = [i for i in range(n_sparse) if i + n_batch not in axes]
  if not sparse_idx:
    # All sparse dims summed away: keep a zero-width index column.
    indices = jnp.zeros(_tuple_replace(indices.shape, n_batch + 1, 0), indices.dtype)
  else:
    indices = indices[..., np.array(sparse_idx)]

  # Sum over batch dimensions -> reshape into nse
  batch_axes = {ax for ax in axes if ax < n_batch}

  # First handle broadcasted batch dimensions
  for ax in batch_axes:
    if data.shape[ax] == 1:
      if indices.shape[ax] == 1:
        # Both buffers broadcast along ax: the sum is a multiplication by
        # the axis size.
        data = data * shape[ax]
      else:
        data = lax.broadcast_in_dim(data, _tuple_replace(data.shape, ax, shape[ax]), tuple(range(data.ndim)))
    else:
      if indices.shape[ax] == 1:
        data = data.sum(ax)
    assert data.shape[ax] == indices.shape[ax]

  new_batch_dims = tuple(sorted(set(range(n_batch)) - batch_axes))
  new_batch_shape = tuple(data.shape[i] for i in new_batch_dims)
  new_nse = nse * math.prod([data.shape[i] for i in batch_axes])

  # Fold the summed batch axes into the nse dimension of both buffers.
  data = lax.reshape(data,
                     (*new_batch_shape, new_nse, *data.shape[n_batch + 1:]),
                     (*new_batch_dims, *batch_axes, *range(n_batch, data.ndim)))
  indices = lax.reshape(indices,
                        (*new_batch_shape, new_nse, *indices.shape[n_batch + 1:]),
                        (*new_batch_dims, *batch_axes, *range(n_batch, indices.ndim)))

  out_shape = tuple(shape[i] for i in range(len(shape)) if i not in axes)
  return data, indices, out_shape
def bcoo_multiply_sparse(lhs: BCOO, rhs: BCOO) -> BCOO:
  """An element-wise multiplication of two sparse arrays.

  Args:
    lhs: A BCOO-format array.
    rhs: A BCOO-format array.

  Returns:
    An BCOO-format array containing the result.
  """
  data, indices, shape = _bcoo_multiply_sparse(
      lhs.data, lhs.indices, rhs.data, rhs.indices,
      lhs_spinfo=lhs._info, rhs_spinfo=rhs._info)
  return BCOO((data, indices), shape=shape)
def _bcoo_multiply_sparse(lhs_data: Array, lhs_indices: Array, rhs_data: Array, rhs_indices: Array, *,
lhs_spinfo: SparseInfo, rhs_spinfo: SparseInfo) -> tuple[Array, Array, Shape]:
lhs_shape = lhs_spinfo.shape
rhs_shape = rhs_spinfo.shape
lhs = _validate_bcoo(lhs_data, lhs_indices, lhs_shape)
rhs = _validate_bcoo(rhs_data, rhs_indices, rhs_shape)
if len(lhs_shape) != len(rhs_shape):
# Similar requirement as lax.mul:
raise TypeError("bcoo_multiply_sparse: arrays must have same number of dimensions, "
f"got {lhs_shape}, {rhs_shape}")
if lhs.n_dense != rhs.n_dense:
raise NotImplementedError("bcoo_multiply_sparse: arrays with differing numbers of "
f"dense dimensions: {lhs}, {rhs}")
n_batch = min(lhs.n_batch, rhs.n_batch)
_mul = functools.partial(_bcoo_multiply_sparse_unbatched,
lhs_shape=lhs_shape[n_batch:],
rhs_shape=rhs_shape[n_batch:])
_mul = nfold_vmap(_mul, n_batch)
data, indices = _mul(lhs_data, lhs_indices, rhs_data, rhs_indices)
return data, indices, jnp.broadcast_shapes(lhs_shape, rhs_shape)
def _bcoo_multiply_sparse_unbatched(lhs_data, lhs_indices, rhs_data, rhs_indices, *, lhs_shape, rhs_shape):
lhs = _validate_bcoo(lhs_data, lhs_indices, lhs_shape)
rhs = _validate_bcoo(rhs_data, rhs_indices, rhs_shape)
assert (lhs.n_batch == 0) or (rhs.n_batch == 0) # Ensured at call site above
# TODO(jakevdp): this can be made more efficient by utilizing batch structure.
if lhs.n_batch:
lhs_data, lhs_indices = bcoo_update_layout(BCOO((lhs_data, lhs_indices), shape=lhs_shape), n_batch=0)._bufs
lhs = _validate_bcoo(lhs_data, lhs_indices, lhs_shape)
elif rhs.n_batch:
rhs_data, rhs_indices = bcoo_update_layout(BCOO((rhs_data, rhs_indices), shape=rhs_shape), n_batch=0)._bufs
rhs = _validate_bcoo(rhs_data, rhs_indices, rhs_shape)
dims = jnp.array([i for i, (s1, s2) in enumerate(safe_zip(lhs_shape[:lhs.n_sparse], rhs_shape[:rhs.n_sparse]))
if s1 != 1 and s2 != 1], dtype=int)
# TODO(jakevdp): this nse can be tightened to min(lhs.nse, rhs.nse) if there
# is no broadcasting and indices are unique.
nse = lhs.nse * rhs.nse
# TODO(jakevdp): this is pretty inefficient. Can we do this membership check
# without constructing the full (lhs.nse, rhs.nse) masking matrix?
mask = jnp.all(lhs_indices[:, None, dims] == rhs_indices[None, :, dims], -1)
i_lhs, i_rhs = jnp.nonzero(mask, size=nse, fill_value=(lhs.nse, rhs.nse))
data = (lhs_data.at[i_lhs].get(mode='fill', fill_value=0) *
rhs_data.at[i_rhs].get(mode='fill', fill_value=0))
indices = jnp.maximum(
lhs_indices.at[i_lhs].get(mode='fill', fill_value=max(lhs_shape, default=0)),
rhs_indices.at[i_rhs].get(mode='fill', fill_value=max(rhs_shape, default=0)))
return data, indices
def bcoo_multiply_dense(sp_mat: BCOO, v: Array) -> Array:
"""An element-wise multiplication between a sparse and a dense array.
Args:
lhs: A BCOO-format array.
rhs: An ndarray.
Returns:
An ndarray containing the result.
"""
return _bcoo_multiply_dense(sp_mat.data, sp_mat.indices, v, spinfo=sp_mat._info)
def _bcoo_multiply_dense(data: Array, indices: Array, v: Array, *, spinfo: SparseInfo) -> Array:
"""Broadcasted elementwise multiplication between a BCOO array and a dense array."""
# TODO(jakevdp): the logic here is similar to bcoo_extract... can we reuse that?
shape = spinfo.shape
if v.ndim == 0:
return lax.mul(data, v)
if shape == v.shape:
# Note: due to distributive property, no deduplication necessary!
return lax.mul(data, _bcoo_extract(indices, v))
if lax.broadcast_shapes(v.shape, shape) != shape:
raise NotImplementedError(
"multiplication between sparse and dense is only implemented for cases "
"where the output shape matches the sparse matrix shape. Got "
f"{shape=}, {v.shape=}")
v = lax.expand_dims(v, range(len(shape) - v.ndim))
props = _validate_bcoo(data, indices, shape)
@partial(nfold_vmap, N=props.n_batch)
def _mul(data, indices, v):
assert indices.shape[1] == v.ndim - props.n_dense
ind = tuple(indices[:, i] for i in range(indices.shape[1]))
ind = tuple(i if s != 1 else 0 for i, s in zip(ind, v.shape))
return data * v[ind]
return _mul(data, indices, v)
def bcoo_gather(operand: BCOO, start_indices: Array,
dimension_numbers: GatherDimensionNumbers,
slice_sizes: Shape, *,
unique_indices: bool = False,
indices_are_sorted: bool = False,
mode: str | GatherScatterMode | None = None,
fill_value = None) -> BCOO:
"""BCOO version of lax.gather."""
_validate_bcoo(operand.data, operand.indices, operand.shape)
# TODO(jakevdp) make use of unique_indices and indices_are_sorted?
if mode is None:
mode = GatherScatterMode.PROMISE_IN_BOUNDS
parsed_mode = GatherScatterMode.from_any(mode)
if parsed_mode != GatherScatterMode.PROMISE_IN_BOUNDS:
raise NotImplementedError(f"bcoo_gather: {mode=} not yet supported.")
kwds = dict(dimension_numbers=dimension_numbers, slice_sizes=slice_sizes,
unique_indices=unique_indices, indices_are_sorted=indices_are_sorted,
mode=mode, fill_value=fill_value)
# Abstract eval lax.gather to validate arguments & determine output shape.
static_argnames = ("dimension_numbers", "slice_sizes", "unique_indices",
"indices_are_sorted", "mode", "fill_value",)
out_aval = jax.jit(lax.gather, static_argnames=static_argnames).eval_shape(
jax.ShapeDtypeStruct(operand.shape, operand.dtype),
jax.ShapeDtypeStruct(start_indices.shape, start_indices.dtype),
**kwds)
offset_dims = dimension_numbers.offset_dims
collapsed_slice_dims = dimension_numbers.collapsed_slice_dims
start_index_map = dimension_numbers.start_index_map
# Expand start_indices & slice_sizes to full rank & use bcoo_dynamic_slice
full_start_indices: list[ArrayLike] = [_const(start_indices, 0)] * operand.ndim
in_axes: list[int | None] = [None for i in range(operand.ndim)]
full_slice_sizes = list(operand.shape)
for i, j in enumerate(start_index_map):
full_start_indices[j] = start_indices[..., i].ravel()
full_slice_sizes[j] = slice_sizes[j]
in_axes[j] = 0
def slice_func(indices):
slc = bcoo_dynamic_slice(operand, indices, slice_sizes=full_slice_sizes)
return bcoo_squeeze(slc, dimensions=collapsed_slice_dims)
result = vmap(slice_func, in_axes=(in_axes,))(full_start_indices)
result = bcoo_reshape(result,
new_sizes=(*start_indices.shape[:-1], *result.shape[1:]),
dimensions=tuple(range(result.ndim)))
# Use offset_dims to permute result dimensions
if result.shape:
batch_dims = tuple(dim for dim in range(len(out_aval.shape))
if dim not in offset_dims)
permutation = np.zeros(result.ndim, dtype=int)
permutation[np.array(batch_dims + offset_dims)] = np.arange(result.ndim)
if set(permutation[:len(batch_dims)]) != set(range(len(batch_dims))):
# TODO: jakevdp more granular approach here. Can we do this in a
# way that preserves the original batch dimensions?
result = bcoo_update_layout(result, n_batch=0)
result = bcoo_transpose(result, permutation=tuple(permutation))
return result.reshape(out_aval.shape).astype(out_aval.dtype)
def bcoo_conv_general_dilated(lhs, rhs, *, window_strides, padding,
lhs_dilation=None, rhs_dilation=None, dimension_numbers=None,
feature_group_count=1, batch_group_count=1, precision=None,
preferred_element_type=None,
out_sharding=None) -> BCOO:
# Validate and process parameters using lax.conv_general_dilated abstract evaluation.
func = functools.partial(
lax.conv_general_dilated,
window_strides=window_strides, padding=padding,
lhs_dilation=lhs_dilation, rhs_dilation=rhs_dilation, dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count, batch_group_count=batch_group_count,
precision=precision, preferred_element_type=preferred_element_type,
out_sharding=out_sharding)
jaxpr = jax.make_jaxpr(func)(jax.ShapeDtypeStruct(lhs.shape, lhs.dtype),
jax.ShapeDtypeStruct(rhs.shape, rhs.dtype))
assert isinstance(jaxpr, core.ClosedJaxpr) and len(jaxpr.eqns) == 1
params = jaxpr.eqns[0].params
if params['lhs_dilation'] != (1,) * (lhs.ndim - 2):
raise NotImplementedError("bcoo convolution with lhs_dilation.")
if params['rhs_dilation'] != (1,) * (rhs.ndim - 2):
raise NotImplementedError("bcoo convolution with lhs_dilation.")
if params['window_strides'] != (1,) * (lhs.ndim - 2):
raise NotImplementedError("bcoo convolution with non-unit window_strides.")
if params['batch_group_count'] != params['feature_group_count'] != 1:
raise NotImplementedError("bcoo convolution with non-unit group counts.")
if lhs.shape[:2] != rhs.shape[:2] != (1, 1):
raise NotImplementedError("bcoo convolution with leading dimensions other than (1, 1)")
index_dtype = (lhs.indices.dtype if hasattr(lhs, 'indices')
else rhs.indices.dtype if hasattr(rhs, 'indices')
else 'int32')
padding, = params['padding']
return _bcoo_conv_1d(_convert_to_1d_for_conv(lhs, index_dtype),
_convert_to_1d_for_conv(rhs, index_dtype),
padding=padding)
def _convert_to_1d_for_conv(mat, index_dtype):
if isinstance(mat, (jax.Array, np.ndarray)):
data = lax.squeeze(mat, (0, 1))
indices = lax.broadcasted_iota(index_dtype, (len(data), 1), 0)
elif isinstance(mat, BCOO):
mat = mat.update_layout(n_batch=2, n_dense=0)
data = lax.squeeze(mat.data, (0, 1))
indices = lax.squeeze(mat.indices, (0, 1))
# zero-out data at OOB indices, otherwise strange things happen.
data = jnp.where(lax.squeeze(indices, (1,)) < mat.shape[-1], data, 0)
else:
raise TypeError(f"bcoo_conv_general_dilated: input of type {type(mat)} not recognized.")
return BCOO((data, indices), shape=mat.shape[2:])
def _bcoo_conv_1d(lhs: BCOO, rhs: BCOO, padding: Sequence[int]) -> BCOO:
assert lhs.ndim == lhs.n_sparse == rhs.ndim == rhs.n_sparse == 1
assert lhs.dtype == rhs.dtype
padding = tuple(map(int, padding))
assert len(padding) == 2
new_data = (lhs.data[:, None] * rhs.data[None, :]).ravel()
offset = padding[0] - rhs.indices
new_indices = (lhs.indices[:, None] + offset[None, :]).ravel()
mask = (new_indices < 0)
new_indices = jnp.where(mask, 0, new_indices)
new_data = jnp.where(mask, 0, new_data)
dimsize = max(0, lhs.shape[0] + padding[0] + padding[1] - rhs.shape[0] + 1)
new_data = lax.expand_dims(new_data, (0, 1))
new_indices = lax.expand_dims(new_indices, (0, 1, 3))
return BCOO((new_data, new_indices), shape=(1, 1, dimsize))
@tree_util.register_pytree_node_class
| Buffer |
python | realpython__materials | solid-principles-python/shapes_lsp.py | {
"start": 854,
"end": 989
} | class ____(Shape):
def __init__(self, side):
self.side = side
def calculate_area(self):
return self.side**2
| Square |
python | getsentry__sentry | tests/sentry/api/endpoints/test_organization_invite_request_details.py | {
"start": 6660,
"end": 12267
} | class ____(InviteRequestBase, HybridCloudTestMixin):
method = "put"
@patch.object(OrganizationMember, "send_invite_email")
def test_owner_can_approve_invite_request(self, mock_invite_email: MagicMock) -> None:
self.login_as(user=self.user)
with outbox_runner():
resp = self.get_response(self.org.slug, self.invite_request.id, approve=1)
assert resp.status_code == 200
assert resp.data["inviteStatus"] == "approved"
assert mock_invite_email.call_count == 1
with assume_test_silo_mode(SiloMode.CONTROL):
audit_log_entry = AuditLogEntry.objects.get(
organization_id=self.org.id,
actor=self.user,
event=audit_log.get_event_id("MEMBER_INVITE"),
)
member = OrganizationMember.objects.get(
id=self.invite_request.id, invite_status=InviteStatus.APPROVED.value
)
assert audit_log_entry.data == member.get_audit_log_data()
def test_member_cannot_approve_invite_request(self) -> None:
self.invite_request.inviter_id = self.member.user_id
self.invite_request.save()
self.login_as(user=self.member)
resp = self.get_response(self.org.slug, self.invite_request.id, approve=1)
assert resp.status_code == 403
@patch.object(OrganizationMember, "send_invite_email")
def test_approve_requires_invite_members_feature(self, mock_invite_email: MagicMock) -> None:
self.login_as(user=self.user)
with Feature({"organizations:invite-members": False}):
resp = self.get_response(self.org.slug, self.invite_request.id, approve=1)
assert resp.status_code == 400
assert mock_invite_email.call_count == 0
@patch.object(OrganizationMember, "send_invite_email")
def test_cannot_approve_join_request_with_disabled_setting(
self, mock_invite_email: MagicMock
) -> None:
OrganizationOption.objects.create(
organization_id=self.org.id, key="sentry:join_requests", value=False
)
self.login_as(user=self.user)
resp = self.get_response(self.org.slug, self.request_to_join.id, approve=1)
assert resp.status_code == 400
assert mock_invite_email.call_count == 0
# can still approve invite request
resp = self.get_response(self.org.slug, self.invite_request.id, approve=1)
assert resp.status_code == 200
@patch.object(OrganizationMember, "send_invite_email")
def test_can_approve_join_request_with_enabled_setting(
self, mock_invite_email: MagicMock
) -> None:
OrganizationOption.objects.create(
organization_id=self.org.id, key="sentry:join_requests", value=True
)
self.login_as(user=self.user)
resp = self.get_response(self.org.slug, self.request_to_join.id, approve=1)
assert resp.status_code == 200
assert mock_invite_email.call_count == 1
@patch.object(OrganizationMember, "send_invite_email")
def test_email_not_sent_without_invites_enabled(self, mock_invite_email: MagicMock) -> None:
self.login_as(user=self.user)
with self.settings(SENTRY_ENABLE_INVITES=False):
resp = self.get_response(self.org.slug, self.invite_request.id, approve=1)
assert resp.status_code == 200
assert mock_invite_email.call_count == 0
assert OrganizationMember.objects.filter(
id=self.invite_request.id, invite_status=InviteStatus.APPROVED.value
).exists()
@patch.object(OrganizationMember, "send_invite_email")
def test_owner_can_update_and_approve(self, mock_invite_email: MagicMock) -> None:
self.login_as(user=self.user)
resp = self.get_response(
self.org.slug,
self.request_to_join.id,
approve=1,
role="manager",
teams=[self.team.slug],
)
assert resp.status_code == 200
assert resp.data["role"] == "manager"
assert resp.data["orgRole"] == "manager"
assert resp.data["inviteStatus"] == "approved"
assert OrganizationMember.objects.filter(
id=self.request_to_join.id, role="manager", invite_status=InviteStatus.APPROVED.value
).exists()
self.assert_org_member_mapping(org_member=self.request_to_join)
assert OrganizationMemberTeam.objects.filter(
organizationmember=self.request_to_join.id, team=self.team
).exists()
assert mock_invite_email.call_count == 1
@patch.object(OrganizationMember, "send_invite_email")
def test_manager_cannot_approve_owner(self, mock_invite_email: MagicMock) -> None:
self.login_as(user=self.manager)
resp = self.get_response(self.org.slug, self.invite_request.id, approve=1)
assert resp.status_code == 400
assert OrganizationMember.objects.filter(
id=self.invite_request.id,
role="owner",
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
).exists()
assert mock_invite_email.call_count == 0
def test_manager_can_approve_manager(self) -> None:
self.login_as(user=self.manager)
invite_request = self.create_member(
email="hello@example.com",
organization=self.org,
role="manager",
invite_status=InviteStatus.REQUESTED_TO_BE_INVITED.value,
)
resp = self.get_response(self.org.slug, invite_request.id, approve=1)
assert resp.status_code == 200
| OrganizationInviteRequestApproveTest |
python | spack__spack | var/spack/test_repos/spack_repo/builder_test/packages/custom_phases/package.py | {
"start": 541,
"end": 920
} | class ____(generic.GenericBuilder):
phases = ["configure", "install"]
def configure(self, pkg, spec, prefix):
os.environ["CONFIGURE_CALLED"] = "1"
os.environ["LAST_PHASE"] = "CONFIGURE"
def install(self, pkg, spec, prefix):
os.environ["INSTALL_CALLED"] = "1"
os.environ["LAST_PHASE"] = "INSTALL"
mkdirp(prefix.bin)
| GenericBuilder |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/dataplex.py | {
"start": 3541,
"end": 3808
} | class ____(BaseGoogleLink):
"""Helper class for constructing Dataplex Catalog EntryTypes link."""
name = "Dataplex Catalog EntryTypes"
key = "dataplex_catalog_entry_types_key"
format_str = DATAPLEX_CATALOG_ENTRY_TYPES_LINK
| DataplexCatalogEntryTypesLink |
python | pytorch__pytorch | test/dynamo/test_backward_higher_order_ops.py | {
"start": 9296,
"end": 12475
} | class ____(torch.nn.Module):
def forward(self, L_inputs_ : list, s69: "Sym(s21)", L_sizes_0_: "f32[0, s21]", L_hooks_1_keywords_fn_keywords_obj_counter: "Sym(s45)"):
l_inputs_ = L_inputs_
l_sizes_0_ = L_sizes_0_
l_hooks_1_keywords_fn_keywords_obj_counter = L_hooks_1_keywords_fn_keywords_obj_counter
getitem: "f32[s21]" = l_inputs_[0]
getitem_1: "f32[s21]" = l_inputs_[1]
getitem_2: "f32[s21]" = l_inputs_[2]; l_inputs_ = None
size: "Sym(s21)" = l_sizes_0_.size(1); l_sizes_0_ = None
validate_outputs = torch__dynamo_compiled_autograd_ops_validate_outputs([getitem], [((None, None, device(type='cpu'), 6, 0, None), [size], False, 6)]); getitem = size = None
getitem_9: "f32[s21]" = validate_outputs[0]; validate_outputs = None
call_aot_bwd_prologue = torch__dynamo_compiled_autograd_call_aot_bwd_prologue((), [], getitem_9); getitem_9 = None
aot0_tangents_1: "f32[s21]" = call_aot_bwd_prologue[0]; call_aot_bwd_prologue = None
accumulate_grad = torch__dynamo_compiled_autograd_ops_AccumulateGrad([aot0_tangents_1], getitem_1, None, False); getitem_1 = None
getitem_11: "f32[s21]" = accumulate_grad[0]; accumulate_grad = None
add: "Sym(s45 + 1)" = l_hooks_1_keywords_fn_keywords_obj_counter + 1; l_hooks_1_keywords_fn_keywords_obj_counter = None
result: "f32[s21]" = aot0_tangents_1 * aot0_tangents_1; aot0_tangents_1 = None
accumulate_grad_1 = torch__dynamo_compiled_autograd_ops_AccumulateGrad([result], getitem_2, None, False); result = getitem_2 = None
getitem_12: "f32[s21]" = accumulate_grad_1[0]; accumulate_grad_1 = None
return (getitem_11, getitem_12, add)
""",
)
out = fn(x, y)
out.backward(grad_out)
self.assertEqual(obj.counter, 2)
out = fn(x, y)
out.backward(grad_out)
self.assertEqual(obj.counter, 3)
graph = None
def test_invoke_in_pt2_compiled_autograd_graph_breaks(self):
def _graph_breaking_fn(x):
print("Boo!")
return _multiply(x)
def _graph_break_invoke(grad):
return trace_wrapped(grad, fn=_graph_breaking_fn)
def compiler_fn(gm):
return torch.compile(gm, backend="inductor", fullgraph=True, dynamic=True)
for backend in ["eager", "aot_eager", "inductor"]:
torch._dynamo.reset()
x = torch.tensor([0.5, 0.5], requires_grad=True)
y = torch.tensor([0.5, 0.5], requires_grad=True)
def fn(x, y):
x.register_hook(_graph_break_invoke)
return x + y
fn = torch.compile(fn, backend=backend, fullgraph=True)
out = fn(x, y)
grad_out = torch.tensor([2.0, 2.0])
with self.assertRaisesRegex(
torch._dynamo.exc.Unsupported,
"print",
):
with compiled_autograd._enable(compiler_fn):
out.backward(grad_out)
if __name__ == "__main__":
from torch._dynamo.test_case import run_tests
run_tests()
| GraphModule |
python | scipy__scipy | scipy/linalg/tests/test_fblas.py | {
"start": 3562,
"end": 4360
} | class ____:
''' Mixin class for scal testing '''
def test_simple(self):
x = arange(3., dtype=self.dtype)
real_x = x*3.
x = self.blas_func(3., x)
assert_array_equal(real_x, x)
def test_x_stride(self):
x = arange(6., dtype=self.dtype)
real_x = x.copy()
real_x[::2] = x[::2]*array(3., self.dtype)
x = self.blas_func(3., x, n=3, incx=2)
assert_array_equal(real_x, x)
def test_x_bad_size(self):
x = arange(12., dtype=self.dtype)
with pytest.raises(Exception, match='failed for 1st keyword'):
self.blas_func(2., x, n=4, incx=5)
try:
class TestSscal(BaseScal):
blas_func = fblas.sscal
dtype = float32
except AttributeError:
class TestSscal:
pass
| BaseScal |
python | pyparsing__pyparsing | examples/adventureEngine.py | {
"start": 2161,
"end": 3093
} | class ____:
items = {}
def __init__(self, desc):
self.desc = desc
self.isDeadly = False
self.isFragile = False
self.isBroken = False
self.isTakeable = True
self.isVisible = True
self.isOpenable = False
self.useAction = None
self.usableConditionTest = None
self.cantTakeMessage = "You can't take that!"
Item.items[desc] = self
def __str__(self):
return self.desc
def breakItem(self):
if not self.isBroken:
print("<Crash!>")
self.desc = "broken " + self.desc
self.isBroken = True
def isUsable(self, player, target):
if self.usableConditionTest:
return self.usableConditionTest(player, target)
else:
return False
def useItem(self, player, target):
if self.useAction:
self.useAction(player, self, target)
| Item |
python | django__django | tests/gis_tests/geoapp/models.py | {
"start": 580,
"end": 749
} | class ____(City):
county = models.CharField(max_length=30)
founded = models.DateTimeField(null=True)
class Meta:
app_label = "geoapp"
| PennsylvaniaCity |
python | redis__redis-py | redis/_parsers/base.py | {
"start": 5648,
"end": 7913
} | class ____:
"""Protocol defining maintenance push notification parsing functionality"""
@staticmethod
def parse_maintenance_start_msg(response, notification_type):
# Expected message format is: <notification_type> <seq_number> <time>
id = response[1]
ttl = response[2]
return notification_type(id, ttl)
@staticmethod
def parse_maintenance_completed_msg(response, notification_type):
# Expected message format is: <notification_type> <seq_number>
id = response[1]
return notification_type(id)
@staticmethod
def parse_moving_msg(response):
# Expected message format is: MOVING <seq_number> <time> <endpoint>
id = response[1]
ttl = response[2]
if response[3] is None:
host, port = None, None
else:
value = response[3]
if isinstance(value, bytes):
value = value.decode()
host, port = value.split(":")
port = int(port) if port is not None else None
return NodeMovingNotification(id, host, port, ttl)
_INVALIDATION_MESSAGE = "invalidate"
_MOVING_MESSAGE = "MOVING"
_MIGRATING_MESSAGE = "MIGRATING"
_MIGRATED_MESSAGE = "MIGRATED"
_FAILING_OVER_MESSAGE = "FAILING_OVER"
_FAILED_OVER_MESSAGE = "FAILED_OVER"
_MAINTENANCE_MESSAGES = (
_MIGRATING_MESSAGE,
_MIGRATED_MESSAGE,
_FAILING_OVER_MESSAGE,
_FAILED_OVER_MESSAGE,
)
MSG_TYPE_TO_MAINT_NOTIFICATION_PARSER_MAPPING: dict[
str, tuple[type[MaintenanceNotification], Callable]
] = {
_MIGRATING_MESSAGE: (
NodeMigratingNotification,
MaintenanceNotificationsParser.parse_maintenance_start_msg,
),
_MIGRATED_MESSAGE: (
NodeMigratedNotification,
MaintenanceNotificationsParser.parse_maintenance_completed_msg,
),
_FAILING_OVER_MESSAGE: (
NodeFailingOverNotification,
MaintenanceNotificationsParser.parse_maintenance_start_msg,
),
_FAILED_OVER_MESSAGE: (
NodeFailedOverNotification,
MaintenanceNotificationsParser.parse_maintenance_completed_msg,
),
_MOVING_MESSAGE: (
NodeMovingNotification,
MaintenanceNotificationsParser.parse_moving_msg,
),
}
| MaintenanceNotificationsParser |
python | huggingface__transformers | src/transformers/models/yolos/modeling_yolos.py | {
"start": 19083,
"end": 20707
} | class ____(YolosPreTrainedModel):
def __init__(self, config: YolosConfig, add_pooling_layer: bool = True):
r"""
add_pooling_layer (bool, *optional*, defaults to `True`):
Whether to add a pooling layer
"""
super().__init__(config)
self.config = config
self.embeddings = YolosEmbeddings(config)
self.encoder = YolosEncoder(config)
self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.pooler = YolosPooler(config) if add_pooling_layer else None
# Initialize weights and apply final processing
self.post_init()
def get_input_embeddings(self) -> YolosPatchEmbeddings:
return self.embeddings.patch_embeddings
@check_model_inputs(tie_last_hidden_states=False)
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> BaseModelOutputWithPooling:
if pixel_values is None:
raise ValueError("You have to specify pixel_values")
embedding_output = self.embeddings(pixel_values)
height, width = pixel_values.shape[-2:]
encoder_outputs: BaseModelOutput = self.encoder(embedding_output, height=height, width=width)
sequence_output = encoder_outputs.last_hidden_state
sequence_output = self.layernorm(sequence_output)
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
return BaseModelOutputWithPooling(last_hidden_state=sequence_output, pooler_output=pooled_output)
| YolosModel |
python | plotly__plotly.py | tests/test_core/test_graph_objs/test_graph_objs.py | {
"start": 4545,
"end": 5448
} | class ____(TestCase):
def test_warn_on_deprecated_mapbox_traces(self):
# This test will fail if any of the following traces
# fails to emit a DeprecationWarning
for trace_constructor in [
go.Scattermapbox,
go.Densitymapbox,
go.Choroplethmapbox,
]:
with pytest.warns(DeprecationWarning):
_ = go.Figure([trace_constructor()])
def test_no_warn_on_non_deprecated_traces(self):
# This test will fail if any of the following traces emits a DeprecationWarning
for trace_constructor in [
go.Scatter,
go.Bar,
go.Scattermap,
go.Densitymap,
go.Choroplethmap,
]:
with warnings.catch_warnings():
warnings.simplefilter("error")
_ = go.Figure([trace_constructor()])
| TestDeprecationWarnings |
python | pytorch__pytorch | torch/_inductor/pattern_matcher.py | {
"start": 21098,
"end": 27690
} | class ____(_TargetExpr):
"""
Base class for filtering match by node.{target,args,kwargs}
"""
def __init__(
self,
fns: Union[torch.fx.node.Target, str, Sequence[Any]],
*args: Any,
_users: Union[int, Multiple] = 1,
**kwargs: Any,
) -> None:
super().__init__(fns, _users)
self.args = tuple(args)
self.kwargs = dict(kwargs)
if any(
isinstance(x, (dict, list, tuple))
for x in itertools.chain(args, kwargs.values())
):
self.flatten = self.pytree_flatten
else:
self.flatten = self.simple_flatten
self.flat_args_kwargs = self.flatten(self.args, self.kwargs)
@staticmethod
def simple_flatten(
args: Sequence[Any], kwargs: Mapping[Any, Any]
) -> tuple[Sequence[Any], Union[_SimpleSpec, pytree.TreeSpec]]:
values = (*args, *kwargs.values())
spec = (len(args), *kwargs.keys())
return values, spec
@staticmethod
def pytree_flatten(
args: Sequence[Any], kwargs: Mapping[Any, Any]
) -> tuple[Sequence[Any], Union[_SimpleSpec, pytree.TreeSpec]]:
type_mapping: dict[type, type] = {
immutable_list: tuple,
list: tuple,
immutable_dict: dict,
}
def convert_type(x: Any) -> Any:
cls = type(x)
convert_fn = type_mapping.get(cls)
if convert_fn is not None:
return pytree.tree_map(
convert_type,
convert_fn(x),
is_leaf=lambda x: type(x) in type_mapping,
)
return x
normalized_args_tree = pytree.tree_map(
convert_type,
(args, kwargs),
is_leaf=lambda x: type(x) in type_mapping,
)
flat, spec = pytree.tree_flatten(normalized_args_tree)
return flat, spec
def __repr__(self) -> str:
args = [
self.fns_repr(),
*map(repr, self.args),
*[f"{k}={v}" for k, v in self.kwargs.items()],
]
if self.users is MULTIPLE:
args.append("_users=MULTIPLE")
elif self.users != 1:
args.append(f"_users={self.users}")
return f"{self.__class__.__name__}({', '.join(args)})"
def pretty_print(self, pp: PatternPrettyPrinter) -> str:
args = [
self.fns_repr(),
*(pp.pretty_print(x) for x in self.args),
*[f"{k}={pp.pretty_print(v)}" for k, v in self.kwargs.items()],
]
if self.users is MULTIPLE:
args.append("_users=MULTIPLE")
elif self.users != 1:
args.append(f"_users={self.users}")
joiner_str = ", "
return f"{self.__class__.__name__}({joiner_str.join(args)})"
def _match(self, node: torch.fx.Node, ctx: MatchContext) -> MatchResult:
if not self._match_fns(node) or len(node.args) != len(self.args):
return FailedMatch("function_mismatch: node={}, pattern={}", node, self)
if not self._match_users(node, ctx):
return FailedMatch("multiple_users {}", self)
_args = node.args
_kwargs = node.kwargs
if len(_kwargs) < len(self.kwargs):
from torch.fx.operator_schemas import normalize_function
assert callable(node.target)
normalized_args_and_kwargs = normalize_function(
node.target, node.args, node.kwargs
)
if normalized_args_and_kwargs is None:
return FailedMatch("function_mismatch: node={}, pattern={}", node, self)
else:
_args, _kwargs = normalized_args_and_kwargs
if len(_args) == len(self.args) and len(_kwargs) >= len(self.kwargs):
_kwargs = {i: _kwargs[i] for i in _kwargs if i in self.kwargs}
else:
return FailedMatch(
"function_mismatch: node={}, pattern={}", node, self
)
else:
_kwargs = {i: _kwargs[i] for i in _kwargs if i in self.kwargs}
node_items, node_spec = self.flatten(_args, _kwargs)
self_items, self_spec = self.flat_args_kwargs
if node_spec != self_spec:
return FailedMatch("args_structure {} {}", node_spec, self_spec)
assert len(node_items) == len(self_items)
m = Match(ctx, self)
for pattern, child_node in zip(self_items, node_items):
if isinstance(pattern, PatternExpr):
child_match = ctx.match(pattern, child_node)
if not is_match(child_match):
return child_match
m.extend(child_match)
elif isinstance(child_node, torch.fx.Node) or child_node != pattern:
return FailedMatch(
"constant_args: {} {!r}!={pattern!r}", node, child_node
)
m.nodes.append(node)
m.targets[self] = node.target
return m
def find_anchor_nodes(
self, ctx: MatchContext, searched: OrderedSet[torch.fx.Node]
) -> Generator[Optional[torch.fx.Node], None, None]:
"""
This is used when we are matching a pattern with multiple outputs.
There is a partial match (stored in ctx) and we want to walk
this pattern to find a connection to an already-matched node.
Yields candidate nodes that `self._match` might like.
"""
if self in ctx.pattern_to_node:
yield ctx.pattern_to_node[self]
return
for pattern in self.flat_args_kwargs[0]:
if isinstance(pattern, PatternExpr):
for other_node in pattern.find_anchor_nodes(ctx, searched):
if not isinstance(other_node, torch.fx.Node):
continue
for node in other_node.users:
if node not in searched:
if self._match_fns(node):
yield node
searched.add(node)
def pattern_eq(self, other: Any) -> bool:
other = typing.cast(Self, other) # super makes sure this is true
return (
super().pattern_eq(other)
and self.flat_args_kwargs[1] == other.flat_args_kwargs[1]
and all(
a.pattern_eq(b) if isinstance(a, PatternExpr) else a == b
for a, b in zip(self.flat_args_kwargs[0], other.flat_args_kwargs[0])
)
)
| _TargetArgsExpr |
python | huggingface__transformers | src/transformers/models/jamba/modular_jamba.py | {
"start": 29098,
"end": 33065
} | class ____(JambaPreTrainedModel):
def __init__(self, config: JambaConfig):
super().__init__(config)
self.padding_idx = config.pad_token_id
self.vocab_size = config.vocab_size
self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, self.padding_idx)
decoder_layers = []
for i in range(config.num_hidden_layers):
layer_class = ALL_DECODER_LAYER_TYPES[config.layers_block_type[i]]
decoder_layers.append(layer_class(config, layer_idx=i))
self.layers = nn.ModuleList(decoder_layers)
self.final_layernorm = JambaRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.gradient_checkpointing = False
# Initialize weights and apply final processing
self.post_init()
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[HybridMambaAttentionDynamicCache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> MoeModelOutputWithPast:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.embed_tokens(input_ids)
if use_cache and past_key_values is None:
past_key_values = HybridMambaAttentionDynamicCache(
config=self.config,
batch_size=inputs_embeds.shape[0],
dtype=inputs_embeds.dtype,
device=inputs_embeds.device,
)
if cache_position is None:
past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
cache_position = torch.arange(
past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
)
if position_ids is None:
position_ids = cache_position.unsqueeze(0)
causal_mask = create_causal_mask(
config=self.config,
input_embeds=inputs_embeds,
attention_mask=attention_mask,
cache_position=cache_position,
past_key_values=past_key_values,
position_ids=position_ids,
)
mamba_mask = self._update_mamba_mask(attention_mask, cache_position)
hidden_states = inputs_embeds
for decoder_layer in self.layers:
layer_mask = mamba_mask if isinstance(decoder_layer, JambaMambaDecoderLayer) else causal_mask
hidden_states = decoder_layer(
hidden_states,
attention_mask=layer_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = self.final_layernorm(hidden_states)
if past_key_values and not past_key_values.has_previous_state:
past_key_values.has_previous_state = True
return MoeModelOutputWithPast(
last_hidden_state=hidden_states,
past_key_values=past_key_values,
)
def _update_mamba_mask(self, attention_mask, cache_position):
"""
No need for zeroing states when
1. Cached forward
2. Attending to all inputs
"""
mamba_mask = attention_mask
if (cache_position is not None and cache_position[0] > 0) or (
attention_mask is not None and torch.all(attention_mask == 1)
):
mamba_mask = None
return mamba_mask
| JambaModel |
python | dagster-io__dagster | python_modules/libraries/dagster-looker/dagster_looker/api/dagster_looker_api_translator.py | {
"start": 589,
"end": 2558
} | class ____:
"""A record representing all content in a Looker instance."""
explores_by_id: dict[str, LookmlModelExplore]
dashboards_by_id: dict[str, Dashboard]
users_by_id: dict[str, User]
def to_state(self, sdk: Looker40SDK) -> Mapping[str, Any]:
return {
"dashboards_by_id": {
dashboard_id: (sdk.serialize(api_model=dashboard_data).decode()) # type: ignore
for dashboard_id, dashboard_data in self.dashboards_by_id.items()
},
"explores_by_id": {
explore_id: (sdk.serialize(api_model=explore_data).decode()) # type: ignore
for explore_id, explore_data in self.explores_by_id.items()
},
"users_by_id": {
user_id: (sdk.serialize(api_model=user_data).decode()) # type: ignore
for user_id, user_data in self.users_by_id.items()
},
}
@staticmethod
def from_state(sdk: Looker40SDK, state: Mapping[str, Any]) -> "LookerInstanceData":
explores_by_id = {
explore_id: (
sdk.deserialize(data=serialized_lookml_explore, structure=LookmlModelExplore) # type: ignore
)
for explore_id, serialized_lookml_explore in state["explores_by_id"].items()
}
dashboards_by_id = {
dashboard_id: (sdk.deserialize(data=serialized_looker_dashboard, structure=Dashboard)) # type: ignore
for dashboard_id, serialized_looker_dashboard in state["dashboards_by_id"].items()
}
users_by_id = {
user_id: (sdk.deserialize(data=serialized_user, structure=User)) # type: ignore
for user_id, serialized_user in state["users_by_id"].items()
}
return LookerInstanceData(
explores_by_id=explores_by_id,
dashboards_by_id=dashboards_by_id,
users_by_id=users_by_id,
)
@record
| LookerInstanceData |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/components.py | {
"start": 33729,
"end": 40964
} | class ____(Decoder):
"""
JSON streaming decoder optimized for Google Ads API responses.
Uses a fast JSON parse when the full payload fits within max_direct_decode_bytes;
otherwise streams records incrementally from the `results` array.
Ensures truncated or structurally invalid JSON is detected and reported.
"""
chunk_size: int = 5 * 1024 * 1024 # 5 MB
# Fast-path threshold: if whole body < 20 MB, decode with json.loads
max_direct_decode_bytes: int = 20 * 1024 * 1024 # 20 MB
def __post_init__(self):
self.parser = JsonParser()
def is_stream_response(self) -> bool:
return True
def decode(self, response: requests.Response) -> Generator[MutableMapping[str, Any], None, None]:
data, complete = self._buffer_up_to_limit(response)
if complete:
yield from self.parser.parse(io.BytesIO(data))
return
records_batch: List[Dict[str, Any]] = []
for record in self._parse_records_from_stream(data):
records_batch.append(record)
if len(records_batch) >= 100:
yield {"results": records_batch}
records_batch = []
if records_batch:
yield {"results": records_batch}
def _buffer_up_to_limit(self, response: requests.Response) -> Tuple[Union[bytes, Iterable[bytes]], bool]:
buf = bytearray()
response_stream = response.iter_content(chunk_size=self.chunk_size)
while chunk := next(response_stream, None):
buf.extend(chunk)
if len(buf) >= self.max_direct_decode_bytes:
return (self._chain_prefix_and_stream(bytes(buf), response_stream), False)
return (bytes(buf), True)
@staticmethod
def _chain_prefix_and_stream(prefix: bytes, rest_stream: Iterable[bytes]) -> Iterable[bytes]:
yield prefix
yield from rest_stream
def _parse_records_from_stream(self, byte_iter: Iterable[bytes], encoding: str = "utf-8") -> Generator[Dict[str, Any], None, None]:
string_state = StringParseState()
results_state = ResultsArrayState()
record_state = RecordParseState()
top_level_state = TopLevelObjectState()
for chunk in byte_iter:
for char in chunk.decode(encoding, errors="replace"):
self._append_to_current_record_if_any(char, record_state)
if self._update_string_state(char, string_state):
continue
# Track outer braces only outside results array
if not results_state.inside_results_array:
if char == "{":
top_level_state.depth += 1
elif char == "}":
top_level_state.depth = max(0, top_level_state.depth - 1)
if not results_state.inside_results_array:
self._detect_results_array(char, string_state, results_state)
continue
record = self._parse_record_structure(char, results_state, record_state)
if record is not None:
yield record
# EOF validation
if (
string_state.inside_string
or record_state.inside_record
or record_state.record_nesting_depth != 0
or results_state.inside_results_array
or results_state.array_nesting_depth != 0
or top_level_state.depth != 0
):
raise AirbyteTracedException(
message="Response JSON stream ended prematurely and is incomplete.",
internal_message=(
"Detected truncated JSON stream: one or more structural elements were not fully closed before the response ended."
),
failure_type=FailureType.system_error,
)
def _update_string_state(self, char: str, state: StringParseState) -> bool:
"""Return True if char was handled as part of string parsing."""
if state.inside_string:
if state.escape_next_character:
state.escape_next_character = False
return True
if char == "\\":
state.escape_next_character = True
return True
if char == '"':
state.inside_string = False
state.last_parsed_key = "".join(state.collected_string_chars)
state.collected_string_chars.clear()
return True
state.collected_string_chars.append(char)
return True
if char == '"':
state.inside_string = True
state.collected_string_chars.clear()
return True
return False
def _detect_results_array(self, char: str, string_state: StringParseState, results_state: ResultsArrayState) -> None:
if char == ":" and string_state.last_parsed_key == "results":
results_state.expecting_results_array_start = True
elif char == "[" and results_state.expecting_results_array_start:
results_state.inside_results_array = True
results_state.array_nesting_depth = 1
results_state.expecting_results_array_start = False
def _parse_record_structure(
self, char: str, results_state: ResultsArrayState, record_state: RecordParseState
) -> Optional[Dict[str, Any]]:
if char == "{":
if record_state.inside_record:
record_state.record_nesting_depth += 1
else:
self._start_record(record_state)
return None
if char == "}":
if record_state.inside_record:
record_state.record_nesting_depth -= 1
if record_state.record_nesting_depth == 0:
return self._finish_record(record_state)
return None
if char == "[":
if record_state.inside_record:
record_state.record_nesting_depth += 1
else:
results_state.array_nesting_depth += 1
return None
if char == "]":
if record_state.inside_record:
record_state.record_nesting_depth -= 1
else:
results_state.array_nesting_depth -= 1
if results_state.array_nesting_depth == 0:
results_state.inside_results_array = False
return None
@staticmethod
def _append_to_current_record_if_any(char: str, record_state: RecordParseState):
if record_state.inside_record:
record_state.record_text_buffer.append(char)
@staticmethod
def _start_record(record_state: RecordParseState):
record_state.inside_record = True
record_state.record_text_buffer = ["{"]
record_state.record_nesting_depth = 1
@staticmethod
def _finish_record(record_state: RecordParseState) -> Optional[Dict[str, Any]]:
text = "".join(record_state.record_text_buffer).strip()
record_state.inside_record = False
record_state.record_text_buffer.clear()
record_state.record_nesting_depth = 0
return json.loads(text) if text else None
| GoogleAdsStreamingDecoder |
python | getsentry__sentry | tests/sentry/deletions/test_organization.py | {
"start": 2009,
"end": 18946
} | class ____(TransactionTestCase, HybridCloudTestMixin, BaseWorkflowTest):
def test_simple(self) -> None:
org_owner = self.create_user()
org = self.create_organization(name="test", owner=org_owner)
with assume_test_silo_mode(SiloMode.CONTROL):
org_mapping = OrganizationMapping.objects.get(organization_id=org.id)
org_member = OrganizationMember.objects.get(organization_id=org.id, user_id=org_owner.id)
self.assert_org_member_mapping(org_member=org_member)
org_owner2 = self.create_user()
org2 = self.create_organization(name="test2", owner=org_owner2)
with assume_test_silo_mode(SiloMode.CONTROL):
org_mapping2 = OrganizationMapping.objects.get(organization_id=org2.id)
self.create_team(organization=org, name="test1")
self.create_team(organization=org, name="test2")
release = Release.objects.create(version="a" * 32, organization_id=org.id)
repo = Repository.objects.create(organization_id=org.id, name=org.name, provider="dummy")
commit_author = CommitAuthor.objects.create(
organization_id=org.id, name="foo", email="foo@example.com"
)
commit = Commit.objects.create(
repository_id=repo.id, organization_id=org.id, author=commit_author, key="a" * 40
)
pull_request = PullRequest.objects.create(
repository_id=repo.id, organization_id=org.id, author=commit_author, key="b" * 40
)
ReleaseCommit.objects.create(
organization_id=org.id, release=release, commit=commit, order=0
)
env = Environment.objects.create(organization_id=org.id, name="foo")
release_env = ReleaseEnvironment.objects.create(
organization_id=org.id, project_id=4, release_id=release.id, environment_id=env.id
)
external_issue = ExternalIssue.objects.create(
organization_id=org.id, integration_id=5, key="12345"
)
dashboard = Dashboard.objects.create(
organization_id=org.id, title="The Dashboard", created_by_id=self.user.id
)
widget_1 = DashboardWidget.objects.create(
dashboard=dashboard,
order=1,
title="Widget 1",
display_type=0,
widget_type=DashboardWidgetTypes.DISCOVER,
)
widget_2 = DashboardWidget.objects.create(
dashboard=dashboard,
order=2,
title="Widget 2",
display_type=5,
widget_type=DashboardWidgetTypes.DISCOVER,
)
widget_1_data = DashboardWidgetQuery.objects.create(
widget=widget_1, order=1, name="Incoming data"
)
widget_2_data_1 = DashboardWidgetQuery.objects.create(
widget=widget_2, order=1, name="Incoming data"
)
widget_2_data_2 = DashboardWidgetQuery.objects.create(
widget=widget_2, order=2, name="Outgoing data"
)
org.update(status=OrganizationStatus.PENDING_DELETION)
self.ScheduledDeletion.schedule(instance=org, days=0)
with self.tasks(), outbox_runner():
run_scheduled_deletions()
assert Organization.objects.filter(id=org2.id).exists()
with assume_test_silo_mode(SiloMode.CONTROL):
assert OrganizationMapping.objects.filter(id=org_mapping2.id).exists()
assert OrganizationMemberMapping.objects.filter(organization_id=org2.id).exists()
assert not Organization.objects.filter(id=org.id).exists()
with assume_test_silo_mode(SiloMode.CONTROL):
assert not OrganizationMapping.objects.filter(id=org_mapping.id).exists()
assert not OrganizationMemberMapping.objects.filter(organization_id=org.id).exists()
assert not Environment.objects.filter(id=env.id).exists()
assert not ReleaseEnvironment.objects.filter(id=release_env.id).exists()
assert not Repository.objects.filter(id=repo.id).exists()
assert not ReleaseCommit.objects.filter(organization_id=org.id).exists()
assert not Release.objects.filter(organization_id=org.id).exists()
assert not CommitAuthor.objects.filter(id=commit_author.id).exists()
assert not Commit.objects.filter(id=commit.id).exists()
assert not PullRequest.objects.filter(id=pull_request.id).exists()
assert not ExternalIssue.objects.filter(id=external_issue.id).exists()
assert not Dashboard.objects.filter(id=dashboard.id).exists()
assert not DashboardWidget.objects.filter(id__in=[widget_1.id, widget_2.id]).exists()
assert not DashboardWidgetQuery.objects.filter(
id__in=[widget_1_data.id, widget_2_data_1.id, widget_2_data_2.id]
).exists()
def test_no_delete_visible(self) -> None:
org = self.create_organization(name="test")
release = Release.objects.create(version="a" * 32, organization_id=org.id)
deletion = self.ScheduledDeletion.schedule(instance=org, days=0)
assert org.status == OrganizationStatus.ACTIVE
with self.tasks():
run_scheduled_deletions()
assert Organization.objects.filter(id=org.id).exists()
assert Release.objects.filter(id=release.id).exists()
assert not self.ScheduledDeletion.objects.filter(id=deletion.id).exists()
def test_large_child_relation_deletion(self) -> None:
org = self.create_organization(name="test")
self.create_team(organization=org, name="test1")
repo = Repository.objects.create(organization_id=org.id, name=org.name, provider="dummy")
author_bob = CommitAuthor.objects.create(
organization_id=org.id, name="bob", email="bob@example.com"
)
author_sally = CommitAuthor.objects.create(
organization_id=org.id, name="sally", email="sally@example.com"
)
# Make >100 commits so we can ensure that all commits are removed before authors are.
for i in range(0, 150):
author = author_bob if i % 2 == 0 else author_sally
Commit.objects.create(
repository_id=repo.id, organization_id=org.id, author=author, key=uuid4().hex
)
org.update(status=OrganizationStatus.PENDING_DELETION)
self.ScheduledDeletion.schedule(instance=org, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Organization.objects.filter(id=org.id).exists()
assert not Commit.objects.filter(organization_id=org.id).exists()
assert not CommitAuthor.objects.filter(organization_id=org.id).exists()
def test_group_first_release(self) -> None:
org = self.create_organization(name="test")
project = self.create_project(organization=org)
release = self.create_release(project=project, user=self.user, version="1.2.3")
group = Group.objects.create(project=project, first_release=release)
# Simulate the project being deleted but the deletion crashing.
project.delete()
org.update(status=OrganizationStatus.PENDING_DELETION)
self.ScheduledDeletion.schedule(instance=org, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Group.objects.filter(id=group.id).exists()
assert not Organization.objects.filter(id=org.id).exists()
def test_orphan_commits(self) -> None:
# We have had a few orgs get into a state where they have commits
# but no repositories. Ensure that we can proceed.
org = self.create_organization(name="test")
repo = Repository.objects.create(organization_id=org.id, name=org.name, provider="dummy")
author = CommitAuthor.objects.create(
organization_id=org.id, name="foo", email="foo@example.com"
)
commit = Commit.objects.create(
repository_id=repo.id, organization_id=org.id, author=author, key="a" * 40
)
# Simulate the project being deleted but the deletion crashing.
repo.delete()
org.update(status=OrganizationStatus.PENDING_DELETION)
self.ScheduledDeletion.schedule(instance=org, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Organization.objects.filter(id=org.id).exists()
assert not Commit.objects.filter(id=commit.id).exists()
assert not CommitAuthor.objects.filter(id=author.id).exists()
def test_alert_rule(self) -> None:
org = self.create_organization(name="test", owner=self.user)
self.create_team(organization=org, name="test1")
env = Environment.objects.create(organization_id=org.id, name="foo")
snuba_query = SnubaQuery.objects.create(
type=SnubaQuery.Type.ERROR.value,
dataset="events",
aggregate="count()",
time_window=60,
resolution=60,
environment=env,
)
alert_rule = AlertRule.objects.create(
organization=org,
name="rule with environment",
threshold_period=1,
snuba_query=snuba_query,
# This status is hidden from the default finder.
status=AlertRuleStatus.SNAPSHOT.value,
)
org.update(status=OrganizationStatus.PENDING_DELETION)
self.ScheduledDeletion.schedule(instance=org, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Organization.objects.filter(id=org.id).exists()
assert not Environment.objects.filter(id=env.id).exists()
assert not AlertRule.objects.filter(id=alert_rule.id).exists()
assert not SnubaQuery.objects.filter(id=snuba_query.id).exists()
def test_discover_query_cleanup(self) -> None:
org = self.create_organization(name="test", owner=self.user)
self.create_team(organization=org, name="test1")
other = self.create_organization(name="other", owner=self.user)
other_project = self.create_project(organization=other, name="other project")
query = DiscoverSavedQuery.objects.create(organization=org, name="test query", query={})
# Make a cross-org project reference. This can happen when an account was
# merged in the past and we didn't update the discover queries.
query_project = DiscoverSavedQueryProject.objects.create(
discover_saved_query=query, project=other_project
)
org.update(status=OrganizationStatus.PENDING_DELETION)
self.ScheduledDeletion.schedule(instance=org, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Organization.objects.filter(id=org.id).exists()
assert not DiscoverSavedQuery.objects.filter(id=query.id).exists()
assert not DiscoverSavedQueryProject.objects.filter(id=query_project.id).exists()
def test_delete_org_simple(self) -> None:
name_filter = {"name": "test_delete_org_simple"}
org = self.create_organization(**name_filter)
assert Organization.objects.filter(**name_filter).count() == 1
assert self.ScheduledDeletion.objects.count() == 0
org.update(status=OrganizationStatus.PENDING_DELETION)
self.ScheduledDeletion.schedule(instance=org, days=0)
with self.tasks():
run_scheduled_deletions()
assert Organization.objects.filter(**name_filter).count() == 0
def test_delete_org_after_project_transfer(self) -> None:
from_org = self.create_organization(name="from_org")
from_user = self.create_user()
self.create_member(user=from_user, role="member", organization=from_org)
from_team = self.create_team(organization=from_org)
to_org = self.create_organization(name="to_org")
self.create_team(organization=to_org)
to_user = self.create_user()
self.create_member(user=to_user, role="member", organization=to_org)
project = self.create_project(teams=[from_team])
environment = Environment.get_or_create(project, "production")
staging_environment = Environment.get_or_create(project, "staging")
project_rule = self.create_project_rule(project=project)
project_rule.update(environment_id=staging_environment.id)
alert_rule = self.create_alert_rule(
organization=from_org,
projects=[project],
owner=Actor.from_identifier(f"team:{from_team.id}"),
environment=environment,
)
project.transfer_to(organization=to_org)
assert project.organization.id is to_org.id
alert_rule.refresh_from_db()
assert AlertRule.objects.fetch_for_project(project).count() == 1
assert alert_rule.snuba_query.environment is not None
assert alert_rule.snuba_query.environment.id != environment.id
assert (
alert_rule.snuba_query.environment.name
== Environment.objects.filter(organization_id=to_org.id, name=environment.name)
.get()
.name
)
assert EnvironmentProject.objects.filter(project=project).count() == 2
assert (
Environment.objects.filter(organization_id=from_org.id, name=environment.name).get().id
== environment.id
)
assert (
Environment.objects.filter(organization_id=to_org.id, name=environment.name).get().id
!= environment.id
)
assert (
Environment.objects.filter(organization_id=from_org.id, name=staging_environment.name)
.get()
.id
== project_rule.environment_id
)
assert (
Environment.objects.filter(organization_id=to_org.id, name=staging_environment.name)
.get()
.id
!= project_rule.environment_id
)
from_org.update(status=OrganizationStatus.PENDING_DELETION)
self.ScheduledDeletion.schedule(instance=from_org, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Organization.objects.filter(name=from_org.name).exists()
assert Organization.objects.filter(name=to_org.name).exists()
assert not Environment.objects.filter(organization_id=from_org.id).exists()
assert Environment.objects.filter(organization_id=to_org.id).count() == 2
assert EnvironmentProject.objects.filter(project_id=project.id).count() == 2
assert AlertRule.objects.filter(id=alert_rule.id).exists()
assert (
SnubaQuery.objects.filter(id=alert_rule.snuba_query.id)
.exclude(environment=None)
.exists()
)
def test_workflow_engine_cleanup(self) -> None:
org = self.create_organization(name="test")
project = self.create_project(organization=org)
dcg = self.create_data_condition_group(organization=org)
dc = self.create_data_condition(condition_group=dcg)
detector = self.create_detector(
project_id=project.id,
name="Test Detector",
type=MetricIssue.slug,
workflow_condition_group=dcg,
)
workflow = self.create_workflow(organization=org, name="Test Workflow")
org.update(status=OrganizationStatus.PENDING_DELETION)
self.ScheduledDeletion.schedule(instance=org, days=0)
with self.tasks():
run_scheduled_deletions()
assert not Project.objects.filter(id=project.id).exists()
assert not Detector.objects.filter(id=detector.id).exists()
assert not DataConditionGroup.objects.filter(id=dcg.id).exists()
assert not DataCondition.objects.filter(id=dc.id).exists()
assert not Workflow.objects.filter(id=workflow.id).exists()
def test_overwatch_notification_on_deletion(self) -> None:
"""Test that Overwatch notification is scheduled when organization is deleted"""
org = self.create_organization(name="test")
org_id = org.id
org_slug = org.slug
manager = DeletionTaskManager()
deletion_task = OrganizationDeletionTask(
manager=manager, model=Organization, query={"id": org_id}
)
with patch(
"sentry.deletions.defaults.organization.notify_overwatch_organization_deleted"
) as mock_task:
with patch(
"sentry.deletions.defaults.organization.transaction.on_commit"
) as mock_on_commit:
def execute_callback(callback: Any, using: Any = None) -> None:
callback()
mock_on_commit.side_effect = execute_callback
# Call delete_instance directly
deletion_task.delete_instance(org)
# Verify the task was scheduled via on_commit
mock_task.delay.assert_called_once_with(org_id, org_slug)
| DeleteOrganizationTest |
python | pytorch__pytorch | torch/fx/experimental/symbolic_shapes.py | {
"start": 104886,
"end": 105022
} | class ____:
exprs: list[str]
# A dataclass for storing C++ expressions and helper variables
@dataclass(frozen=True)
| _ShapeGuardsHelper |
python | getsentry__sentry | src/sentry/integrations/msteams/card_builder/block.py | {
"start": 1783,
"end": 2049
} | class ____(TypedDict, total=False):
size: TextSize
weight: TextWeight
horizontalAlignment: ContentAlignment
spacing: Literal["None"]
isSubtle: bool
height: Literal["stretch"]
wrap: bool
fontType: Literal["Default"]
| _TextBlockNotRequired |
python | bokeh__bokeh | src/bokeh/core/query.py | {
"start": 8284,
"end": 8629
} | class ____(_Operator):
''' Predicate to test if property values are greater than or equal to
some value.
Construct and ``GEQ`` predicate as a dict with ``GEQ`` as the key,
and a value to compare against.
.. code-block:: python
# matches any models with .size >= 10
dict(size={ GEQ: 10 })
'''
pass
| GEQ |
python | huggingface__transformers | tests/models/speecht5/test_modeling_speecht5.py | {
"start": 28781,
"end": 31393
} | class ____(unittest.TestCase):
@cached_property
def default_processor(self):
return SpeechT5Processor.from_pretrained("microsoft/speecht5_asr")
def _load_datasamples(self, num_samples):
from datasets import load_dataset
ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
# automatic decoding with librispeech
speech_samples = ds.sort("id")[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
def test_generation_librispeech(self):
model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr")
model.to(torch_device)
processor = self.default_processor
input_speech = self._load_datasamples(1)
input_values = processor(audio=input_speech, return_tensors="pt").input_values.to(torch_device)
generated_ids = model.generate(input_values)
generated_transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)
EXPECTED_TRANSCRIPTIONS = [
"mister quilter is the apostle of the middle classes and we are glad to welcome his gospel"
]
self.assertListEqual(generated_transcript, EXPECTED_TRANSCRIPTIONS)
def test_generation_librispeech_batched(self):
model = SpeechT5ForSpeechToText.from_pretrained("microsoft/speecht5_asr")
model.to(torch_device)
processor = self.default_processor
input_speech = self._load_datasamples(4)
inputs = processor(audio=input_speech, return_tensors="pt", padding=True)
input_values = inputs.input_values.to(torch_device)
attention_mask = inputs.attention_mask.to(torch_device)
generated_ids = model.generate(input_values, attention_mask=attention_mask)
generated_transcripts = processor.batch_decode(generated_ids, skip_special_tokens=True)
EXPECTED_TRANSCRIPTIONS = [
"mister quilter is the apostle of the middle classes and we are glad to welcome his gospel",
"nor is mister quilter's manner less interesting than his matter",
"he tells us that at this festive season of the year with christmas and rosebeaf looming before us"
" similars drawn from eating and its results occur most readily to the mind",
"he has grave doubts whether sir frederick latin's work is really greek after all and can discover in it"
" but little of rocky ithica",
]
self.assertListEqual(generated_transcripts, EXPECTED_TRANSCRIPTIONS)
@require_torch
| SpeechT5ForSpeechToTextIntegrationTests |
python | streamlit__streamlit | lib/tests/streamlit/runtime/pages_manager_test.py | {
"start": 764,
"end": 3324
} | class ____(unittest.TestCase):
def setUp(self):
self.pages_manager = PagesManager("main_script_path")
def test_get_page_script_valid_hash(self):
"""Ensure the page script is provided with valid page hash specified"""
self.pages_manager.set_script_intent("page_hash", "")
self.pages_manager.set_pages({"page_hash": {"page_script_hash": "page_hash"}})
page_script = self.pages_manager.get_page_script(
self.pages_manager.main_script_hash
)
assert page_script["page_script_hash"] == "page_hash"
def test_get_page_script_invalid_hash(self):
"""Ensure the page script is provided with invalid page hash specified"""
self.pages_manager.set_script_intent("bad_hash", "")
self.pages_manager.set_pages({"page_hash": {"page_script_hash": "page_hash"}})
page_script = self.pages_manager.get_page_script(
self.pages_manager.main_script_hash
)
assert page_script is None
def test_get_page_script_valid_name(self):
"""Ensure the page script is provided with valid page name specified"""
self.pages_manager.set_script_intent("", "page_name")
self.pages_manager.set_pages(
{
"page_hash": {
"page_script_hash": "page_hash",
"url_pathname": "page_name",
}
}
)
page_script = self.pages_manager.get_page_script(
self.pages_manager.main_script_hash
)
assert page_script["page_script_hash"] == "page_hash"
def test_get_page_script_invalid_name(self):
"""Ensure the page script is not provided with invalid page name specified"""
self.pages_manager.set_script_intent("", "foo")
self.pages_manager.set_pages(
{
"page_hash": {
"page_script_hash": "page_hash",
"url_pathname": "page_name",
}
}
)
page_script = self.pages_manager.get_page_script(
self.pages_manager.main_script_hash
)
assert page_script is None
def test_get_initial_active_script(self):
"""Test that the initial active script is correctly retrieved with the
main script path provided."""
page_info = self.pages_manager.get_initial_active_script("page_hash")
assert page_info == {
"script_path": "main_script_path",
"page_script_hash": "page_hash",
}
| PagesManagerTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mssql/pyodbc.py | {
"start": 17042,
"end": 17112
} | class ____(_ms_numeric_pyodbc, sqltypes.Float):
pass
| _MSFloat_pyodbc |
python | huggingface__transformers | tests/models/blenderbot_small/test_modeling_blenderbot_small.py | {
"start": 2060,
"end": 7897
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=7,
is_training=True,
use_labels=False,
vocab_size=99,
hidden_size=16,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=4,
hidden_act="gelu",
hidden_dropout_prob=0.1,
attention_probs_dropout_prob=0.1,
max_position_embeddings=50,
eos_token_id=2,
pad_token_id=1,
bos_token_id=0,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.max_position_embeddings = max_position_embeddings
self.eos_token_id = eos_token_id
self.pad_token_id = pad_token_id
self.bos_token_id = bos_token_id
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp(
3,
)
input_ids[:, -1] = self.eos_token_id # Eos Token
decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
config = self.get_config()
inputs_dict = prepare_blenderbot_small_inputs_dict(config, input_ids, decoder_input_ids)
return config, inputs_dict
def get_config(self):
return BlenderbotSmallConfig(
vocab_size=self.vocab_size,
d_model=self.hidden_size,
encoder_layers=self.num_hidden_layers,
decoder_layers=self.num_hidden_layers,
encoder_attention_heads=self.num_attention_heads,
decoder_attention_heads=self.num_attention_heads,
encoder_ffn_dim=self.intermediate_size,
decoder_ffn_dim=self.intermediate_size,
dropout=self.hidden_dropout_prob,
attention_dropout=self.attention_probs_dropout_prob,
max_position_embeddings=self.max_position_embeddings,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
pad_token_id=self.pad_token_id,
)
def prepare_config_and_inputs_for_common(self):
config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
model = BlenderbotSmallModel(config=config).get_decoder().to(torch_device).eval()
input_ids = inputs_dict["input_ids"]
attention_mask = inputs_dict["attention_mask"]
# first forward pass
outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)
output, past_key_values = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
next_attn_mask = ids_tensor((self.batch_size, 3), 2)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
"last_hidden_state"
]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
def check_encoder_decoder_model_standalone(self, config, inputs_dict):
model = BlenderbotSmallModel(config=config).to(torch_device).eval()
outputs = model(**inputs_dict)
encoder_last_hidden_state = outputs.encoder_last_hidden_state
last_hidden_state = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
encoder = model.get_encoder()
encoder.save_pretrained(tmpdirname)
encoder = BlenderbotSmallEncoder.from_pretrained(tmpdirname).to(torch_device)
encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
0
]
self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
with tempfile.TemporaryDirectory() as tmpdirname:
decoder = model.get_decoder()
decoder.save_pretrained(tmpdirname)
decoder = BlenderbotSmallDecoder.from_pretrained(tmpdirname).to(torch_device)
last_hidden_state_2 = decoder(
input_ids=inputs_dict["decoder_input_ids"],
attention_mask=inputs_dict["decoder_attention_mask"],
encoder_hidden_states=encoder_last_hidden_state,
encoder_attention_mask=inputs_dict["attention_mask"],
)[0]
self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
| BlenderbotSmallModelTester |
python | doocs__leetcode | solution/1000-1099/1022.Sum of Root To Leaf Binary Numbers/Solution.py | {
"start": 192,
"end": 545
} | class ____:
def sumRootToLeaf(self, root: TreeNode) -> int:
def dfs(root, t):
if root is None:
return 0
t = (t << 1) | root.val
if root.left is None and root.right is None:
return t
return dfs(root.left, t) + dfs(root.right, t)
return dfs(root, 0)
| Solution |
python | getsentry__sentry | src/sentry/preprod/models.py | {
"start": 17415,
"end": 18244
} | class ____(DefaultFieldsModel):
"""
A model that represents an installable preprod artifact with an expiring URL.
This is created when a user generates a download QR code for a preprod artifact.
"""
__relocation_scope__ = RelocationScope.Excluded
preprod_artifact = FlexibleForeignKey("preprod.PreprodArtifact")
# A random string used in the URL path for secure access
url_path = models.CharField(max_length=255, unique=True, db_index=True)
# When the install link expires
expiration_date = models.DateTimeField(null=True)
# Number of times the IPA was downloaded
download_count = models.PositiveIntegerField(default=0, null=True)
class Meta:
app_label = "preprod"
db_table = "sentry_installablepreprodartifact"
@region_silo_model
| InstallablePreprodArtifact |
python | charliermarsh__ruff | crates/ty_python_semantic/resources/corpus/73_class_generic_defaults.py | {
"start": 0,
"end": 27
} | class ____[T=str]:
x: T
| Foo |
python | pypa__pip | tests/unit/test_operations_prepare.py | {
"start": 3119,
"end": 4650
} | class ____:
def prep(self, tmpdir: Path, data: TestData) -> None:
self.build_dir = os.fspath(tmpdir.joinpath("build"))
self.download_dir = tmpdir.joinpath("download")
os.mkdir(self.build_dir)
os.mkdir(self.download_dir)
self.dist_file = "simple-1.0.tar.gz"
self.dist_file2 = "simple-2.0.tar.gz"
self.dist_path = data.packages.joinpath(self.dist_file)
self.dist_path2 = data.packages.joinpath(self.dist_file2)
self.dist_url = Link(self.dist_path.as_uri())
self.dist_url2 = Link(self.dist_path2.as_uri())
self.no_download = Mock(side_effect=AssertionError)
def test_unpack_url_no_download(self, tmpdir: Path, data: TestData) -> None:
self.prep(tmpdir, data)
unpack_url(self.dist_url, self.build_dir, self.no_download, verbosity=0)
assert os.path.isdir(os.path.join(self.build_dir, "simple"))
assert not os.path.isfile(os.path.join(self.download_dir, self.dist_file))
def test_unpack_url_bad_hash(self, tmpdir: Path, data: TestData) -> None:
"""
Test when the file url hash fragment is wrong
"""
self.prep(tmpdir, data)
url = f"{self.dist_url.url}#md5=bogus"
dist_url = Link(url)
with pytest.raises(HashMismatch):
unpack_url(
dist_url,
self.build_dir,
download=self.no_download,
hashes=Hashes({"md5": ["bogus"]}),
verbosity=0,
)
| Test_unpack_url |
python | arrow-py__arrow | arrow/locales.py | {
"start": 61801,
"end": 62354
} | class ____(ArabicLocale):
names = ["ar-ma"]
month_names = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"ماي",
"يونيو",
"يوليوز",
"غشت",
"شتنبر",
"أكتوبر",
"نونبر",
"دجنبر",
]
month_abbreviations = [
"",
"يناير",
"فبراير",
"مارس",
"أبريل",
"ماي",
"يونيو",
"يوليوز",
"غشت",
"شتنبر",
"أكتوبر",
"نونبر",
"دجنبر",
]
| MoroccoArabicLocale |
python | ray-project__ray | rllib/examples/envs/classes/multi_agent/footsies/game/footsies_binary.py | {
"start": 1123,
"end": 8071
} | class ____:
def __init__(self, config: EnvContext, port: int):
self._urls = BinaryUrls()
self.config = config
self.port = port
self.binary_to_download = config["binary_to_download"]
if self.binary_to_download == "linux_server":
self.url = self._urls.URL_LINUX_SERVER_BINARIES
elif self.binary_to_download == "linux_windowed":
self.url = self._urls.URL_LINUX_WINDOWED_BINARIES
elif self.binary_to_download == "mac_headless":
self.url = self._urls.URL_MAC_HEADLESS_BINARIES
elif self.binary_to_download == "mac_windowed":
self.url = self._urls.URL_MAC_WINDOWED_BINARIES
else:
raise ValueError(f"Invalid target binary: {self.binary_to_download}")
self.full_download_dir = Path(config["binary_download_dir"]).resolve()
self.full_download_path = (
self.full_download_dir / str.split(self.url, sep="/")[-1]
)
self.full_extract_dir = Path(config["binary_extract_dir"]).resolve()
self.renamed_path = self.full_extract_dir / "footsies_binaries"
@staticmethod
def _add_executable_permission(binary_path: Path) -> None:
binary_path.chmod(binary_path.stat().st_mode | stat.S_IXUSR)
def start_game_server(self) -> int:
"""Downloads, unzips, and starts the Footsies game server binary.
Returns footsies process PID.
"""
self._download_game_binary()
self._unzip_game_binary()
if self.binary_to_download == "mac_windowed":
game_binary_path = (
Path(self.renamed_path) / "Contents" / "MacOS" / "FOOTSIES"
)
elif self.binary_to_download == "mac_headless":
game_binary_path = Path(self.renamed_path) / "FOOTSIES"
else:
game_binary_path = Path(self.renamed_path) / "footsies.x86_64"
if os.access(game_binary_path, os.X_OK):
logger.info(
f"Game binary has an 'executable' permission: {game_binary_path}"
)
else:
self._add_executable_permission(game_binary_path)
logger.info(f"Game binary path: {game_binary_path}")
if (
self.binary_to_download == "linux_server"
or self.binary_to_download == "linux_windowed"
):
process = subprocess.Popen([game_binary_path, "--port", str(self.port)])
else:
process = subprocess.Popen(
[
"arch",
"-x86_64",
game_binary_path,
"--port",
str(self.port),
],
)
# check if the game server is running correctly
timeout = 2
channel = grpc.insecure_channel(f"localhost:{self.port}")
stub = footsies_pb2_grpc.FootsiesGameServiceStub(channel)
# step 1: try to start the game
while True:
try:
stub.StartGame(footsies_pb2.Empty())
logger.info("Game ready!")
break
except grpc.RpcError as e:
code = e.code()
if code in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.DEADLINE_EXCEEDED,
):
logger.info(f"RLlib {self.__class__.__name__}: Game not ready...")
time.sleep(timeout)
continue
raise
# step 2: check if the game is ready
ready = False
while not ready:
try:
ready = stub.IsReady(footsies_pb2.Empty()).value
if not ready:
logger.info(f"RLlib {self.__class__.__name__}: Game not ready...")
time.sleep(timeout)
continue
else:
logger.info("Game ready!")
break
except grpc.RpcError as e:
if e.code() in (
grpc.StatusCode.UNAVAILABLE,
grpc.StatusCode.DEADLINE_EXCEEDED,
):
time.sleep(timeout)
logger.info(f"RLlib {self.__class__.__name__}: Game not ready...")
continue
raise
channel.close()
return process.pid
def _download_game_binary(self):
# As multiple actors might try to download all at the same time.
# The file lock should force only one actor to download
chunk_size = 1024 * 1024 # 1MB
lock_path = self.full_download_path.parent / ".footsies-download.lock"
with FileLock(lock_path, timeout=300):
if self.full_download_path.exists():
logger.info(
f"Game binary already exists at {self.full_download_path}, skipping download."
)
else:
try:
with requests.get(self.url, stream=True) as response:
response.raise_for_status()
self.full_download_dir.mkdir(parents=True, exist_ok=True)
with open(self.full_download_path, "wb") as f:
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk:
f.write(chunk)
logger.info(
f"Downloaded game binary to {self.full_download_path}\n"
f"Binary size: {self.full_download_path.stat().st_size / 1024 / 1024:.1f} MB\n"
)
except requests.exceptions.RequestException as e:
logger.error(f"Failed to download binary from {self.url}: {e}")
def _unzip_game_binary(self):
# As multiple actors might try to unzip or rename the paths at the same time.
# The file lock should force this function to be sequential
lock_path = self.full_download_path.parent / ".footsies-unzip.lock"
with FileLock(lock_path, timeout=300):
if self.renamed_path.exists():
logger.info(
f"Game binary already extracted at {self.renamed_path}, skipping extraction."
)
else:
self.full_extract_dir.mkdir(parents=True, exist_ok=True)
with zipfile.ZipFile(self.full_download_path, mode="r") as zip_ref:
zip_ref.extractall(self.full_extract_dir)
if self.binary_to_download == "mac_windowed":
self.full_download_path.with_suffix(".app").rename(
self.renamed_path
)
else:
self.full_download_path.with_suffix("").rename(self.renamed_path)
logger.info(f"Extracted game binary to {self.renamed_path}")
| FootsiesBinary |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/tests/test_new_features.py | {
"start": 12898,
"end": 17303
} | class ____:
"""
Tests the logic for fetching child pages, specifically handling the
difference between cloud and on-premise Confluence instances.
"""
def test_on_prem_folder_call_is_never_made(self):
"""
On-premise mode: Ensures the fix prevents calls for 'folder' children.
"""
from llama_index.readers.confluence import ConfluenceReader
import requests
def side_effect(page_id, type, start=0, limit=50):
if type == "folder":
raise requests.exceptions.HTTPError(
"No ContentTypeBinding found for type: folder"
)
if page_id == "root" and start == 0:
return ["p1"]
return []
with patch("atlassian.Confluence") as MockConfluence:
mock_inst = Mock()
mock_inst.get_child_id_list = Mock(side_effect=side_effect)
mock_inst.cloud = False
MockConfluence.return_value = mock_inst
reader = ConfluenceReader(
base_url="http://onprem", cloud=False, api_token="t"
)
res = reader._dfs_page_ids("root", type="page")
assert set(res) == {"root", "p1"}
def test_mixed_children_recursion_in_cloud(self):
"""
Cloud mode: Verifies correct handling of pages and folders.
"""
from llama_index.readers.confluence import ConfluenceReader
def side_effect(page_id, type, start=0, limit=50):
if start > 0:
return []
if page_id == "root" and type == "page":
return ["p1"]
if page_id == "root" and type == "folder":
return ["f1"]
if page_id == "f1" and type == "page":
return ["p2"]
return []
with patch("atlassian.Confluence") as MockConfluence:
mock_inst = Mock()
mock_inst.get_child_id_list = Mock(side_effect=side_effect)
mock_inst.cloud = True
MockConfluence.return_value = mock_inst
reader = ConfluenceReader(
base_url="https://cloud", cloud=True, api_token="t"
)
res = reader._dfs_page_ids("root", type="page")
expected_ids = {"root", "p1", "p2"}
assert set(res) == expected_ids
assert "f1" not in res
def test_max_num_results_is_respected(self):
"""
Ensures the recursive search stops correctly when the limit is reached.
"""
from llama_index.readers.confluence import ConfluenceReader
def side_effect(page_id, type, start=0, limit=50):
if page_id == "root" and start == 0 and type == "page":
return ["p1", "p2", "p3", "p4"]
return []
with patch("atlassian.Confluence") as MockConfluence:
mock_inst = Mock()
mock_inst.get_child_id_list = Mock(side_effect=side_effect)
mock_inst.cloud = False
MockConfluence.return_value = mock_inst
reader = ConfluenceReader(
base_url="http://onprem", cloud=False, api_token="t"
)
res = reader._dfs_page_ids("root", type="page", max_num_results=3)
assert len(res) == 3
assert set(res) == {"root", "p1", "p2"}
def test_paging_behavior_helper_function(self):
"""
Tests that the _get_data_with_paging helper function works correctly.
"""
from llama_index.readers.confluence import ConfluenceReader
def paged_side_effect(page_id, type, start=0, limit=50):
full_data = ["p1", "p2", "p3", "p4", "p5"]
return full_data[start : start + limit]
with patch("atlassian.Confluence") as MockConfluence:
mock_inst = Mock()
mock_inst.get_child_id_list = Mock(side_effect=paged_side_effect)
mock_inst.cloud = True
MockConfluence.return_value = mock_inst
reader = ConfluenceReader(
base_url="https://cloud", cloud=True, api_token="t"
)
all_ids = reader._get_data_with_paging(
paged_function=reader.confluence.get_child_id_list,
page_id="root",
type="page",
)
assert all_ids == ["p1", "p2", "p3", "p4", "p5"]
| TestChildPageFetching |
python | huggingface__transformers | src/transformers/integrations/deepspeed.py | {
"start": 3076,
"end": 21059
} | class ____(HfDeepSpeedConfig):
"""
The `HfTrainerDeepSpeedConfig` object is meant to be created during `TrainingArguments` object creation and has the
same lifespan as the latter.
"""
def __init__(self, config_file_or_dict):
super().__init__(config_file_or_dict)
self._dtype = None
self.mismatches = []
def dtype(self):
if self._dtype is None:
raise ValueError("trainer_config_process() wasn't called yet to tell dtype")
return self._dtype
def is_auto(self, ds_key_long):
val = self.get_value(ds_key_long)
if val is None:
return False
else:
return val == "auto"
def fill_match(self, ds_key_long, hf_val, hf_key=None, must_match=True):
"""
A utility method that massages the config file and can optionally verify that the values match.
1. Replace "auto" values with `TrainingArguments` value.
2. If it wasn't "auto" and `must_match` is true, then check that DS config matches Trainer
config values and if mismatched add the entry to `self.mismatched` - will assert during
`trainer_config_finalize` for one or more mismatches.
"""
config, ds_key = self.find_config_node(ds_key_long)
if config is None:
return
if config.get(ds_key) == "auto":
config[ds_key] = hf_val
return
if not must_match:
return
ds_val = config.get(ds_key)
if ds_val is not None and ds_val != hf_val:
self.mismatches.append(f"- ds {ds_key_long}={ds_val} vs hf {hf_key}={hf_val}")
fill_only = partialmethod(fill_match, must_match=False)
def trainer_config_process(self, args, auto_find_batch_size=False):
"""
Adjust the config with `TrainingArguments` values. This stage is run during `TrainingArguments` object
creation.
"""
# DeepSpeed does:
# train_batch_size = world_size * train_micro_batch_size_per_gpu * gradient_accumulation_steps
train_batch_size = args.world_size * args.per_device_train_batch_size * args.gradient_accumulation_steps
self.fill_match(
"train_micro_batch_size_per_gpu",
args.per_device_train_batch_size,
"per_device_train_batch_size",
not auto_find_batch_size,
)
self.fill_match(
"gradient_accumulation_steps",
args.gradient_accumulation_steps,
"gradient_accumulation_steps",
)
self.fill_match(
"train_batch_size",
train_batch_size,
"train_batch_size (calculated)",
not auto_find_batch_size,
)
self.fill_match("gradient_clipping", args.max_grad_norm, "max_grad_norm")
self.fill_match("optimizer.params.lr", args.learning_rate, "learning_rate")
self.fill_match(
"optimizer.params.betas",
[args.adam_beta1, args.adam_beta2],
"adam_beta1+adam_beta2",
)
self.fill_match("optimizer.params.eps", args.adam_epsilon, "adam_epsilon")
self.fill_match("optimizer.params.weight_decay", args.weight_decay, "weight_decay")
self.fill_only("scheduler.params.warmup_min_lr", 0) # not a trainer arg
self.fill_match("scheduler.params.warmup_max_lr", args.learning_rate, "learning_rate")
# total_num_steps - will get set in trainer_config_finalize
if args.save_on_each_node:
# deepspeed uses shared storage by default. Let's override this setting if save_on_each_node == True
self.config["checkpoint"] = self.config.get("checkpoint", {})
self.config["checkpoint"]["use_node_local_storage"] = args.save_on_each_node
# amp: similar to the pytorch native amp - it has a bunch of optional params but we won't set
# any here unless the user did the work
self.fill_match("fp16.enabled", (args.fp16 or args.fp16_full_eval), "fp16|fp16_full_eval")
self.fill_match("bf16.enabled", (args.bf16 or args.bf16_full_eval), "bf16|bf16_full_eval")
# deepspeed's default mode is fp16 unless there is a config that says differently
if self.is_true("bf16.enabled"):
self._dtype = torch.bfloat16
elif self.is_true("fp16.enabled"):
self._dtype = torch.float16
else:
self._dtype = torch.float32
def trainer_config_finalize(self, args, model, num_training_steps):
"""
This stage is run after we have the model and know num_training_steps.
Now we can complete the configuration process.
"""
# zero
# deal with config keys that use `auto` value and rely on model's hidden_size
hidden_size_based_keys = [
"zero_optimization.reduce_bucket_size",
"zero_optimization.stage3_prefetch_bucket_size",
"zero_optimization.stage3_param_persistence_threshold",
]
hidden_size_auto_keys = [x for x in hidden_size_based_keys if self.is_auto(x)]
if len(hidden_size_auto_keys) > 0:
hidden_size = None
if hasattr(model, "config"):
if hasattr(model.config, "hidden_size"):
hidden_size = model.config.hidden_size
elif hasattr(model.config, "hidden_sizes"):
# if there are many hidden sizes pick the largest one
hidden_size = max(model.config.hidden_sizes)
elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_size"):
hidden_size = model.config.text_config.hidden_size
elif hasattr(model.config, "text_config") and hasattr(model.config.text_config, "hidden_sizes"):
# if there are many hidden sizes pick the largest one
hidden_size = max(model.config.text_config.hidden_sizes)
if hidden_size is None:
raise ValueError(
"The model's config file has neither `hidden_size` nor `hidden_sizes` entry, "
"therefore it's not possible to automatically fill out the following `auto` entries "
f"in the DeepSpeed config file: {hidden_size_auto_keys}. You can fix that by replacing "
"`auto` values for these keys with an integer value of your choice."
)
self.fill_only("zero_optimization.reduce_bucket_size", hidden_size * hidden_size)
if self.is_zero3():
# automatically assign the optimal config values based on model config
self.fill_only(
"zero_optimization.stage3_prefetch_bucket_size",
int(0.9 * hidden_size * hidden_size),
)
self.fill_only(
"zero_optimization.stage3_param_persistence_threshold",
10 * hidden_size,
)
# scheduler
self.fill_match(
"scheduler.params.total_num_steps",
num_training_steps,
"num_training_steps (calculated)",
)
self.fill_match(
"scheduler.params.warmup_num_steps",
args.get_warmup_steps(num_training_steps),
"warmup_steps",
)
if len(self.mismatches) > 0:
mismatches = "\n".join(self.mismatches)
raise ValueError(
"Please correct the following DeepSpeed config values that mismatch TrainingArguments"
f" values:\n{mismatches}\nThe easiest method is to set these DeepSpeed config values to 'auto'."
)
# keep the config object global to be able to access it anywhere during TrainingArguments life-cycle
_hf_deepspeed_config_weak_ref = None
def set_hf_deepspeed_config(hf_deepspeed_config_obj):
# this is a special weakref global object to allow us to get to Deepspeed config from APIs
# that don't have an easy way to get to the Deepspeed config outside of the Trainer domain.
global _hf_deepspeed_config_weak_ref
# will go away automatically when HfDeepSpeedConfig is destroyed (when TrainingArguments is destroyed)
_hf_deepspeed_config_weak_ref = weakref.ref(hf_deepspeed_config_obj)
def unset_hf_deepspeed_config():
# useful for unit tests to ensure the global state doesn't leak - call from `tearDown` method
global _hf_deepspeed_config_weak_ref
_hf_deepspeed_config_weak_ref = None
def is_deepspeed_zero3_enabled():
if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
return _hf_deepspeed_config_weak_ref().is_zero3()
else:
return False
def deepspeed_config():
if _hf_deepspeed_config_weak_ref is not None and _hf_deepspeed_config_weak_ref() is not None:
return _hf_deepspeed_config_weak_ref().config
else:
return None
def _load_state_dict_into_zero3_model(model_to_load, state_dict):
"""
Loads state dict into a model specifically for Zero3, since DeepSpeed does not support the `transformers`
tensor parallelism API.
Nearly identical code to PyTorch's `_load_from_state_dict`
"""
# copy state_dict so `_load_state_dict_into_zero3_model` can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
error_msgs = []
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module: nn.Module, state_dict, prefix="", assign_to_params_buffers=False):
local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})
local_metadata["assign_to_params_buffers"] = assign_to_params_buffers
args = (state_dict, prefix, local_metadata, True, [], [], error_msgs)
# Parameters of module and children will start with prefix. We can exit early if there are none in this
# state_dict
if is_deepspeed_zero3_enabled():
import deepspeed
# In sharded models, each shard has only part of the full state_dict, so only gather
# parameters that are in the current state_dict.
named_parameters = dict(module.named_parameters(prefix=prefix[:-1], recurse=False))
params_to_gather = [named_parameters[k] for k in named_parameters if k in state_dict]
if len(params_to_gather) > 0:
# because zero3 puts placeholders in model params, this context
# manager gathers (unpartitions) the params of the current layer, then loads from
# the state dict and then re-partitions them again
with deepspeed.zero.GatheredParameters(params_to_gather, modifier_rank=0):
if torch.distributed.get_rank() == 0:
module._load_from_state_dict(*args)
for name, child in module._modules.items():
if child is not None:
load(child, state_dict, prefix + name + ".", assign_to_params_buffers)
load(model_to_load, state_dict, assign_to_params_buffers=False)
return error_msgs
def deepspeed_optim_sched(trainer, hf_deepspeed_config, args, num_training_steps, model_parameters):
"""
A convenience wrapper that deals with optimizer and lr scheduler configuration.
"""
from accelerate.utils import DummyOptim, DummyScheduler
config = hf_deepspeed_config.config
# Mixing and matching DS schedulers and optimizers is supported unless Offload is enabled in which case it's:
# 1. DS scheduler + DS optimizer: Yes
# 2. HF scheduler + HF optimizer: Mostly*
# 3. DS scheduler + HF optimizer: Mostly*
# 4. HF scheduler + DS optimizer: Yes
#
# Mostly*: All non-native DeepSpeed optimizers that have both CPU and GPU implementation should work (except LAMB)
optimizer = None
if "optimizer" in config:
optimizer = DummyOptim(params=model_parameters)
else:
if hf_deepspeed_config.is_offload():
logger.info(
"Detected ZeRO Offload and non-DeepSpeed optimizers: This combination should work as long as the"
" custom optimizer has both CPU and GPU implementation (except LAMB)"
)
# ds supports Adam, OneBitAdam, and Lamb optimizers and can import other optimizers from torch.
# But trainer uses AdamW by default.
optimizer = trainer.create_optimizer()
# To use other optimizers requires voiding warranty with: `zero_allow_untested_optimizer`
config["zero_allow_untested_optimizer"] = True
lr_scheduler = None
if "scheduler" in config:
lr_scheduler = DummyScheduler(optimizer)
else:
if isinstance(optimizer, DummyOptim):
def _lr_scheduler_callable(optimizer):
# create a shallow copy first, so later modifications do not affect original trainer
trainer_copy = copy.copy(trainer)
# at the time _lr_scheduler_callable is called, trainer.lr_scheduler has been set
# update it to None so that we can re-create a new scheduler
trainer_copy.lr_scheduler = None
lr_scheduler = trainer_copy.create_scheduler(
num_training_steps=num_training_steps, optimizer=optimizer
)
return lr_scheduler
lr_scheduler = DummyScheduler(optimizer, lr_scheduler_callable=_lr_scheduler_callable)
else:
lr_scheduler = trainer.create_scheduler(num_training_steps=num_training_steps, optimizer=optimizer)
return optimizer, lr_scheduler
def deepspeed_init(trainer, num_training_steps, inference=False):
"""
Init DeepSpeed, after updating the DeepSpeed configuration with any relevant Trainer's args.
If `resume_from_checkpoint` was passed then an attempt to resume from a previously saved checkpoint will be made.
Args:
trainer: Trainer object
num_training_steps: per single gpu
resume_from_checkpoint: path to a checkpoint if to resume from after normal DeepSpeedEngine load
inference: launch in inference mode (no optimizer and no lr scheduler)
auto_find_batch_size: whether to ignore the `train_micro_batch_size_per_gpu` argument as it's being
set automatically by the auto batch size finder
Returns: optimizer, lr_scheduler
We may use `deepspeed_init` more than once during the life of Trainer, when we do - it's a temp hack based on:
https://github.com/deepspeedai/DeepSpeed/issues/1394#issuecomment-937405374 until Deepspeed fixes a bug where it
can't resume from a checkpoint after it did some stepping https://github.com/deepspeedai/DeepSpeed/issues/1612
"""
from deepspeed.utils import logger as ds_logger
model = trainer.model
args = trainer.args
hf_deepspeed_config = trainer.accelerator.state.deepspeed_plugin.hf_ds_config
# resume config update - some bits like `model` and `num_training_steps` only become available during train
hf_deepspeed_config.trainer_config_finalize(args, model, num_training_steps)
# set the Deepspeed log level consistent with the Trainer
ds_logger.setLevel(args.get_process_log_level())
if inference:
# only Z3 makes sense for the inference
if not hf_deepspeed_config.is_zero3():
raise ValueError("ZeRO inference only makes sense with ZeRO Stage 3 - please adjust your config")
# in case the training config is re-used for inference
hf_deepspeed_config.del_config_sub_tree("optimizer")
hf_deepspeed_config.del_config_sub_tree("lr_scheduler")
optimizer, lr_scheduler = None, None
model_parameters = None
else:
trainer.optimizer = None # important for when deepspeed_init is used as re-init
deepspeed_tp_size = hf_deepspeed_config.config.get("tensor_parallel", {}).get("autotp_size", 1)
if deepspeed_tp_size > 1:
import deepspeed
model = deepspeed.tp_model_init(
model=model,
tp_size=deepspeed_tp_size,
dtype=hf_deepspeed_config.dtype(),
config=hf_deepspeed_config.config,
)
model_parameters = list(filter(lambda p: p.requires_grad, model.parameters()))
optimizer, lr_scheduler = deepspeed_optim_sched(
trainer, hf_deepspeed_config, args, num_training_steps, model_parameters
)
# keep for quick debug:
# from pprint import pprint; pprint(config)
return optimizer, lr_scheduler
def deepspeed_load_checkpoint(deepspeed_engine, checkpoint_path, load_module_strict=True):
# it's possible that the user is trying to resume from model_path, which doesn't necessarily
# contain a deepspeed checkpoint. e.g. examples just check if the dir exists and assume it's
# a resume from a checkpoint and not just a local pretrained weight. So we check here if the
# path contains what looks like a deepspeed checkpoint
import glob
deepspeed_checkpoint_dirs = sorted(glob.glob(f"{checkpoint_path}/global_step*"))
if len(deepspeed_checkpoint_dirs) > 0:
logger.info(f"Attempting to resume from {checkpoint_path}")
# this magically updates self.optimizer and self.lr_scheduler
load_path, _ = deepspeed_engine.load_checkpoint(
checkpoint_path,
load_module_strict=load_module_strict,
load_optimizer_states=True,
load_lr_scheduler_states=True,
)
if load_path is None:
raise ValueError(f"[deepspeed] failed to resume from checkpoint {checkpoint_path}")
else:
raise ValueError(f"Can't find a valid checkpoint at {checkpoint_path}")
| HfTrainerDeepSpeedConfig |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 39203,
"end": 39541
} | class ____(str, Enum):
"""
* `AUTOSCALE`: Automatically resized based on load.
* `USER_REQUEST`: User requested a new size.
* `AUTORECOVERY`: Autorecovery monitor resized the cluster after it lost a node.
"""
autoscale = "AUTOSCALE"
userrequest = "USER_REQUEST"
autorecovery = "AUTORECOVERY"
| ResizeCause |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 43436,
"end": 44530
} | class ____(TestCase):
def test_extra_kwargs_not_altered(self):
class TestSerializer(serializers.ModelSerializer):
non_model_field = serializers.CharField()
class Meta:
model = OneFieldModel
read_only_fields = ('char_field', 'non_model_field')
fields = read_only_fields
extra_kwargs = {}
class ChildSerializer(TestSerializer):
class Meta(TestSerializer.Meta):
read_only_fields = ()
test_expected = dedent("""
TestSerializer():
char_field = CharField(read_only=True)
non_model_field = CharField()
""")
child_expected = dedent("""
ChildSerializer():
char_field = CharField(max_length=100)
non_model_field = CharField()
""")
self.assertEqual(repr(ChildSerializer()), child_expected)
self.assertEqual(repr(TestSerializer()), test_expected)
self.assertEqual(repr(ChildSerializer()), child_expected)
| TestMetaInheritance |
python | numpy__numpy | numpy/lib/tests/test_arraypad.py | {
"start": 26744,
"end": 30650
} | class ____:
def test_check_simple(self):
a = np.arange(100).astype('f')
a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
b = np.array(
[4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
0.80, 0.64, 0.48, 0.32, 0.16,
0.00, 1.00, 2.00, 3.00, 4.00, 5.00, 6.00, 7.00, 8.00, 9.00,
10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0,
20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0,
40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0,
50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0,
60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0,
70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0,
80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0,
90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,
94.3, 89.6, 84.9, 80.2, 75.5, 70.8, 66.1, 61.4, 56.7, 52.0,
47.3, 42.6, 37.9, 33.2, 28.5, 23.8, 19.1, 14.4, 9.7, 5.]
)
assert_allclose(a, b, rtol=1e-5, atol=1e-5)
def test_check_2d(self):
arr = np.arange(20).reshape(4, 5).astype(np.float64)
test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
expected = np.array(
[[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
[0., 0., 0., 1., 2., 3., 4., 2., 0.],
[0., 2.5, 5., 6., 7., 8., 9., 4.5, 0.],
[0., 5., 10., 11., 12., 13., 14., 7., 0.],
[0., 7.5, 15., 16., 17., 18., 19., 9.5, 0.],
[0., 3.75, 7.5, 8., 8.5, 9., 9.5, 4.75, 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0.]])
assert_allclose(test, expected)
@pytest.mark.xfail(exceptions=(AssertionError,))
def test_object_array(self):
from fractions import Fraction
arr = np.array([Fraction(1, 2), Fraction(-1, 2)])
actual = np.pad(arr, (2, 3), mode='linear_ramp', end_values=0)
# deliberately chosen to have a non-power-of-2 denominator such that
# rounding to floats causes a failure.
expected = np.array([
Fraction( 0, 12),
Fraction( 3, 12),
Fraction( 6, 12),
Fraction(-6, 12),
Fraction(-4, 12),
Fraction(-2, 12),
Fraction(-0, 12),
])
assert_equal(actual, expected)
def test_end_values(self):
"""Ensure that end values are exact."""
a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp")
assert_equal(a[:, 0], 0.)
assert_equal(a[:, -1], 0.)
assert_equal(a[0, :], 0.)
assert_equal(a[-1, :], 0.)
@pytest.mark.parametrize("dtype", _numeric_dtypes)
def test_negative_difference(self, dtype):
"""
Check correct behavior of unsigned dtypes if there is a negative
difference between the edge to pad and `end_values`. Check both cases
to be independent of implementation. Test behavior for all other dtypes
in case dtype casting interferes with complex dtypes. See gh-14191.
"""
x = np.array([3], dtype=dtype)
result = np.pad(x, 3, mode="linear_ramp", end_values=0)
expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
assert_equal(result, expected)
x = np.array([0], dtype=dtype)
result = np.pad(x, 3, mode="linear_ramp", end_values=3)
expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)
assert_equal(result, expected)
| TestLinearRamp |
python | run-llama__llama_index | llama-index-integrations/evaluation/llama-index-evaluation-tonic-validate/llama_index/evaluation/tonic_validate/augmentation_precision.py | {
"start": 366,
"end": 2005
} | class ____(BaseEvaluator):
"""
Tonic Validate's augmentation precision metric.
The output score is a float between 0.0 and 1.0.
See https://docs.tonic.ai/validate/ for more details.
Args:
openai_service(OpenAIService): The OpenAI service to use. Specifies the chat
completion model to use as the LLM evaluator. Defaults to "gpt-4".
"""
def __init__(self, openai_service: Optional[Any] = None):
if openai_service is None:
openai_service = OpenAIService("gpt-4")
self.openai_service = openai_service
self.metric = AugmentationPrecisionMetric()
async def aevaluate(
self,
query: Optional[str] = None,
response: Optional[str] = None,
contexts: Optional[Sequence[str]] = None,
**kwargs: Any,
) -> EvaluationResult:
from tonic_validate.classes.benchmark import BenchmarkItem
from tonic_validate.classes.llm_response import LLMResponse
benchmark_item = BenchmarkItem(question=query)
llm_response = LLMResponse(
llm_answer=response,
llm_context_list=contexts,
benchmark_item=benchmark_item,
)
score = self.metric.score(llm_response, self.openai_service)
return EvaluationResult(
query=query, contexts=contexts, response=response, score=score
)
def _get_prompts(self) -> PromptDictType:
return {}
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
return
| AugmentationPrecisionEvaluator |
python | PyCQA__pylint | tests/functional/ext/code_style/cs_consider_using_assignment_expr.py | {
"start": 2645,
"end": 2761
} | class ____:
var = 1
A.var = 2
if A.var:
...
i: int
if i: # pylint: disable=used-before-assignment
pass
| A |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 8759,
"end": 9477
} | class ____:
"""§11.6.5 of the 1.7 and 2.0 references."""
TYPE = "/Type" # name, required; must be /XObject
SUBTYPE = "/Subtype" # name, required; must be /Image
NAME = "/Name" # name, required
WIDTH = "/Width" # integer, required
HEIGHT = "/Height" # integer, required
BITS_PER_COMPONENT = "/BitsPerComponent" # integer, required
COLOR_SPACE = "/ColorSpace" # name, required
DECODE = "/Decode" # array, optional
INTENT = "/Intent" # string, optional
INTERPOLATE = "/Interpolate" # boolean, optional
IMAGE_MASK = "/ImageMask" # boolean, optional
MASK = "/Mask" # 1-bit image mask stream
S_MASK = "/SMask" # dictionary or name, optional
| ImageAttributes |
python | encode__django-rest-framework | tests/test_model_serializer.py | {
"start": 49414,
"end": 50842
} | class ____(TestCase):
def test_traverse_nullable_fk(self):
"""
A dotted source with nullable elements uses default when any item in the chain is None. #5849.
Similar to model example from test_serializer.py `test_default_for_multiple_dotted_source` method,
but using RelatedField, rather than CharField.
"""
class TestSerializer(serializers.ModelSerializer):
target = serializers.PrimaryKeyRelatedField(
source='target.target', read_only=True, allow_null=True, default=None
)
class Meta:
model = NestedForeignKeySource
fields = ('target', )
model = NestedForeignKeySource.objects.create()
assert TestSerializer(model).data['target'] is None
def test_named_field_source(self):
class TestSerializer(serializers.ModelSerializer):
class Meta:
model = RegularFieldsModel
fields = ('number_field',)
extra_kwargs = {
'number_field': {
'source': 'integer_field'
}
}
expected = dedent(r"""
TestSerializer\(\):
number_field = IntegerField\(.*source='integer_field'\)
""")
self.maxDiff = None
assert re.search(expected, repr(TestSerializer())) is not None
| TestFieldSource |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-cogniswitch/llama_index/tools/cogniswitch/base.py | {
"start": 120,
"end": 5381
} | class ____(BaseToolSpec):
"""
Cogniswitch Tool Spec.
A toolspec to have store_data and query_knowledge as tools to store the data from a file or a url
and answer questions from the knowledge stored respectively.
"""
spec_functions = ["store_data", "query_knowledge", "knowledge_status"]
def __init__(
self,
cs_token: str,
apiKey: str,
OAI_token: Optional[str] = None,
) -> None:
"""
Args:
cs_token (str): Cogniswitch token.
OAI_token (str): OpenAI token.
apiKey (str): Oauth token.
"""
self.cs_token = cs_token
if OAI_token:
self.OAI_token = OAI_token
elif os.environ["OPENAI_API_KEY"]:
self.OAI_token = os.environ["OPENAI_API_KEY"]
else:
raise ValueError("Please provide the OpenAI token")
self.apiKey = apiKey
self.source_URL_endpoint = (
"https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeSource/url"
)
self.source_file_endpoint = (
"https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeSource/file"
)
self.knowledge_request_endpoint = (
"https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeRequest"
)
self.knowledge_status_endpoint = (
"https://api.cogniswitch.ai:8243/cs-api/0.0.1/cs/knowledgeSource/status"
)
self.headers = {
"apiKey": self.apiKey,
"platformToken": self.cs_token,
"openAIToken": self.OAI_token,
}
def store_data(
self,
url: Optional[str] = None,
file: Optional[str] = None,
document_name: Optional[str] = None,
document_description: Optional[str] = None,
) -> dict:
"""
Store data using the Cogniswitch service.
Args:
url (Optional[str]): URL link.
file (Optional[str]): file path of your file.
the current files supported by the files are
.txt, .pdf, .docx, .doc, .html
document_name (Optional[str]): Name of the document you are uploading.
document_description (Optional[str]): Description of the document.
Returns:
dict: Response JSON from the Cogniswitch service.
"""
if not file and not url:
return {
"message": "No input provided",
}
elif file and url:
return {
"message": "Too many inputs, please provide either file or url",
}
elif url:
api_url = self.source_URL_endpoint
headers = self.headers
files = None
data = {
"url": url,
"documentName": document_name,
"documentDescription": document_description,
}
response = requests.post(api_url, headers=headers, data=data, files=files)
elif file:
api_url = self.source_file_endpoint
headers = self.headers
if file is not None:
files = {"file": open(file, "rb")}
else:
files = None
data = {
"url": url,
"documentName": document_name,
"documentDescription": document_description,
}
response = requests.post(api_url, headers=headers, data=data, files=files)
if response.status_code == 200:
return response.json()
else:
# error_message = response.json()["message"]
return {
"message": "Bad Request",
}
def query_knowledge(self, query: str) -> dict:
"""
Send a query to the Cogniswitch service and retrieve the response.
Args:
query (str): Query to be answered.
Returns:
dict: Response JSON from the Cogniswitch service.
"""
api_url = self.knowledge_request_endpoint
headers = self.headers
data = {"query": query}
response = requests.post(api_url, headers=headers, data=data)
if response.status_code == 200:
return response.json()
else:
# error_message = response.json()["message"]
return {
"message": "Bad Request",
}
def knowledge_status(self, document_name: str) -> dict:
"""
Use this function to know the status of the document or the URL uploaded
Args:
document_name (str): The document name or the url that is uploaded.
Returns:
dict: Response JSON from the Cogniswitch service.
"""
params = {"docName": document_name, "platformToken": self.cs_token}
response = requests.get(
self.knowledge_status_endpoint,
headers=self.headers,
params=params,
)
if response.status_code == 200:
source_info = response.json()
return source_info[-1]
else:
# error_message = response.json()["message"]
return {
"message": "Bad Request",
}
| CogniswitchToolSpec |
python | sympy__sympy | sympy/polys/domains/old_fractionfield.py | {
"start": 383,
"end": 6226
} | class ____(Field, CompositeDomain):
"""A class for representing rational function fields. """
dtype = DMF
is_FractionField = is_Frac = True
has_assoc_Ring = True
has_assoc_Field = True
def __init__(self, dom, *gens):
if not gens:
raise GeneratorsNeeded("generators not specified")
lev = len(gens) - 1
self.ngens = len(gens)
self.zero = self.dtype.zero(lev, dom)
self.one = self.dtype.one(lev, dom)
self.domain = self.dom = dom
self.symbols = self.gens = gens
def set_domain(self, dom):
"""Make a new fraction field with given domain. """
return self.__class__(dom, *self.gens)
def new(self, element):
return self.dtype(element, self.dom, len(self.gens) - 1)
def __str__(self):
return str(self.dom) + '(' + ','.join(map(str, self.gens)) + ')'
def __hash__(self):
return hash((self.__class__.__name__, self.dtype, self.dom, self.gens))
def __eq__(self, other):
"""Returns ``True`` if two domains are equivalent. """
return isinstance(other, FractionField) and \
self.dtype == other.dtype and self.dom == other.dom and self.gens == other.gens
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return (basic_from_dict(a.numer().to_sympy_dict(), *self.gens) /
basic_from_dict(a.denom().to_sympy_dict(), *self.gens))
def from_sympy(self, a):
"""Convert SymPy's expression to ``dtype``. """
p, q = a.as_numer_denom()
num, _ = dict_from_basic(p, gens=self.gens)
den, _ = dict_from_basic(q, gens=self.gens)
for k, v in num.items():
num[k] = self.dom.from_sympy(v)
for k, v in den.items():
den[k] = self.dom.from_sympy(v)
return self((num, den)).cancel()
def from_ZZ(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return K1(K1.dom.convert(a, K0))
def from_ZZ_python(K1, a, K0):
"""Convert a Python ``int`` object to ``dtype``. """
return K1(K1.dom.convert(a, K0))
def from_QQ_python(K1, a, K0):
"""Convert a Python ``Fraction`` object to ``dtype``. """
return K1(K1.dom.convert(a, K0))
def from_ZZ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpz`` object to ``dtype``. """
return K1(K1.dom.convert(a, K0))
def from_QQ_gmpy(K1, a, K0):
"""Convert a GMPY ``mpq`` object to ``dtype``. """
return K1(K1.dom.convert(a, K0))
def from_RealField(K1, a, K0):
"""Convert a mpmath ``mpf`` object to ``dtype``. """
return K1(K1.dom.convert(a, K0))
def from_GlobalPolynomialRing(K1, a, K0):
"""Convert a ``DMF`` object to ``dtype``. """
if K1.gens == K0.gens:
if K1.dom == K0.dom:
return K1(a.to_list())
else:
return K1(a.convert(K1.dom).to_list())
else:
monoms, coeffs = _dict_reorder(a.to_dict(), K0.gens, K1.gens)
if K1.dom != K0.dom:
coeffs = [ K1.dom.convert(c, K0.dom) for c in coeffs ]
return K1(dict(zip(monoms, coeffs)))
def from_FractionField(K1, a, K0):
"""
Convert a fraction field element to another fraction field.
Examples
========
>>> from sympy.polys.polyclasses import DMF
>>> from sympy.polys.domains import ZZ, QQ
>>> from sympy.abc import x
>>> f = DMF(([ZZ(1), ZZ(2)], [ZZ(1), ZZ(1)]), ZZ)
>>> QQx = QQ.old_frac_field(x)
>>> ZZx = ZZ.old_frac_field(x)
>>> QQx.from_FractionField(f, ZZx)
DMF([1, 2], [1, 1], QQ)
"""
if K1.gens == K0.gens:
if K1.dom == K0.dom:
return a
else:
return K1((a.numer().convert(K1.dom).to_list(),
a.denom().convert(K1.dom).to_list()))
elif set(K0.gens).issubset(K1.gens):
nmonoms, ncoeffs = _dict_reorder(
a.numer().to_dict(), K0.gens, K1.gens)
dmonoms, dcoeffs = _dict_reorder(
a.denom().to_dict(), K0.gens, K1.gens)
if K1.dom != K0.dom:
ncoeffs = [ K1.dom.convert(c, K0.dom) for c in ncoeffs ]
dcoeffs = [ K1.dom.convert(c, K0.dom) for c in dcoeffs ]
return K1((dict(zip(nmonoms, ncoeffs)), dict(zip(dmonoms, dcoeffs))))
def get_ring(self):
"""Returns a ring associated with ``self``. """
from sympy.polys.domains import PolynomialRing
return PolynomialRing(self.dom, *self.gens)
def poly_ring(self, *gens):
"""Returns a polynomial ring, i.e. `K[X]`. """
raise NotImplementedError('nested domains not allowed')
def frac_field(self, *gens):
"""Returns a fraction field, i.e. `K(X)`. """
raise NotImplementedError('nested domains not allowed')
def is_positive(self, a):
"""Returns True if ``a`` is positive. """
return self.dom.is_positive(a.numer().LC())
def is_negative(self, a):
"""Returns True if ``a`` is negative. """
return self.dom.is_negative(a.numer().LC())
def is_nonpositive(self, a):
"""Returns True if ``a`` is non-positive. """
return self.dom.is_nonpositive(a.numer().LC())
def is_nonnegative(self, a):
"""Returns True if ``a`` is non-negative. """
return self.dom.is_nonnegative(a.numer().LC())
def numer(self, a):
"""Returns numerator of ``a``. """
return a.numer()
def denom(self, a):
"""Returns denominator of ``a``. """
return a.denom()
def factorial(self, a):
"""Returns factorial of ``a``. """
return self.dtype(self.dom.factorial(a))
| FractionField |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/metadata/metadata_value.py | {
"start": 26952,
"end": 29303
} | class ____(
MetadataValue["TableMetadataValue"],
LegacyNamedTupleMixin,
IHaveNew,
):
"""Container class for table metadata entry data.
Args:
records (TableRecord): The data as a list of records (i.e. rows).
schema (Optional[TableSchema]): A schema for the table.
Example:
.. code-block:: python
from dagster import TableMetadataValue, TableRecord
TableMetadataValue(
schema=None,
records=[
TableRecord({"column1": 5, "column2": "x"}),
TableRecord({"column1": 7, "column2": "y"}),
]
)
"""
records: PublicAttr[Sequence[TableRecord]]
schema: PublicAttr[TableSchema]
@public
@staticmethod
def infer_column_type(value: object) -> str:
"""str: Infer the :py:class:`TableSchema` column type that will be used for a value."""
if isinstance(value, bool):
return "bool"
elif isinstance(value, int):
return "int"
elif isinstance(value, float):
return "float"
else:
return "string"
def __new__(cls, records: Sequence[TableRecord], schema: Optional[TableSchema]):
check.sequence_param(records, "records", of_type=TableRecord)
check.opt_inst_param(schema, "schema", TableSchema)
if len(records) == 0:
schema = check.not_none(schema, "schema must be provided if records is empty")
else:
columns = set(records[0].data.keys())
for record in records[1:]:
check.invariant(
set(record.data.keys()) == columns, "All records must have the same fields"
)
schema = schema or TableSchema(
columns=[
TableColumn(name=k, type=TableMetadataValue.infer_column_type(v))
for k, v in records[0].data.items()
]
)
return super().__new__(
cls,
records=records,
schema=schema,
)
@public
@property
def value(self) -> Self:
"""TableMetadataValue: Identity function."""
return self
@public
@whitelist_for_serdes(storage_name="TableSchemaMetadataEntryData")
@record(kw_only=False)
| TableMetadataValue |
python | numba__llvmlite | llvmlite/ir/instructions.py | {
"start": 346,
"end": 1748
} | class ____(NamedValue, _HasMetadata):
def __init__(self, parent, typ, opname, operands, name='', flags=()):
super(Instruction, self).__init__(parent, typ, name=name)
assert isinstance(parent, Block)
assert isinstance(flags, (tuple, list))
self.opname = opname
self.operands = operands
self.flags = list(flags)
self.metadata = {}
@property
def function(self):
return self.parent.function
@property
def module(self):
return self.parent.function.module
def descr(self, buf):
opname = self.opname
if self.flags:
opname = ' '.join([opname] + self.flags)
operands = ', '.join([op.get_reference() for op in self.operands])
typ = self.type
metadata = self._stringify_metadata(leading_comma=True)
buf.append("{0} {1} {2}{3}\n"
.format(opname, typ, operands, metadata))
def replace_usage(self, old, new):
if old in self.operands:
ops = []
for op in self.operands:
ops.append(new if op is old else op)
self.operands = tuple(ops)
self._clear_string_cache()
def __repr__(self):
return "<ir.%s %r of type '%s', opname %r, operands %r>" % (
self.__class__.__name__, self.name, self.type,
self.opname, self.operands)
| Instruction |
python | great-expectations__great_expectations | great_expectations/experimental/metric_repository/metrics.py | {
"start": 746,
"end": 1395
} | class ____(str, enum.Enum, metaclass=MetricTypesMeta):
"""Represents Metric types in OSS that are used for ColumnDescriptiveMetrics and MetricRepository.
More Metric types will be added in the future.
""" # noqa: E501 # FIXME CoP
# Table metrics
TABLE_COLUMNS = "table.columns"
TABLE_ROW_COUNT = "table.row_count"
TABLE_COLUMN_TYPES = "table.column_types"
# Column metrics
COLUMN_MIN = "column.min"
COLUMN_MAX = "column.max"
COLUMN_MEDIAN = "column.median"
COLUMN_MEAN = "column.mean"
COLUMN_NULL_COUNT = "column_values.null.count"
COLUMN_NON_NULL_COUNT = "column.non_null_count"
| MetricTypes |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/nameBinding2.py | {
"start": 143,
"end": 234
} | class ____:
def test(self):
nonlocal missing_symbol
missing_symbol = 4
| Test |
python | jazzband__django-oauth-toolkit | tests/test_rest_framework.py | {
"start": 2331,
"end": 2429
} | class ____(OAuth2View):
permission_classes = [TokenMatchesOASRequirements]
| MethodScopeAltViewBad |
python | getsentry__sentry | tests/sentry/db/models/fields/bitfield/test_bitfield.py | {
"start": 1015,
"end": 4708
} | class ____(unittest.TestCase):
def test_comparison(self) -> None:
bithandler_1 = BitHandler(0, ("FLAG_0", "FLAG_1", "FLAG_2", "FLAG_3"))
bithandler_2 = BitHandler(1, ("FLAG_0", "FLAG_1", "FLAG_2", "FLAG_3"))
bithandler_3 = BitHandler(0, ("FLAG_0", "FLAG_1", "FLAG_2", "FLAG_3"))
assert bithandler_1 == bithandler_1
assert bithandler_1 != bithandler_2
assert bithandler_1 == bithandler_3
def test_defaults(self) -> None:
bithandler = BitHandler(0, ("FLAG_0", "FLAG_1", "FLAG_2", "FLAG_3"))
# Default value of 0.
self.assertEqual(int(bithandler), 0)
# Test bit numbers.
self.assertEqual(int(bithandler.FLAG_0.number), 0)
self.assertEqual(int(bithandler.FLAG_1.number), 1)
self.assertEqual(int(bithandler.FLAG_2.number), 2)
self.assertEqual(int(bithandler.FLAG_3.number), 3)
# Negative test non-existant key.
pytest.raises(AttributeError, lambda: bithandler.FLAG_4)
# Test bool().
self.assertEqual(bool(bithandler.FLAG_0), False)
self.assertEqual(bool(bithandler.FLAG_1), False)
self.assertEqual(bool(bithandler.FLAG_2), False)
self.assertEqual(bool(bithandler.FLAG_3), False)
def test_bool_default(self) -> None:
bithandler = BitHandler(1, ("FLAG_0", "FLAG_1", "FLAG_2", "FLAG_3"))
self.assertEqual(bool(bithandler.FLAG_0), True)
self.assertEqual(bool(bithandler.FLAG_1), False)
self.assertEqual(bool(bithandler.FLAG_2), False)
self.assertEqual(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(2, ("FLAG_0", "FLAG_1", "FLAG_2", "FLAG_3"))
self.assertEqual(bool(bithandler.FLAG_0), False)
self.assertEqual(bool(bithandler.FLAG_1), True)
self.assertEqual(bool(bithandler.FLAG_2), False)
self.assertEqual(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(3, ("FLAG_0", "FLAG_1", "FLAG_2", "FLAG_3"))
self.assertEqual(bool(bithandler.FLAG_0), True)
self.assertEqual(bool(bithandler.FLAG_1), True)
self.assertEqual(bool(bithandler.FLAG_2), False)
self.assertEqual(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(4, ("FLAG_0", "FLAG_1", "FLAG_2", "FLAG_3"))
self.assertEqual(bool(bithandler.FLAG_0), False)
self.assertEqual(bool(bithandler.FLAG_1), False)
self.assertEqual(bool(bithandler.FLAG_2), True)
self.assertEqual(bool(bithandler.FLAG_3), False)
def test_mutation(self) -> None:
bithandler = BitHandler(0, ("FLAG_0", "FLAG_1", "FLAG_2", "FLAG_3"))
self.assertEqual(bool(bithandler.FLAG_0), False)
self.assertEqual(bool(bithandler.FLAG_1), False)
self.assertEqual(bool(bithandler.FLAG_2), False)
self.assertEqual(bool(bithandler.FLAG_3), False)
bithandler = BitHandler(bithandler | 1, bithandler._keys)
self.assertEqual(bool(bithandler.FLAG_0), True)
self.assertEqual(bool(bithandler.FLAG_1), False)
self.assertEqual(bool(bithandler.FLAG_2), False)
self.assertEqual(bool(bithandler.FLAG_3), False)
bithandler ^= 3
self.assertEqual(int(bithandler), 2)
self.assertEqual(bool(bithandler & 1), False)
bithandler.FLAG_0 = False
self.assertEqual(bithandler.FLAG_0, False)
bithandler.FLAG_1 = True
self.assertEqual(bithandler.FLAG_0, False)
self.assertEqual(bithandler.FLAG_1, True)
bithandler.FLAG_2 = False
self.assertEqual(bithandler.FLAG_0, False)
self.assertEqual(bithandler.FLAG_1, True)
self.assertEqual(bithandler.FLAG_2, False)
| BitHandlerTest |
python | ray-project__ray | rllib/algorithms/dqn/dqn_tf_policy.py | {
"start": 4663,
"end": 17643
} | class ____:
"""Assign the `compute_td_error` method to the DQNTFPolicy
This allows us to prioritize on the worker side.
"""
def __init__(self):
@make_tf_callable(self.get_session(), dynamic_shape=True)
def compute_td_error(
obs_t, act_t, rew_t, obs_tp1, terminateds_mask, importance_weights
):
# Do forward pass on loss to update td error attribute
build_q_losses(
self,
self.model,
None,
{
SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_t),
SampleBatch.ACTIONS: tf.convert_to_tensor(act_t),
SampleBatch.REWARDS: tf.convert_to_tensor(rew_t),
SampleBatch.NEXT_OBS: tf.convert_to_tensor(obs_tp1),
SampleBatch.TERMINATEDS: tf.convert_to_tensor(terminateds_mask),
PRIO_WEIGHTS: tf.convert_to_tensor(importance_weights),
},
)
return self.q_loss.td_error
self.compute_td_error = compute_td_error
@OldAPIStack
def build_q_model(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> ModelV2:
"""Build q_model and target_model for DQN
Args:
policy: The Policy, which will use the model for optimization.
obs_space (gym.spaces.Space): The policy's observation space.
action_space (gym.spaces.Space): The policy's action space.
config (AlgorithmConfigDict):
Returns:
ModelV2: The Model for the Policy to use.
Note: The target q model will not be returned, just assigned to
`policy.target_model`.
"""
if not isinstance(action_space, gym.spaces.Discrete):
raise UnsupportedSpaceException(
"Action space {} is not supported for DQN.".format(action_space)
)
if config["hiddens"]:
# try to infer the last layer size, otherwise fall back to 256
num_outputs = ([256] + list(config["model"]["fcnet_hiddens"]))[-1]
config["model"]["no_final_linear"] = True
else:
num_outputs = action_space.n
q_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="tf",
model_interface=DistributionalQTFModel,
name=Q_SCOPE,
num_atoms=config["num_atoms"],
dueling=config["dueling"],
q_hiddens=config["hiddens"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=isinstance(getattr(policy, "exploration", None), ParameterNoise)
or config["exploration_config"]["type"] == "ParameterNoise",
)
policy.target_model = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=action_space,
num_outputs=num_outputs,
model_config=config["model"],
framework="tf",
model_interface=DistributionalQTFModel,
name=Q_TARGET_SCOPE,
num_atoms=config["num_atoms"],
dueling=config["dueling"],
q_hiddens=config["hiddens"],
use_noisy=config["noisy"],
v_min=config["v_min"],
v_max=config["v_max"],
sigma0=config["sigma0"],
# TODO(sven): Move option to add LayerNorm after each Dense
# generically into ModelCatalog.
add_layer_norm=isinstance(getattr(policy, "exploration", None), ParameterNoise)
or config["exploration_config"]["type"] == "ParameterNoise",
)
return q_model
@OldAPIStack
def get_distribution_inputs_and_class(
policy: Policy, model: ModelV2, input_dict: SampleBatch, *, explore=True, **kwargs
):
q_vals = compute_q_values(
policy, model, input_dict, state_batches=None, explore=explore
)
q_vals = q_vals[0] if isinstance(q_vals, tuple) else q_vals
policy.q_values = q_vals
# Return a Torch TorchCategorical distribution where the temperature
# parameter is partially binded to the configured value.
temperature = policy.config["categorical_distribution_temperature"]
return (
policy.q_values,
get_categorical_class_with_temperature(temperature),
[],
) # state-out
@OldAPIStack
def build_q_losses(policy: Policy, model, _, train_batch: SampleBatch) -> TensorType:
"""Constructs the loss for DQNTFPolicy.
Args:
policy: The Policy to calculate the loss for.
model (ModelV2): The Model to calculate the loss for.
train_batch: The training data.
Returns:
TensorType: A single loss tensor.
"""
config = policy.config
# q network evaluation
q_t, q_logits_t, q_dist_t, _ = compute_q_values(
policy,
model,
SampleBatch({"obs": train_batch[SampleBatch.CUR_OBS]}),
state_batches=None,
explore=False,
)
# target q network evalution
q_tp1, q_logits_tp1, q_dist_tp1, _ = compute_q_values(
policy,
policy.target_model,
SampleBatch({"obs": train_batch[SampleBatch.NEXT_OBS]}),
state_batches=None,
explore=False,
)
if not hasattr(policy, "target_q_func_vars"):
policy.target_q_func_vars = policy.target_model.variables()
# q scores for actions which we know were selected in the given state.
one_hot_selection = tf.one_hot(
tf.cast(train_batch[SampleBatch.ACTIONS], tf.int32), policy.action_space.n
)
q_t_selected = tf.reduce_sum(q_t * one_hot_selection, 1)
q_logits_t_selected = tf.reduce_sum(
q_logits_t * tf.expand_dims(one_hot_selection, -1), 1
)
# compute estimate of best possible value starting from state at t + 1
if config["double_q"]:
(
q_tp1_using_online_net,
q_logits_tp1_using_online_net,
q_dist_tp1_using_online_net,
_,
) = compute_q_values(
policy,
model,
SampleBatch({"obs": train_batch[SampleBatch.NEXT_OBS]}),
state_batches=None,
explore=False,
)
q_tp1_best_using_online_net = tf.argmax(q_tp1_using_online_net, 1)
q_tp1_best_one_hot_selection = tf.one_hot(
q_tp1_best_using_online_net, policy.action_space.n
)
q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
q_dist_tp1_best = tf.reduce_sum(
q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1
)
else:
q_tp1_best_one_hot_selection = tf.one_hot(
tf.argmax(q_tp1, 1), policy.action_space.n
)
q_tp1_best = tf.reduce_sum(q_tp1 * q_tp1_best_one_hot_selection, 1)
q_dist_tp1_best = tf.reduce_sum(
q_dist_tp1 * tf.expand_dims(q_tp1_best_one_hot_selection, -1), 1
)
loss_fn = huber_loss if policy.config["td_error_loss_fn"] == "huber" else l2_loss
policy.q_loss = QLoss(
q_t_selected,
q_logits_t_selected,
q_tp1_best,
q_dist_tp1_best,
train_batch[PRIO_WEIGHTS],
tf.cast(train_batch[SampleBatch.REWARDS], tf.float32),
tf.cast(train_batch[SampleBatch.TERMINATEDS], tf.float32),
config["gamma"],
config["n_step"],
config["num_atoms"],
config["v_min"],
config["v_max"],
loss_fn,
)
return policy.q_loss.loss
@OldAPIStack
def adam_optimizer(
policy: Policy, config: AlgorithmConfigDict
) -> "tf.keras.optimizers.Optimizer":
if policy.config["framework"] == "tf2":
return tf.keras.optimizers.Adam(
learning_rate=policy.cur_lr, epsilon=config["adam_epsilon"]
)
else:
return tf1.train.AdamOptimizer(
learning_rate=policy.cur_lr, epsilon=config["adam_epsilon"]
)
@OldAPIStack
def clip_gradients(
policy: Policy, optimizer: "tf.keras.optimizers.Optimizer", loss: TensorType
) -> ModelGradients:
if not hasattr(policy, "q_func_vars"):
policy.q_func_vars = policy.model.variables()
return minimize_and_clip(
optimizer,
loss,
var_list=policy.q_func_vars,
clip_val=policy.config["grad_clip"],
)
@OldAPIStack
def build_q_stats(policy: Policy, batch) -> Dict[str, TensorType]:
return dict(
{
"cur_lr": tf.cast(policy.cur_lr, tf.float64),
},
**policy.q_loss.stats
)
@OldAPIStack
def setup_mid_mixins(policy: Policy, obs_space, action_space, config) -> None:
LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
ComputeTDErrorMixin.__init__(policy)
@OldAPIStack
def setup_late_mixins(
policy: Policy,
obs_space: gym.spaces.Space,
action_space: gym.spaces.Space,
config: AlgorithmConfigDict,
) -> None:
TargetNetworkMixin.__init__(policy)
@OldAPIStack
def compute_q_values(
policy: Policy,
model: ModelV2,
input_batch: SampleBatch,
state_batches=None,
seq_lens=None,
explore=None,
is_training: bool = False,
):
config = policy.config
model_out, state = model(input_batch, state_batches or [], seq_lens)
if config["num_atoms"] > 1:
(
action_scores,
z,
support_logits_per_action,
logits,
dist,
) = model.get_q_value_distributions(model_out)
else:
(action_scores, logits, dist) = model.get_q_value_distributions(model_out)
if config["dueling"]:
state_score = model.get_state_value(model_out)
if config["num_atoms"] > 1:
support_logits_per_action_mean = tf.reduce_mean(
support_logits_per_action, 1
)
support_logits_per_action_centered = (
support_logits_per_action
- tf.expand_dims(support_logits_per_action_mean, 1)
)
support_logits_per_action = (
tf.expand_dims(state_score, 1) + support_logits_per_action_centered
)
support_prob_per_action = tf.nn.softmax(logits=support_logits_per_action)
value = tf.reduce_sum(input_tensor=z * support_prob_per_action, axis=-1)
logits = support_logits_per_action
dist = support_prob_per_action
else:
action_scores_mean = reduce_mean_ignore_inf(action_scores, 1)
action_scores_centered = action_scores - tf.expand_dims(
action_scores_mean, 1
)
value = state_score + action_scores_centered
else:
value = action_scores
return value, logits, dist, state
@OldAPIStack
def postprocess_nstep_and_prio(
policy: Policy, batch: SampleBatch, other_agent=None, episode=None
) -> SampleBatch:
# N-step Q adjustments.
if policy.config["n_step"] > 1:
adjust_nstep(policy.config["n_step"], policy.config["gamma"], batch)
# Create dummy prio-weights (1.0) in case we don't have any in
# the batch.
if PRIO_WEIGHTS not in batch:
batch[PRIO_WEIGHTS] = np.ones_like(batch[SampleBatch.REWARDS])
# Prioritize on the worker side.
if batch.count > 0 and policy.config["replay_buffer_config"].get(
"worker_side_prioritization", False
):
td_errors = policy.compute_td_error(
batch[SampleBatch.OBS],
batch[SampleBatch.ACTIONS],
batch[SampleBatch.REWARDS],
batch[SampleBatch.NEXT_OBS],
batch[SampleBatch.TERMINATEDS],
batch[PRIO_WEIGHTS],
)
# Retain compatibility with old-style Replay args
epsilon = policy.config.get("replay_buffer_config", {}).get(
"prioritized_replay_eps"
) or policy.config.get("prioritized_replay_eps")
if epsilon is None:
raise ValueError("prioritized_replay_eps not defined in config.")
new_priorities = np.abs(convert_to_numpy(td_errors)) + epsilon
batch[PRIO_WEIGHTS] = new_priorities
return batch
DQNTFPolicy = build_tf_policy(
name="DQNTFPolicy",
get_default_config=lambda: ray.rllib.algorithms.dqn.dqn.DQNConfig(),
make_model=build_q_model,
action_distribution_fn=get_distribution_inputs_and_class,
loss_fn=build_q_losses,
stats_fn=build_q_stats,
postprocess_fn=postprocess_nstep_and_prio,
optimizer_fn=adam_optimizer,
compute_gradients_fn=clip_gradients,
extra_action_out_fn=lambda policy: {"q_values": policy.q_values},
extra_learn_fetches_fn=lambda policy: {"td_error": policy.q_loss.td_error},
before_loss_init=setup_mid_mixins,
after_init=setup_late_mixins,
mixins=[
TargetNetworkMixin,
ComputeTDErrorMixin,
LearningRateSchedule,
],
)
| ComputeTDErrorMixin |
python | numpy__numpy | numpy/distutils/fcompiler/nag.py | {
"start": 118,
"end": 577
} | class ____(FCompiler):
version_pattern = r'NAG.* Release (?P<version>[^(\s]*)'
def version_match(self, version_string):
m = re.search(self.version_pattern, version_string)
if m:
return m.group('version')
else:
return None
def get_flags_linker_so(self):
return ["-Wl,-shared"]
def get_flags_opt(self):
return ['-O4']
def get_flags_arch(self):
return []
| BaseNAGFCompiler |
python | pydantic__pydantic | pydantic/functional_validators.py | {
"start": 30227,
"end": 31682
} | class ____:
"""A helper class to validate a custom type from a type that is natively supported by Pydantic.
Args:
from_type: The type natively supported by Pydantic to use to perform validation.
instantiation_hook: A callable taking the validated type as an argument, and returning
the populated custom type.
Example:
```python {lint="skip"}
from typing import Annotated
from pydantic import BaseModel, TypeAdapter, ValidateAs
class MyCls:
def __init__(self, a: int) -> None:
self.a = a
def __repr__(self) -> str:
return f"MyCls(a={self.a})"
class Model(BaseModel):
a: int
ta = TypeAdapter(
Annotated[MyCls, ValidateAs(Model, lambda v: MyCls(a=v.a))]
)
print(ta.validate_python({'a': 1}))
#> MyCls(a=1)
```
"""
# TODO: make use of PEP 747
def __init__(self, from_type: type[_FromTypeT], /, instantiation_hook: Callable[[_FromTypeT], Any]) -> None:
self.from_type = from_type
self.instantiation_hook = instantiation_hook
def __get_pydantic_core_schema__(self, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
schema = handler(self.from_type)
return core_schema.no_info_after_validator_function(
self.instantiation_hook,
schema=schema,
)
| ValidateAs |
python | wandb__wandb | tests/unit_tests/test_artifacts/test_wandb_artifacts.py | {
"start": 2806,
"end": 17742
} | class ____:
@staticmethod
def _fixture_kwargs_to_kwargs(
artifact_id: str = "my-artifact-id",
artifact_manifest_id: str = "my-artifact-manifest-id",
entry_path: str = "my-path",
entry_digest: str = "my-digest",
entry_local_path: Optional[Path] = None,
preparer: Optional[StepPrepare] = None,
) -> Mapping[str, Any]:
if preparer is None:
preparer = mock_preparer()
return dict(
artifact_id=artifact_id,
artifact_manifest_id=artifact_manifest_id,
entry=ArtifactManifestEntry(
path=entry_path,
digest=entry_digest,
local_path=entry_local_path,
),
preparer=preparer if preparer else mock_preparer(),
)
@staticmethod
def _store_file(policy: WandbStoragePolicy, **kwargs) -> bool:
"""Runs store_file to completion."""
return policy.store_file(**TestStoreFile._fixture_kwargs_to_kwargs(**kwargs))
@pytest.fixture(params=["sync", "async"])
def store_file_mode(self, request) -> str:
return request.param
@pytest.fixture
def store_file(self) -> "StoreFileFixture":
"""Fixture to run prepare and return the result.
Example usage:
def test_smoke(store_file: "StoreFileFixture", api):
store_file(WandbStoragePolicy(api=api), entry_local_path=example_file)
api.upload_file_retry.assert_called_once()
"""
return TestStoreFile._store_file
@pytest.fixture
def api(self):
"""Fixture to give a mock `internal_api.Api` object, with properly-functioning upload methods."""
upload_file_retry = Mock()
upload_multipart_file_chunk_retry = Mock()
complete_multipart_upload_artifact = Mock()
return Mock(
upload_file_retry=upload_file_retry,
upload_multipart_file_chunk_retry=upload_multipart_file_chunk_retry,
complete_multipart_upload_artifact=complete_multipart_upload_artifact,
)
def test_smoke(self, store_file: "StoreFileFixture", api, example_file: Path):
store_file(WandbStoragePolicy(api=api), entry_local_path=example_file)
api.upload_file_retry.assert_called_once()
def test_uploads_to_prepared_url(
self, store_file: "StoreFileFixture", api, example_file: Path
):
preparer = mock_preparer(
prepare=lambda spec: singleton_queue(
dummy_response_prepare(spec)._replace(
upload_url="https://wandb-test/dst"
)
)
)
store_file(
WandbStoragePolicy(api=api),
entry_local_path=example_file,
preparer=preparer,
)
assert api.upload_file_retry.call_args[0][0] == "https://wandb-test/dst"
def test_passes_prepared_headers_to_upload(
self, store_file: "StoreFileFixture", api, example_file: Path
):
preparer = mock_preparer(
prepare=lambda spec: singleton_queue(
dummy_response_prepare(spec)._replace(
upload_headers=["x-my-header:my-header-val"]
)
)
)
store_file(
WandbStoragePolicy(api=api),
entry_local_path=example_file,
preparer=preparer,
)
assert api.upload_file_retry.call_args[1]["extra_headers"] == {
"x-my-header": "my-header-val"
}
@pytest.mark.parametrize(
["upload_url", "expect_upload", "expect_deduped"],
[
("http://wandb-test/dst", True, False),
(None, False, True),
],
)
def test_skips_upload_if_no_prepared_url(
self,
store_file: "StoreFileFixture",
api,
example_file: Path,
upload_url: Optional[str],
expect_upload: bool,
expect_deduped: bool,
):
preparer = mock_preparer(
prepare=lambda spec: singleton_queue(
dummy_response_prepare(spec)._replace(upload_url=upload_url)
)
)
policy = WandbStoragePolicy(api=api)
deduped = store_file(policy, entry_local_path=example_file, preparer=preparer)
assert deduped == expect_deduped
if expect_upload:
api.upload_file_retry.assert_called_once()
else:
api.upload_file_retry.assert_not_called()
@pytest.mark.parametrize(
["has_local_path", "expect_upload"],
[
(True, True),
(False, False),
],
)
def test_skips_upload_if_no_local_path(
self,
store_file: "StoreFileFixture",
api,
example_file: Path,
has_local_path: bool,
expect_upload: bool,
):
policy = WandbStoragePolicy(api=api)
deduped = store_file(
policy,
entry_local_path=example_file if has_local_path else None,
)
assert not deduped
if expect_upload:
api.upload_file_retry.assert_called_once()
else:
api.upload_file_retry.assert_not_called()
@pytest.mark.parametrize("err", [None, Exception("some error")])
def test_caches_result_on_success(
self,
store_file: "StoreFileFixture",
api,
example_file: Path,
artifact_file_cache: ArtifactFileCache,
err: Optional[Exception],
):
size = example_file.stat().st_size
api.upload_file_retry = Mock(side_effect=err)
policy = WandbStoragePolicy(api=api, cache=artifact_file_cache)
assert not is_cache_hit(artifact_file_cache, "my-digest", size)
store = functools.partial(store_file, policy, entry_local_path=example_file)
if err is None:
store()
assert is_cache_hit(artifact_file_cache, "my-digest", size)
else:
with pytest.raises(Exception, match=err.args[0]):
store()
assert not is_cache_hit(artifact_file_cache, "my-digest", size)
@pytest.mark.parametrize(
[
"upload_url",
"multipart_upload_urls",
"expect_single_upload",
"expect_multipart_upload",
"expect_deduped",
],
[
(
"http://wandb-test/dst",
{
1: "http://wandb-test/part=1",
2: "http://wandb-test/part=2",
3: "http://wandb-test/part=3",
},
False,
True,
False,
),
(
None,
{
1: "http://wandb-test/part=1",
2: "http://wandb-test/part=2",
3: "http://wandb-test/part=3",
},
False,
False,
True,
), # super weird case but shouldn't happen, upload url should always be generated
("http://wandb-test/dst", None, True, False, False),
(None, None, False, False, True),
],
)
@mock.patch(
"wandb.sdk.artifacts.storage_policies.wandb_storage_policy.WandbStoragePolicy."
"s3_multipart_file_upload"
)
def test_multipart_upload_handle_response(
self,
mock_s3_multipart_file_upload,
api,
example_file: Path,
upload_url: Optional[str],
multipart_upload_urls: Optional[dict],
expect_multipart_upload: bool,
expect_single_upload: bool,
expect_deduped: bool,
):
# Tests if we handle uploading correctly depending on what response we get from CreateArtifactFile.
preparer = mock_preparer(
prepare=lambda spec: singleton_queue(
dummy_response_prepare(spec)._replace(
upload_url=upload_url, multipart_upload_urls=multipart_upload_urls
)
)
)
policy = WandbStoragePolicy(api=api)
# Mock minimum size for multipart so that we can test multipart
with mock.patch(
"wandb.sdk.artifacts.storage_policies._multipart.MIN_MULTI_UPLOAD_SIZE",
example_file.stat().st_size,
):
deduped = self._store_file(
policy, entry_local_path=example_file, preparer=preparer
)
assert deduped == expect_deduped
if expect_multipart_upload:
mock_s3_multipart_file_upload.assert_called_once()
api.complete_multipart_upload_artifact.assert_called_once()
api.upload_file_retry.assert_not_called()
elif expect_single_upload:
api.upload_file_retry.assert_called_once()
api.upload_multipart_file_chunk_retry.assert_not_called()
else:
api.upload_file_retry.assert_not_called()
api.upload_multipart_file_chunk_retry.assert_not_called()
def test_s3_multipart_file_upload(
self,
api,
example_file: Path,
):
# Tests that s3 multipart calls upload on every part and retrieves the etag for every part
multipart_parts = {
1: "http://wandb-test/part=1",
2: "http://wandb-test/part=2",
3: "http://wandb-test/part=3",
}
hex_digests = {1: "abc1", 2: "abc2", 3: "abc3"}
chunk_size = 1
policy = WandbStoragePolicy(api=api)
responses = []
for idx in range(1, len(hex_digests) + 1):
etag_response = requests.Response()
etag_response.headers = {"ETag": hex_digests[idx]}
responses.append(etag_response)
api.upload_multipart_file_chunk_retry.side_effect = responses
with mock.patch("builtins.open", mock.mock_open(read_data="abc")):
etags = policy.s3_multipart_file_upload(
example_file, chunk_size, hex_digests, multipart_parts, extra_headers={}
)
assert api.upload_multipart_file_chunk_retry.call_count == 3
# Note Etags == hex_digest when there isn't an additional encryption method for uploading.
assert len(etags) == len(hex_digests)
for etag in etags:
assert etag["hexMD5"] == hex_digests[etag["partNumber"]]
@pytest.mark.parametrize("invalid_type", ["job", "wandb-history", "wandb-foo"])
def test_invalid_artifact_type(invalid_type):
with pytest.raises(ValueError, match="reserved for internal use"):
Artifact("foo", type=invalid_type)
@given(
invalid_name=(
text( # Too many characters
alphabet={*ascii_letters, *digits, "_", "-", " "},
min_size=NAME_MAXLEN + 1,
)
| from_regex( # Contains invalid characters
r"(\w|\d|\s)*(/)(\w|\d|\s)*",
fullmatch=True,
)
)
)
def test_invalid_artifact_name(invalid_name):
"""Prevent users from instantiating an artifact with an invalid name."""
with pytest.raises(ValueError):
_ = Artifact(invalid_name, type="any")
@pytest.mark.parametrize(
"property",
[
"entity",
"project",
"version",
"source_entity",
"source_project",
"source_version",
"ttl",
"aliases", # Perhaps shouldn't be restricted? It is today.
"commit_hash",
"file_count", # Probably doesn't need to be restricted, but is today.
"created_at",
"updated_at",
],
)
def test_unlogged_artifact_property_errors(property):
art = Artifact("foo", type="any")
error_message = f"'Artifact.{property}' used prior to logging artifact"
with pytest.raises(ArtifactNotLoggedError, match=error_message):
getattr(art, property)
@pytest.mark.parametrize(
"method",
[
"new_draft",
"download",
"checkout",
"verify",
"file",
"files",
"delete",
"used_by",
"logged_by",
"json_encode",
],
)
def test_unlogged_artifact_basic_method_errors(method):
art = Artifact("foo", type="any")
error_message = f"'Artifact.{method}' used prior to logging artifact"
with pytest.raises(ArtifactNotLoggedError, match=error_message):
getattr(art, method)()
def test_unlogged_artifact_other_method_errors():
art = Artifact("foo", type="any")
with pytest.raises(ArtifactNotLoggedError, match="Artifact.get_entry"):
art.get_entry("pathname")
with pytest.raises(ArtifactNotLoggedError, match="Artifact.get"):
art["obj_name"]
def test_cache_write_failure_is_ignored(monkeypatch, capsys):
def bad_write(*args, **kwargs):
raise FileNotFoundError("unable to copy from source file")
monkeypatch.setattr(shutil, "copyfileobj", bad_write)
policy = WandbStoragePolicy()
path = Path("foo.txt")
path.write_text("hello")
entry = ArtifactManifestEntry(
path=path,
digest="NWQ0MTQwMmFiYzRiMmE3NmI5NzE5ZDkxMTAxN2M1OTI=",
local_path=path,
)
policy._write_cache(entry)
captured = capsys.readouterr()
assert "Failed to cache" in captured.err
def test_artifact_manifest_length():
artifact = Artifact("test-artifact", "test-type")
assert len(artifact.manifest) == 0
with artifact.new_file("test.txt") as f:
f.write("test")
assert len(artifact.manifest) == 1
testpath = Path("test.txt")
testpath.write_text("also a test")
artifact.add_reference(testpath.resolve().as_uri(), "test2.txt")
assert len(artifact.manifest) == 2
def test_download_with_pathlib_root(monkeypatch):
artifact = Artifact("test-artifact", "test-type")
artifact._state = ArtifactState.COMMITTED
monkeypatch.setattr(artifact, "_download", lambda *args, **kwargs: "")
monkeypatch.setattr(artifact, "_download_using_core", lambda *args, **kwargs: "")
custom_path = Path("some/relative/path")
artifact.download(custom_path)
assert len(artifact._download_roots) == 1
root = list(artifact._download_roots)[0]
path_parts = custom_path.parts
assert Path(root).parts[-len(path_parts) :] == path_parts
def test_artifact_multipart_download_threshold():
mb = 1024 * 1024
assert should_multipart_download(100 * mb) is False
assert should_multipart_download(100 * mb, override=True) is True
assert should_multipart_download(100 * mb, override=False) is False
assert should_multipart_download(2080 * mb) is True
assert should_multipart_download(2080 * mb, override=True) is True
assert should_multipart_download(2080 * mb, override=False) is False
assert should_multipart_download(5070 * mb) is True
assert should_multipart_download(5070 * mb, override=True) is True
assert should_multipart_download(5070 * mb, override=False) is False
| TestStoreFile |
python | chardet__chardet | chardet/chardistribution.py | {
"start": 6811,
"end": 7633
} | class ____(CharDistributionAnalysis):
def __init__(self) -> None:
super().__init__()
self._char_to_freq_order = GB2312_CHAR_TO_FREQ_ORDER
self._table_size = GB2312_TABLE_SIZE
self.typical_distribution_ratio = GB2312_TYPICAL_DISTRIBUTION_RATIO
def get_order(self, byte_str: Union[bytes, bytearray]) -> int: # type: ignore[reportIncompatibleMethodOverride]
# for GB2312 encoding, we are interested
# first byte range: 0xb0 -- 0xfe
# second byte range: 0xa1 -- 0xfe
# no validation needed here. State machine has done that
first_char, second_char = byte_str[0], byte_str[1]
if (first_char >= 0xB0) and (second_char >= 0xA1):
return 94 * (first_char - 0xB0) + second_char - 0xA1
return -1
| GB2312DistributionAnalysis |
python | gevent__gevent | src/greentest/3.11/test_select.py | {
"start": 267,
"end": 3514
} | class ____(unittest.TestCase):
class Nope:
pass
class Almost:
def fileno(self):
return 'fileno'
def test_error_conditions(self):
self.assertRaises(TypeError, select.select, 1, 2, 3)
self.assertRaises(TypeError, select.select, [self.Nope()], [], [])
self.assertRaises(TypeError, select.select, [self.Almost()], [], [])
self.assertRaises(TypeError, select.select, [], [], [], "not a number")
self.assertRaises(ValueError, select.select, [], [], [], -1)
# Issue #12367: http://www.freebsd.org/cgi/query-pr.cgi?pr=kern/155606
@unittest.skipIf(sys.platform.startswith('freebsd'),
'skip because of a FreeBSD bug: kern/155606')
def test_errno(self):
with open(__file__, 'rb') as fp:
fd = fp.fileno()
fp.close()
try:
select.select([fd], [], [], 0)
except OSError as err:
self.assertEqual(err.errno, errno.EBADF)
else:
self.fail("exception not raised")
def test_returned_list_identity(self):
# See issue #8329
r, w, x = select.select([], [], [], 1)
self.assertIsNot(r, w)
self.assertIsNot(r, x)
self.assertIsNot(w, x)
@support.requires_fork()
def test_select(self):
code = textwrap.dedent('''
import time
for i in range(10):
print("testing...", flush=True)
time.sleep(0.050)
''')
cmd = [sys.executable, '-I', '-c', code]
with subprocess.Popen(cmd, stdout=subprocess.PIPE) as proc:
pipe = proc.stdout
for timeout in (0, 1, 2, 4, 8, 16) + (None,)*10:
if support.verbose:
print(f'timeout = {timeout}')
rfd, wfd, xfd = select.select([pipe], [], [], timeout)
self.assertEqual(wfd, [])
self.assertEqual(xfd, [])
if not rfd:
continue
if rfd == [pipe]:
line = pipe.readline()
if support.verbose:
print(repr(line))
if not line:
if support.verbose:
print('EOF')
break
continue
self.fail('Unexpected return values from select():',
rfd, wfd, xfd)
# Issue 16230: Crash on select resized list
@unittest.skipIf(
support.is_emscripten, "Emscripten cannot select a fd multiple times."
)
def test_select_mutated(self):
a = []
class F:
def fileno(self):
del a[-1]
return sys.__stdout__.fileno()
a[:] = [F()] * 10
self.assertEqual(select.select([], a, []), ([], a[:5], []))
def test_disallow_instantiation(self):
support.check_disallow_instantiation(self, type(select.poll()))
if hasattr(select, 'devpoll'):
support.check_disallow_instantiation(self, type(select.devpoll()))
def tearDownModule():
support.reap_children()
if __name__ == "__main__":
unittest.main()
| SelectTestCase |
python | automl__auto-sklearn | autosklearn/metalearning/metafeatures/metafeatures.py | {
"start": 12961,
"end": 13669
} | class ____(MetaFeature):
def _calculate(self, X, y, logger, feat_type):
occurences = helper_functions.get_value("ClassOccurences")
max_value = -1
if len(y.shape) == 2:
for i in range(y.shape[1]):
for num_occurences in occurences[i].values():
if num_occurences > max_value:
max_value = num_occurences
else:
for num_occurences in occurences.values():
if num_occurences > max_value:
max_value = num_occurences
return float(max_value) / float(y.shape[0])
@metafeatures.define("ClassProbabilityMean", dependency="ClassOccurences")
| ClassProbabilityMax |
python | getsentry__sentry-python | sentry_sdk/integrations/grpc/aio/client.py | {
"start": 2292,
"end": 3327
} | class ____(
ClientInterceptor,
UnaryStreamClientInterceptor, # type: ignore
):
async def intercept_unary_stream(
self,
continuation: Callable[[ClientCallDetails, Message], UnaryStreamCall],
client_call_details: ClientCallDetails,
request: Message,
) -> Union[AsyncIterable[Any], UnaryStreamCall]:
method = client_call_details.method
with sentry_sdk.start_span(
op=OP.GRPC_CLIENT,
name="unary stream call to %s" % method.decode(),
origin=SPAN_ORIGIN,
) as span:
span.set_data("type", "unary stream")
span.set_data("method", method)
client_call_details = self._update_client_call_details_metadata_from_scope(
client_call_details
)
response = await continuation(client_call_details, request)
# status_code = await response.code()
# span.set_data("code", status_code)
return response
| SentryUnaryStreamClientInterceptor |
python | walkccc__LeetCode | solutions/2505. Bitwise OR of All Subsequence Sums/2505.py | {
"start": 0,
"end": 181
} | class ____:
def subsequenceSumOr(self, nums: list[int]) -> int:
ans = 0
prefix = 0
for num in nums:
prefix += num
ans |= num | prefix
return ans
| Solution |
python | hyperopt__hyperopt | hyperopt/rdists.py | {
"start": 2283,
"end": 3781
} | class ____:
# -- not inheriting from scipy.stats.rv_discrete
# because I don't understand the design of those rv classes
"""Stats for Y = q * round(X / q) where X ~ U(low, high)."""
def __init__(self, low, high, q):
low, high = list(map(float, (low, high)))
qlow = safe_int_cast(np.round(low / q)) * q
qhigh = safe_int_cast(np.round(high / q)) * q
if qlow == qhigh:
xs = [qlow]
ps = [1.0]
else:
lowmass = 1 - ((low - qlow + 0.5 * q) / q)
assert 0 <= lowmass <= 1.0, (lowmass, low, qlow, q)
highmass = (high - qhigh + 0.5 * q) / q
assert 0 <= highmass <= 1.0, (highmass, high, qhigh, q)
# -- xs: qlow to qhigh inclusive
xs = np.arange(qlow, qhigh + 0.5 * q, q)
ps = np.ones(len(xs))
ps[0] = lowmass
ps[-1] = highmass
ps /= ps.sum()
self.low = low
self.high = high
self.q = q
self.qlow = qlow
self.qhigh = qhigh
self.xs = np.asarray(xs)
self.ps = np.asarray(ps)
def pmf(self, x):
return qtable_pmf(x, self.q, self.qlow, self.xs, self.ps)
def logpmf(self, x):
return qtable_logpmf(x, self.q, self.qlow, self.xs, self.ps)
def rvs(self, size=()):
rval = mtrand.uniform(low=self.low, high=self.high, size=size)
rval = safe_int_cast(np.round(rval / self.q)) * self.q
return rval
| quniform_gen |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/monitor.py | {
"start": 879,
"end": 986
} | class ____(BaseModel):
"""Base info serializer for responses."""
status: str | None
| BaseInfoResponse |
python | getsentry__sentry | src/sentry/options/store.py | {
"start": 851,
"end": 1029
} | class ____:
# Name of the group of options to include this option in
name: str
# Order of the option within the group
order: int
@dataclasses.dataclass
| GroupingInfo |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 53478,
"end": 53686
} | class ____:
xlDisplayShapes = -4104 # from enum XlDisplayDrawingObjects
xlHide = 3 # from enum XlDisplayDrawingObjects
xlPlaceholders = 2 # from enum XlDisplayDrawingObjects
| DisplayDrawingObjects |
python | pandas-dev__pandas | pandas/tests/indexes/test_any_index.py | {
"start": 2832,
"end": 3352
} | class ____:
def test_pickle_roundtrip(self, index):
result = tm.round_trip_pickle(index)
tm.assert_index_equal(result, index, exact=True)
if result.nlevels > 1:
# GH#8367 round-trip with timezone
assert index.equal_levels(result)
def test_pickle_preserves_name(self, index):
original_name, index.name = index.name, "foo"
unpickled = tm.round_trip_pickle(index)
assert index.equals(unpickled)
index.name = original_name
| TestRoundTrips |
python | PrefectHQ__prefect | tests/server/schemas/test_actions.py | {
"start": 11479,
"end": 11746
} | class ____:
def test_updatable_fields(self):
fields = BlockTypeUpdate.updatable_fields()
assert fields == {
"logo_url",
"documentation_url",
"description",
"code_example",
}
| TestBlockTypeUpdate |
python | ray-project__ray | doc/source/tune/doc_code/trainable.py | {
"start": 1367,
"end": 2111
} | class ____(tune.Trainable):
def setup(self, config: dict):
# config (dict): A dict of hyperparameters
self.x = 0
self.a = config["a"]
self.b = config["b"]
def step(self): # This is called iteratively.
score = objective(self.x, self.a, self.b)
self.x += 1
return {"score": score}
tuner = tune.Tuner(
Trainable,
run_config=tune.RunConfig(
# Train for 20 steps
stop={"training_iteration": 20},
checkpoint_config=tune.CheckpointConfig(
# We haven't implemented checkpointing yet. See below!
checkpoint_at_end=False
),
),
param_space={"a": 2, "b": 4},
)
results = tuner.fit()
# __class_api_example_end__
| Trainable |
python | kamyu104__LeetCode-Solutions | Python/special-array-ii.py | {
"start": 46,
"end": 526
} | class ____(object):
def isArraySpecial(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[bool]
"""
prefix = [0]*len(nums)
for i in xrange(len(nums)-1):
prefix[i+1] = prefix[i]+int(nums[i+1]&1 != nums[i]&1)
result = [False]*len(queries)
for i, (l, r) in enumerate(queries):
result[i] = prefix[r]-prefix[l] == r-l
return result
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.