language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/base.py | {
"start": 56694,
"end": 76140
} | class ____(Generic[_COLKEY, _COL_co]):
"""Collection of :class:`_expression.ColumnElement` instances,
typically for
:class:`_sql.FromClause` objects.
The :class:`_sql.ColumnCollection` object is most commonly available
as the :attr:`_schema.Table.c` or :attr:`_schema.Table.columns` collection
on the :class:`_schema.Table` object, introduced at
:ref:`metadata_tables_and_columns`.
The :class:`_expression.ColumnCollection` has both mapping- and sequence-
like behaviors. A :class:`_expression.ColumnCollection` usually stores
:class:`_schema.Column` objects, which are then accessible both via mapping
style access as well as attribute access style.
To access :class:`_schema.Column` objects using ordinary attribute-style
access, specify the name like any other object attribute, such as below
a column named ``employee_name`` is accessed::
>>> employee_table.c.employee_name
To access columns that have names with special characters or spaces,
index-style access is used, such as below which illustrates a column named
``employee ' payment`` is accessed::
>>> employee_table.c["employee ' payment"]
As the :class:`_sql.ColumnCollection` object provides a Python dictionary
interface, common dictionary method names like
:meth:`_sql.ColumnCollection.keys`, :meth:`_sql.ColumnCollection.values`,
and :meth:`_sql.ColumnCollection.items` are available, which means that
database columns that are keyed under these names also need to use indexed
access::
>>> employee_table.c["values"]
The name for which a :class:`_schema.Column` would be present is normally
that of the :paramref:`_schema.Column.key` parameter. In some contexts,
such as a :class:`_sql.Select` object that uses a label style set
using the :meth:`_sql.Select.set_label_style` method, a column of a certain
key may instead be represented under a particular label name such
as ``tablename_columnname``::
>>> from sqlalchemy import select, column, table
>>> from sqlalchemy import LABEL_STYLE_TABLENAME_PLUS_COL
>>> t = table("t", column("c"))
>>> stmt = select(t).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
>>> subq = stmt.subquery()
>>> subq.c.t_c
<sqlalchemy.sql.elements.ColumnClause at 0x7f59dcf04fa0; t_c>
:class:`.ColumnCollection` also indexes the columns in order and allows
them to be accessible by their integer position::
>>> cc[0]
Column('x', Integer(), table=None)
>>> cc[1]
Column('y', Integer(), table=None)
.. versionadded:: 1.4 :class:`_expression.ColumnCollection`
allows integer-based
index access to the collection.
Iterating the collection yields the column expressions in order::
>>> list(cc)
[Column('x', Integer(), table=None),
Column('y', Integer(), table=None)]
The base :class:`_expression.ColumnCollection` object can store
duplicates, which can
mean either two columns with the same key, in which case the column
returned by key access is **arbitrary**::
>>> x1, x2 = Column("x", Integer), Column("x", Integer)
>>> cc = ColumnCollection(columns=[(x1.name, x1), (x2.name, x2)])
>>> list(cc)
[Column('x', Integer(), table=None),
Column('x', Integer(), table=None)]
>>> cc["x"] is x1
False
>>> cc["x"] is x2
True
Or it can also mean the same column multiple times. These cases are
supported as :class:`_expression.ColumnCollection`
is used to represent the columns in
a SELECT statement which may include duplicates.
A special subclass :class:`.DedupeColumnCollection` exists which instead
maintains SQLAlchemy's older behavior of not allowing duplicates; this
collection is used for schema level objects like :class:`_schema.Table`
and
:class:`.PrimaryKeyConstraint` where this deduping is helpful. The
:class:`.DedupeColumnCollection` class also has additional mutation methods
as the schema constructs have more use cases that require removal and
replacement of columns.
.. versionchanged:: 1.4 :class:`_expression.ColumnCollection`
now stores duplicate
column keys as well as the same column in multiple positions. The
:class:`.DedupeColumnCollection` class is added to maintain the
former behavior in those cases where deduplication as well as
additional replace/remove operations are needed.
"""
__slots__ = ("_collection", "_index", "_colset", "_proxy_index")
_collection: List[Tuple[_COLKEY, _COL_co, _ColumnMetrics[_COL_co]]]
_index: Dict[Union[None, str, int], Tuple[_COLKEY, _COL_co]]
_proxy_index: Dict[ColumnElement[Any], Set[_ColumnMetrics[_COL_co]]]
_colset: Set[_COL_co]
def __init__(
self, columns: Optional[Iterable[Tuple[_COLKEY, _COL_co]]] = None
):
object.__setattr__(self, "_colset", set())
object.__setattr__(self, "_index", {})
object.__setattr__(
self, "_proxy_index", collections.defaultdict(util.OrderedSet)
)
object.__setattr__(self, "_collection", [])
if columns:
self._initial_populate(columns)
@util.preload_module("sqlalchemy.sql.elements")
def __clause_element__(self) -> ClauseList:
elements = util.preloaded.sql_elements
return elements.ClauseList(
_literal_as_text_role=roles.ColumnsClauseRole,
group=False,
*self._all_columns,
)
def _initial_populate(
self, iter_: Iterable[Tuple[_COLKEY, _COL_co]]
) -> None:
self._populate_separate_keys(iter_)
@property
def _all_columns(self) -> List[_COL_co]:
return [col for (_, col, _) in self._collection]
def keys(self) -> List[_COLKEY]:
"""Return a sequence of string key names for all columns in this
collection."""
return [k for (k, _, _) in self._collection]
def values(self) -> List[_COL_co]:
"""Return a sequence of :class:`_sql.ColumnClause` or
:class:`_schema.Column` objects for all columns in this
collection."""
return [col for (_, col, _) in self._collection]
def items(self) -> List[Tuple[_COLKEY, _COL_co]]:
"""Return a sequence of (key, column) tuples for all columns in this
collection each consisting of a string key name and a
:class:`_sql.ColumnClause` or
:class:`_schema.Column` object.
"""
return [(k, col) for (k, col, _) in self._collection]
def __bool__(self) -> bool:
return bool(self._collection)
def __len__(self) -> int:
return len(self._collection)
def __iter__(self) -> Iterator[_COL_co]:
# turn to a list first to maintain over a course of changes
return iter([col for _, col, _ in self._collection])
@overload
def __getitem__(self, key: Union[str, int]) -> _COL_co: ...
@overload
def __getitem__(
self, key: Tuple[Union[str, int], ...]
) -> ReadOnlyColumnCollection[_COLKEY, _COL_co]: ...
@overload
def __getitem__(
self, key: slice
) -> ReadOnlyColumnCollection[_COLKEY, _COL_co]: ...
def __getitem__(
self, key: Union[str, int, slice, Tuple[Union[str, int], ...]]
) -> Union[ReadOnlyColumnCollection[_COLKEY, _COL_co], _COL_co]:
try:
if isinstance(key, (tuple, slice)):
if isinstance(key, slice):
cols = (
(sub_key, col)
for (sub_key, col, _) in self._collection[key]
)
else:
cols = (self._index[sub_key] for sub_key in key)
return ColumnCollection(cols).as_readonly()
else:
return self._index[key][1]
except KeyError as err:
if isinstance(err.args[0], int):
raise IndexError(err.args[0]) from err
else:
raise
def __getattr__(self, key: str) -> _COL_co:
try:
return self._index[key][1]
except KeyError as err:
raise AttributeError(key) from err
def __contains__(self, key: str) -> bool:
if key not in self._index:
if not isinstance(key, str):
raise exc.ArgumentError(
"__contains__ requires a string argument"
)
return False
else:
return True
def compare(self, other: ColumnCollection[_COLKEY, _COL_co]) -> bool:
"""Compare this :class:`_expression.ColumnCollection` to another
based on the names of the keys"""
for l, r in zip_longest(self, other):
if l is not r:
return False
else:
return True
def __eq__(self, other: Any) -> bool:
return self.compare(other)
@overload
def get(self, key: str, default: None = None) -> Optional[_COL_co]: ...
@overload
def get(self, key: str, default: _COL) -> Union[_COL_co, _COL]: ...
def get(
self, key: str, default: Optional[_COL] = None
) -> Optional[Union[_COL_co, _COL]]:
"""Get a :class:`_sql.ColumnClause` or :class:`_schema.Column` object
based on a string key name from this
:class:`_expression.ColumnCollection`."""
if key in self._index:
return self._index[key][1]
else:
return default
def __str__(self) -> str:
return "%s(%s)" % (
self.__class__.__name__,
", ".join(str(c) for c in self),
)
def __setitem__(self, key: str, value: Any) -> NoReturn:
raise NotImplementedError()
def __delitem__(self, key: str) -> NoReturn:
raise NotImplementedError()
def __setattr__(self, key: str, obj: Any) -> NoReturn:
raise NotImplementedError()
def clear(self) -> NoReturn:
"""Dictionary clear() is not implemented for
:class:`_sql.ColumnCollection`."""
raise NotImplementedError()
def remove(self, column: Any) -> NoReturn:
raise NotImplementedError()
def update(self, iter_: Any) -> NoReturn:
"""Dictionary update() is not implemented for
:class:`_sql.ColumnCollection`."""
raise NotImplementedError()
# https://github.com/python/mypy/issues/4266
__hash__: Optional[int] = None # type: ignore
def _populate_separate_keys(
self, iter_: Iterable[Tuple[_COLKEY, _COL_co]]
) -> None:
"""populate from an iterator of (key, column)"""
self._collection[:] = collection = [
(k, c, _ColumnMetrics(self, c)) for k, c in iter_
]
self._colset.update(c._deannotate() for _, c, _ in collection)
self._index.update(
{idx: (k, c) for idx, (k, c, _) in enumerate(collection)}
)
self._index.update({k: (k, col) for k, col, _ in reversed(collection)})
def add(
self,
column: ColumnElement[Any],
key: Optional[_COLKEY] = None,
) -> None:
"""Add a column to this :class:`_sql.ColumnCollection`.
.. note::
This method is **not normally used by user-facing code**, as the
:class:`_sql.ColumnCollection` is usually part of an existing
object such as a :class:`_schema.Table`. To add a
:class:`_schema.Column` to an existing :class:`_schema.Table`
object, use the :meth:`_schema.Table.append_column` method.
"""
colkey: _COLKEY
if key is None:
colkey = column.key # type: ignore
else:
colkey = key
l = len(self._collection)
# don't really know how this part is supposed to work w/ the
# covariant thing
_column = cast(_COL_co, column)
self._collection.append(
(colkey, _column, _ColumnMetrics(self, _column))
)
self._colset.add(_column._deannotate())
self._index[l] = (colkey, _column)
if colkey not in self._index:
self._index[colkey] = (colkey, _column)
def __getstate__(self) -> Dict[str, Any]:
return {
"_collection": [(k, c) for k, c, _ in self._collection],
"_index": self._index,
}
def __setstate__(self, state: Dict[str, Any]) -> None:
object.__setattr__(self, "_index", state["_index"])
object.__setattr__(
self, "_proxy_index", collections.defaultdict(util.OrderedSet)
)
object.__setattr__(
self,
"_collection",
[
(k, c, _ColumnMetrics(self, c))
for (k, c) in state["_collection"]
],
)
object.__setattr__(
self, "_colset", {col for k, col, _ in self._collection}
)
def contains_column(self, col: ColumnElement[Any]) -> bool:
"""Checks if a column object exists in this collection"""
if col not in self._colset:
if isinstance(col, str):
raise exc.ArgumentError(
"contains_column cannot be used with string arguments. "
"Use ``col_name in table.c`` instead."
)
return False
else:
return True
def as_readonly(self) -> ReadOnlyColumnCollection[_COLKEY, _COL_co]:
"""Return a "read only" form of this
:class:`_sql.ColumnCollection`."""
return ReadOnlyColumnCollection(self)
def _init_proxy_index(self) -> None:
"""populate the "proxy index", if empty.
proxy index is added in 2.0 to provide more efficient operation
for the corresponding_column() method.
For reasons of both time to construct new .c collections as well as
memory conservation for large numbers of large .c collections, the
proxy_index is only filled if corresponding_column() is called. once
filled it stays that way, and new _ColumnMetrics objects created after
that point will populate it with new data. Note this case would be
unusual, if not nonexistent, as it means a .c collection is being
mutated after corresponding_column() were used, however it is tested in
test/base/test_utils.py.
"""
pi = self._proxy_index
if pi:
return
for _, _, metrics in self._collection:
eps = metrics.column._expanded_proxy_set
for eps_col in eps:
pi[eps_col].add(metrics)
def corresponding_column(
self, column: _COL, require_embedded: bool = False
) -> Optional[Union[_COL, _COL_co]]:
"""Given a :class:`_expression.ColumnElement`, return the exported
:class:`_expression.ColumnElement` object from this
:class:`_expression.ColumnCollection`
which corresponds to that original :class:`_expression.ColumnElement`
via a common
ancestor column.
:param column: the target :class:`_expression.ColumnElement`
to be matched.
:param require_embedded: only return corresponding columns for
the given :class:`_expression.ColumnElement`, if the given
:class:`_expression.ColumnElement`
is actually present within a sub-element
of this :class:`_expression.Selectable`.
Normally the column will match if
it merely shares a common ancestor with one of the exported
columns of this :class:`_expression.Selectable`.
.. seealso::
:meth:`_expression.Selectable.corresponding_column`
- invokes this method
against the collection returned by
:attr:`_expression.Selectable.exported_columns`.
.. versionchanged:: 1.4 the implementation for ``corresponding_column``
was moved onto the :class:`_expression.ColumnCollection` itself.
"""
# TODO: cython candidate
# don't dig around if the column is locally present
if column in self._colset:
return column
selected_intersection, selected_metrics = None, None
target_set = column.proxy_set
pi = self._proxy_index
if not pi:
self._init_proxy_index()
for current_metrics in (
mm for ts in target_set if ts in pi for mm in pi[ts]
):
if not require_embedded or current_metrics.embedded(target_set):
if selected_metrics is None:
# no corresponding column yet, pick this one.
selected_metrics = current_metrics
continue
current_intersection = target_set.intersection(
current_metrics.column._expanded_proxy_set
)
if selected_intersection is None:
selected_intersection = target_set.intersection(
selected_metrics.column._expanded_proxy_set
)
if len(current_intersection) > len(selected_intersection):
# 'current' has a larger field of correspondence than
# 'selected'. i.e. selectable.c.a1_x->a1.c.x->table.c.x
# matches a1.c.x->table.c.x better than
# selectable.c.x->table.c.x does.
selected_metrics = current_metrics
selected_intersection = current_intersection
elif current_intersection == selected_intersection:
# they have the same field of correspondence. see
# which proxy_set has fewer columns in it, which
# indicates a closer relationship with the root
# column. Also take into account the "weight"
# attribute which CompoundSelect() uses to give
# higher precedence to columns based on vertical
# position in the compound statement, and discard
# columns that have no reference to the target
# column (also occurs with CompoundSelect)
selected_col_distance = sum(
[
sc._annotations.get("weight", 1)
for sc in (
selected_metrics.column._uncached_proxy_list()
)
if sc.shares_lineage(column)
],
)
current_col_distance = sum(
[
sc._annotations.get("weight", 1)
for sc in (
current_metrics.column._uncached_proxy_list()
)
if sc.shares_lineage(column)
],
)
if current_col_distance < selected_col_distance:
selected_metrics = current_metrics
selected_intersection = current_intersection
return selected_metrics.column if selected_metrics else None
_NAMEDCOL = TypeVar("_NAMEDCOL", bound="NamedColumn[Any]")
| ColumnCollection |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 61584,
"end": 62068
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("label", "description", "identifier")
label = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="label")
description = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="description"
)
identifier = sgqlc.types.Field(
sgqlc.types.non_null(String), graphql_name="identifier"
)
| CheckRunAction |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 864620,
"end": 865018
} | class ____(sgqlc.types.Type):
"""An edge in a connection."""
__schema__ = github_schema
__field_names__ = ("cursor", "node")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
"""A cursor for use in pagination."""
node = sgqlc.types.Field("ProjectV2Workflow", graphql_name="node")
"""The item at the end of the edge."""
| ProjectV2WorkflowEdge |
python | PrefectHQ__prefect | tests/cli/test_deploy.py | {
"start": 230772,
"end": 236403
} | class ____:
"""Regression tests for deployment trigger templating (issue #19348)"""
async def test_deployment_trigger_with_boolean_enabled_after_templating(self):
"""
Regression test for issue #19348: ensure that trigger initialization
works correctly when the 'enabled' field is a boolean (as it would be
after Jinja template resolution from "{{ prefect.variables.is_prod }}").
This test verifies that the fix (moving trigger initialization after
templating) allows boolean values to pass validation.
"""
# Simulate a trigger spec AFTER templating has resolved the Jinja variable
# from "{{ prefect.variables.is_prod }}" to True
trigger_spec = {
"enabled": True, # This would have been "{{ prefect.variables.is_prod }}" before templating
"match": {"prefect.resource.id": "prefect.flow-run.*"},
"expect": ["prefect.flow-run.Completed"],
}
# This should not raise a validation error
triggers = _initialize_deployment_triggers("test-deployment", [trigger_spec])
assert len(triggers) == 1
assert triggers[0].enabled is True
assert triggers[0].name == "test-deployment__automation_1"
async def test_deployment_trigger_event_parameters_preserved(
self, project_dir: Path, prefect_client: PrefectClient
):
"""
Regression test for issue #19501: ensure that event template parameters
in triggers are preserved during deployment, not stripped out by apply_values().
When triggers with parameters like {"name": "{{ event.name }}"} are deployed,
these runtime event templates should be preserved in the automation action,
not removed because they're not in the build step outputs.
"""
await prefect_client.create_work_pool(
WorkPoolCreate(name="test-pool", type="test")
)
# Create a flow file
flow_file = project_dir / "flow.py"
flow_file.write_text("""
from prefect import flow
@flow
def say_hello(name: str) -> str:
return f"Hello, {name}!"
""")
# Create prefect.yaml with triggers that have event template parameters
prefect_yaml = project_dir / "prefect.yaml"
prefect_yaml.write_text("""
deployments:
- name: say-hello
entrypoint: flow.py:say_hello
work_pool:
name: test-pool
triggers:
- type: event
enabled: true
match:
prefect.resource.id: hello.world
expect:
- external.resource.pinged
parameters:
name: "{{ event.name }}"
""")
# Deploy the flow
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
)
# Read the created automation
automations = await prefect_client.read_automations_by_name(
"say-hello__automation_1"
)
assert len(automations) == 1
automation = automations[0]
# Verify the automation action has the event template parameter preserved
assert len(automation.actions) == 1
action = automation.actions[0]
assert action.parameters == {"name": "{{ event.name }}"}
async def test_deployment_trigger_prefect_kind_jinja_parameters_preserved(
self, project_dir: Path, prefect_client: PrefectClient
):
"""
Regression test for issue #19501 (second case): ensure that event template
parameters using the __prefect_kind: jinja structure are preserved.
This tests the case where parameters are structured as:
{"event_id": {"template": "{{ event.id }}", "__prefect_kind": "jinja"}}
"""
await prefect_client.create_work_pool(
WorkPoolCreate(name="test-pool", type="test")
)
# Create a flow file
flow_file = project_dir / "flow.py"
flow_file.write_text("""
from prefect import flow
@flow
def process_event(event_id: str, fan_out: bool = False):
return f"Processing {event_id}"
""")
# Create prefect.yaml with triggers using __prefect_kind structure
prefect_yaml = project_dir / "prefect.yaml"
prefect_yaml.write_text("""
deployments:
- name: process-event
entrypoint: flow.py:process_event
work_pool:
name: test-pool
triggers:
- type: event
enabled: true
match:
prefect.resource.name: test-resource
expect:
- prefect.asset.materialization.succeeded
parameters:
event_id:
template: "{{ event.id }}"
__prefect_kind: jinja
fan_out: true
""")
# Deploy the flow
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
)
# Read the created automation
automations = await prefect_client.read_automations_by_name(
"process-event__automation_1"
)
assert len(automations) == 1
automation = automations[0]
# Verify the automation action has the event template parameter preserved
assert len(automation.actions) == 1
action = automation.actions[0]
# Both the template and __prefect_kind should be preserved
assert "event_id" in action.parameters
assert action.parameters["event_id"]["template"] == "{{ event.id }}"
assert action.parameters["event_id"]["__prefect_kind"] == "jinja"
assert action.parameters["fan_out"] is True
| TestDeploymentTriggerTemplating |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 21781,
"end": 22117
} | class ____(sgqlc.types.Enum):
"""Properties by which Enterprise Server user accounts upload
connections can be ordered.
Enumeration Choices:
* `CREATED_AT`: Order user accounts uploads by creation time
"""
__schema__ = github_schema
__choices__ = ("CREATED_AT",)
| EnterpriseServerUserAccountsUploadOrderField |
python | getsentry__sentry | tests/sentry/codecov/endpoints/test_repository_token_regenerate.py | {
"start": 126,
"end": 5884
} | class ____(APITestCase):
endpoint_name = "sentry-api-0-repository-token-regenerate"
def setUp(self) -> None:
super().setUp()
self.user = self.create_user(email="user@example.com")
self.organization = self.create_organization(owner=self.user)
self.integration = self.create_integration(
organization=self.organization,
external_id="1234",
name="testowner",
provider="github",
)
self.login_as(user=self.user)
def reverse_url(self, owner="testowner", repository="testrepo"):
"""Custom reverse URL method to handle required URL parameters"""
return reverse(
self.endpoint_name,
kwargs={
"organization_id_or_slug": self.organization.slug,
"owner": self.integration.id,
"repository": repository,
},
)
@patch(
"sentry.codecov.endpoints.repository_token_regenerate.repository_token_regenerate.CodecovApiClient"
)
def test_post_calls_api(self, mock_codecov_client_class) -> None:
"""Test that when use_codecov param is provided, it calls the Codecov API"""
mock_graphql_response = {
"data": {
"regenerateRepositoryUploadToken": {
"token": "codecov-generated-token-12345",
}
}
}
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
url = self.reverse_url()
response = self.client.post(url, data={})
mock_codecov_client_class.assert_called_once_with(git_provider_org="testowner")
mock_codecov_client_instance.query.assert_called_once_with(
query=ANY,
variables={
"owner": "testowner",
"repoName": "testrepo",
},
)
assert response.status_code == 200
assert response.data["token"] == "codecov-generated-token-12345"
@patch(
"sentry.codecov.endpoints.repository_token_regenerate.repository_token_regenerate.CodecovApiClient"
)
def test_post_handles_errors(self, mock_codecov_client_class) -> None:
"""Test that GraphQL errors are properly handled when calling Codecov API"""
mock_graphql_response = {
"data": {
"regenerateRepositoryUploadToken": {
"error": {
"__typename": "ValidationError",
"message": "Repository not found",
},
}
}
}
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
url = self.reverse_url()
response = self.client.post(url, data={})
assert response.status_code == 400
assert response.data[0] == "Repository not found"
@patch(
"sentry.codecov.endpoints.repository_token_regenerate.repository_token_regenerate.CodecovApiClient"
)
def test_scope_map_enforcement(self, mock_codecov_client_class) -> None:
"""Test that the scope map permissions are properly enforced"""
# Mock the Codecov API client to avoid actual API calls during permission testing
mock_graphql_response = {
"data": {
"regenerateRepositoryUploadToken": {
"token": "codecov-generated-token-12345",
}
}
}
mock_codecov_client_instance = Mock()
mock_response = Mock()
mock_response.json.return_value = mock_graphql_response
mock_codecov_client_instance.query.return_value = mock_response
mock_codecov_client_class.return_value = mock_codecov_client_instance
# Create a user with only org:read permission
user_with_read_only = self.create_user("readonly@test.com")
self.create_member(
user=user_with_read_only,
organization=self.organization,
role="member", # member role has org:read
)
# Create a user with org:write permission
user_with_write = self.create_user("write@test.com")
self.create_member(
user=user_with_write,
organization=self.organization,
role="admin", # admin role has org:write
)
# Create a user with no permissions
user_without_permissions = self.create_user("noperms@test.com")
# Don't add them to the organization
url = self.reverse_url()
# Test that user with org:read can access the endpoint
self.login_as(user_with_read_only)
response = self.client.post(url, data={})
# Should not be a 403 Forbidden (permission denied)
assert response.status_code == 200
# Test that user with org:write can access the endpoint
self.login_as(user_with_write)
response = self.client.post(url, data={})
# Should not be a 403 Forbidden (permission denied)
assert response.status_code == 200
# Test that user without permissions cannot access the endpoint
self.login_as(user_without_permissions)
response = self.client.post(url, data={})
# Should be 403 Forbidden (permission denied)
assert response.status_code == 403
| RepositoryTokenRegenerateEndpointTest |
python | openai__openai-python | src/openai/types/responses/function_tool.py | {
"start": 223,
"end": 796
} | class ____(BaseModel):
name: str
"""The name of the function to call."""
parameters: Optional[Dict[str, object]] = None
"""A JSON schema object describing the parameters of the function."""
strict: Optional[bool] = None
"""Whether to enforce strict parameter validation. Default `true`."""
type: Literal["function"]
"""The type of the function tool. Always `function`."""
description: Optional[str] = None
"""A description of the function.
Used by the model to determine whether or not to call the function.
"""
| FunctionTool |
python | python-visualization__folium | folium/vector_layers.py | {
"start": 6855,
"end": 8348
} | class ____(BaseMultiLocation):
"""Draw polygon overlays on a map.
See :func:`folium.vector_layers.path_options` for the `Path` options.
Parameters
----------
locations: list of points (latitude, longitude)
- One list of coordinate pairs to define a polygon. You don't have to
add a last point equal to the first point.
- If you pass a list with multiple of those it will make a multi-
polygon.
popup: string or folium.Popup, default None
Input text or visualization for object displayed when clicking.
tooltip: str or folium.Tooltip, default None
Display a text when hovering over the object.
**kwargs
Other valid (possibly inherited) options. See:
https://leafletjs.com/reference.html#polygon
"""
_template = Template(
"""
{% macro script(this, kwargs) %}
var {{ this.get_name() }} = L.polygon(
{{ this.locations|tojson }},
{{ this.options|tojson }}
).addTo({{this._parent.get_name()}});
{% endmacro %}
"""
)
def __init__(
self,
locations: TypeMultiLine,
popup: Union[Popup, str, None] = None,
tooltip: Union[Tooltip, str, None] = None,
**kwargs: TypePathOptions,
):
super().__init__(locations, popup=popup, tooltip=tooltip)
self._name = "Polygon"
self.options = path_options(line=True, radius=None, **kwargs)
| Polygon |
python | huggingface__transformers | src/transformers/models/regnet/convert_regnet_to_pytorch.py | {
"start": 1347,
"end": 2190
} | class ____:
module: nn.Module
traced: list[nn.Module] = field(default_factory=list)
handles: list = field(default_factory=list)
def _forward_hook(self, m, inputs: Tensor, outputs: Tensor):
has_not_submodules = len(list(m.modules())) == 1 or isinstance(m, (nn.Conv2d, nn.BatchNorm2d))
if has_not_submodules:
self.traced.append(m)
def __call__(self, x: Tensor):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook))
self.module(x)
[x.remove() for x in self.handles]
return self
@property
def parametrized(self):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda x: len(list(x.state_dict().keys())) > 0, self.traced))
@dataclass
| Tracker |
python | pandas-dev__pandas | asv_bench/benchmarks/groupby.py | {
"start": 12358,
"end": 16435
} | class ____:
param_names = ["dtype", "method", "application", "ncols"]
params = [
["int", "int16", "float", "object", "datetime", "uint"],
[
"all",
"any",
"bfill",
"count",
"cumcount",
"cummax",
"cummin",
"cumprod",
"cumsum",
"describe",
"diff",
"ffill",
"first",
"head",
"last",
"max",
"min",
"median",
"mean",
"nunique",
"pct_change",
"prod",
"quantile",
"rank",
"sem",
"shift",
"size",
"skew",
"std",
"sum",
"tail",
"unique",
"value_counts",
"var",
],
["direct", "transformation"],
[1, 5],
["cython", "numba"],
]
def setup(self, dtype, method, application, ncols, engine):
if method in method_blocklist.get(dtype, {}):
raise NotImplementedError # skip benchmark
if ncols != 1 and method in ["value_counts", "unique"]:
# DataFrameGroupBy doesn't have these methods
raise NotImplementedError
if application == "transformation" and method in [
"describe",
"head",
"tail",
"unique",
"value_counts",
"size",
]:
# DataFrameGroupBy doesn't have these methods
raise NotImplementedError
# Numba currently doesn't support
# multiple transform functions or strs for transform,
# grouping on multiple columns
# and we lack kernels for a bunch of methods
if (
(engine == "numba" and method in _numba_unsupported_methods)
or ncols > 1
or application == "transformation"
or dtype == "datetime"
):
raise NotImplementedError
if method == "describe":
ngroups = 20
elif method == "skew":
ngroups = 100
else:
ngroups = 1000
size = ngroups * 2
rng = np.arange(ngroups).reshape(-1, 1)
rng = np.broadcast_to(rng, (len(rng), ncols))
taker = np.random.randint(0, ngroups, size=size)
values = rng.take(taker, axis=0)
if dtype == "int":
key = np.random.randint(0, size, size=size)
elif dtype in ("int16", "uint"):
key = np.random.randint(0, size, size=size, dtype=dtype)
elif dtype == "float":
key = np.concatenate(
[np.random.random(ngroups) * 0.1, np.random.random(ngroups) * 10.0]
)
elif dtype == "object":
key = ["foo"] * size
elif dtype == "datetime":
key = date_range("1/1/2011", periods=size, freq="s")
cols = [f"values{n}" for n in range(ncols)]
df = DataFrame(values, columns=cols)
df["key"] = key
if len(cols) == 1:
cols = cols[0]
# Not everything supports the engine keyword yet
kwargs = {}
if engine == "numba":
kwargs["engine"] = engine
if application == "transformation":
self.as_group_method = lambda: df.groupby("key")[cols].transform(
method, **kwargs
)
self.as_field_method = lambda: df.groupby(cols)["key"].transform(
method, **kwargs
)
else:
self.as_group_method = partial(
getattr(df.groupby("key")[cols], method), **kwargs
)
self.as_field_method = partial(
getattr(df.groupby(cols)["key"], method), **kwargs
)
def time_dtype_as_group(self, dtype, method, application, ncols, engine):
self.as_group_method()
def time_dtype_as_field(self, dtype, method, application, ncols, engine):
self.as_field_method()
| GroupByMethods |
python | readthedocs__readthedocs.org | readthedocs/storage/mixins.py | {
"start": 152,
"end": 746
} | class ____:
"""
Override the hostname when outputting URLs.
This is useful for use with a CDN or when proxying outside of Blob Storage
See: https://github.com/jschneier/django-storages/pull/658
"""
override_hostname = None # Just the hostname without scheme (eg. 'assets.readthedocs.org')
def url(self, *args, **kwargs):
url = super().url(*args, **kwargs)
if self.override_hostname:
parts = list(urlsplit(url))
parts[1] = self.override_hostname
url = urlunsplit(parts)
return url
| OverrideHostnameMixin |
python | keras-team__keras | keras/src/distillation/distillation_loss_test.py | {
"start": 2194,
"end": 8304
} | class ____(TestCase):
"""End-to-end distillation tests with real models."""
def setUp(self):
"""Set up models and test data for all tests."""
super().setUp()
# Create teacher model
self.teacher = keras.Sequential(
[
keras.layers.Dense(
32, activation="relu", name="teacher_dense_1"
),
keras.layers.Dense(
16, activation="relu", name="teacher_dense_2"
),
keras.layers.Dense(10, name="teacher_output"),
]
)
# Create student model
self.student = keras.Sequential(
[
keras.layers.Dense(
32, activation="relu", name="student_dense_1"
),
keras.layers.Dense(
16, activation="relu", name="student_dense_2"
),
keras.layers.Dense(10, name="student_output"),
]
)
self.x = np.random.random((32, 20)).astype(np.float32)
self.y = np.random.randint(0, 10, (32,)).astype(np.int32)
self.teacher(self.x[:2])
self.student(self.x[:2])
def test_logits_distillation_end_to_end(self):
"""Test end-to-end logits distillation with real models."""
# Create distiller
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=LogitsDistillation(temperature=3.0),
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test training
history = distiller.fit(self.x, self.y, epochs=2, verbose=0)
# Verify training completed
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Verify loss values are reasonable
final_loss = history.history["total_loss"][-1]
self.assertTrue(np.isfinite(final_loss))
self.assertGreater(final_loss, 0.0)
# Test prediction
predictions = distiller.predict(self.x[:5], verbose=0)
self.assertEqual(predictions.shape, (5, 10))
# Test student model access
student_model = distiller.student
self.assertIsInstance(student_model, keras.Model)
def test_feature_distillation_end_to_end(self):
"""Test end-to-end feature distillation with real models."""
# Create distiller with feature distillation
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=FeatureDistillation(
loss="mse",
teacher_layer_name="teacher_dense_1",
student_layer_name="student_dense_1",
),
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test training
history = distiller.fit(self.x, self.y, epochs=2, verbose=0)
# Verify training completed
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Verify feature extraction worked
self.assertIsNotNone(distiller._teacher_feature_extractor)
self.assertIsNotNone(distiller._student_feature_extractor)
# Test that feature extractors have correct outputs
self.assertEqual(
len(distiller._teacher_feature_extractor.outputs), 2
) # final + dense_1
self.assertEqual(
len(distiller._student_feature_extractor.outputs), 2
) # final + dense_1
def test_multi_distillation_loss_distillation_end_to_end(self):
"""Test end-to-end distillation with multiple distillation_loss."""
# Create multiple distillation_loss
distillation_loss = [
LogitsDistillation(temperature=3.0),
FeatureDistillation(
loss="mse",
teacher_layer_name="teacher_dense_1",
student_layer_name="student_dense_1",
),
FeatureDistillation(
loss="mse",
teacher_layer_name="teacher_dense_2",
student_layer_name="student_dense_2",
),
]
# Create distiller
distiller = Distiller(
teacher=self.teacher,
student=self.student,
distillation_losses=distillation_loss,
distillation_loss_weights=[1.0, 0.5, 0.3],
student_loss_weight=0.5,
)
# Compile distiller
distiller.compile(
optimizer="adam",
loss="sparse_categorical_crossentropy",
metrics=["accuracy"],
)
# Test training
history = distiller.fit(self.x, self.y, epochs=2, verbose=0)
# Verify training completed
self.assertIn("total_loss", history.history)
self.assertIn("student_loss", history.history)
self.assertIn("distillation_loss", history.history)
# Verify efficient feature extraction
self.assertIsNotNone(distiller._teacher_feature_extractor)
self.assertIsNotNone(distiller._student_feature_extractor)
# Should have 3 outputs: final + dense_1 + dense_2
self.assertEqual(len(distiller._teacher_feature_extractor.outputs), 3)
self.assertEqual(len(distiller._student_feature_extractor.outputs), 3)
# Test that loss decreases (learning is happening)
initial_loss = history.history["total_loss"][0]
final_loss = history.history["total_loss"][-1]
self.assertTrue(np.isfinite(initial_loss))
self.assertTrue(np.isfinite(final_loss))
| TestEndToEndDistillation |
python | langchain-ai__langchain | libs/core/tests/unit_tests/test_tools.py | {
"start": 2802,
"end": 2934
} | class ____(BaseModel):
"""Return the arguments directly."""
arg1: int
arg2: bool
arg3: dict | None = None
| _MockSchema |
python | sympy__sympy | sympy/stats/random_matrix_models.py | {
"start": 2140,
"end": 3950
} | class ____(RandomMatrixEnsembleModel):
"""
Abstract class for Gaussian ensembles.
Contains the properties common to all the
gaussian ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Random_matrix#Gaussian_ensembles
.. [2] https://arxiv.org/pdf/1712.07903.pdf
"""
def _compute_normalization_constant(self, beta, n):
"""
Helper function for computing normalization
constant for joint probability density of eigen
values of Gaussian ensembles.
References
==========
.. [1] https://en.wikipedia.org/wiki/Selberg_integral#Mehta's_integral
"""
n = S(n)
prod_term = lambda j: gamma(1 + beta*S(j)/2)/gamma(S.One + beta/S(2))
j = Dummy('j', integer=True, positive=True)
term1 = Product(prod_term(j), (j, 1, n)).doit()
term2 = (2/(beta*n))**(beta*n*(n - 1)/4 + n/2)
term3 = (2*pi)**(n/2)
return term1 * term2 * term3
def _compute_joint_eigen_distribution(self, beta):
"""
Helper function for computing the joint
probability distribution of eigen values
of the random matrix.
"""
n = self.dimension
Zbn = self._compute_normalization_constant(beta, n)
l = IndexedBase('l')
i = Dummy('i', integer=True, positive=True)
j = Dummy('j', integer=True, positive=True)
k = Dummy('k', integer=True, positive=True)
term1 = exp((-S(n)/2) * Sum(l[k]**2, (k, 1, n)).doit())
sub_term = Lambda(i, Product(Abs(l[j] - l[i])**beta, (j, i + 1, n)))
term2 = Product(sub_term(i).doit(), (i, 1, n - 1)).doit()
syms = ArrayComprehension(l[k], (k, 1, n)).doit()
return Lambda(tuple(syms), (term1 * term2)/Zbn)
| GaussianEnsembleModel |
python | keon__algorithms | tests/test_strings.py | {
"start": 1548,
"end": 2609
} | class ____(unittest.TestCase):
"""[summary]
Test for the file breaking_bad.py
Arguments:
unittest {[type]} -- [description]
"""
def setUp(self):
self.words = ['Amazon', 'Microsoft', 'Google']
self.symbols = ['i', 'Am', 'cro', 'le', 'abc']
self.result = ['M[i]crosoft', '[Am]azon', 'Mi[cro]soft', 'Goog[le]']
def test_match_symbol(self):
self.assertEqual(self.result, match_symbol(self.words, self.symbols))
def test_match_symbol_1(self):
self.assertEqual(['[Am]azon', 'Mi[cro]soft', 'Goog[le]'],
match_symbol_1(self.words, self.symbols))
def test_bracket(self):
self.assertEqual(('[Am]azon', 'Mi[cro]soft', 'Goog[le]'),
bracket(self.words, self.symbols))
self.assertEqual(('Amazon', 'Microsoft', 'Google'),
bracket(self.words, ['thisshouldnotmatch']))
self.assertEqual(('Amazon', 'M[i]crosoft', 'Google'),
bracket(self.words, ['i', 'i']))
| TestBreakingBad |
python | numpy__numpy | numpy/lib/_iotools.py | {
"start": 13009,
"end": 13155
} | class ____(ConverterError):
"""
Exception raised when an attempt is made to upgrade a locked converter.
"""
pass
| ConverterLockError |
python | huggingface__transformers | src/transformers/models/mobilenet_v1/modeling_mobilenet_v1.py | {
"start": 4421,
"end": 4701
} | class ____(PreTrainedModel):
config: MobileNetV1Config
base_model_prefix = "mobilenet_v1"
main_input_name = "pixel_values"
input_modalities = ("image",)
supports_gradient_checkpointing = False
_no_split_modules = []
@auto_docstring
| MobileNetV1PreTrainedModel |
python | django__django | tests/m2m_regress/models.py | {
"start": 1849,
"end": 1968
} | class ____(models.Model):
name = models.CharField(max_length=30)
friends = models.ManyToManyField(auth.User)
| User |
python | pytest-dev__pytest | testing/test_doctest.py | {
"start": 35139,
"end": 38613
} | class ____:
"""
If all examples in a doctest are skipped due to the SKIP option, then
the tests should be SKIPPED rather than PASSED. (#957)
"""
@pytest.fixture(params=["text", "module"])
def makedoctest(self, pytester, request):
def makeit(doctest):
mode = request.param
if mode == "text":
pytester.maketxtfile(doctest)
else:
assert mode == "module"
pytester.makepyfile(f'"""\n{doctest}"""')
return makeit
def test_one_skipped(self, pytester, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
4
"""
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=1)
def test_one_skipped_failed(self, pytester, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2
200
"""
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(failed=1)
def test_all_skipped(self, pytester, makedoctest):
makedoctest(
"""
>>> 1 + 1 # doctest: +SKIP
2
>>> 2 + 2 # doctest: +SKIP
200
"""
)
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(skipped=1)
def test_vacuous_all_skipped(self, pytester, makedoctest):
makedoctest("")
reprec = pytester.inline_run("--doctest-modules")
reprec.assertoutcome(passed=0, skipped=0)
def test_continue_on_failure(self, pytester: Pytester):
pytester.maketxtfile(
test_something="""
>>> i = 5
>>> def foo():
... raise ValueError('error1')
>>> foo()
>>> i
>>> i + 2
7
>>> i + 1
"""
)
result = pytester.runpytest(
"--doctest-modules", "--doctest-continue-on-failure"
)
result.assert_outcomes(passed=0, failed=1)
# The lines that contains the failure are 4, 5, and 8. The first one
# is a stack trace and the other two are mismatches.
result.stdout.fnmatch_lines(
["*4: UnexpectedException*", "*5: DocTestFailure*", "*8: DocTestFailure*"]
)
def test_skipping_wrapped_test(self, pytester):
"""
Issue 8796: INTERNALERROR raised when skipping a decorated DocTest
through pytest_collection_modifyitems.
"""
pytester.makeconftest(
"""
import pytest
from _pytest.doctest import DoctestItem
def pytest_collection_modifyitems(config, items):
skip_marker = pytest.mark.skip()
for item in items:
if isinstance(item, DoctestItem):
item.add_marker(skip_marker)
"""
)
pytester.makepyfile(
"""
from contextlib import contextmanager
@contextmanager
def my_config_context():
'''
>>> import os
'''
"""
)
result = pytester.runpytest("--doctest-modules")
assert "INTERNALERROR" not in result.stdout.str()
result.assert_outcomes(skipped=1)
| TestDoctestSkips |
python | pennersr__django-allauth | allauth/account/migrations/0002_email_max_length.py | {
"start": 211,
"end": 841
} | class ____(migrations.Migration):
dependencies = [
("account", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="emailaddress",
name="email",
field=models.EmailField(
unique=UNIQUE_EMAIL,
max_length=EMAIL_MAX_LENGTH,
verbose_name="email address",
),
),
]
if not UNIQUE_EMAIL:
operations += [
migrations.AlterUniqueTogether(
name="emailaddress",
unique_together=set([("user", "email")]),
),
]
| Migration |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 584220,
"end": 584838
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"repository",
"repository_name",
"repository_resource_path",
"repository_url",
)
repository = sgqlc.types.Field("Repository", graphql_name="repository")
repository_name = sgqlc.types.Field(String, graphql_name="repositoryName")
repository_resource_path = sgqlc.types.Field(
URI, graphql_name="repositoryResourcePath"
)
repository_url = sgqlc.types.Field(URI, graphql_name="repositoryUrl")
| RepositoryAuditEntryData |
python | plotly__plotly.py | plotly/graph_objs/parcats/_dimension.py | {
"start": 233,
"end": 12535
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "parcats"
_path_str = "parcats.dimension"
_valid_props = {
"categoryarray",
"categoryarraysrc",
"categoryorder",
"displayindex",
"label",
"ticktext",
"ticktextsrc",
"values",
"valuessrc",
"visible",
}
@property
def categoryarray(self):
"""
Sets the order in which categories in this dimension appear.
Only has an effect if `categoryorder` is set to "array". Used
with `categoryorder`.
The 'categoryarray' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["categoryarray"]
@categoryarray.setter
def categoryarray(self, val):
self["categoryarray"] = val
@property
def categoryarraysrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
The 'categoryarraysrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["categoryarraysrc"]
@categoryarraysrc.setter
def categoryarraysrc(self, val):
self["categoryarraysrc"] = val
@property
def categoryorder(self):
"""
Specifies the ordering logic for the categories in the
dimension. By default, plotly uses "trace", which specifies the
order that is present in the data supplied. Set `categoryorder`
to *category ascending* or *category descending* if order
should be determined by the alphanumerical order of the
category names. Set `categoryorder` to "array" to derive the
ordering from the attribute `categoryarray`. If a category is
not found in the `categoryarray` array, the sorting behavior
for that attribute will be identical to the "trace" mode. The
unspecified categories will follow the categories in
`categoryarray`.
The 'categoryorder' property is an enumeration that may be specified as:
- One of the following enumeration values:
['trace', 'category ascending', 'category descending',
'array']
Returns
-------
Any
"""
return self["categoryorder"]
@categoryorder.setter
def categoryorder(self, val):
self["categoryorder"] = val
@property
def displayindex(self):
"""
The display index of dimension, from left to right, zero
indexed, defaults to dimension index.
The 'displayindex' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
Returns
-------
int
"""
return self["displayindex"]
@displayindex.setter
def displayindex(self, val):
self["displayindex"] = val
@property
def label(self):
"""
The shown name of the dimension.
The 'label' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["label"]
@label.setter
def label(self, val):
self["label"] = val
@property
def ticktext(self):
"""
Sets alternative tick labels for the categories in this
dimension. Only has an effect if `categoryorder` is set to
"array". Should be an array the same length as `categoryarray`
Used with `categoryorder`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `ticktext`.
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
@property
def values(self):
"""
Dimension values. `values[n]` represents the category value of
the `n`th point in the dataset, therefore the `values` vector
for all dimensions must be the same (longer vectors will be
truncated).
The 'values' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["values"]
@values.setter
def values(self, val):
self["values"] = val
@property
def valuessrc(self):
"""
Sets the source reference on Chart Studio Cloud for `values`.
The 'valuessrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["valuessrc"]
@valuessrc.setter
def valuessrc(self, val):
self["valuessrc"] = val
@property
def visible(self):
"""
Shows the dimension when set to `true` (the default). Hides the
dimension for `false`.
The 'visible' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["visible"]
@visible.setter
def visible(self, val):
self["visible"] = val
@property
def _prop_descriptions(self):
return """\
categoryarray
Sets the order in which categories in this dimension
appear. Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the categories in the
dimension. By default, plotly uses "trace", which
specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`.
displayindex
The display index of dimension, from left to right,
zero indexed, defaults to dimension index.
label
The shown name of the dimension.
ticktext
Sets alternative tick labels for the categories in this
dimension. Only has an effect if `categoryorder` is set
to "array". Should be an array the same length as
`categoryarray` Used with `categoryorder`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
values
Dimension values. `values[n]` represents the category
value of the `n`th point in the dataset, therefore the
`values` vector for all dimensions must be the same
(longer vectors will be truncated).
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Shows the dimension when set to `true` (the default).
Hides the dimension for `false`.
"""
def __init__(
self,
arg=None,
categoryarray=None,
categoryarraysrc=None,
categoryorder=None,
displayindex=None,
label=None,
ticktext=None,
ticktextsrc=None,
values=None,
valuessrc=None,
visible=None,
**kwargs,
):
"""
Construct a new Dimension object
The dimensions (variables) of the parallel categories diagram.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.parcats.Dimension`
categoryarray
Sets the order in which categories in this dimension
appear. Only has an effect if `categoryorder` is set to
"array". Used with `categoryorder`.
categoryarraysrc
Sets the source reference on Chart Studio Cloud for
`categoryarray`.
categoryorder
Specifies the ordering logic for the categories in the
dimension. By default, plotly uses "trace", which
specifies the order that is present in the data
supplied. Set `categoryorder` to *category ascending*
or *category descending* if order should be determined
by the alphanumerical order of the category names. Set
`categoryorder` to "array" to derive the ordering from
the attribute `categoryarray`. If a category is not
found in the `categoryarray` array, the sorting
behavior for that attribute will be identical to the
"trace" mode. The unspecified categories will follow
the categories in `categoryarray`.
displayindex
The display index of dimension, from left to right,
zero indexed, defaults to dimension index.
label
The shown name of the dimension.
ticktext
Sets alternative tick labels for the categories in this
dimension. Only has an effect if `categoryorder` is set
to "array". Should be an array the same length as
`categoryarray` Used with `categoryorder`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
`ticktext`.
values
Dimension values. `values[n]` represents the category
value of the `n`th point in the dataset, therefore the
`values` vector for all dimensions must be the same
(longer vectors will be truncated).
valuessrc
Sets the source reference on Chart Studio Cloud for
`values`.
visible
Shows the dimension when set to `true` (the default).
Hides the dimension for `false`.
Returns
-------
Dimension
"""
super().__init__("dimensions")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.parcats.Dimension
constructor must be a dict or
an instance of :class:`plotly.graph_objs.parcats.Dimension`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("categoryarray", arg, categoryarray)
self._set_property("categoryarraysrc", arg, categoryarraysrc)
self._set_property("categoryorder", arg, categoryorder)
self._set_property("displayindex", arg, displayindex)
self._set_property("label", arg, label)
self._set_property("ticktext", arg, ticktext)
self._set_property("ticktextsrc", arg, ticktextsrc)
self._set_property("values", arg, values)
self._set_property("valuessrc", arg, valuessrc)
self._set_property("visible", arg, visible)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Dimension |
python | ray-project__ray | rllib/utils/tests/test_framework_agnostic_components.py | {
"start": 1128,
"end": 1195
} | class ____(DummyComponent):
pass
| NonAbstractChildOfDummyComponent |
python | huggingface__transformers | tests/models/hubert/test_modeling_hubert.py | {
"start": 16372,
"end": 21162
} | class ____(ModelTesterMixin, unittest.TestCase):
all_model_classes = (HubertForCTC, HubertForSequenceClassification, HubertModel) if is_torch_available() else ()
def setUp(self):
self.model_tester = HubertModelTester(
self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True
)
self.config_tester = ConfigTester(self, config_class=HubertConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_batched_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_batch_inference(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_seq_classifier_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_seq_classifier_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_seq_classifier_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
@unittest.skip(reason="Hubert has no inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Hubert has input_values instead of input_ids")
def test_forward_signature(self):
pass
@unittest.skip(reason="Hubert has no tokens embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="Hubert has no inputs_embeds")
def test_model_get_set_embeddings(self):
pass
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# force eager attention to support output attentions
config._attn_implementation = "eager"
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_values = inputs_dict["input_values"]
input_lengths = torch.tensor(
[input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model = HubertModel.from_pretrained("facebook/hubert-large-ls960-ft")
self.assertIsNotNone(model)
@require_torch
| HubertRobustModelTest |
python | wandb__wandb | tests/system_tests/test_functional/console_capture/patching_exception.py | {
"start": 159,
"end": 1708
} | class ____(io.TextIOBase):
def __init__(self, delegate: TextIO) -> None:
self._delegate = delegate
def __setattr__(self, name, value):
if name == "write":
raise _TestError()
return super().__setattr__(name, value)
if __name__ == "__main__":
sys.stdout = MyStdout(sys.stdout)
# This will attempt to overwrite `sys.stdout.write` on import,
# which will raise an error that must not be propagated.
from wandb.sdk.lib import console_capture
try:
console_capture.capture_stdout(lambda *unused: None)
except console_capture.CannotCaptureConsoleError as e:
if e.__cause__ and isinstance(e.__cause__, _TestError):
print("[stdout] Caught _TestError!", file=sys.stderr)
else:
print(
"[stdout] Caught error, but its cause is not _TestError!",
file=sys.stderr,
)
sys.exit(1)
else:
print("[stdout] No error!", file=sys.stderr)
sys.exit(1)
try:
console_capture.capture_stderr(lambda *unused: None)
except console_capture.CannotCaptureConsoleError as e:
if e.__cause__ and isinstance(e.__cause__, _TestError):
print("[stderr] Caught _TestError!", file=sys.stderr)
else:
print(
"[stderr] Caught error, but its cause is not _TestError!",
file=sys.stderr,
)
sys.exit(1)
else:
print("[stderr] No error!", file=sys.stderr)
sys.exit(1)
| MyStdout |
python | gevent__gevent | src/greentest/3.14/test_httpservers.py | {
"start": 16228,
"end": 34914
} | class ____(BaseTestCase):
class request_handler(NoLogRequestHandler, SimpleHTTPRequestHandler):
pass
def setUp(self):
super().setUp()
self.cwd = os.getcwd()
basetempdir = tempfile.gettempdir()
os.chdir(basetempdir)
self.data = b'We are the knights who say Ni!'
self.tempdir = tempfile.mkdtemp(dir=basetempdir)
self.tempdir_name = os.path.basename(self.tempdir)
self.base_url = '/' + self.tempdir_name
tempname = os.path.join(self.tempdir, 'test')
with open(tempname, 'wb') as temp:
temp.write(self.data)
temp.flush()
mtime = os.stat(tempname).st_mtime
# compute last modification datetime for browser cache tests
last_modif = datetime.datetime.fromtimestamp(mtime,
datetime.timezone.utc)
self.last_modif_datetime = last_modif.replace(microsecond=0)
self.last_modif_header = email.utils.formatdate(
last_modif.timestamp(), usegmt=True)
def tearDown(self):
try:
os.chdir(self.cwd)
try:
shutil.rmtree(self.tempdir)
except:
pass
finally:
super().tearDown()
def check_status_and_reason(self, response, status, data=None):
def close_conn():
"""Don't close reader yet so we can check if there was leftover
buffered input"""
nonlocal reader
reader = response.fp
response.fp = None
reader = None
response._close_conn = close_conn
body = response.read()
self.assertTrue(response)
self.assertEqual(response.status, status)
self.assertIsNotNone(response.reason)
if data:
self.assertEqual(data, body)
# Ensure the server has not set up a persistent connection, and has
# not sent any extra data
self.assertEqual(response.version, 10)
self.assertEqual(response.msg.get("Connection", "close"), "close")
self.assertEqual(reader.read(30), b'', 'Connection should be closed')
reader.close()
return body
def check_list_dir_dirname(self, dirname, quotedname=None):
fullpath = os.path.join(self.tempdir, dirname)
try:
os.mkdir(os.path.join(self.tempdir, dirname))
except (OSError, UnicodeEncodeError):
self.skipTest(f'Can not create directory {dirname!a} '
f'on current file system')
if quotedname is None:
quotedname = urllib.parse.quote(dirname, errors='surrogatepass')
response = self.request(self.base_url + '/' + quotedname + '/')
body = self.check_status_and_reason(response, HTTPStatus.OK)
displaypath = html.escape(f'{self.base_url}/{dirname}/', quote=False)
enc = sys.getfilesystemencoding()
prefix = f'listing for {displaypath}</'.encode(enc, 'surrogateescape')
self.assertIn(prefix + b'title>', body)
self.assertIn(prefix + b'h1>', body)
def check_list_dir_filename(self, filename):
fullpath = os.path.join(self.tempdir, filename)
content = ascii(fullpath).encode() + (os_helper.TESTFN_UNDECODABLE or b'\xff')
try:
with open(fullpath, 'wb') as f:
f.write(content)
except OSError:
self.skipTest(f'Can not create file {filename!a} '
f'on current file system')
response = self.request(self.base_url + '/')
body = self.check_status_and_reason(response, HTTPStatus.OK)
quotedname = urllib.parse.quote(filename, errors='surrogatepass')
enc = response.headers.get_content_charset()
self.assertIsNotNone(enc)
self.assertIn((f'href="{quotedname}"').encode('ascii'), body)
displayname = html.escape(filename, quote=False)
self.assertIn(f'>{displayname}<'.encode(enc, 'surrogateescape'), body)
response = self.request(self.base_url + '/' + quotedname)
self.check_status_and_reason(response, HTTPStatus.OK, data=content)
@unittest.skipUnless(os_helper.TESTFN_NONASCII,
'need os_helper.TESTFN_NONASCII')
def test_list_dir_nonascii_dirname(self):
dirname = os_helper.TESTFN_NONASCII + '.dir'
self.check_list_dir_dirname(dirname)
@unittest.skipUnless(os_helper.TESTFN_NONASCII,
'need os_helper.TESTFN_NONASCII')
def test_list_dir_nonascii_filename(self):
filename = os_helper.TESTFN_NONASCII + '.txt'
self.check_list_dir_filename(filename)
@unittest.skipIf(is_apple,
'undecodable name cannot always be decoded on Apple platforms')
@unittest.skipIf(sys.platform == 'win32',
'undecodable name cannot be decoded on win32')
@unittest.skipUnless(os_helper.TESTFN_UNDECODABLE,
'need os_helper.TESTFN_UNDECODABLE')
def test_list_dir_undecodable_dirname(self):
dirname = os.fsdecode(os_helper.TESTFN_UNDECODABLE) + '.dir'
self.check_list_dir_dirname(dirname)
@unittest.skipIf(is_apple,
'undecodable name cannot always be decoded on Apple platforms')
@unittest.skipIf(sys.platform == 'win32',
'undecodable name cannot be decoded on win32')
@unittest.skipUnless(os_helper.TESTFN_UNDECODABLE,
'need os_helper.TESTFN_UNDECODABLE')
def test_list_dir_undecodable_filename(self):
filename = os.fsdecode(os_helper.TESTFN_UNDECODABLE) + '.txt'
self.check_list_dir_filename(filename)
def test_list_dir_undecodable_dirname2(self):
dirname = '\ufffd.dir'
self.check_list_dir_dirname(dirname, quotedname='%ff.dir')
@unittest.skipUnless(os_helper.TESTFN_UNENCODABLE,
'need os_helper.TESTFN_UNENCODABLE')
def test_list_dir_unencodable_dirname(self):
dirname = os_helper.TESTFN_UNENCODABLE + '.dir'
self.check_list_dir_dirname(dirname)
@unittest.skipUnless(os_helper.TESTFN_UNENCODABLE,
'need os_helper.TESTFN_UNENCODABLE')
def test_list_dir_unencodable_filename(self):
filename = os_helper.TESTFN_UNENCODABLE + '.txt'
self.check_list_dir_filename(filename)
def test_list_dir_escape_dirname(self):
# Characters that need special treating in URL or HTML.
for name in ('q?', 'f#', '&', '&', '<i>', '"dq"', "'sq'",
'%A4', '%E2%82%AC'):
with self.subTest(name=name):
dirname = name + '.dir'
self.check_list_dir_dirname(dirname,
quotedname=urllib.parse.quote(dirname, safe='&<>\'"'))
def test_list_dir_escape_filename(self):
# Characters that need special treating in URL or HTML.
for name in ('q?', 'f#', '&', '&', '<i>', '"dq"', "'sq'",
'%A4', '%E2%82%AC'):
with self.subTest(name=name):
filename = name + '.txt'
self.check_list_dir_filename(filename)
os_helper.unlink(os.path.join(self.tempdir, filename))
def test_list_dir_with_query_and_fragment(self):
prefix = f'listing for {self.base_url}/</'.encode('latin1')
response = self.request(self.base_url + '/#123').read()
self.assertIn(prefix + b'title>', response)
self.assertIn(prefix + b'h1>', response)
response = self.request(self.base_url + '/?x=123').read()
self.assertIn(prefix + b'title>', response)
self.assertIn(prefix + b'h1>', response)
def test_get_dir_redirect_location_domain_injection_bug(self):
"""Ensure //evil.co/..%2f../../X does not put //evil.co/ in Location.
//netloc/ in a Location header is a redirect to a new host.
https://github.com/python/cpython/issues/87389
This checks that a path resolving to a directory on our server cannot
resolve into a redirect to another server.
"""
os.mkdir(os.path.join(self.tempdir, 'existing_directory'))
url = f'/python.org/..%2f..%2f..%2f..%2f..%2f../%0a%0d/../{self.tempdir_name}/existing_directory'
expected_location = f'{url}/' # /python.org.../ single slash single prefix, trailing slash
# Canonicalizes to /tmp/tempdir_name/existing_directory which does
# exist and is a dir, triggering the 301 redirect logic.
response = self.request(url)
self.check_status_and_reason(response, HTTPStatus.MOVED_PERMANENTLY)
location = response.getheader('Location')
self.assertEqual(location, expected_location, msg='non-attack failed!')
# //python.org... multi-slash prefix, no trailing slash
attack_url = f'/{url}'
response = self.request(attack_url)
self.check_status_and_reason(response, HTTPStatus.MOVED_PERMANENTLY)
location = response.getheader('Location')
self.assertNotStartsWith(location, '//')
self.assertEqual(location, expected_location,
msg='Expected Location header to start with a single / and '
'end with a / as this is a directory redirect.')
# ///python.org... triple-slash prefix, no trailing slash
attack3_url = f'//{url}'
response = self.request(attack3_url)
self.check_status_and_reason(response, HTTPStatus.MOVED_PERMANENTLY)
self.assertEqual(response.getheader('Location'), expected_location)
# If the second word in the http request (Request-URI for the http
# method) is a full URI, we don't worry about it, as that'll be parsed
# and reassembled as a full URI within BaseHTTPRequestHandler.send_head
# so no errant scheme-less //netloc//evil.co/ domain mixup can happen.
attack_scheme_netloc_2slash_url = f'https://pypi.org/{url}'
expected_scheme_netloc_location = f'{attack_scheme_netloc_2slash_url}/'
response = self.request(attack_scheme_netloc_2slash_url)
self.check_status_and_reason(response, HTTPStatus.MOVED_PERMANENTLY)
location = response.getheader('Location')
# We're just ensuring that the scheme and domain make it through, if
# there are or aren't multiple slashes at the start of the path that
# follows that isn't important in this Location: header.
self.assertStartsWith(location, 'https://pypi.org/')
def test_get(self):
#constructs the path relative to the root directory of the HTTPServer
response = self.request(self.base_url + '/test')
self.check_status_and_reason(response, HTTPStatus.OK, data=self.data)
# check for trailing "/" which should return 404. See Issue17324
response = self.request(self.base_url + '/test/')
self.check_status_and_reason(response, HTTPStatus.NOT_FOUND)
response = self.request(self.base_url + '/test%2f')
self.check_status_and_reason(response, HTTPStatus.NOT_FOUND)
response = self.request(self.base_url + '/test%2F')
self.check_status_and_reason(response, HTTPStatus.NOT_FOUND)
response = self.request(self.base_url + '/')
self.check_status_and_reason(response, HTTPStatus.OK)
response = self.request(self.base_url + '%2f')
self.check_status_and_reason(response, HTTPStatus.OK)
response = self.request(self.base_url + '%2F')
self.check_status_and_reason(response, HTTPStatus.OK)
response = self.request(self.base_url)
self.check_status_and_reason(response, HTTPStatus.MOVED_PERMANENTLY)
self.assertEqual(response.getheader("Location"), self.base_url + "/")
self.assertEqual(response.getheader("Content-Length"), "0")
response = self.request(self.base_url + '/?hi=2')
self.check_status_and_reason(response, HTTPStatus.OK)
response = self.request(self.base_url + '?hi=1')
self.check_status_and_reason(response, HTTPStatus.MOVED_PERMANENTLY)
self.assertEqual(response.getheader("Location"),
self.base_url + "/?hi=1")
response = self.request('/ThisDoesNotExist')
self.check_status_and_reason(response, HTTPStatus.NOT_FOUND)
response = self.request('/' + 'ThisDoesNotExist' + '/')
self.check_status_and_reason(response, HTTPStatus.NOT_FOUND)
os.makedirs(os.path.join(self.tempdir, 'spam', 'index.html'))
response = self.request(self.base_url + '/spam/')
self.check_status_and_reason(response, HTTPStatus.OK)
data = b"Dummy index file\r\n"
with open(os.path.join(self.tempdir_name, 'index.html'), 'wb') as f:
f.write(data)
response = self.request(self.base_url + '/')
self.check_status_and_reason(response, HTTPStatus.OK, data)
# chmod() doesn't work as expected on Windows, and filesystem
# permissions are ignored by root on Unix.
if os.name == 'posix' and os.geteuid() != 0:
os.chmod(self.tempdir, 0)
try:
response = self.request(self.base_url + '/')
self.check_status_and_reason(response, HTTPStatus.NOT_FOUND)
finally:
os.chmod(self.tempdir, 0o755)
def test_head(self):
response = self.request(
self.base_url + '/test', method='HEAD')
self.check_status_and_reason(response, HTTPStatus.OK)
self.assertEqual(response.getheader('content-length'),
str(len(self.data)))
self.assertEqual(response.getheader('content-type'),
'application/octet-stream')
def test_browser_cache(self):
"""Check that when a request to /test is sent with the request header
If-Modified-Since set to date of last modification, the server returns
status code 304, not 200
"""
headers = email.message.Message()
headers['If-Modified-Since'] = self.last_modif_header
response = self.request(self.base_url + '/test', headers=headers)
self.check_status_and_reason(response, HTTPStatus.NOT_MODIFIED)
# one hour after last modification : must return 304
new_dt = self.last_modif_datetime + datetime.timedelta(hours=1)
headers = email.message.Message()
headers['If-Modified-Since'] = email.utils.format_datetime(new_dt,
usegmt=True)
response = self.request(self.base_url + '/test', headers=headers)
self.check_status_and_reason(response, HTTPStatus.NOT_MODIFIED)
def test_browser_cache_file_changed(self):
# with If-Modified-Since earlier than Last-Modified, must return 200
dt = self.last_modif_datetime
# build datetime object : 365 days before last modification
old_dt = dt - datetime.timedelta(days=365)
headers = email.message.Message()
headers['If-Modified-Since'] = email.utils.format_datetime(old_dt,
usegmt=True)
response = self.request(self.base_url + '/test', headers=headers)
self.check_status_and_reason(response, HTTPStatus.OK)
def test_browser_cache_with_If_None_Match_header(self):
# if If-None-Match header is present, ignore If-Modified-Since
headers = email.message.Message()
headers['If-Modified-Since'] = self.last_modif_header
headers['If-None-Match'] = "*"
response = self.request(self.base_url + '/test', headers=headers)
self.check_status_and_reason(response, HTTPStatus.OK)
def test_invalid_requests(self):
response = self.request('/', method='FOO')
self.check_status_and_reason(response, HTTPStatus.NOT_IMPLEMENTED)
# requests must be case sensitive,so this should fail too
response = self.request('/', method='custom')
self.check_status_and_reason(response, HTTPStatus.NOT_IMPLEMENTED)
response = self.request('/', method='GETs')
self.check_status_and_reason(response, HTTPStatus.NOT_IMPLEMENTED)
def test_last_modified(self):
"""Checks that the datetime returned in Last-Modified response header
is the actual datetime of last modification, rounded to the second
"""
response = self.request(self.base_url + '/test')
self.check_status_and_reason(response, HTTPStatus.OK, data=self.data)
last_modif_header = response.headers['Last-modified']
self.assertEqual(last_modif_header, self.last_modif_header)
def test_path_without_leading_slash(self):
response = self.request(self.tempdir_name + '/test')
self.check_status_and_reason(response, HTTPStatus.OK, data=self.data)
response = self.request(self.tempdir_name + '/test/')
self.check_status_and_reason(response, HTTPStatus.NOT_FOUND)
response = self.request(self.tempdir_name + '/')
self.check_status_and_reason(response, HTTPStatus.OK)
response = self.request(self.tempdir_name)
self.check_status_and_reason(response, HTTPStatus.MOVED_PERMANENTLY)
self.assertEqual(response.getheader("Location"),
self.tempdir_name + "/")
response = self.request(self.tempdir_name + '/?hi=2')
self.check_status_and_reason(response, HTTPStatus.OK)
response = self.request(self.tempdir_name + '?hi=1')
self.check_status_and_reason(response, HTTPStatus.MOVED_PERMANENTLY)
self.assertEqual(response.getheader("Location"),
self.tempdir_name + "/?hi=1")
cgi_file1 = """\
#!%s
print("Content-type: text/html")
print()
print("Hello World")
"""
cgi_file2 = """\
#!%s
import os
import sys
import urllib.parse
print("Content-type: text/html")
print()
content_length = int(os.environ["CONTENT_LENGTH"])
query_string = sys.stdin.buffer.read(content_length)
params = {key.decode("utf-8"): val.decode("utf-8")
for key, val in urllib.parse.parse_qsl(query_string)}
print("%%s, %%s, %%s" %% (params["spam"], params["eggs"], params["bacon"]))
"""
cgi_file4 = """\
#!%s
import os
print("Content-type: text/html")
print()
print(os.environ["%s"])
"""
cgi_file6 = """\
#!%s
import os
print("X-ambv: was here")
print("Content-type: text/html")
print()
print("<pre>")
for k, v in os.environ.items():
try:
k.encode('ascii')
v.encode('ascii')
except UnicodeEncodeError:
continue # see: BPO-44647
print(f"{k}={v}")
print("</pre>")
"""
@unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
"This test can't be run reliably as root (issue #13308).")
@requires_subprocess()
| SimpleHTTPServerTestCase |
python | pypa__pip | src/pip/_internal/vcs/mercurial.py | {
"start": 496,
"end": 5575
} | class ____(VersionControl):
name = "hg"
dirname = ".hg"
repo_name = "clone"
schemes = (
"hg+file",
"hg+http",
"hg+https",
"hg+ssh",
"hg+static-http",
)
@staticmethod
def get_base_rev_args(rev: str) -> list[str]:
return [f"--rev={rev}"]
def fetch_new(
self, dest: str, url: HiddenText, rev_options: RevOptions, verbosity: int
) -> None:
rev_display = rev_options.to_display()
logger.info(
"Cloning hg %s%s to %s",
url,
rev_display,
display_path(dest),
)
if verbosity <= 0:
flags: tuple[str, ...] = ("--quiet",)
elif verbosity == 1:
flags = ()
elif verbosity == 2:
flags = ("--verbose",)
else:
flags = ("--verbose", "--debug")
self.run_command(make_command("clone", "--noupdate", *flags, url, dest))
self.run_command(
make_command("update", *flags, rev_options.to_args()),
cwd=dest,
)
def switch(
self,
dest: str,
url: HiddenText,
rev_options: RevOptions,
verbosity: int = 0,
) -> None:
extra_flags = []
repo_config = os.path.join(dest, self.dirname, "hgrc")
config = configparser.RawConfigParser()
if verbosity <= 0:
extra_flags.append("-q")
try:
config.read(repo_config)
config.set("paths", "default", url.secret)
with open(repo_config, "w") as config_file:
config.write(config_file)
except (OSError, configparser.NoSectionError) as exc:
logger.warning("Could not switch Mercurial repository to %s: %s", url, exc)
else:
cmd_args = make_command("update", *extra_flags, rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
def update(
self,
dest: str,
url: HiddenText,
rev_options: RevOptions,
verbosity: int = 0,
) -> None:
extra_flags = []
if verbosity <= 0:
extra_flags.append("-q")
self.run_command(["pull", *extra_flags], cwd=dest)
cmd_args = make_command("update", *extra_flags, rev_options.to_args())
self.run_command(cmd_args, cwd=dest)
@classmethod
def get_remote_url(cls, location: str) -> str:
url = cls.run_command(
["showconfig", "paths.default"],
show_stdout=False,
stdout_only=True,
cwd=location,
).strip()
if cls._is_local_repository(url):
url = path_to_url(url)
return url.strip()
@classmethod
def get_revision(cls, location: str) -> str:
"""
Return the repository-local changeset revision number, as an integer.
"""
current_revision = cls.run_command(
["parents", "--template={rev}"],
show_stdout=False,
stdout_only=True,
cwd=location,
).strip()
return current_revision
@classmethod
def get_requirement_revision(cls, location: str) -> str:
"""
Return the changeset identification hash, as a 40-character
hexadecimal string
"""
current_rev_hash = cls.run_command(
["parents", "--template={node}"],
show_stdout=False,
stdout_only=True,
cwd=location,
).strip()
return current_rev_hash
@classmethod
def is_commit_id_equal(cls, dest: str, name: str | None) -> bool:
"""Always assume the versions don't match"""
return False
@classmethod
def get_subdirectory(cls, location: str) -> str | None:
"""
Return the path to Python project root, relative to the repo root.
Return None if the project root is in the repo root.
"""
# find the repo root
repo_root = cls.run_command(
["root"], show_stdout=False, stdout_only=True, cwd=location
).strip()
if not os.path.isabs(repo_root):
repo_root = os.path.abspath(os.path.join(location, repo_root))
return find_path_to_project_root_from_repo_root(location, repo_root)
@classmethod
def get_repository_root(cls, location: str) -> str | None:
loc = super().get_repository_root(location)
if loc:
return loc
try:
r = cls.run_command(
["root"],
cwd=location,
show_stdout=False,
stdout_only=True,
on_returncode="raise",
log_failed_cmd=False,
)
except BadCommand:
logger.debug(
"could not determine if %s is under hg control "
"because hg is not available",
location,
)
return None
except InstallationError:
return None
return os.path.normpath(r.rstrip("\r\n"))
vcs.register(Mercurial)
| Mercurial |
python | facelessuser__soupsieve | tests/test_level4/test_defined.py | {
"start": 52,
"end": 2571
} | class ____(util.TestCase):
"""Test defined selectors."""
def test_defined_html(self):
"""Test defined HTML."""
markup = """
<!DOCTYPE html>
<html>
<head>
</head>
<body>
<div id="0"></div>
<div-custom id="1"></div-custom>
<prefix:div id="2"></prefix:div>
<prefix:div-custom id="3"></prefix:div-custom>
</body>
</html>
"""
self.assert_selector(
markup,
'body :defined',
['0', '2', '3'],
flags=util.HTML
)
@util.skip_no_lxml
def test_defined_xhtml(self):
"""Test defined XHTML."""
markup = """
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"
"http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html lang="en" xmlns="http://www.w3.org/1999/xhtml">
<head>
</head>
<body>
<div id="0"></div>
<div-custom id="1"></div-custom>
<prefix:div id="2"></prefix:div>
<!--
lxml seems to strip away the prefix in versions less than 4.4.0.
This was most likely because prefix with no namespace is not really valid.
XML does allow colons in names, but encourages them to be used for namespaces.
This is a quirk of LXML, but it appears to be fine in 4.4.0+.
-->
<prefix:div-custom id="3"></prefix:div-custom>
</body>
</html>
"""
from lxml import etree
self.assert_selector(
markup,
'body :defined',
# We should get 3, but for LXML versions less than 4.4.0 we don't for reasons stated above.
['0', '2'] if etree.LXML_VERSION < (4, 4, 0, 0) else ['0', '1', '2'],
flags=util.XHTML
)
def test_defined_xml(self):
"""Test defined HTML."""
markup = """
<?xml version="1.0" encoding="UTF-8"?>
<html>
<head>
</head>
<body>
<div id="0"></div>
<div-custom id="1"></div-custom>
<prefix:div id="2"></prefix:div>
<prefix:div-custom id="3"></prefix:div-custom>
</body>
</html>
"""
# Defined is a browser thing.
# XML doesn't care about defined and this will match nothing in XML.
self.assert_selector(
markup,
'body :defined',
[],
flags=util.XML
)
| TestDefined |
python | donnemartin__system-design-primer | solutions/system_design/social_graph/social_graph_snippets.py | {
"start": 1116,
"end": 1410
} | class ____(object):
def __init__(self):
self.people = {} # key: person_id, value: person
def get_people(self, ids):
results = []
for id in ids:
if id in self.people:
results.append(self.people[id])
return results
| PersonServer |
python | sqlalchemy__sqlalchemy | test/orm/test_deferred.py | {
"start": 89121,
"end": 90065
} | class ____(fixtures.DeclarativeMappedTest):
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class A(Base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
bs = relationship("B")
class B(Base):
__tablename__ = "b"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
A.b_count = deferred(
select(func.count(1)).where(A.id == B.a_id).scalar_subquery()
)
def test_deferred_autoflushes(self):
A, B = self.classes("A", "B")
s = fixture_session()
a1 = A(id=1, bs=[B()])
s.add(a1)
s.commit()
eq_(a1.b_count, 1)
s.close()
a1 = s.query(A).first()
assert "b_count" not in a1.__dict__
b1 = B(a_id=1)
s.add(b1)
eq_(a1.b_count, 2)
assert b1 in s
| AutoflushTest |
python | sphinx-doc__sphinx | sphinx/util/cfamily.py | {
"start": 4124,
"end": 4798
} | class ____(ASTAttribute):
def __init__(self, arg: str) -> None:
self.arg = arg
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTCPPAttribute):
return NotImplemented
return self.arg == other.arg
def __hash__(self) -> int:
return hash(self.arg)
def _stringify(self, transform: StringifyTransform) -> str:
return f'[[{self.arg}]]'
def describe_signature(self, signode: TextElement) -> None:
signode.append(addnodes.desc_sig_punctuation('[[', '[['))
signode.append(nodes.Text(self.arg))
signode.append(addnodes.desc_sig_punctuation(']]', ']]'))
| ASTCPPAttribute |
python | pola-rs__polars | py-polars/src/polars/expr/whenthen.py | {
"start": 1272,
"end": 3412
} | class ____(Expr):
"""
Utility class for the `when-then-otherwise` expression.
Represents the state of the expression after `pl.when(...).then(...)` is called.
"""
def __init__(self, then: Any) -> None:
self._then = then
@classmethod
def _from_pyexpr(cls, pyexpr: PyExpr) -> Expr:
return wrap_expr(pyexpr)
@property
def _pyexpr(self) -> PyExpr: # type: ignore[override]
return self._then.otherwise(F.lit(None)._pyexpr)
def when(
self,
*predicates: IntoExpr | Iterable[IntoExpr],
**constraints: Any,
) -> ChainedWhen:
"""
Add a condition to the `when-then-otherwise` expression.
Parameters
----------
predicates
Condition(s) that must be met in order to apply the subsequent statement.
Accepts one or more boolean expressions, which are implicitly combined with
`&`. String input is parsed as a column name.
constraints
Apply conditions as `col_name = value` keyword arguments that are treated as
equality matches, such as `x = 123`. As with the predicates parameter,
multiple conditions are implicitly combined using `&`.
Notes
-----
The expression output name is taken from the first `then` statement. It is
not affected by `predicates`, nor by `constraints`.
"""
condition_pyexpr = parse_predicates_constraints_into_expression(
*predicates, **constraints
)
return ChainedWhen(self._then.when(condition_pyexpr))
def otherwise(self, statement: IntoExpr) -> Expr:
"""
Define a default for the `when-then-otherwise` expression.
Parameters
----------
statement
The statement to apply if all conditions are false.
Accepts expression input. Strings are parsed as column names, other
non-expression inputs are parsed as literals.
"""
statement_pyexpr = parse_into_expression(statement)
return wrap_expr(self._then.otherwise(statement_pyexpr))
| Then |
python | tensorflow__tensorflow | tensorflow/python/training/saver_test.py | {
"start": 39139,
"end": 48912
} | class ____(test.TestCase):
_WRITE_VERSION = saver_pb2.SaverDef.V1
def _get_test_dir(self, dirname):
test_dir = os.path.join(self.get_temp_dir(), dirname)
gfile.MakeDirs(test_dir)
return test_dir
def testBasics(self):
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
# Build a graph with 2 parameter nodes on different devices.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variable_v1.VariableV1(10, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variable_v1.VariableV1(20, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k1", 30.0).run()
t1.insert("k2", 40.0).run()
val = save.save(sess, save_path)
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(save_path + "-?????-of-00002", val)
else:
self.assertEqual(save_path, val)
meta_graph_filename = checkpoint_management.meta_graph_filename(val)
self.assertEqual(save_path + ".meta", meta_graph_filename)
if save._write_version is saver_pb2.SaverDef.V1:
# Restore different ops from shard 0 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variable_v1.VariableV1(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
save = saver_module.Saver(
{
"v0": v0,
"t0": t0.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k11", 33.0).run()
self.assertEqual(111, self.evaluate(v0))
self.assertEqual(b"k11", self.evaluate(t0.keys()))
self.assertEqual(33.0, self.evaluate(t0.values()))
save.restore(sess, save_path + "-00000-of-00002")
self.assertEqual(10, self.evaluate(v0))
self.assertEqual(b"k1", self.evaluate(t0.keys()))
self.assertEqual(30.0, self.evaluate(t0.values()))
# Restore different ops from shard 1 of the saved files.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v1 = variable_v1.VariableV1(222)
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v1": v1,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t1.insert("k22", 44.0).run()
self.assertEqual(222, self.evaluate(v1))
self.assertEqual(b"k22", self.evaluate(t1.keys()))
self.assertEqual(44.0, self.evaluate(t1.values()))
save.restore(sess, save_path + "-00001-of-00002")
self.assertEqual(20, self.evaluate(v1))
self.assertEqual(b"k2", self.evaluate(t1.keys()))
self.assertEqual(40.0, self.evaluate(t1.values()))
# Now try a restore with the sharded filename.
with session.Session(
target="",
config=config_pb2.ConfigProto(device_count={"CPU": 2})) as sess:
with sess.graph.device("/cpu:0"):
v0 = variable_v1.VariableV1(111, name="v0")
t0 = saver_test_utils.CheckpointedOp(name="t0")
with sess.graph.device("/cpu:1"):
v1 = variable_v1.VariableV1(222, name="v1")
t1 = saver_test_utils.CheckpointedOp(name="t1")
save = saver_module.Saver(
{
"v0": v0,
"v1": v1,
"t0": t0.saveable,
"t1": t1.saveable
},
write_version=self._WRITE_VERSION,
sharded=True)
self.evaluate(variables.global_variables_initializer())
t0.insert("k11", 33.0).run()
t1.insert("k22", 44.0).run()
self.assertEqual(111, self.evaluate(v0))
self.assertEqual(222, self.evaluate(v1))
self.assertEqual(b"k11", self.evaluate(t0.keys()))
self.assertEqual(33.0, self.evaluate(t0.values()))
self.assertEqual(b"k22", self.evaluate(t1.keys()))
self.assertEqual(44.0, self.evaluate(t1.values()))
save_path = os.path.join(self.get_temp_dir(), "sharded_basics")
if save._write_version is saver_pb2.SaverDef.V1:
save.restore(sess, save_path + "-?????-of-?????")
else:
save.restore(sess, save_path)
self.assertEqual(10, self.evaluate(v0))
self.assertEqual(20, self.evaluate(v1))
self.assertEqual(b"k1", self.evaluate(t0.keys()))
self.assertEqual(30.0, self.evaluate(t0.values()))
self.assertEqual(b"k2", self.evaluate(t1.keys()))
self.assertEqual(40.0, self.evaluate(t1.values()))
if save._write_version is saver_pb2.SaverDef.V1:
self.assertEqual(
checkpoint_management.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics-?????-of-00002"))
else:
self.assertEqual(
checkpoint_management.latest_checkpoint(self.get_temp_dir()),
os.path.join(self.get_temp_dir(), "sharded_basics"))
def testSaverDef(self):
# train.Saver is V1 only API.
with ops_lib.Graph().as_default(), self.cached_session():
v0 = variable_v1.VariableV1(123, name="v0")
save = saver_module.Saver({"v0": v0}, sharded=True)
sd = save.as_saver_def()
self.assertTrue(sd.sharded)
def _testPartitionedVariables(self, use_resource):
var_full_shape = [10, 3]
# Allows save/restore mechanism to work w/ different slicings.
var_name = "my_var"
saved_dir = self._get_test_dir("partitioned_variables")
saved_path = os.path.join(saved_dir, "ckpt")
call_saver_with_dict = False # updated by test loop below
def _save(partitioner=None):
# train.Saver is V1 only API.
with ops_lib.Graph().as_default(), self.session() as sess:
# Calls .eval() to return the ndarray that makes up the full variable.
rnd = random_ops.random_uniform(var_full_shape).eval()
if partitioner:
vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=rnd,
partitioner=partitioner,
use_resource=use_resource)
]
else:
if use_resource:
vs = [resource_variable_ops.ResourceVariable(rnd, name=var_name)]
else:
vs = [variable_v1.VariableV1(rnd, name=var_name)]
self.evaluate(variables.global_variables_initializer())
if call_saver_with_dict:
saver = saver_module.Saver({var_name: vs[0]})
else:
saver = saver_module.Saver(vs)
actual_path = saver.save(sess, saved_path)
self.assertEqual(saved_path, actual_path)
return rnd
def _restore(partitioner=None):
# train.Saver is V1 only API.
with ops_lib.Graph().as_default(), self.session() as sess:
if partitioner:
new_vs = [
variable_scope.get_variable(
var_name,
shape=var_full_shape,
initializer=array_ops.zeros(var_full_shape),
partitioner=partitioner)
]
else:
new_vs = [
variable_v1.VariableV1(
array_ops.zeros(
shape=var_full_shape), # != original contents.
name=var_name)
]
self.evaluate(variables.global_variables_initializer())
if call_saver_with_dict:
saver = saver_module.Saver({
var_name: new_vs[0]
})
else:
saver = saver_module.Saver(new_vs)
saver.restore(sess, saved_path)
if partitioner:
return new_vs[0].as_tensor().eval()
else:
return new_vs[0].eval()
for call_saver_with_dict in {False, True}:
# Save PartitionedVariable and restore into full variable.
saved_full = _save(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
restored_full = _restore()
self.assertAllEqual(saved_full, restored_full)
# Restores into the same number of partitions.
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2))
self.assertAllEqual(saved_full, restored_full)
# Restores into a different number of partitions.
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=3))
self.assertAllEqual(saved_full, restored_full)
# Now, saves a full variable and restores PartitionedVariable.
saved_full = _save()
restored_full = _restore(
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=3))
self.assertAllEqual(saved_full, restored_full)
def testPartitionedVariable(self):
self._testPartitionedVariables(use_resource=False)
def testPartitionedResourceVariable(self):
self._testPartitionedVariables(use_resource=True)
| SaveRestoreShardedTest |
python | lepture__authlib | authlib/integrations/base_client/sync_openid.py | {
"start": 232,
"end": 3165
} | class ____:
def fetch_jwk_set(self, force=False):
metadata = self.load_server_metadata()
jwk_set = metadata.get("jwks")
if jwk_set and not force:
return jwk_set
uri = metadata.get("jwks_uri")
if not uri:
raise RuntimeError('Missing "jwks_uri" in metadata')
with self.client_cls(**self.client_kwargs) as session:
resp = session.request("GET", uri, withhold_token=True)
resp.raise_for_status()
jwk_set = resp.json()
self.server_metadata["jwks"] = jwk_set
return jwk_set
def userinfo(self, **kwargs):
"""Fetch user info from ``userinfo_endpoint``."""
metadata = self.load_server_metadata()
resp = self.get(metadata["userinfo_endpoint"], **kwargs)
resp.raise_for_status()
data = resp.json()
return UserInfo(data)
def parse_id_token(
self, token, nonce, claims_options=None, claims_cls=None, leeway=120
):
"""Return an instance of UserInfo from token's ``id_token``."""
if "id_token" not in token:
return None
load_key = self.create_load_key()
claims_params = dict(
nonce=nonce,
client_id=self.client_id,
)
if claims_cls is None:
if "access_token" in token:
claims_params["access_token"] = token["access_token"]
claims_cls = CodeIDToken
else:
claims_cls = ImplicitIDToken
metadata = self.load_server_metadata()
if claims_options is None and "issuer" in metadata:
claims_options = {"iss": {"values": [metadata["issuer"]]}}
alg_values = metadata.get("id_token_signing_alg_values_supported")
if alg_values:
_jwt = JsonWebToken(alg_values)
else:
_jwt = jwt
claims = _jwt.decode(
token["id_token"],
key=load_key,
claims_cls=claims_cls,
claims_options=claims_options,
claims_params=claims_params,
)
# https://github.com/authlib/authlib/issues/259
if claims.get("nonce_supported") is False:
claims.params["nonce"] = None
claims.validate(leeway=leeway)
return UserInfo(claims)
def create_load_key(self):
def load_key(header, _):
jwk_set = JsonWebKey.import_key_set(self.fetch_jwk_set())
try:
return jwk_set.find_by_kid(
header.get("kid"), use="sig", alg=header.get("alg")
)
except ValueError:
# re-try with new jwk set
jwk_set = JsonWebKey.import_key_set(self.fetch_jwk_set(force=True))
return jwk_set.find_by_kid(
header.get("kid"), use="sig", alg=header.get("alg")
)
return load_key
| OpenIDMixin |
python | pytorch__pytorch | torch/distributed/elastic/agent/server/local_elastic_agent.py | {
"start": 1504,
"end": 18752
} | class ____(SimpleElasticAgent):
"""An implementation of :py:class:`torchelastic.agent.server.ElasticAgent` that handles host-local workers.
This agent is deployed per host and is configured to spawn ``n`` workers.
When using GPUs, ``n`` maps to the number of GPUs available on the host.
The local agent does not communicate to other local agents deployed on
other hosts, even if the workers may communicate inter-host. The worker id
is interpreted to be a local process. The agent starts and stops all worker
processes as a single unit.
The worker function and argument passed to the worker function must be
python multiprocessing compatible. To pass multiprocessing data structures
to the workers you may create the data structure in the same multiprocessing
context as the specified ``start_method`` and pass it as a function argument.
The ``exit_barrier_timeout`` specifies the amount of time (in seconds) to wait
for other agents to finish. This acts as a safety net to handle cases where
workers finish at different times, to prevent agents from viewing workers
that finished early as a scale-down event. It is strongly advised that the
user code deal with ensuring that workers are terminated in a synchronous
manner rather than relying on the exit_barrier_timeout.
A named pipe based watchdog can be enabled in ```LocalElasticAgent``` if an
environment variable ``TORCHELASTIC_ENABLE_FILE_TIMER`` with value 1 has
been defined in the ```LocalElasticAgent``` process.
Optionally, another environment variable ```TORCHELASTIC_TIMER_FILE```
can be set with a unique file name for the named pipe. If the environment
variable ```TORCHELASTIC_TIMER_FILE``` is not set, ```LocalElasticAgent```
will internally create a unique file name and set it to the environment
variable ```TORCHELASTIC_TIMER_FILE```, and this environment variable will
be propagated to the worker processes to allow them to connect to the same
named pipe that ```LocalElasticAgent``` uses.
Logs are written to the specified log directory. Each log line will be by default
prefixed by ``[${role_name}${local_rank}]:`` (e.g. ``[trainer0]: foobar``).
Log prefixes can be customized by passing a `template string
<https://docs.python.org/3/library/string.html#template-strings>`_ as the
``log_line_prefix_template`` argument.
The following macros (identifiers) are substituted at runtime:
``${role_name}, ${local_rank}, ${rank}``. For example, to prefix each log line with
global rank instead of the local rank, set ``log_line_prefix_template = "[${rank}]:``.
Example launching function
::
def trainer(args) -> str:
return "do train"
def main():
start_method="spawn"
shared_queue= multiprocessing.get_context(start_method).Queue()
spec = WorkerSpec(
role="trainer",
local_world_size=nproc_per_process,
entrypoint=trainer,
args=("foobar",),
...<OTHER_PARAMS...>)
agent = LocalElasticAgent(spec, start_method)
results = agent.run()
if results.is_failed():
print("trainer failed")
else:
print(f"rank 0 return value: {results.return_values[0]}")
# prints -> rank 0 return value: do train
Example launching binary
::
def main():
spec = WorkerSpec(
role="trainer",
local_world_size=nproc_per_process,
entrypoint="/usr/local/bin/trainer",
args=("--trainer-args", "foobar"),
...<OTHER_PARAMS...>)
agent = LocalElasticAgent(spec)
results = agent.run()
if not results.is_failed():
print("binary launches do not have return values")
"""
def __init__(
self,
spec: WorkerSpec,
logs_specs: LogsSpecs,
start_method="spawn",
exit_barrier_timeout: float = 300,
log_line_prefix_template: str | None = None,
):
super().__init__(spec, exit_barrier_timeout)
self._start_method = start_method
self._pcontext: PContext | None = None
self._rdzv_handler = spec.rdzv_handler
self._log_line_prefix_template = log_line_prefix_template
self._worker_watchdog: timer.FileTimerServer | None = None
self._logs_specs = logs_specs
self._health_check_server: HealthCheckServer | None = None
def _setup_local_watchdog(self, envs: dict[int, dict[str, str]]) -> None:
enable_watchdog_env_name = TORCHELASTIC_ENABLE_FILE_TIMER
watchdog_enabled = os.getenv(enable_watchdog_env_name)
watchdog_file_env_name = TORCHELASTIC_TIMER_FILE
watchdog_file_path = os.getenv(watchdog_file_env_name)
if watchdog_enabled is not None and str(watchdog_enabled) == "1":
if watchdog_file_path is None:
watchdog_file_path = "/tmp/watchdog_timer_" + str(uuid.uuid4())
logger.info("Starting a FileTimerServer with %s ...", watchdog_file_path)
if not envs:
logger.warning(
"Empty envs variables, using empty run_id for FileTimerServer"
)
run_id = ""
else:
run_id = envs[0]["TORCHELASTIC_RUN_ID"]
self._worker_watchdog = timer.FileTimerServer(
file_path=watchdog_file_path,
run_id=run_id,
max_interval=0.1,
daemon=True,
log_event=self._log_watchdog_event,
)
self._worker_watchdog.start()
logger.info("FileTimerServer started")
else:
logger.info(
"Environment variable '%s' not found. Do not start FileTimerServer.",
enable_watchdog_env_name,
)
# Propagate the watchdog file env to worker processes
if watchdog_file_path is not None:
for worker_env in envs.values():
worker_env[watchdog_file_env_name] = watchdog_file_path
@staticmethod
def _get_current_time_secs() -> int:
return int(time.time())
def _setup_healthcheck(self) -> None:
healthcheck_port_env_name = TORCHELASTIC_HEALTH_CHECK_PORT
healthcheck_port = os.getenv(healthcheck_port_env_name)
if healthcheck_port is not None:
logger.info(
"Found healthcheck port %s: %s",
healthcheck_port_env_name,
healthcheck_port,
)
if self._worker_watchdog is None:
logger.info(
"FileTimerServer doesn't exist, using current time as dummy callback"
)
alive_callback = LocalElasticAgent._get_current_time_secs
else:
alive_callback = self._worker_watchdog.get_last_progress_time
try:
healthcheck_port_as_int = int(healthcheck_port)
self._health_check_server = create_healthcheck_server(
alive_callback=alive_callback,
port=healthcheck_port_as_int,
timeout=60,
)
self._health_check_server.start()
except ValueError:
logger.info(
"Invalid healthcheck port value: '%s', expecting integer. Not starting healthcheck server.",
healthcheck_port,
)
else:
logger.info(
"Environment variable '%s' not found. Do not start health check.",
healthcheck_port_env_name,
)
def _get_fq_hostname(self) -> str:
return socket.getfqdn(socket.gethostname())
def _log_watchdog_event(
self,
name: str,
request: timer.FileTimerRequest | None,
) -> None:
wg = self._worker_group
spec = wg.spec
md = {"watchdog_event": name}
if request is not None:
md["worker_pid"] = str(request.worker_pid)
md["scope_id"] = request.scope_id
md["expiration_time"] = str(request.expiration_time)
md["signal"] = str(request.signal)
md_str = json.dumps(md)
state = "RUNNING"
metadata: dict[str, EventMetadataValue] = {
"run_id": spec.rdzv_handler.get_run_id(),
"global_rank": None,
"group_rank": wg.group_rank,
"worker_id": None,
"role": spec.role,
"hostname": self._get_fq_hostname(),
"state": state,
"total_run_time": self._total_execution_time,
"rdzv_backend": spec.rdzv_handler.get_backend(),
"raw_error": None,
"metadata": md_str,
"agent_restarts": spec.max_restarts - self._remaining_restarts,
}
# Note: The 'metadata' field of the Event is converted to a TorchelasticStatusLogEntry later.
# The 'name' field of the Event is NOT used in the TorchelasticStatusLogEntry.
event = events.Event(
name=name, source=events.EventSource.AGENT, metadata=metadata
)
events.record(event, self._worker_group.spec.event_log_handler)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _stop_workers(self, worker_group: WorkerGroup) -> None:
self._shutdown()
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _start_workers(self, worker_group: WorkerGroup) -> dict[int, Any]:
spec = worker_group.spec
store = worker_group.store
assert store is not None
restart_count = spec.max_restarts - self._remaining_restarts
use_agent_store: bool = spec.rdzv_handler.use_agent_store
logger.info("use_agent_store: %s", use_agent_store)
args: dict[int, tuple] = {}
envs: dict[int, dict[str, str]] = {}
log_line_prefixes: dict[int, str] | None = (
{} if self._log_line_prefix_template else None
)
for worker in worker_group.workers:
local_rank = worker.local_rank
worker_env = {
"RANK": str(worker.global_rank),
"GROUP_RANK": str(worker_group.group_rank),
"ROLE_RANK": str(worker.role_rank),
"ROLE_NAME": spec.role,
"LOCAL_WORLD_SIZE": str(spec.local_world_size),
"WORLD_SIZE": str(worker.world_size),
"GROUP_WORLD_SIZE": str(worker_group.group_world_size),
"ROLE_WORLD_SIZE": str(worker.role_world_size),
"MASTER_ADDR": worker_group.master_addr,
"MASTER_PORT": str(worker_group.master_port),
"TORCHELASTIC_RESTART_COUNT": str(restart_count),
"TORCHELASTIC_MAX_RESTARTS": str(spec.max_restarts),
"TORCHELASTIC_RUN_ID": spec.rdzv_handler.get_run_id(),
"TORCHELASTIC_USE_AGENT_STORE": str(use_agent_store),
"TORCH_NCCL_ASYNC_ERROR_HANDLING": os.getenv(
"TORCH_NCCL_ASYNC_ERROR_HANDLING", str(1)
),
}
self._set_local_rank_env(worker_env, local_rank, spec)
if "OMP_NUM_THREADS" in os.environ:
worker_env["OMP_NUM_THREADS"] = os.environ["OMP_NUM_THREADS"]
if self._log_line_prefix_template:
log_line_prefix = Template(
self._log_line_prefix_template
).safe_substitute(
role_name=spec.role,
rank=worker.global_rank,
local_rank=local_rank,
)
# pyrefly: ignore [unsupported-operation]
log_line_prefixes[local_rank] = log_line_prefix
# pyrefly: ignore [unsupported-operation]
envs[local_rank] = worker_env
worker_args = list(spec.args)
worker_args = macros.substitute(worker_args, str(local_rank))
args[local_rank] = tuple(worker_args)
self._setup_local_watchdog(envs=envs)
self._setup_healthcheck()
assert spec.entrypoint is not None
assert self._logs_specs is not None
self._pcontext = start_processes(
name=spec.role,
entrypoint=spec.entrypoint,
args=args,
envs=envs,
logs_specs=self._logs_specs,
log_line_prefixes=log_line_prefixes,
start_method=self._start_method,
numa_options=spec.numa_options,
duplicate_stdout_filters=spec.duplicate_stdout_filters,
duplicate_stderr_filters=spec.duplicate_stderr_filters,
)
return self._pcontext.pids()
def _set_local_rank_env(
self, worker_env: dict[str, str | None], local_rank: int, spec: WorkerSpec
) -> None:
# Set CUDA_VISIBLE_DEVICES and LOCAL_RANK based on virtual_local_rank mode.
# Virtual mode: Each worker sees only its assigned GPU as device 0, LOCAL_RANK=0
# Traditional mode: Workers see all GPUs, LOCAL_RANK matches actual local rank
if spec.virtual_local_rank:
# Set LOCAL_RANK=0 and use CUDA_VISIBLE_DEVICES to control the actual GPU access.
worker_env["LOCAL_RANK"] = "0"
# Map local_rank through existing CUDA_VISIBLE_DEVICES
# HIP uses CUDA_VISIBLE_DEVICES as a compatibility hack:
# https://rocm.docs.amd.com/en/latest/conceptual/gpu-isolation.html#cuda-visible-devices
parent_visible_devices = os.getenv("CUDA_VISIBLE_DEVICES")
if parent_visible_devices is not None:
# Parse comma-separated list of GPU IDs
available_gpus = parent_visible_devices.split(",")
if local_rank >= len(available_gpus):
raise ValueError(
f"local_rank {local_rank} exceeds available GPUs in "
f"CUDA_VISIBLE_DEVICES={parent_visible_devices}"
)
visible_gpu = available_gpus[local_rank].strip()
else:
# No restriction, use local_rank directly
visible_gpu = str(local_rank)
worker_env["CUDA_VISIBLE_DEVICES"] = visible_gpu
return
# In traditional mode, don't override CUDA_VISIBLE_DEVICES
# (inherit from parent environment)
worker_env["LOCAL_RANK"] = str(local_rank)
if "CUDA_VISIBLE_DEVICES" in os.environ:
worker_env["CUDA_VISIBLE_DEVICES"] = os.environ["CUDA_VISIBLE_DEVICES"]
def _shutdown(self, death_sig: signal.Signals = signal.SIGTERM) -> None:
if self._worker_watchdog is not None:
self._worker_watchdog.stop()
self._worker_watchdog = None
if self._health_check_server is not None:
self._health_check_server.stop()
self._health_check_server = None
if self._pcontext:
self._pcontext.close(death_sig)
# pyre-fixme[56]: Pyre was not able to infer the type of the decorator
# `torch.distributed.elastic.metrics.prof`.
@prof
def _monitor_workers(self, worker_group: WorkerGroup) -> RunResult:
role = worker_group.spec.role
worker_pids = {w.id for w in worker_group.workers}
assert self._pcontext is not None
pc_pids = set(self._pcontext.pids().values())
if worker_pids != pc_pids:
logger.error(
"[%s] worker pids do not match process_context pids."
" Expected: %s, actual: %s",
role,
worker_pids,
pc_pids,
)
return RunResult(state=WorkerState.UNKNOWN)
result = self._pcontext.wait(0)
if result:
if result.is_failed():
# map local rank failure to global rank
worker_failures = {}
for local_rank, failure in result.failures.items():
worker = worker_group.workers[local_rank]
worker_failures[worker.global_rank] = failure
return RunResult(
state=WorkerState.FAILED,
failures=worker_failures,
)
else:
# copy ret_val_queue into a map with a global ranks
workers_ret_vals = {}
for local_rank, ret_val in result.return_values.items():
worker = worker_group.workers[local_rank]
workers_ret_vals[worker.global_rank] = ret_val
return RunResult(
state=WorkerState.SUCCEEDED,
return_values=workers_ret_vals,
)
else:
return RunResult(state=WorkerState.HEALTHY)
| LocalElasticAgent |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"start": 13848,
"end": 13962
} | class ____(nodes.Element):
"""Node to mark start of a new file, used in the LaTeX builder only."""
| start_of_file |
python | google__jax | tests/pallas/tpu_ragged_paged_attention_test.py | {
"start": 1071,
"end": 13131
} | class ____(jtu.JaxTestCase):
def _test_ragged_paged_attention(
self,
seq_lens, # List[(q_len, kv_len)]
num_heads, # [num_q_heads, num_kv_heads]
head_dim,
page_size,
q_dtype,
kv_dtype,
num_pages,
*,
num_kv_pages_per_block=8,
num_queries_per_block=64,
vmem_limit_bytes=32 * 1024 * 1024,
max_num_batched_tokens=512,
max_num_seq=8,
sliding_window: int | None = None,
soft_cap: float | None = None,
k_scale: float | None = None,
v_scale: float | None = None,
):
if not jtu.is_device_tpu_at_least(version=4):
self.skipTest("Expect TPUv4+")
cu_q_lens = [0]
kv_lens = []
for q_len, kv_len in seq_lens:
assert q_len <= kv_len
cu_q_lens.append(cu_q_lens[-1] + q_len)
kv_lens.append(kv_len)
max_num_batched_tokens = max(cu_q_lens[-1], max_num_batched_tokens)
max_num_seq = max(len(seq_lens), max_num_seq)
max_kv_len = max(kv_lens)
pages_per_seq = pl.cdiv(max_kv_len, page_size)
num_q_heads, num_kv_heads = num_heads
prng_key = jax.random.key(1234)
k0, k1 = jax.random.split(prng_key, 2)
q = jax.random.normal(
k0,
(max_num_batched_tokens, num_q_heads, head_dim),
dtype=q_dtype,
)
page_cnt = 0
page_indices_list = []
kv_pages_list = []
for kv_len in kv_lens:
if jnp.issubdtype(kv_dtype, jnp.integer):
# random.randint doesn't support int4, so we use jnp.int32 here and then
# convert to the desired dtype.
kv = jax.random.normal(
k1,
(kv_len, num_kv_heads * 2, head_dim),
dtype=jnp.int32,
)
kv = kv.astype(kv_dtype)
else:
kv = jax.random.normal(
k1,
(kv_len, num_kv_heads * 2, head_dim),
dtype=kv_dtype,
)
kv = jnp.pad(
kv,
((0, pl.cdiv(kv_len, page_size) * page_size - kv_len), (0, 0), (0, 0)),
constant_values=jnp.nan,
).reshape(-1, page_size, num_kv_heads * 2, head_dim)
indices = page_cnt + jnp.arange(kv.shape[0], dtype=jnp.int32)
indices = jnp.pad(
indices,
((0, pages_per_seq - indices.shape[0]),),
constant_values=jnp.nan,
)
page_indices_list.append(indices)
page_cnt += kv.shape[0]
kv_pages_list.append(kv)
kv_pages = jnp.concatenate(kv_pages_list, axis=0)
kv_pages = jnp.pad(
kv_pages,
((0, num_pages - kv_pages.shape[0]), (0, 0), (0, 0), (0, 0)),
constant_values=jnp.nan,
)
page_indices = jnp.stack(page_indices_list, axis=0)
page_indices = jnp.pad(
page_indices,
((0, max_num_seq - page_indices.shape[0]), (0, 0)),
constant_values=jnp.nan,
)
cu_q_lens = jnp.array(cu_q_lens, dtype=jnp.int32)
cu_q_lens = jnp.pad(cu_q_lens, (0, max_num_seq + 1 - cu_q_lens.shape[0]))
kv_lens = jnp.array(kv_lens, dtype=jnp.int32)
kv_lens = jnp.pad(kv_lens, (0, max_num_seq - kv_lens.shape[0]))
num_seqs = jnp.array([len(seq_lens)], dtype=jnp.int32)
dynamic_validate_inputs(
q,
kv_pages,
kv_lens,
page_indices,
cu_q_lens,
num_seqs,
sliding_window=sliding_window,
soft_cap=soft_cap,
)
actual_num_q_tokens = cu_q_lens[num_seqs[0]]
output = ragged_paged_attention(
q,
kv_pages,
kv_lens,
page_indices,
cu_q_lens,
num_seqs=num_seqs,
num_kv_pages_per_block=min(num_kv_pages_per_block, pages_per_seq),
num_queries_per_block=num_queries_per_block,
vmem_limit_bytes=vmem_limit_bytes,
sliding_window=sliding_window,
soft_cap=soft_cap,
k_scale=k_scale,
v_scale=v_scale,
)[:actual_num_q_tokens]
expected = ref_ragged_paged_attention(
q,
kv_pages,
kv_lens,
page_indices,
cu_q_lens,
num_seqs=num_seqs,
sliding_window=sliding_window,
soft_cap=soft_cap,
k_scale=k_scale,
v_scale=v_scale,
)
dtype_bits = dtypes.itemsize_bits(jnp.dtype(kv_dtype))
tols = {
32: 0.15,
16: 0.2,
8: 0.2,
4: 0.2,
}
tol = tols[dtype_bits]
self.assertAllClose(output, expected, atol=tol, rtol=tol)
@parameterized.product(
dtype=[jnp.float32, jnp.bfloat16],
)
def test_ragged_paged_attention_basic(self, dtype):
seq_lens = [(192, 328), (128, 180), (64, 255)]
num_heads = (32, 8)
head_dim = 128
page_size = 16
num_pages = 1000
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
)
# TODO: support int4 and int8
@parameterized.product(
q_dtype=[jnp.bfloat16],
kv_dtype=[jnp.float8_e5m2, jnp.float8_e4m3fn],
kv_scales=[(0.5, 0.5), (None, None)],
)
def test_ragged_paged_attention_quantized_kv_cache(
self, q_dtype, kv_dtype, kv_scales
):
if not jtu.is_device_tpu_at_least(version=5):
self.skipTest("Expect TPUv5+")
seq_lens = [(192, 328), (128, 180), (64, 255)]
num_heads = (32, 8)
head_dim = 128
page_size = 16
num_pages = 1000
k_scale, v_scale = kv_scales
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
q_dtype,
kv_dtype,
num_pages,
k_scale=k_scale,
v_scale=v_scale,
)
@parameterized.product(
dtype=[jnp.float32, jnp.bfloat16],
)
def test_ragged_paged_attention_decode_only(self, dtype):
seq_lens = [
(1, 18),
(1, 129),
(1, 597),
(1, 122),
(1, 64),
(1, 322),
(1, 463),
(1, 181),
(1, 1107),
(1, 123),
(1, 31),
(1, 18),
(1, 1229),
(1, 229),
(1, 87),
(1, 1328),
]
num_heads = (32, 8)
head_dim = 128
page_size = 16
num_pages = 1000
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
)
@parameterized.product(
dtype=[jnp.float32, jnp.bfloat16],
)
def test_ragged_paged_attention_prefill_only(self, dtype):
seq_lens = [
(5, 18),
(15, 129),
(120, 597),
(100, 122),
(21, 64),
(32, 322),
(251, 463),
(40, 181),
(64, 1107),
(99, 123),
(10, 31),
(5, 18),
(3, 1229),
(120, 229),
(9, 87),
(2, 1328),
]
num_heads = (32, 8)
head_dim = 128
page_size = 16
num_pages = 1000
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
)
@parameterized.product(
dtype=[jnp.float32, jnp.bfloat16],
)
def test_ragged_paged_attention_mixed(self, dtype):
seq_lens = [
(5, 18),
(1, 129),
(120, 597),
(1, 122),
(1, 64),
(32, 322),
(251, 463),
(1, 181),
(1, 1107),
(99, 123),
(1, 31),
(5, 18),
(3, 1229),
(117, 229),
(1, 87),
(1, 1328),
]
num_heads = (32, 8)
head_dim = 128
page_size = 16
num_pages = 1000
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
)
@parameterized.product(
num_seqs=[1, 5, 16],
# TODO(jevinjiang): Support more num_heads!
# TODO(b/434082000): Investigate why (12, 2) does not work after libtpu-2025-07-21.
num_heads=[(32, 8), (32, 16), (16, 2), (4, 4), (8, 1)],
dtype=[jnp.float32, jnp.bfloat16],
num_kv_pages_per_block=[4, 8],
num_queries_per_block=[32, 64],
)
def test_ragged_paged_attention_complex(
self,
num_seqs,
num_heads,
dtype,
num_kv_pages_per_block,
num_queries_per_block,
):
rng = np.random.default_rng(1234)
q_lens = rng.integers(1, 100, num_seqs)
kv_lens = q_lens + rng.integers(0, 50, num_seqs)
seq_lens = list(zip(q_lens.tolist(), kv_lens.tolist()))
# TODO(jevinjiang): Support non-128 head_dim!
head_dim = 128
page_size = 16
num_pages = 1000
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
num_kv_pages_per_block=num_kv_pages_per_block,
num_queries_per_block=num_queries_per_block,
)
@parameterized.product(
num_kv_pages_per_block=[4, 8],
num_queries_per_block=[32, 64],
sliding_window=[None, 5, 128],
)
def test_ragged_paged_attention_sliding_window(
self,
num_kv_pages_per_block,
num_queries_per_block,
sliding_window: int | None,
):
num_seqs = 5
num_heads = (4, 4)
dtype = jnp.float32
rng = np.random.default_rng(1234)
q_lens = rng.integers(1, 100, num_seqs)
kv_lens = q_lens + rng.integers(0, 50, num_seqs)
seq_lens = list(zip(q_lens.tolist(), kv_lens.tolist()))
# TODO(jevinjiang): Support non-128 head_dim!
head_dim = 128
page_size = 16
num_pages = 1000
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
num_kv_pages_per_block=num_kv_pages_per_block,
num_queries_per_block=num_queries_per_block,
sliding_window=sliding_window,
)
@parameterized.product(
num_kv_pages_per_block=[4, 8],
num_queries_per_block=[32, 64],
soft_cap=[None, 50.0],
)
def test_ragged_paged_attention_logit_soft_capping(
self,
num_kv_pages_per_block,
num_queries_per_block,
soft_cap: float | None,
):
num_heads = (16, 2)
num_seqs = 2
dtype = jnp.float32
rng = np.random.default_rng(1234)
q_lens = rng.integers(1, 100, num_seqs)
kv_lens = q_lens + rng.integers(0, 50, num_seqs)
seq_lens = list(zip(q_lens.tolist(), kv_lens.tolist()))
head_dim = 128
page_size = 16
num_pages = 1000
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
num_kv_pages_per_block=num_kv_pages_per_block,
num_queries_per_block=num_queries_per_block,
soft_cap=soft_cap,
)
def test_ragged_paged_attention_sliding_window_should_be_positive(self):
dtype = jnp.float32
seq_lens = [(192, 328), (128, 180), (64, 255)]
num_heads = (32, 8)
head_dim = 128
page_size = 16
num_pages = 1000
with self.assertRaisesRegex(ValueError, "must be positive"):
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
sliding_window=0,
)
with self.assertRaisesRegex(ValueError, "must be positive"):
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
sliding_window=-1,
)
def test_ragged_paged_attention_soft_cap_cannot_be_zero(self):
dtype = jnp.float32
seq_lens = [(192, 328), (128, 180), (64, 255)]
num_heads = (32, 8)
head_dim = 128
page_size = 16
num_pages = 1000
with self.assertRaisesRegex(ValueError, "must not be 0.0"):
self._test_ragged_paged_attention(
seq_lens,
num_heads,
head_dim,
page_size,
dtype,
dtype,
num_pages,
soft_cap=0.0,
)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| RaggedPagedAttentionKernelTest |
python | PrefectHQ__prefect | tests/test_cache_policies.py | {
"start": 10513,
"end": 12798
} | class ____:
def test_changing_the_inputs_busts_the_cache(self):
inputs = dict(x=42)
key = DEFAULT.compute_key(task_ctx=None, inputs=inputs, flow_parameters=None)
inputs = dict(x=43)
new_key = DEFAULT.compute_key(
task_ctx=None, inputs=inputs, flow_parameters=None
)
assert key != new_key
def test_changing_the_run_id_busts_the_cache(self):
@dataclass
class Run:
id: str
flow_run_id: str = None
def my_task():
pass
@dataclass
class TaskCtx:
task_run: Run
task = my_task
task_run_a = Run(id="a", flow_run_id="a")
task_run_b = Run(id="b", flow_run_id="b")
task_run_c = Run(id="c", flow_run_id=None)
task_run_d = Run(id="d", flow_run_id=None)
key_a = DEFAULT.compute_key(
task_ctx=TaskCtx(task_run=task_run_a), inputs=None, flow_parameters=None
)
key_b = DEFAULT.compute_key(
task_ctx=TaskCtx(task_run=task_run_b), inputs=None, flow_parameters=None
)
key_c = DEFAULT.compute_key(
task_ctx=TaskCtx(task_run=task_run_c), inputs=None, flow_parameters=None
)
key_d = DEFAULT.compute_key(
task_ctx=TaskCtx(task_run=task_run_d), inputs=None, flow_parameters=None
)
assert key_a not in [key_b, key_c, key_d]
assert key_b not in [key_a, key_c, key_d]
assert key_c not in [key_a, key_b, key_d]
assert key_d not in [key_a, key_b, key_c]
def test_changing_the_source_busts_the_cache(self):
@dataclass
class Run:
id: str
flow_run_id: str = None
@dataclass
class TaskCtx:
task_run: Run
task: Callable = None
task_run = Run(id="a", flow_run_id="b")
ctx_one = TaskCtx(task_run=task_run, task=lambda: "foo")
ctx_two = TaskCtx(task_run=task_run, task=lambda: "bar")
key_one = DEFAULT.compute_key(
task_ctx=ctx_one, inputs=None, flow_parameters=None
)
key_two = DEFAULT.compute_key(
task_ctx=ctx_two, inputs=None, flow_parameters=None
)
assert key_one != key_two
| TestDefaultPolicy |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 158140,
"end": 163087
} | class ____(DataplexCatalogBaseOperator):
"""
Get an Entry resource.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogGetEntryOperator`
:param entry_id: Required. Entry identifier. It has to be unique within an Entry Group.
Entries corresponding to Google Cloud resources use an Entry ID format based on `full resource
names <https://cloud.google.com/apis/design/resource_names#full_resource_name>`__.
The format is a full resource name of the resource without the prefix double slashes in the API
service name part of the full resource name. This allows retrieval of entries using their associated
resource name.
For example, if the full resource name of a resource is
``//library.googleapis.com/shelves/shelf1/books/book2``, then the suggested entry_id is
``library.googleapis.com/shelves/shelf1/books/book2``.
It is also suggested to follow the same convention for entries corresponding to resources from
providers or systems other than Google Cloud.
The maximum size of the field is 4000 characters.
:param entry_group_id: Required. EntryGroup resource name to which created Entry will belong to.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param view: Optional. View to control which parts of an Entry the service should return.
:param aspect_types: Optional. Limits the aspects returned to the provided aspect types. It only works
for CUSTOM view.
:param paths: Optional. Limits the aspects returned to those associated with the provided paths within
the Entry. It only works for CUSTOM view.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple(
{"entry_id", "entry_group_id"} | set(DataplexCatalogBaseOperator.template_fields)
)
operator_extra_links = (DataplexCatalogEntryLink(),)
def __init__(
self,
entry_id: str,
entry_group_id: str,
view: EntryView | str | None = None,
aspect_types: MutableSequence[str] | None = None,
paths: MutableSequence[str] | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.entry_id = entry_id
self.entry_group_id = entry_group_id
self.view = view
self.aspect_types = aspect_types
self.paths = paths
@property
def extra_links_params(self) -> dict[str, Any]:
return {
**super().extra_links_params,
"entry_id": self.entry_id,
"entry_group_id": self.entry_group_id,
}
def execute(self, context: Context):
DataplexCatalogEntryLink.persist(context=context)
self.log.info(
"Retrieving Dataplex Catalog Entry %s.",
self.entry_id,
)
try:
entry = self.hook.get_entry(
entry_id=self.entry_id,
entry_group_id=self.entry_group_id,
view=self.view,
aspect_types=self.aspect_types,
paths=self.paths,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
except NotFound:
self.log.info(
"Dataplex Catalog Entry %s not found.",
self.entry_id,
)
raise AirflowException(NotFound)
except Exception as ex:
raise AirflowException(ex)
return Entry.to_dict(entry)
| DataplexCatalogGetEntryOperator |
python | huggingface__transformers | src/transformers/models/layoutlm/modeling_layoutlm.py | {
"start": 1512,
"end": 5766
} | class ____(nn.Module):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config):
super().__init__()
self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
self.x_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.y_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.h_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.w_position_embeddings = nn.Embedding(config.max_2d_position_embeddings, config.hidden_size)
self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
self.LayerNorm = LayoutLMLayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.register_buffer(
"position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
)
def forward(
self,
input_ids=None,
bbox=None,
token_type_ids=None,
position_ids=None,
inputs_embeds=None,
):
if input_ids is not None:
input_shape = input_ids.size()
else:
input_shape = inputs_embeds.size()[:-1]
seq_length = input_shape[1]
device = input_ids.device if input_ids is not None else inputs_embeds.device
if position_ids is None:
position_ids = self.position_ids[:, :seq_length]
if token_type_ids is None:
token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)
if inputs_embeds is None:
inputs_embeds = self.word_embeddings(input_ids)
words_embeddings = inputs_embeds
position_embeddings = self.position_embeddings(position_ids)
try:
left_position_embeddings = self.x_position_embeddings(bbox[:, :, 0])
upper_position_embeddings = self.y_position_embeddings(bbox[:, :, 1])
right_position_embeddings = self.x_position_embeddings(bbox[:, :, 2])
lower_position_embeddings = self.y_position_embeddings(bbox[:, :, 3])
except IndexError as e:
raise IndexError("The `bbox`coordinate values should be within 0-1000 range.") from e
h_position_embeddings = self.h_position_embeddings(bbox[:, :, 3] - bbox[:, :, 1])
w_position_embeddings = self.w_position_embeddings(bbox[:, :, 2] - bbox[:, :, 0])
token_type_embeddings = self.token_type_embeddings(token_type_ids)
embeddings = (
words_embeddings
+ position_embeddings
+ left_position_embeddings
+ upper_position_embeddings
+ right_position_embeddings
+ lower_position_embeddings
+ h_position_embeddings
+ w_position_embeddings
+ token_type_embeddings
)
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
# Copied from transformers.models.align.modeling_align.eager_attention_forward
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs,
):
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
# Copied from transformers.models.align.modeling_align.AlignTextSelfAttention with AlignText->LayoutLM
| LayoutLMEmbeddings |
python | getsentry__sentry | tests/sentry/pipeline/test_pipeline.py | {
"start": 677,
"end": 830
} | class ____:
def dispatch(self, request, pipeline):
pipeline.dispatch_count += 1
pipeline.bind_state("some_state", "value")
| PipelineStep |
python | getsentry__sentry-python | sentry_sdk/integrations/statsig.py | {
"start": 510,
"end": 1227
} | class ____(Integration):
identifier = "statsig"
@staticmethod
def setup_once():
# type: () -> None
version = parse_version(STATSIG_VERSION)
_check_minimum_version(StatsigIntegration, version, "statsig")
# Wrap and patch evaluation method(s) in the statsig module
old_check_gate = statsig_module.check_gate
@wraps(old_check_gate)
def sentry_check_gate(user, gate, *args, **kwargs):
# type: (StatsigUser, str, *Any, **Any) -> Any
enabled = old_check_gate(user, gate, *args, **kwargs)
add_feature_flag(gate, enabled)
return enabled
statsig_module.check_gate = sentry_check_gate
| StatsigIntegration |
python | kamyu104__LeetCode-Solutions | Python/minimum-garden-perimeter-to-collect-enough-apples.py | {
"start": 1535,
"end": 3360
} | class ____(object):
def minimumPerimeter(self, neededApples):
"""
:type neededApples: int
:rtype: int
"""
# r+r , (r-1)+r, ..., 1+r, 0+r , 1+r, ..., (r-1)+r, r+r
# r+(r-1), 0+(r-1), r+(r-1)
# . . .
# . . .
# . . .
# r+1 , (r-1)+1, ..., 1+1, 1+0 , 1+1, ..., (r-1)+1, r+1
# r+0 , (r-1)+0, ..., 1+0, 0+0 , 1+0, ..., (r-1)+0, r+0
# r+1 , (r-1)+1, ..., 1+1, 1+0 , 1+1, ..., (r-1)+1, r+1
# . . .
# . . .
# . . .
# r+(r-1), 0+(r-1), r+(r-1)
# r+r , (r-1)+r, ..., 1+r, 0+r , 1+r, ..., r+(r-1), r+r
#
# each up/down direction forms an arithmetic sequence, there are 2r+1 columns
# => 2*(1+r)*r/2 * (2r+1)
#
# each left/right direction forms an arithmetic sequence, there are 2r+1 rows
# => 2*(1+r)*r/2 * (2r+1)
#
# => total = 2 * 2*(1+r)*r/2 * (2r+1) = r*(2r+1)*(2r+2) = 4r^3+6r^2+2r
# => find min r, s.t. (2r)(2r+1)*(2r+2) >= 2*neededApples
# => find min x = 2r+2, s.t. (x-2)(x-1)(x) >= 2*neededApples
x = int((2*neededApples)**(1.0/3))
x -= x%2
assert((x-2)*(x-1)*x < 2*neededApples < (x+2)**3)
x += 2
if (x-2)*(x-1)*x < 2*neededApples:
x += 2
return 8*(x-2)//2
# Time: O(logn)
# Space: O(1)
| Solution2 |
python | sqlalchemy__sqlalchemy | test/orm/test_deferred.py | {
"start": 49699,
"end": 52570
} | class ____(fixtures.DeclarativeMappedTest):
"""test #8166"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class User(Base):
__tablename__ = "users"
id = Column(Integer, primary_key=True)
name = Column(String(10))
phone = Column(String(10))
class Task(Base):
__tablename__ = "tasks"
id = Column(Integer, primary_key=True)
name = Column(String(10))
created_by_id = Column(Integer, ForeignKey("users.id"))
managed_by_id = Column(Integer, ForeignKey("users.id"))
# reverse the order of these two in order to see it change
if cls.rel_ordering:
managed_by = relationship("User", foreign_keys=[managed_by_id])
created_by = relationship("User", foreign_keys=[created_by_id])
else:
created_by = relationship("User", foreign_keys=[created_by_id])
managed_by = relationship("User", foreign_keys=[managed_by_id])
@classmethod
def insert_data(cls, connection):
User, Task = cls.classes("User", "Task")
u1 = User(name="u1", phone="p1")
u2 = User(name="u2", phone="p2")
u3 = User(name="u3", phone="p3")
with Session(connection) as session:
session.add(Task(name="t1", created_by=u2, managed_by=u3))
session.add(Task(name="t2", created_by=u1, managed_by=u1))
session.commit()
def test_data_loaded(self):
User, Task = self.classes("User", "Task")
session = fixture_session()
all_tasks = session.query(Task).all() # noqa: F841
all_users = session.query(User).all() # noqa: F841
# expire all objects
session.expire_all()
# now load w/ the special paths. User.phone needs to be
# undeferred
tasks = (
session.query(Task)
.options(
joinedload(Task.managed_by).load_only(User.name),
joinedload(Task.created_by).load_only(User.name, User.phone),
)
.all()
)
session.close()
for task in tasks:
if task.name == "t1":
# for User u2, created_by path includes User.phone
eq_(task.created_by.phone, "p2")
# for User u3, managed_by path does not
assert "phone" not in task.managed_by.__dict__
elif task.name == "t2":
# User u1 was loaded by both created_by and managed_by
# path, so 'phone' should be unconditionally populated
is_(task.created_by, task.managed_by)
eq_(task.created_by.phone, "p1")
eq_(task.managed_by.phone, "p1")
else:
assert False
| MultiPathTest |
python | gevent__gevent | src/gevent/tests/test__core_stat.py | {
"start": 271,
"end": 3754
} | class ____(greentest.TestCase):
__timeout__ = greentest.LARGE_TIMEOUT
def setUp(self):
super(TestCoreStat, self).setUp()
fd, path = tempfile.mkstemp(suffix='.gevent_test_core_stat')
os.close(fd)
self.temp_path = path
self.hub = gevent.get_hub()
# If we don't specify an interval, we default to zero.
# libev interprets that as meaning to use its default interval,
# which is about 5 seconds. If we go below it's minimum check
# threshold, it bumps it up to the minimum.
self.watcher = self.hub.loop.stat(self.temp_path, interval=-1)
def tearDown(self):
self.watcher.close()
if os.path.exists(self.temp_path):
os.unlink(self.temp_path)
super(TestCoreStat, self).tearDown()
def _write(self):
with open(self.temp_path, 'wb', buffering=0) as f:
f.write(b'x')
def _check_attr(self, name, none):
# Deals with the complex behaviour of the 'attr' and 'prev'
# attributes on Windows. This codifies it, rather than simply letting
# the test fail, so we know exactly when and what changes it.
try:
x = getattr(self.watcher, name)
except ImportError:
if WIN:
# the 'posix' module is not available
pass
else:
raise
else:
if WIN and not LIBUV:
# The ImportError is only raised for the first time;
# after that, the attribute starts returning None
self.assertIsNone(x, "Only None is supported on Windows")
if none:
self.assertIsNone(x, name)
else:
self.assertIsNotNone(x, name)
def _wait_on_greenlet(self, func, *greenlet_args):
start = time.time()
self.hub.loop.update_now()
greenlet = gevent.spawn_later(DELAY, func, *greenlet_args)
with gevent.Timeout(5 + DELAY + 0.5):
self.hub.wait(self.watcher)
now = time.time()
self.assertGreaterEqual(now, start, "Time must move forward")
wait_duration = now - start
reaction = wait_duration - DELAY
if reaction <= 0.0:
# Sigh. This is especially true on PyPy on Windows
raise gevent.testing.flaky.FlakyTestRaceCondition(
"Bad timer resolution (on Windows?), test is useless. Start %s, now %s" % (start, now))
self.assertGreaterEqual(
reaction, 0.0,
'Watcher %s reacted too early: %.3fs' % (self.watcher, reaction))
greenlet.join()
def test_watcher_basics(self):
watcher = self.watcher
filename = self.temp_path
self.assertEqual(watcher.path, filename)
filenames = filename if isinstance(filename, bytes) else filename.encode('ascii')
self.assertEqual(watcher._paths, filenames)
self.assertEqual(watcher.interval, -1)
def test_write(self):
self._wait_on_greenlet(self._write)
self._check_attr('attr', False)
self._check_attr('prev', False)
# The watcher interval changed after it started; -1 is illegal
self.assertNotEqual(self.watcher.interval, -1)
def test_unlink(self):
self._wait_on_greenlet(os.unlink, self.temp_path)
self._check_attr('attr', True)
self._check_attr('prev', False)
if __name__ == '__main__':
greentest.main()
| TestCoreStat |
python | huggingface__transformers | src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | {
"start": 1617,
"end": 2292
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor`):
Last hidden states of the encoder.
extracted_states (`tuple[torch.FloatTensor]`):
The extracted states from the Feature Pyramid Network (FPN) and Path Aggregation Network (PAN) of the encoder.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
extracted_states: Optional[tuple[torch.FloatTensor]] = None
@dataclass
@auto_docstring(
custom_intro="""
Base class for outputs of the OmDetTurboDecoder.
"""
)
| OmDetTurboEncoderOutput |
python | pandas-dev__pandas | pandas/tests/arrays/interval/test_interval.py | {
"start": 4821,
"end": 6822
} | class ____:
def test_set_na(self, left_right_dtypes):
left, right = left_right_dtypes
result = IntervalArray.from_arrays(left, right, copy=True)
if result.dtype.subtype.kind not in ["m", "M"]:
msg = "'value' should be an interval type, got <.*NaTType'> instead."
with pytest.raises(TypeError, match=msg):
result[0] = pd.NaT
if result.dtype.subtype.kind in ["i", "u"]:
msg = "Cannot set float NaN to integer-backed IntervalArray"
# GH#45484 TypeError, not ValueError, matches what we get with
# non-NA un-holdable value.
with pytest.raises(TypeError, match=msg):
result[0] = np.nan
return
result[0] = np.nan
expected_left = Index([left._na_value] + list(left[1:]))
expected_right = Index([right._na_value] + list(right[1:]))
expected = IntervalArray.from_arrays(expected_left, expected_right)
tm.assert_extension_array_equal(result, expected)
def test_setitem_mismatched_closed(self):
arr = IntervalArray.from_breaks(range(4))
orig = arr.copy()
other = arr.set_closed("both")
msg = "'value.closed' is 'both', expected 'right'"
with pytest.raises(ValueError, match=msg):
arr[0] = other[0]
with pytest.raises(ValueError, match=msg):
arr[:1] = other[:1]
with pytest.raises(ValueError, match=msg):
arr[:0] = other[:0]
with pytest.raises(ValueError, match=msg):
arr[:] = other[::-1]
with pytest.raises(ValueError, match=msg):
arr[:] = list(other[::-1])
with pytest.raises(ValueError, match=msg):
arr[:] = other[::-1].astype(object)
with pytest.raises(ValueError, match=msg):
arr[:] = other[::-1].astype("category")
# empty list should be no-op
arr[:0] = []
tm.assert_interval_array_equal(arr, orig)
| TestSetitem |
python | boto__boto3 | boto3/exceptions.py | {
"start": 2803,
"end": 2855
} | class ____(Boto3Error):
pass
| S3TransferFailedError |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis05.py | {
"start": 315,
"end": 1428
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [47076480, 47078016]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis({"name": "XXX"})
chart.set_y_axis({"name": "YYY"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_values.py | {
"start": 7561,
"end": 8722
} | class ____:
def test_private_values_dt64tz(self):
dta = date_range("2000", periods=4, tz="US/Central")._data.reshape(-1, 1)
df = DataFrame(dta, columns=["A"])
tm.assert_equal(df._values, dta)
assert not np.shares_memory(df._values._ndarray, dta._ndarray)
# TimedeltaArray
tda = dta - dta
df2 = df - df
tm.assert_equal(df2._values, tda)
def test_private_values_dt64tz_multicol(self):
dta = date_range("2000", periods=8, tz="US/Central")._data.reshape(-1, 2)
df = DataFrame(dta, columns=["A", "B"])
tm.assert_equal(df._values, dta)
assert not np.shares_memory(df._values._ndarray, dta._ndarray)
# TimedeltaArray
tda = dta - dta
df2 = df - df
tm.assert_equal(df2._values, tda)
def test_private_values_dt64_multiblock(self):
dta = date_range("2000", periods=8)._data
df = DataFrame({"A": dta[:4]}, copy=False)
df["B"] = dta[4:]
assert len(df._mgr.blocks) == 2
result = df._values
expected = dta.reshape(2, 4).T
tm.assert_equal(result, expected)
| TestPrivateValues |
python | PyCQA__pylint | tests/functional/c/consider/consider_iterating_dictionary.py | {
"start": 299,
"end": 2603
} | class ____:
def keys(self):
return []
for key in Unknown().keys():
pass
for key in Unknown.keys():
pass
for key in dict.keys():
pass
for key in {}.values():
pass
for key in {}.key():
pass
for key in CustomClass().keys():
pass
[key for key in {}.keys()] # [consider-iterating-dictionary]
(key for key in {}.keys()) # [consider-iterating-dictionary]
{key for key in {}.keys()} # [consider-iterating-dictionary]
{key: key for key in {}.keys()} # [consider-iterating-dictionary]
comp1 = [key for key in {}.keys()] # [consider-iterating-dictionary]
comp2 = (key for key in {}.keys()) # [consider-iterating-dictionary]
comp3 = {key for key in {}.keys()} # [consider-iterating-dictionary]
COMP4 = {key: key for key in {}.keys()} # [consider-iterating-dictionary]
for key in {}.keys(): # [consider-iterating-dictionary]
pass
# Issue #1247
DICT = {'a': 1, 'b': 2}
comp1 = [k * 2 for k in DICT.keys()] + [k * 3 for k in DICT.keys()] # [consider-iterating-dictionary,consider-iterating-dictionary]
comp2, comp3 = [k * 2 for k in DICT.keys()], [k * 3 for k in DICT.keys()] # [consider-iterating-dictionary,consider-iterating-dictionary]
SOME_TUPLE = ([k * 2 for k in DICT.keys()], [k * 3 for k in DICT.keys()]) # [consider-iterating-dictionary,consider-iterating-dictionary]
# Checks for membership checks
if 1 in dict().keys(): # [consider-iterating-dictionary]
pass
if 1 in {}.keys(): # [consider-iterating-dictionary]
pass
if 1 in Unknown().keys():
pass
if 1 in Unknown.keys():
pass
if 1 in CustomClass().keys():
pass
if 1 in dict():
pass
if 1 in dict().values():
pass
if (1, 1) in dict().items():
pass
if [1] == {}.keys():
pass
if [1] == {}:
pass
if [1] == dict():
pass
var = 1 in {}.keys() # [consider-iterating-dictionary]
var = 1 in {}
var = 1 in dict()
var = [1, 2] == {}.keys() in {False}
# Additional membership checks
# See: https://github.com/pylint-dev/pylint/issues/5323
METADATA = {}
if "a" not in list(METADATA.keys()): # [consider-iterating-dictionary]
print(1)
if "a" not in METADATA.keys(): # [consider-iterating-dictionary]
print(1)
if "a" in list(METADATA.keys()): # [consider-iterating-dictionary]
print(1)
if "a" in METADATA.keys(): # [consider-iterating-dictionary]
print(1)
| CustomClass |
python | ray-project__ray | python/ray/train/v2/api/config.py | {
"start": 5637,
"end": 8296
} | class ____:
"""Configuration for checkpointing.
Default behavior is to persist all checkpoints reported with
:meth:`ray.train.report` to disk. If ``num_to_keep`` is set,
the default retention policy is to keep the most recent checkpoints.
Args:
num_to_keep: The maximum number of checkpoints to keep.
If you report more checkpoints than this, the oldest
(or lowest-scoring, if ``checkpoint_score_attribute`` is set)
checkpoint will be deleted.
If this is ``None`` then all checkpoints will be kept. Must be >= 1.
checkpoint_score_attribute: The attribute that will be used to
score checkpoints to determine which checkpoints should be kept.
This attribute must be a key from the metrics dictionary
attached to the checkpoint. This attribute must have a numerical value.
checkpoint_score_order: Either "max" or "min".
If "max"/"min", then checkpoints with highest/lowest values of
the ``checkpoint_score_attribute`` will be kept. Defaults to "max".
checkpoint_frequency: [Deprecated]
checkpoint_at_end: [Deprecated]
"""
num_to_keep: Optional[int] = None
checkpoint_score_attribute: Optional[str] = None
checkpoint_score_order: Literal["max", "min"] = "max"
checkpoint_frequency: Union[Optional[int], Literal[_DEPRECATED]] = _DEPRECATED
checkpoint_at_end: Union[Optional[bool], Literal[_DEPRECATED]] = _DEPRECATED
def __post_init__(self):
if self.checkpoint_frequency != _DEPRECATED:
raise DeprecationWarning(
"`checkpoint_frequency` is deprecated since it does not "
"apply to user-defined training functions. "
"Please remove this argument from your CheckpointConfig."
)
if self.checkpoint_at_end != _DEPRECATED:
raise DeprecationWarning(
"`checkpoint_at_end` is deprecated since it does not "
"apply to user-defined training functions. "
"Please remove this argument from your CheckpointConfig."
)
if self.num_to_keep is not None and self.num_to_keep <= 0:
raise ValueError(
f"Received invalid num_to_keep: {self.num_to_keep}. "
"Must be None or an integer >= 1."
)
if self.checkpoint_score_order not in ("max", "min"):
raise ValueError(
f"Received invalid checkpoint_score_order: {self.checkpoint_score_order}. "
"Must be 'max' or 'min'."
)
@dataclass
| CheckpointConfig |
python | tensorflow__tensorflow | tensorflow/python/framework/ops.py | {
"start": 176252,
"end": 201071
} | class ____(stack.DefaultStack[Graph]): # pylint: disable=protected-access
"""A thread-local stack of objects for providing an implicit default graph."""
def __init__(self) -> None:
super(_DefaultGraphStack, self).__init__()
self._global_default_graph = None
def get_default(self) -> Graph:
"""Override that returns a global default if the stack is empty."""
if self.stack:
return self.stack[-1]
elif self._global_default_graph:
return self._global_default_graph
else:
self._global_default_graph = Graph()
return self._global_default_graph
def _GetGlobalDefaultGraph(self) -> Graph:
if self._global_default_graph is None:
# TODO(mrry): Perhaps log that the default graph is being used, or set
# provide some other feedback to prevent confusion when a mixture of
# the global default graph and an explicit graph are combined in the
# same process.
self._global_default_graph = Graph()
return self._global_default_graph
def reset(self) -> None:
super(_DefaultGraphStack, self).reset()
self._global_default_graph = None
@tf_contextlib.contextmanager
def get_controller(self, default) -> Iterator[Graph]:
context.context().context_switches.push(default.building_function,
default.as_default,
default._device_function_stack)
try:
with super(_DefaultGraphStack,
self).get_controller(default) as g, context.graph_mode(): # pytype: disable=wrong-arg-count
yield g
finally:
# If an exception is raised here it may be hiding a related exception in
# the try-block (just above).
context.context().context_switches.pop()
_default_graph_stack: _DefaultGraphStack = _DefaultGraphStack()
# Shared helper used in init_scope and executing_eagerly_outside_functions
# to obtain the outermost context that is not building a function, and the
# innermost non empty device stack.
def _get_outer_context_and_inner_device_stack(
) -> tuple[Callable[[], ContextManager[Graph]], traceable_stack.TraceableStack]:
"""Get the outermost context not building a function."""
default_graph = get_default_graph()
outer_context = None
innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access
if not _default_graph_stack.stack:
# If the default graph stack is empty, then we cannot be building a
# function. Install the global graph (which, in this case, is also the
# default graph) as the outer context.
if default_graph.building_function:
raise RuntimeError("The global graph is building a function.")
outer_context = default_graph.as_default
else:
# Find a context that is not building a function.
for stack_entry in reversed(context.context().context_switches.stack):
if not innermost_nonempty_device_stack:
innermost_nonempty_device_stack = stack_entry.device_stack
if not stack_entry.is_building_function:
outer_context = stack_entry.enter_context_fn
break
if outer_context is None:
# As a last resort, obtain the global default graph; this graph doesn't
# necessarily live on the graph stack (and hence it doesn't necessarily
# live on the context stack), but it is stored in the graph stack's
# encapsulating object.
outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access
if outer_context is None:
# Sanity check; this shouldn't be triggered.
raise RuntimeError("All graphs are building functions, and no "
"eager context was previously active.")
return outer_context, innermost_nonempty_device_stack
# pylint: disable=g-doc-return-or-yield,line-too-long
@tf_export("init_scope")
@tf_contextlib.contextmanager
def init_scope() -> Iterator[None]:
"""A context manager that lifts ops out of control-flow scopes and function-building graphs.
There is often a need to lift variable initialization ops out of control-flow
scopes, function-building graphs, and gradient tapes. Entering an
`init_scope` is a mechanism for satisfying these desiderata. In particular,
entering an `init_scope` has three effects:
(1) All control dependencies are cleared the moment the scope is entered;
this is equivalent to entering the context manager returned from
`control_dependencies(None)`, which has the side-effect of exiting
control-flow scopes like `tf.cond` and `tf.while_loop`.
(2) All operations that are created while the scope is active are lifted
into the lowest context on the `context_stack` that is not building a
graph function. Here, a context is defined as either a graph or an eager
context. Every context switch, i.e., every installation of a graph as
the default graph and every switch into eager mode, is logged in a
thread-local stack called `context_switches`; the log entry for a
context switch is popped from the stack when the context is exited.
Entering an `init_scope` is equivalent to crawling up
`context_switches`, finding the first context that is not building a
graph function, and entering it. A caveat is that if graph mode is
enabled but the default graph stack is empty, then entering an
`init_scope` will simply install a fresh graph as the default one.
(3) The gradient tape is paused while the scope is active.
When eager execution is enabled, code inside an init_scope block runs with
eager execution enabled even when tracing a `tf.function`. For example:
```python
tf.compat.v1.enable_eager_execution()
@tf.function
def func():
# A function constructs TensorFlow graphs,
# it does not execute eagerly.
assert not tf.executing_eagerly()
with tf.init_scope():
# Initialization runs with eager execution enabled
assert tf.executing_eagerly()
```
Raises:
RuntimeError: if graph state is incompatible with this initialization.
"""
# pylint: enable=g-doc-return-or-yield,line-too-long
if context.executing_eagerly():
# Fastpath.
with record.stop_recording():
yield
else:
# Retrieve the active name scope: entering an `init_scope` preserves
# the name scope of the current context.
scope = get_default_graph().get_name_scope()
if scope and scope[-1] != "/":
# Names that end with trailing slashes are treated by `name_scope` as
# absolute.
scope = scope + "/"
outer_context, innermost_nonempty_device_stack = (
_get_outer_context_and_inner_device_stack())
outer_graph = None
outer_device_stack = None
try:
with outer_context(), name_scope(
scope, skip_on_eager=False), control_dependencies(
None), record.stop_recording():
context_manager = NullContextmanager
context_manager_input = None
if not context.executing_eagerly():
# The device stack is preserved when lifting into a graph. Eager
# execution doesn't implement device stacks and in particular it
# doesn't support device functions, so in general it's not possible
# to do the same when lifting into the eager context.
outer_graph = get_default_graph()
outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access
outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access
elif innermost_nonempty_device_stack is not None:
for device_spec in innermost_nonempty_device_stack.peek_objs():
if device_spec.function is None:
break
if device_spec.raw_string:
context_manager = context.device
context_manager_input = device_spec.raw_string
break
# It is currently not possible to have a device function in V2,
# but in V1 we are unable to apply device functions in eager mode.
# This means that we will silently skip some of the entries on the
# device stack in V1 + eager mode.
with context_manager(context_manager_input):
yield
finally:
# If an exception is raised here it may be hiding a related exception in
# try-block (just above).
if outer_graph is not None:
outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access
@tf_export(v1=["executing_eagerly_outside_functions"])
def executing_eagerly_outside_functions() -> bool:
"""Returns True if executing eagerly, even if inside a graph function.
This function will check the outermost context for the program and see if
it is in eager mode. It is useful comparing to `tf.executing_eagerly()`,
which checks the current context and will return `False` within a
`tf.function` body. It can be used to build library that behave differently
in eager runtime and v1 session runtime (deprecated).
Example:
>>> tf.compat.v1.enable_eager_execution()
>>> @tf.function
... def func():
... # A function constructs TensorFlow graphs, it does not execute eagerly,
... # but the outer most context is still eager.
... assert not tf.executing_eagerly()
... return tf.compat.v1.executing_eagerly_outside_functions()
>>> func()
<tf.Tensor: shape=(), dtype=bool, numpy=True>
Returns:
boolean, whether the outermost context is in eager mode.
"""
if context.executing_eagerly():
return True
else:
outer_context, _ = _get_outer_context_and_inner_device_stack()
with outer_context():
return context.executing_eagerly()
@tf_export("inside_function", v1=[])
def inside_function() -> bool:
"""Indicates whether the caller code is executing inside a `tf.function`.
Returns:
Boolean, True if the caller code is executing inside a `tf.function`
rather than eagerly.
Example:
>>> tf.inside_function()
False
>>> @tf.function
... def f():
... print(tf.inside_function())
>>> f()
True
"""
return get_default_graph().building_function
@tf_export(v1=["enable_eager_execution"])
def enable_eager_execution(config=None, device_policy=None,
execution_mode=None) -> None:
"""Enables eager execution for the lifetime of this program.
Eager execution provides an imperative interface to TensorFlow. With eager
execution enabled, TensorFlow functions execute operations immediately (as
opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`)
and
return concrete values (as opposed to symbolic references to a node in a
computational graph).
For example:
```python
tf.compat.v1.enable_eager_execution()
# After eager execution is enabled, operations are executed as they are
# defined and Tensor objects hold concrete values, which can be accessed as
# numpy.ndarray`s through the numpy() method.
assert tf.multiply(6, 7).numpy() == 42
```
Eager execution cannot be enabled after TensorFlow APIs have been used to
create or execute graphs. It is typically recommended to invoke this function
at program startup and not in a library (as most libraries should be usable
both with and without eager execution).
@compatibility(TF2)
This function is not necessary if you are using TF2. Eager execution is
enabled by default.
@end_compatibility
Args:
config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the
environment in which operations are executed. Note that
`tf.compat.v1.ConfigProto` is also used to configure graph execution (via
`tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto`
are not implemented (or are irrelevant) when eager execution is enabled.
device_policy: (Optional.) Policy controlling how operations requiring
inputs on a specific device (e.g., a GPU 0) handle inputs on a different
device (e.g. GPU 1 or CPU). When set to None, an appropriate value will
be picked automatically. The value picked may change between TensorFlow
releases.
Valid values:
- DEVICE_PLACEMENT_EXPLICIT: raises an error if the
placement is not correct.
- DEVICE_PLACEMENT_WARN: copies the tensors which are not
on the right device but logs a warning.
- DEVICE_PLACEMENT_SILENT: silently copies the tensors.
Note that this may hide performance problems as there is no notification
provided when operations are blocked on the tensor being copied between
devices.
- DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies
int32 tensors, raising errors on the other ones.
execution_mode: (Optional.) Policy controlling how operations dispatched are
actually executed. When set to None, an appropriate value will be picked
automatically. The value picked may change between TensorFlow releases.
Valid values:
- SYNC: executes each operation synchronously.
- ASYNC: executes each operation asynchronously. These
operations may return "non-ready" handles.
Raises:
ValueError: If eager execution is enabled after creating/executing a
TensorFlow graph, or if options provided conflict with a previous call
to this function.
"""
_api_usage_gauge.get_cell().set(True)
logging.vlog(1, "Enabling eager execution")
if context.default_execution_mode != context.EAGER_MODE:
return enable_eager_execution_internal(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=None)
@tf_export(v1=["disable_eager_execution"])
def disable_eager_execution() -> None:
"""Disables eager execution.
This function can only be called before any Graphs, Ops, or Tensors have been
created.
@compatibility(TF2)
This function is not necessary if you are using TF2. Eager execution is
enabled by default. If you want to use Graph mode please consider
[tf.function](https://www.tensorflow.org/api_docs/python/tf/function).
@end_compatibility
"""
_api_usage_gauge.get_cell().set(False)
logging.vlog(1, "Disabling eager execution")
context.default_execution_mode = context.GRAPH_MODE
c = context.context_safe()
if c is not None:
c._thread_local_data.is_eager = False # pylint: disable=protected-access
def enable_eager_execution_internal(config=None,
device_policy=None,
execution_mode=None,
server_def=None) -> None:
"""Enables eager execution for the lifetime of this program.
Most of the doc string for enable_eager_execution is relevant here as well.
Args:
config: See enable_eager_execution doc string
device_policy: See enable_eager_execution doc string
execution_mode: See enable_eager_execution doc string
server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on
remote devices. GrpcServers need to be started by creating an identical
server_def to this, and setting the appropriate task_indexes, so that the
servers can communicate. It will then be possible to execute operations on
remote devices.
Raises:
ValueError
"""
if config is not None and not isinstance(config, config_pb2.ConfigProto):
raise TypeError("config must be a tf.ConfigProto, but got %s" %
type(config))
if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT,
context.DEVICE_PLACEMENT_WARN,
context.DEVICE_PLACEMENT_SILENT,
context.DEVICE_PLACEMENT_SILENT_FOR_INT32):
raise ValueError("device_policy must be one of None, DEVICE_PLACEMENT_*")
if execution_mode not in (None, context.SYNC, context.ASYNC):
raise ValueError("execution_mode must be one of None, SYNC, " "ASYNC")
if context.default_execution_mode == context.GRAPH_MODE:
graph_mode_has_been_used = (
_default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access
if graph_mode_has_been_used:
raise ValueError(
"tf.enable_eager_execution must be called at program startup.")
context.default_execution_mode = context.EAGER_MODE
# pylint: disable=protected-access
with context._context_lock:
if context._context is None:
context._set_context_locked(context.Context(
config=config,
device_policy=device_policy,
execution_mode=execution_mode,
server_def=server_def))
elif ((config is not None and config is not context._context._config) or
(device_policy is not None and
device_policy is not context._context._device_policy) or
(execution_mode is not None and
execution_mode is not context._context._execution_mode)):
raise ValueError(
"Trying to change the options of an active eager"
" execution. Context config: %s, specified config:"
" %s. Context device policy: %s, specified device"
" policy: %s. Context execution mode: %s, "
" specified execution mode %s." %
(context._context._config, config, context._context._device_policy,
device_policy, context._context._execution_mode, execution_mode))
else:
# We already created everything, so update the thread local data.
context._context._thread_local_data.is_eager = True
# Monkey patch to get rid of an unnecessary conditional since the context is
# now initialized.
context.context = context.context_safe
def eager_run(main=None, argv=None) -> NoReturn:
"""Runs the program with an optional main function and argv list.
The program will run with eager execution enabled.
Example:
```python
import tensorflow as tf
# Import subject to future changes:
def main(_):
u = tf.constant(6.0)
v = tf.constant(7.0)
print(u * v)
if __name__ == "__main__":
tfe.run()
```
Args:
main: the main function to run.
argv: the arguments to pass to it.
"""
enable_eager_execution()
app.run(main, argv)
@tf_export(v1=["reset_default_graph"])
def reset_default_graph() -> None:
"""Clears the default graph stack and resets the global default graph.
NOTE: The default graph is a property of the current thread. This
function applies only to the current thread. Calling this function while
a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will
result in undefined
behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects
after calling this function will result in undefined behavior.
@compatibility(TF2)
`reset_default_graph` does not work with either eager execution or
`tf.function`, and you should not invoke it directly. To migrate code that
uses Graph-related functions to TF2, rewrite the code without them. See the
[migration guide](https://www.tensorflow.org/guide/migrate) for more
description about the behavior and semantic changes between Tensorflow 1 and
Tensorflow 2.
@end_compatibility
Raises:
AssertionError: If this function is called within a nested graph.
"""
if not _default_graph_stack.is_cleared():
raise AssertionError("Do not use tf.reset_default_graph() to clear "
"nested graphs. If you need a cleared graph, "
"exit the nesting and create a new graph.")
_default_graph_stack.reset()
@tf_export(v1=["get_default_graph"])
def get_default_graph() -> Graph:
"""Returns the default graph for the current thread.
The returned graph will be the innermost graph on which a
`Graph.as_default()` context has been entered, or a global default
graph if none has been explicitly created.
NOTE: The default graph is a property of the current thread. If you
create a new thread, and wish to use the default graph in that
thread, you must explicitly add a `with g.as_default():` in that
thread's function.
@compatibility(TF2)
`get_default_graph` does not work with either eager execution or
`tf.function`, and you should not invoke it directly. To migrate code that
uses Graph-related functions to TF2, rewrite the code without them. See the
[migration guide](https://www.tensorflow.org/guide/migrate) for more
description about the behavior and semantic changes between Tensorflow 1 and
Tensorflow 2.
@end_compatibility
Returns:
The default `Graph` being used in the current thread.
"""
return _default_graph_stack.get_default()
def has_default_graph() -> bool:
"""Returns True if there is a default graph."""
return len(_default_graph_stack.stack) >= 1
# Exported due to b/171079555
@tf_export("__internal__.get_name_scope", v1=[])
def get_name_scope() -> str:
"""Returns the current name scope in the default_graph.
For example:
```python
with tf.name_scope('scope1'):
with tf.name_scope('scope2'):
print(tf.get_name_scope())
```
would print the string `scope1/scope2`.
Returns:
A string representing the current name scope.
"""
if context.executing_eagerly():
return context.context().scope_name.rstrip("/")
return get_default_graph().get_name_scope()
def _assert_same_graph(original_item, item) -> None:
"""Fail if the 2 items are from different graphs.
Args:
original_item: Original item to check against.
item: Item to check.
Raises:
ValueError: if graphs do not match.
"""
original_graph = getattr(original_item, "graph", None)
graph = getattr(item, "graph", None)
if original_graph and graph and original_graph is not graph:
raise ValueError(
"%s must be from the same graph as %s (graphs are %s and %s)." %
(item, original_item, graph, original_graph))
def _get_graph_from_inputs(op_input_list, graph=None) -> Graph:
"""Returns the appropriate graph to use for the given inputs.
This library method provides a consistent algorithm for choosing the graph
in which an Operation should be constructed:
1. If the default graph is being used to construct a function, we
use the default graph.
2. If the "graph" is specified explicitly, we validate that all of the inputs
in "op_input_list" are compatible with that graph.
3. Otherwise, we attempt to select a graph from the first Operation-
or Tensor-valued input in "op_input_list", and validate that all other
such inputs are in the same graph.
4. If the graph was not specified and it could not be inferred from
"op_input_list", we attempt to use the default graph.
Args:
op_input_list: A list of inputs to an operation, which may include `Tensor`,
`Operation`, and other objects that may be converted to a graph element.
graph: (Optional) The explicit graph to use.
Raises:
TypeError: If op_input_list is not a list or tuple, or if graph is not a
Graph.
ValueError: If a graph is explicitly passed and not all inputs are from it,
or if the inputs are from multiple graphs, or we could not find a graph
and there was no default graph.
Returns:
The appropriate graph to use for the given inputs.
"""
current_default_graph = get_default_graph()
if current_default_graph.building_function:
return current_default_graph
op_input_list = tuple(op_input_list) # Handle generators correctly
if graph and not isinstance(graph, Graph):
raise TypeError("Input graph needs to be a Graph: %s" % (graph,))
# 1. We validate that all of the inputs are from the same graph. This is
# either the supplied graph parameter, or the first one selected from one
# the graph-element-valued inputs. In the latter case, we hold onto
# that input in original_graph_element so we can provide a more
# informative error if a mismatch is found.
original_graph_element = None
for op_input in op_input_list:
graph_element = None
if isinstance(op_input, (Operation, SymbolicTensor)):
graph_element = op_input
else:
graph_element = _as_graph_element(op_input)
if graph_element is not None:
if not graph:
original_graph_element = graph_element
graph = getattr(graph_element, "graph", None)
elif original_graph_element is not None:
_assert_same_graph(original_graph_element, graph_element)
elif graph_element.graph is not graph:
raise ValueError("%s is not from the passed-in graph." % graph_element)
# 2. If all else fails, we use the default graph, which is always there.
return graph or current_default_graph
@tf_export(v1=["GraphKeys"])
| _DefaultGraphStack |
python | openai__openai-python | src/openai/resources/fine_tuning/fine_tuning.py | {
"start": 1951,
"end": 3104
} | class ____(AsyncAPIResource):
@cached_property
def jobs(self) -> AsyncJobs:
return AsyncJobs(self._client)
@cached_property
def checkpoints(self) -> AsyncCheckpoints:
return AsyncCheckpoints(self._client)
@cached_property
def alpha(self) -> AsyncAlpha:
return AsyncAlpha(self._client)
@cached_property
def with_raw_response(self) -> AsyncFineTuningWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return AsyncFineTuningWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> AsyncFineTuningWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return AsyncFineTuningWithStreamingResponse(self)
| AsyncFineTuning |
python | fluentpython__example-code-2e | 11-pythonic-obj/vector2d_v2_fmt_snippet.py | {
"start": 1384,
"end": 2704
} | class ____:
typecode = 'd'
def __init__(self, x, y):
self.x = float(x)
self.y = float(y)
def __iter__(self):
return (i for i in (self.x, self.y))
def __repr__(self):
class_name = type(self).__name__
return '{}({!r}, {!r})'.format(class_name, *self)
def __str__(self):
return str(tuple(self))
def __bytes__(self):
return bytes(array(Vector2d.typecode, self))
def __eq__(self, other):
return tuple(self) == tuple(other)
def __abs__(self):
return math.hypot(self.x, self.y)
def __bool__(self):
return bool(abs(self))
def angle(self):
return math.atan2(self.y, self.x)
# tag::VECTOR2D_V2_FORMAT[]
def __format__(self, fmt_spec=''):
if fmt_spec.endswith('p'): # <1>
fmt_spec = fmt_spec[:-1] # <2>
coords = (abs(self), self.angle()) # <3>
outer_fmt = '<{}, {}>' # <4>
else:
coords = self # <5>
outer_fmt = '({}, {})' # <6>
components = (format(c, fmt_spec) for c in coords) # <7>
return outer_fmt.format(*components) # <8>
# end::VECTOR2D_V2_FORMAT[]
@classmethod
def frombytes(cls, octets):
memv = memoryview(octets).cast(cls.typecode)
return cls(*memv)
| Vector2d |
python | pytorch__pytorch | torch/_inductor/output_code.py | {
"start": 14894,
"end": 31947
} | class ____(OutputCode):
"""
Class holding a compiled FX graph. This is the object serialized on disk
to support FxGraph caching.
"""
current_callable: Optional[Callable[..., Any]]
recursively_apply_fns: Optional[Callable[..., Any]]
compiled_fn_runner: Optional[Any]
cache_key: str
source_code: str = dataclasses.field(repr=False) # Do not display source_code
runnable_graph_str: str = dataclasses.field(repr=False) # Do not display graph
inductor_post_grad_graph_str: str = dataclasses.field(
repr=False
) # Do not display graph
cache_linemap: Optional[list[tuple[int, str]]]
device_types: OrderedSet[str]
device_idxs: OrderedSet[int]
mutated_inputs: OrderedSet[str]
mutated_input_idxs: OrderedSet[int]
constants: Optional[dict[str, torch.Tensor]]
frozen_param_names: dict[str, str]
torchbind_constants: dict[str, torch._C.ScriptObject | FakeScriptObject]
output_strides: Optional[list[Optional[tuple[_StrideExprStr, ...]]]]
disabled_cudagraphs_reason: Optional[str]
metrics_deltas: metrics.CachedMetricsDeltas
counter_deltas: Counter[str]
# This is a string representation of an expression we serialize
# with the object so the guards can be evaluated in a different
# context in order to verify the validity of serving a cached
# fx graph. The expression must be generated by:
# ShapeEnv.produce_guards_expression()
guards_expr: Optional[str]
inductor_provenance_mapping_str: Optional[str]
inductor_provenance_stack_traces_str: Optional[str]
cudagraph_info: Optional[CudagraphCachedInfo]
partition_maps: Optional[list[GraphPartitionMap]]
fx_kwargs: _CompileFxKwargs
inputs_to_check: Sequence[int]
_boxed_call: Optional[bool] = None
_triton_bundle: Optional[TritonBundle] = None
_wrap_compiled_regions: bool = False
def __init__(
self,
current_callable: Optional[Callable[..., Any]],
graph: GraphLowering,
gm: torch.fx.GraphModule,
output_strides: list[Optional[tuple[_StrideExprStr, ...]]],
disabled_cudagraphs_reason: Optional[str],
metrics_deltas: metrics.CachedMetricsDeltas,
counter_deltas: Counter[str],
cudagraphs: BoxedBool,
example_inputs: Sequence[InputType],
static_input_idxs: Sequence[int],
fx_kwargs: _CompileFxKwargs,
inputs_to_check: Sequence[int],
runnable_graph_str: str,
inductor_post_grad_graph_str: str,
compiled_fn_runner: Optional[Any] = None,
inductor_provenance_mapping_str: Optional[str] = None,
inductor_provenance_stack_traces_str: Optional[str] = None,
) -> None:
self.current_callable = current_callable
self.compiled_fn_runner = compiled_fn_runner
self.recursively_apply_fns = (
compiled_fn_runner.recursively_apply_fns
if compiled_fn_runner is not None
else None
)
self.cache_key = graph.cache_key
if graph.cache_path:
with open(graph.cache_path) as f:
self.source_code = f.read()
self.runnable_graph_str = runnable_graph_str
self.inductor_post_grad_graph_str = inductor_post_grad_graph_str
self.inductor_provenance_mapping_str = inductor_provenance_mapping_str
self.inductor_provenance_stack_traces_str = inductor_provenance_stack_traces_str
self.cache_linemap = graph.cache_linemap
# TODO - ordered set
self.device_types = OrderedSet(graph.device_types)
self.device_idxs = OrderedSet(graph.device_idxs)
self.mutated_inputs = OrderedSet(graph.mutated_inputs)
self.mutated_input_idxs = OrderedSet(graph.mutated_input_idxs)
# We store the constant attributes in the cache entry and re-attach them
# to the module created in PyCodeCache.load_by_key_path. In the case that
# the graph has frozen parameters, we save the mapping from the attribute
# names in the GraphLowering to the original name of the attribute in the
# GraphModule. When we create the module from the cache entry, we then
# look up the constants from the current GraphModule. This scheme allows
# us to support caching with freezing.
if not has_frozen_params(gm):
self.constants = graph.constants
self.frozen_param_names = {}
else:
self.constants = {}
self.frozen_param_names = {}
for k, v in graph.constants.items():
if is_frozen_param(v):
self.frozen_param_names[k] = graph.allocated_constant_name[k]
else:
self.constants[k] = v
self.torchbind_constants = graph.torchbind_constants
self.output_strides = output_strides
self.disabled_cudagraphs_reason = disabled_cudagraphs_reason
self.metrics_deltas = metrics_deltas
self.counter_deltas = counter_deltas
self.guards_expr = None
self.cudagraph_info = None
self.partition_maps = graph.partition_maps
self.fx_kwargs = {}
self.inputs_to_check = ()
cudagraph_info = None
if cudagraphs:
# check cudagraph disabling reasons from inductor lowering
if self.disabled_cudagraphs_reason:
if "cuda" in self.device_types:
log_cudagraph_skip_and_bump_counter(
f"skipping cudagraphs due to {self.disabled_cudagraphs_reason}"
)
else:
counters["inductor"]["cudagraph_skips"] += 1
BoxedBool.disable(cudagraphs)
else:
complex_memory_overlap_inputs = any(
complex_memory_overlap(t)
for t in example_inputs
if isinstance(t, torch.Tensor)
)
if not config.triton.cudagraph_support_input_mutation:
# Skip supports for cudagraph-managed tensors
from torch._inductor.cudagraph_utils import (
check_for_mutation_ignore_cuda_graph_managed_tensor,
)
has_mutation_str = (
check_for_mutation_ignore_cuda_graph_managed_tensor(
gm,
self.mutated_inputs,
self.mutated_input_idxs,
static_input_idxs,
)
)
has_mutation = has_mutation_str is not None
if has_mutation:
self.disabled_cudagraphs_reason = has_mutation_str
else:
# Check mutation later to support cudagraph-managed tensors
has_mutation = None
cudagraph_tests = [
(not has_mutation, "mutated inputs"),
(not complex_memory_overlap_inputs, "complex memory overlap"),
(
all(
isinstance(t, (torch.Tensor, torch.SymInt, torch.Generator))
for t in example_inputs
),
"non-Tensor inputs",
),
]
output = output_node(gm)
# output args are tuple of first argument
assert len(output.args) == 1
stack_traces = [
(arg.stack_trace if isinstance(arg, torch.fx.node.Node) else None)
for arg in output.args[0] # type: ignore[union-attr]
]
cudagraph_fail_reasons = [s for b, s in cudagraph_tests if not b]
placeholders = tuple(get_placeholder_info(gm.graph))
cudagraph_info = CudagraphCachedInfo(
placeholders, stack_traces, cudagraph_fail_reasons
)
self.cudagraph_info = cudagraph_info
self.inputs_to_check = inputs_to_check
self.fx_kwargs = fx_kwargs
# aot autograd needs to know to pass in inputs as a list
self._boxed_call = True
# Store whether to wrap compiled regions in inductor_compiled_code HOP
# This is set at compile time to avoid runtime overhead
self._wrap_compiled_regions = config.wrap_inductor_compiled_regions
def __del__(self) -> None:
if self.compiled_fn_runner is not None:
# For torch._inductor.config.graph_partition = True,
# self.compiled_fn_runner.partitions hold cudagraphified functions
# which prevents deallocation. When CompiledFxGraph is deleted,
# self.compiled_fn_runner will not be called in the future so we
# should also delete these partitions.
del self.compiled_fn_runner.partitions
def __call__(self, inputs: Sequence[Any]) -> Any:
assert self.current_callable is not None
if (
torch._inductor.debug.RECORD_GRAPH_EXECUTION
and torch._inductor.debug.GRAPH_EXECUTION_ORDER is not None
):
graph_id = self.fx_kwargs.get("graph_id")
compile_id = (
torch._inductor.debug.GRAPH_COMPILE_IDS.get(graph_id)
if graph_id is not None
and torch._inductor.debug.GRAPH_COMPILE_IDS is not None
else None
)
torch._inductor.debug.GRAPH_EXECUTION_ORDER.append(
{
"compile_id": compile_id,
}
)
try:
# Checking the profiler directly is faster than nullcontext
if torch.autograd.profiler._is_profiler_enabled:
with record_function(
f"## Call CompiledFxGraph {self._fx_graph_cache_key} ##"
):
return self.current_callable(inputs)
else:
return self.current_callable(inputs)
finally:
get_runtime_metrics_context().finish()
AutotuneCacheBundler.end_compile()
def post_compile(
self,
example_inputs: Sequence[InputType],
constants: CompiledFxGraphConstants,
graph_kwargs: _CompileFxKwargs,
) -> None:
"""
Run a set of post processing steps after loading from the cache. These involve:
- Setting the tracing context output strides
- Running cudagraphs if enabled
- Realigning inputs
This runs whether or not we have a cache hit, and always runs directly after we get a CompiledFxGraph.
The results of this function are *not* saved in the cache itself.
"""
if config.graph_partition and _unstable_customized_partition_wrapper.wrapper:
# Mechanically apply user-specified cudagraph wrappers without modification
assert self.recursively_apply_fns is not None
assert self.compiled_fn_runner is not None
num_partitions = len(self.compiled_fn_runner.partitions)
wrapper_metadatas = [
CUDAGraphWrapperMetadata(num_partitions, i)
for i in range(num_partitions)
]
customized_wrapper = _unstable_customized_partition_wrapper.wrapper
customized_wrappers_with_metadata = [
lambda f, m=metadata: customized_wrapper(f, m)
for metadata in wrapper_metadatas
]
self.recursively_apply_fns(customized_wrappers_with_metadata)
return
set_tracing_context_output_strides(example_inputs, self)
assert graph_kwargs["cudagraphs"] is not None
assert graph_kwargs["is_backward"] is not None
is_backward = graph_kwargs["is_backward"]
cudagraphs: BoxedBool = graph_kwargs["cudagraphs"]
if cudagraphs:
# It's possible that cudagraphs is enabled, but was disabled
# during a previous compilation we're loading from the cache.
# If so, we need to disable it on this new process too.
if self.disabled_cudagraphs_reason:
if "cuda" in self.device_types:
log_cudagraph_skip_and_bump_counter(
f"skipping cudagraphs due to {self.disabled_cudagraphs_reason}"
)
else:
counters["inductor"]["cudagraph_skips"] += 1
BoxedBool.disable(cudagraphs)
else:
if is_backward:
assert "boxed_forward_device_index" in graph_kwargs
boxed_forward_device_index = graph_kwargs[
"boxed_forward_device_index"
]
else:
# On the forward we don't know whether or not
# boxed_forward_device_index is set yet
boxed_forward_device_index = graph_kwargs.get(
"boxed_forward_device_index", None
)
if config.graph_partition:
# with graph_partition=True, we skip some cudagraph checks if it's supported
# with partition. So we have to use cudagraph_partition_post_compile.
cudagraph_partition_post_compile(
example_inputs,
self,
cudagraphs,
constants.unwrap(self),
boxed_forward_device_index,
)
else:
cudagraph_post_compile(
example_inputs,
self,
cudagraphs,
constants.unwrap(self),
boxed_forward_device_index,
)
inputs_to_check = self.inputs_to_check
# cudagraphs could have been disabled from the earlier conditions
# so we still need to realign inputs if that happens
maybe_realign_inputs(
cudagraphs,
self,
inputs_to_check,
self.mutated_input_idxs,
)
# Apply inductor_compiled_code HOP wrapper if configured
# This is done in post_compile to ensure it works with cached artifacts
if self._wrap_compiled_regions and self.current_callable is not None:
original_callable = self.current_callable
def wrapped_callable(inputs):
if is_in_torch_dispatch_mode():
return inductor_compiled_code(original_callable, inputs)
else:
return original_callable(inputs)
self.current_callable = wrapped_callable
def set_triton_bundle(self, triton_bundle: Any) -> None:
self._triton_bundle = triton_bundle
def prepare_for_serialization(self) -> None:
# We can't really serialize callables that may be C++/Triton/etc.,
# so we serialize their PyCodeCache disk cache location instead.
# TODO: This could be better if we're ever able to serialize compiled
# models to disk.
self.current_callable = None
self.recursively_apply_fns = None
self.compiled_fn_runner = None
def write_to_disk(self) -> str:
from torch._dynamo.utils import counters
from torch._inductor.codecache import get_path, write_atomic
# See _save_graph(); we don't store the callable in the cache entry so
# recreate it here from the PyCodeCache disk cache.
artifact_path = get_path(self.cache_key, "py")[2]
code = self.source_code
if not os.path.exists(artifact_path):
counters["inductor"]["fxgraph_lookup_write_file"] += 1
write_atomic(artifact_path, code, make_dirs=True)
return artifact_path
def after_deserialization(self, constants: CompiledFxGraphConstants) -> str:
from torch._dynamo.utils import dynamo_timed
from torch._inductor.codecache import PyCodeCache
artifact_path = self.write_to_disk()
try:
with dynamo_timed(
"PyCodeCache.load_by_key_path",
log_pt2_compile_event=True,
):
code_cache = PyCodeCache.load_by_key_path(
self.cache_key,
artifact_path,
self.cache_linemap,
constants.unwrap(self),
)
self.current_callable = code_cache.call
self.recursively_apply_fns = getattr(
code_cache, "recursively_apply_fns", None
)
self.compiled_fn_runner = getattr(code_cache, "runner", None)
except OSError:
log.error("Failed to load artifact: %s", artifact_path)
raise
return artifact_path
@dataclasses.dataclass
| CompiledFxGraph |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess25.py | {
"start": 831,
"end": 1283
} | class ____(ClassA[T]):
pass
# This should generate an error because x is generic.
ClassB[int].x = 1
# This should generate an error because x is generic.
ClassB[int].x
# This should generate an error because x is generic.
del ClassB[int].x
# This should generate an error because x is generic.
ClassB.x = 1
# This should generate an error because x is generic.
ClassB.x
# This should generate an error because x is generic.
del ClassB.x
| ClassB |
python | allegroai__clearml | clearml/utilities/gpu/pyrsmi.py | {
"start": 6742,
"end": 20902
} | class ____(c_int):
RSMI_IOLINK_TYPE_UNDEFINED = 0
RSMI_IOLINK_TYPE_HYPERTRANSPORT = 1
RSMI_IOLINK_TYPE_PCIEXPRESS = 2
RSMI_IOLINK_TYPE_AMBA = 3
RSMI_IOLINK_TYPE_MIPI = 4
RSMI_IOLINK_TYPE_QPI_1_1 = 5
RSMI_IOLINK_TYPE_RESERVED1 = 6
RSMI_IOLINK_TYPE_RESERVED2 = 7
RSMI_IOLINK_TYPE_RAPID_IO = 8
RSMI_IOLINK_TYPE_INFINIBAND = 9
RSMI_IOLINK_TYPE_RESERVED3 = 10
RSMI_IOLINK_TYPE_XGMI = 11
RSMI_IOLINK_TYPE_XGOP = 12
RSMI_IOLINK_TYPE_GZ = 13
RSMI_IOLINK_TYPE_ETHERNET_RDMA = 14
RSMI_IOLINK_TYPE_RDMA_OTHER = 15
RSMI_IOLINK_TYPE_OTHER = 16
RSMI_IOLINK_TYPE_NUMIOLINKTYPES = 17
RSMI_IOLINK_TYPE_SIZE = 0xFFFFFFFF
## Library loading
rocm_lib = None
lib_load_lock = threading.Lock()
_rocm_lib_refcount = 0
## Function access, to prevent lib_load_lock deadlock
_rocml_get_function_ptr_cache = dict()
def _rocml_get_function_ptr(name):
global rocm_lib
if name in _rocml_get_function_ptr_cache:
return _rocml_get_function_ptr_cache[name]
lib_load_lock.acquire()
try:
# ensure library was loaded
if rocm_lib == None:
raise ROCMLError_Uninitialized
try:
_rocml_get_function_ptr_cache[name] = getattr(rocm_lib, name)
return _rocml_get_function_ptr_cache[name]
except AttributeError:
raise ROCMLError_FunctionNotFound
finally:
# lock is always freed
lib_load_lock.release()
def _load_rocm_library():
"""Load ROCm library if not already loaded"""
global rocm_lib
if rocm_lib == None:
lib_load_lock.acquire()
try:
if rocm_lib == None:
try:
if sys.platform[:3] == 'win':
raise ROCMLError_NotSupported('Windows platform is not supported yet')
else:
# assume linux
path_librocm = _find_lib_rocm()
cdll.LoadLibrary(path_librocm)
rocm_lib = CDLL(path_librocm)
except OSError:
raise ROCMLError_LibraryNotFound('ROCm library not found')
if rocm_lib == None:
raise ROCMLError_LibraryNotFound('ROCm library not found')
finally:
lib_load_lock.release()
def _find_lib_rocm():
"""search for librocm and returns path
if search fails, returns empty string
"""
rocm_path = os.environ.get('ROCM_PATH', '/opt/rocm')
rocm_lib_path = join(rocm_path, 'lib/{}'.format(LIBROCM_NAME))
return rocm_lib_path if isfile(rocm_lib_path) else ''
def _driver_initialized():
""" Returns true if amdgpu is found in the list of initialized modules
"""
initialized = ''
try:
initialized = str(subprocess.check_output("cat /sys/module/amdgpu/initstate |grep live", shell=True))
except subprocess.CalledProcessError:
pass
return len(initialized) > 0
def smi_initialize():
"""Initialize ROCm binding of SMI"""
_load_rocm_library()
if _driver_initialized():
ret_init = rocm_lib.rsmi_init(0)
if ret_init != 0:
logging.error('ROCm SMI init returned value {}'.format(ret_init))
raise RuntimeError('ROCm SMI initialization failed')
else:
raise RuntimeError('ROCm driver initilization failed')
# update reference count
global _rocm_lib_refcount
lib_load_lock.acquire()
_rocm_lib_refcount += 1
lib_load_lock.release()
def rsmi_ret_ok(my_ret, log_error=False):
""" Returns true if RSMI call status is 0 (success)
@param device: DRM device identifier
@param my_ret: Return of RSMI call (rocm_smi_lib API)
@param log_error: Log the error message
@param metric: Parameter of GPU currently being analyzed
"""
if my_ret != rsmi_status_t.RSMI_STATUS_SUCCESS:
if log_error:
err_str = c_char_p()
rocm_lib.rsmi_status_string(my_ret, byref(err_str))
logging.error(err_str.value.decode())
return False
return True
def smi_shutdown():
"""leave the library loaded, but shutdown the interface"""
rsmi_ret_ok(rocm_lib.rsmi_shut_down())
# update reference count
global _rocm_lib_refcount
lib_load_lock.acquire()
_rocm_lib_refcount -= 1
lib_load_lock.release()
def smi_get_kernel_version():
"""returns ROCm kernerl driver version"""
ver_str = create_string_buffer(256)
ret = rocm_lib.rsmi_version_str_get(rsmi_sw_component_t.RSMI_SW_COMP_DRIVER, ver_str, 256)
return ver_str.value.decode() if rsmi_ret_ok(ret) else ''
def smi_get_device_id(dev):
"""returns device id of the device as 64bit integer"""
uid = c_uint64()
ret = rocm_lib.rsmi_dev_id_get(dev, byref(uid))
return uid.value if rsmi_ret_ok(ret) else -1
def smi_get_device_count():
"""returns a list of GPU devices """
num_device = c_uint32(0)
ret = rocm_lib.rsmi_num_monitor_devices(byref(num_device))
return num_device.value if rsmi_ret_ok(ret) else -1
def smi_get_device_name(dev):
"""returns the name of a GPU device"""
series = create_string_buffer(RSMI_MAX_BUFFER_LENGTH)
ret = rocm_lib.rsmi_dev_name_get(dev, series, RSMI_MAX_BUFFER_LENGTH)
return series.value.decode() if rsmi_ret_ok(ret) else ''
def smi_get_device_unique_id(dev):
"""returns unique id of the device as 64bit integer"""
uid = c_uint64()
ret = rocm_lib.rsmi_dev_unique_id_get(dev, byref(uid))
return uid.value if rsmi_ret_ok(ret) else -1
def smi_get_device_utilization(dev):
"""returns GPU device busy percent of device_id dev"""
busy_percent = c_uint32()
ret = rocm_lib.rsmi_dev_busy_percent_get(dev, byref(busy_percent))
return busy_percent.value if rsmi_ret_ok(ret) else -1
def smi_get_device_memory_used(dev, type='VRAM'):
"""returns used memory of device_id dev in bytes"""
type_idx = memory_type_l.index(type)
used = c_uint64()
ret = rocm_lib.rsmi_dev_memory_usage_get(dev, type_idx, byref(used))
return used.value if rsmi_ret_ok(ret) else -1
def smi_get_device_memory_total(dev, type='VRAM'):
"""returns total memory of device_id dev in bytes"""
type_idx = memory_type_l.index(type)
total = c_uint64()
ret = rocm_lib.rsmi_dev_memory_total_get(dev, type_idx, byref(total))
return total.value if rsmi_ret_ok(ret) else -1
def smi_get_device_memory_busy(dev):
"""returns percentage of time any device memory is being used"""
busy_percent = c_uint32()
ret = rocm_lib.rsmi_dev_memory_busy_percent_get(dev, byref(busy_percent))
return busy_percent.value if rsmi_ret_ok(ret) else -1
def smi_get_device_memory_reserved_pages(dev):
"""returns info about reserved memory pages"""
num_pages = c_uint32()
records = rsmi_retired_page_record_t()
ret = rocm_lib.rsmi_dev_memory_reserved_pages_get(dev, byref(num_pages), byref(records))
return (num_pages.value, records) if rsmi_ret_ok(ret) else -1
# PCIE functions
def smi_get_device_pcie_bandwidth(dev):
"""returns list of possible pcie bandwidths for the device in bytes/sec"""
bandwidth = rsmi_pcie_bandwidth_t()
ret = rocm_lib.rsmi_dev_pci_bandwidth_get(dev, byref(bandwidth))
return bandwidth if rsmi_ret_ok(ret) else -1
def smi_get_device_pci_id(dev):
"""returns unique PCI ID of the device in 64bit Hex with format:
BDFID = ((DOMAIN & 0xffffffff) << 32) | ((BUS & 0xff) << 8) |
((DEVICE & 0x1f) <<3 ) | (FUNCTION & 0x7)
"""
bdfid = c_uint64()
ret = rocm_lib.rsmi_dev_pci_id_get(dev, byref(bdfid))
return bdfid.value if rsmi_ret_ok(ret) else -1
def smi_get_device_topo_numa_affinity(dev):
"""returns the NUMA node associated with the device"""
numa_node = c_uint32()
ret = reocm_lib.rsmi_topo_numa_affinity_get(dev, byref(numa_node))
return numa_node.value if rsmi_ret_ok(ret) else -1
def smi_get_device_pcie_throughput(dev):
    """Return the measured PCIe throughput of device *dev* in bytes/sec.

    Combines both directions (sent + received) and scales by the maximum
    packet size reported by the library. Returns -1 on failure.
    """
    tx = c_uint64()
    rx = c_uint64()
    pkt_size = c_uint64()
    status = rocm_lib.rsmi_dev_pci_throughput_get(dev, byref(tx), byref(rx), byref(pkt_size))
    if not rsmi_ret_ok(status):
        return -1
    # presumably tx/rx are packet counts per second — scaling by pkt_size
    # yields bytes/sec; confirm against the RSMI documentation.
    return (rx.value + tx.value) * pkt_size.value
def smi_get_device_pci_replay_counter(dev):
    """Return the PCIe replay counter of device *dev*, or -1 on failure."""
    replay_count = c_uint64()
    status = rocm_lib.rsmi_dev_pci_replay_counter_get(dev, byref(replay_count))
    if not rsmi_ret_ok(status):
        return -1
    return replay_count.value
# Compute partition functions
def smi_get_device_compute_partition(dev):
    """Return the current compute partition of device *dev* as a string.

    Returns the empty string if the underlying RSMI call fails.
    """
    buf = create_string_buffer(RSMI_MAX_BUFFER_LENGTH)
    status = rocm_lib.rsmi_dev_compute_partition_get(dev, byref(buf), RSMI_MAX_BUFFER_LENGTH)
    if not rsmi_ret_ok(status):
        return ''
    return buf.value.decode()
def smi_set_device_compute_partition(dev, partition):
    """Set the compute partition of device *dev* to *partition*.

    Returns True on success, False otherwise.
    """
    status = rocm_lib.rsmi_dev_compute_partition_set(dev, partition)
    return rsmi_ret_ok(status)
def smi_reset_device_compute_partition(dev):
    """Revert the compute partition of device *dev* to its boot state.

    Returns True on success, False otherwise.
    """
    status = rocm_lib.rsmi_dev_compute_partition_reset(dev)
    return rsmi_ret_ok(status)
# Memory partition functions
def smi_get_device_memory_partition(dev):
    """Return the current memory partition of device *dev* as a string.

    Returns the empty string if the underlying RSMI call fails.
    """
    buf = create_string_buffer(RSMI_MAX_BUFFER_LENGTH)
    status = rocm_lib.rsmi_dev_memory_partition_get(dev, byref(buf), RSMI_MAX_BUFFER_LENGTH)
    if not rsmi_ret_ok(status):
        return ''
    return buf.value.decode()
def smi_set_device_memory_partition(dev, partition):
    """Set the memory partition of device *dev* to *partition*.

    Returns True on success, False otherwise.
    """
    status = rocm_lib.rsmi_dev_memory_partition_set(dev, partition)
    return rsmi_ret_ok(status)
def smi_reset_device_memory_partition(dev):
    """Revert the memory partition of device *dev* to its boot state.

    Returns True on success, False otherwise.
    """
    status = rocm_lib.rsmi_dev_memory_partition_reset(dev)
    return rsmi_ret_ok(status)
# Hardware Topology functions
def smi_get_device_topo_numa_node_number(dev):
    """Return the NUMA node number associated with device *dev*.

    Returns -1 if the underlying RSMI call fails.
    """
    node = c_uint32()
    status = rocm_lib.rsmi_topo_get_numa_node_number(dev, byref(node))
    if not rsmi_ret_ok(status):
        return -1
    return node.value
def smi_get_device_topo_link_weight(dev_src, dev_dst):
    """Return the topology weight of the link from *dev_src* to *dev_dst*.

    Returns -1 if the underlying RSMI call fails.
    """
    link_weight = c_uint64()
    status = rocm_lib.rsmi_topo_get_link_weight(dev_src, dev_dst, byref(link_weight))
    if not rsmi_ret_ok(status):
        return -1
    return link_weight.value
def smi_get_device_minmax_bandwidth(dev_src, dev_dst):
    """Return ``(min, max)`` I/O link bandwidth between *dev_src* and *dev_dst*.

    The RSMI API only supports this query when the two devices are connected
    via XGMI and are exactly one hop apart; that precondition is checked first.
    Returns -1 if the underlying RSMI call fails.
    """
    assert smi_get_device_link_type(dev_src, dev_dst)[0] == 1, 'Devices must be 1 hop away'
    lo = c_uint64()
    hi = c_uint64()
    status = rocm_lib.rsmi_minmax_bandwidth_get(dev_src, dev_dst, byref(lo), byref(hi))
    if not rsmi_ret_ok(status):
        return -1
    return (lo.value, hi.value)
def smi_get_device_link_type(dev_src, dev_dst):
    """Return ``(hops, link_type)`` for the link between *dev_src* and *dev_dst*.

    Returns -1 if the underlying RSMI call fails.
    """
    hop_count = c_uint64()
    kind = rsmi_io_link_type()
    status = rocm_lib.rsmi_topo_get_link_type(dev_src, dev_dst, byref(hop_count), byref(kind))
    if not rsmi_ret_ok(status):
        return -1
    return (hop_count.value, kind.value)
def smi_is_device_p2p_accessible(dev_src, dev_dst):
    """Return True if *dev_src* and *dev_dst* are peer-to-peer accessible.

    Returns -1 (not False) if the underlying RSMI call fails, so callers can
    distinguish "not accessible" from "query failed".
    """
    p2p_ok = c_bool()
    status = rocm_lib.rsmi_is_P2P_accessible(dev_src, dev_dst, byref(p2p_ok))
    if not rsmi_ret_ok(status):
        return -1
    return p2p_ok.value
def smi_get_device_compute_process():
    """Return the list of process ids running compute work on the system.

    Returns an empty list if either RSMI query fails.
    """
    # First call with a NULL buffer just fetches the process count.
    count = c_uint32()
    status = rocm_lib.rsmi_compute_process_info_get(None, byref(count))
    if not rsmi_ret_ok(status):
        return []
    # Over-allocate slightly in case processes start between the two calls.
    capacity = count.value + 10
    records = (rsmi_process_info_t * capacity)()
    status = rocm_lib.rsmi_compute_process_info_get(byref(records), byref(count))
    if not rsmi_ret_ok(status):
        return []
    return [records[i].process_id for i in range(count.value)]
def smi_get_device_average_power(dev):
    """Return the average power draw of device *dev*.

    RSMI reports microwatts; the value is scaled by 1e-6 (presumably to
    watts — confirm against the RSMI documentation). Returns -1 on failure.
    """
    microwatts = c_uint32()
    status = rocm_lib.rsmi_dev_power_ave_get(dev, 0, byref(microwatts))
    if not rsmi_ret_ok(status):
        return -1
    return microwatts.value * 1e-6
# XGMI functions
def smi_get_device_xgmi_error_status(dev):
    """Return the XGMI error status value for device *dev*, or -1 on failure."""
    err_status = rsmi_xgmi_status_t()
    status = rocm_lib.rsmi_dev_xgmi_error_status(dev, byref(err_status))
    if not rsmi_ret_ok(status):
        return -1
    return err_status.value
def smi_reset_device_xgmi_error(dev):
    """Reset the XGMI error status of device *dev*.

    Returns True on success, False otherwise.
    """
    status = rocm_lib.rsmi_dev_xgmi_error_reset(dev)
    return rsmi_ret_ok(status)
def smi_get_device_xgmi_hive_id(dev):
    """Return the XGMI hive ID of device *dev*, or -1 on failure."""
    hive = c_uint64()
    status = rocm_lib.rsmi_dev_xgmi_hive_id_get(dev, byref(hive))
    if not rsmi_ret_ok(status):
        return -1
    return hive.value
# constants for the UUID function
B1 = '%02x'
B2 = B1 * 2
B4 = B1 * 4
B6 = B1 * 6
nv_fmt = 'GPU-{b4}-{b2}-{b2}-{b2}-{b6}'.format(b2=B2, b4=B4, b6=B6)
# UUID function
def smi_get_device_uuid(dev, format='roc'):
    """Return the UUID of device *dev*.

    Fix: the docstring was previously placed after the first statement, making
    it a plain discarded string expression instead of the function docstring.

    format='roc': ROCm style, 'GPU-<stored uuid string>'.
    format='nv':  NVML style — each character of the stored UUID string is
                  rendered as a hex byte in 8-4-4-4-12 grouping (``nv_fmt``);
                  assumes the stored string is exactly 16 characters — TODO confirm.

    Raises ValueError for any other *format*, AssertionError if *dev* is out
    of range.
    """
    DEVICE_UUIDS = get_device_uuids()
    assert dev < len(DEVICE_UUIDS), 'Device index out of range'
    u_s = DEVICE_UUIDS[dev]
    if format == 'roc':
        # use hex strings
        return 'GPU-{}'.format(u_s)
    elif format == 'nv':
        # break down to ASCII strings according to the format
        b_a = bytearray()
        b_a.extend(map(ord, u_s))
        return nv_fmt % tuple(b_a)
    else:
        raise ValueError('Invalid format: \'{}\'; use \'roc\' or \'nv\''.format(format))
| rsmi_io_link_type |
python | vyperlang__vyper | vyper/ast/nodes.py | {
"start": 22648,
"end": 22759
} | class ____(Stmt):
__slots__ = ("value",)
@property
def is_terminus(self):
return True
| Return |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/operators/test_redshift_sql.py | {
"start": 1363,
"end": 10346
} | class ____:
@patch.dict("os.environ", AIRFLOW_CONN_AWS_DEFAULT=f"aws://?region_name={MOCK_REGION_NAME}")
@pytest.mark.parametrize(
("connection_host", "connection_extra", "expected_identity", "expected_schemaname"),
[
# test without a connection host but with a cluster_identifier in connection extra
(
None,
{"iam": True, "cluster_identifier": "cluster_identifier_from_extra"},
f"cluster_identifier_from_extra.{MOCK_REGION_NAME}",
"database.public",
),
# test with a connection host and without a cluster_identifier in connection extra
(
"cluster_identifier_from_host.id.my_region.redshift.amazonaws.com",
{"iam": True},
"cluster_identifier_from_host.my_region",
"database.public",
),
# test with both connection host and cluster_identifier in connection extra
(
"cluster_identifier_from_host.x.y",
{"iam": True, "cluster_identifier": "cluster_identifier_from_extra"},
f"cluster_identifier_from_extra.{MOCK_REGION_NAME}",
"database.public",
),
],
)
@patch("airflow.providers.amazon.aws.hooks.base_aws.AwsBaseHook.conn")
def test_execute_openlineage_events(
self,
mock_aws_hook_conn,
connection_host,
connection_extra,
expected_identity,
expected_schemaname,
# self, mock_aws_hook_conn, connection_host, connection_extra, expected_identity, is_below_2_10, expected_schemaname
):
DB_NAME = "database"
DB_SCHEMA_NAME = "public"
ANOTHER_DB_NAME = "another_db"
ANOTHER_DB_SCHEMA = "another_schema"
# Mock AWS Connection
mock_aws_hook_conn.get_cluster_credentials.return_value = {
"DbPassword": "aws_token",
"DbUser": "IAM:user",
}
class RedshiftSQLHook(OriginalRedshiftSQLHook):
get_conn = MagicMock(name="conn")
get_connection = MagicMock()
def get_first(self, *_):
self.log.error("CALLING FIRST")
return [f"{DB_NAME}.{DB_SCHEMA_NAME}"]
dbapi_hook = RedshiftSQLHook()
class RedshiftOperatorForTest(SQLExecuteQueryOperator):
def get_db_hook(self):
return dbapi_hook
sql = (
"INSERT INTO Test_table\n"
"SELECT t1.*, t2.additional_constant FROM ANOTHER_db.another_schema.popular_orders_day_of_week t1\n"
"JOIN little_table t2 ON t1.order_day_of_week = t2.order_day_of_week;\n"
"FORGOT TO COMMENT"
)
op = RedshiftOperatorForTest(task_id="redshift-operator", sql=sql)
rows = [
[
(
ANOTHER_DB_SCHEMA,
"popular_orders_day_of_week",
"order_day_of_week",
1,
"varchar",
ANOTHER_DB_NAME,
),
(
ANOTHER_DB_SCHEMA,
"popular_orders_day_of_week",
"order_placed_on",
2,
"timestamp",
ANOTHER_DB_NAME,
),
(
ANOTHER_DB_SCHEMA,
"popular_orders_day_of_week",
"orders_placed",
3,
"int4",
ANOTHER_DB_NAME,
),
(DB_SCHEMA_NAME, "little_table", "order_day_of_week", 1, "varchar", DB_NAME),
(DB_SCHEMA_NAME, "little_table", "additional_constant", 2, "varchar", DB_NAME),
],
[
(DB_SCHEMA_NAME, "test_table", "order_day_of_week", 1, "varchar", DB_NAME),
(DB_SCHEMA_NAME, "test_table", "order_placed_on", 2, "timestamp", DB_NAME),
(DB_SCHEMA_NAME, "test_table", "orders_placed", 3, "int4", DB_NAME),
(DB_SCHEMA_NAME, "test_table", "additional_constant", 4, "varchar", DB_NAME),
],
]
dbapi_hook.get_connection.return_value = Connection(
conn_id="redshift_default",
conn_type="redshift",
host=connection_host,
extra=connection_extra,
)
dbapi_hook.get_conn.return_value.cursor.return_value.fetchall.side_effect = rows
lineage = op.get_openlineage_facets_on_start()
assert dbapi_hook.get_conn.return_value.cursor.return_value.execute.mock_calls == [
call(
"SELECT SVV_REDSHIFT_COLUMNS.schema_name, "
"SVV_REDSHIFT_COLUMNS.table_name, "
"SVV_REDSHIFT_COLUMNS.column_name, "
"SVV_REDSHIFT_COLUMNS.ordinal_position, "
"SVV_REDSHIFT_COLUMNS.data_type, "
"SVV_REDSHIFT_COLUMNS.database_name \n"
"FROM SVV_REDSHIFT_COLUMNS \n"
f"WHERE SVV_REDSHIFT_COLUMNS.schema_name = '{expected_schemaname}' "
"AND SVV_REDSHIFT_COLUMNS.table_name IN ('little_table') "
"OR SVV_REDSHIFT_COLUMNS.database_name = 'another_db' "
"AND SVV_REDSHIFT_COLUMNS.schema_name = 'another_schema' AND "
"SVV_REDSHIFT_COLUMNS.table_name IN ('popular_orders_day_of_week')"
),
call(
"SELECT SVV_REDSHIFT_COLUMNS.schema_name, "
"SVV_REDSHIFT_COLUMNS.table_name, "
"SVV_REDSHIFT_COLUMNS.column_name, "
"SVV_REDSHIFT_COLUMNS.ordinal_position, "
"SVV_REDSHIFT_COLUMNS.data_type, "
"SVV_REDSHIFT_COLUMNS.database_name \n"
"FROM SVV_REDSHIFT_COLUMNS \n"
f"WHERE SVV_REDSHIFT_COLUMNS.schema_name = '{expected_schemaname}' "
"AND SVV_REDSHIFT_COLUMNS.table_name IN ('Test_table')"
),
]
expected_namespace = f"redshift://{expected_identity}:5439"
assert lineage.inputs == [
Dataset(
namespace=expected_namespace,
name=f"{ANOTHER_DB_NAME}.{ANOTHER_DB_SCHEMA}.popular_orders_day_of_week",
facets={
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="order_day_of_week", type="varchar"),
SchemaDatasetFacetFields(name="order_placed_on", type="timestamp"),
SchemaDatasetFacetFields(name="orders_placed", type="int4"),
]
)
},
),
Dataset(
namespace=expected_namespace,
name=f"{DB_NAME}.{DB_SCHEMA_NAME}.little_table",
facets={
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="order_day_of_week", type="varchar"),
SchemaDatasetFacetFields(name="additional_constant", type="varchar"),
]
)
},
),
]
assert lineage.outputs == [
Dataset(
namespace=expected_namespace,
name=f"{DB_NAME}.{DB_SCHEMA_NAME}.test_table",
facets={
"schema": SchemaDatasetFacet(
fields=[
SchemaDatasetFacetFields(name="order_day_of_week", type="varchar"),
SchemaDatasetFacetFields(name="order_placed_on", type="timestamp"),
SchemaDatasetFacetFields(name="orders_placed", type="int4"),
SchemaDatasetFacetFields(name="additional_constant", type="varchar"),
]
),
"columnLineage": ColumnLineageDatasetFacet(
fields={
"additional_constant": Fields(
inputFields=[
InputField(
namespace=expected_namespace,
name="database.public.little_table",
field="additional_constant",
)
],
transformationDescription="",
transformationType="",
)
}
),
},
)
]
assert lineage.job_facets == {"sql": SQLJobFacet(query=sql)}
assert lineage.run_facets["extractionError"].failedTasks == 1
| TestRedshiftSQLOpenLineage |
python | plotly__plotly.py | plotly/graph_objs/bar/_hoverlabel.py | {
"start": 233,
"end": 11213
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "bar"
_path_str = "bar.hoverlabel"
_valid_props = {
"align",
"alignsrc",
"bgcolor",
"bgcolorsrc",
"bordercolor",
"bordercolorsrc",
"font",
"namelength",
"namelengthsrc",
"showarrow",
}
@property
def align(self):
"""
Sets the horizontal alignment of the text content within hover
label box. Has an effect only if the hover label text spans
more two or more lines
The 'align' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'right', 'auto']
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["align"]
@align.setter
def align(self, val):
self["align"] = val
@property
def alignsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `align`.
The 'alignsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["alignsrc"]
@alignsrc.setter
def alignsrc(self, val):
self["alignsrc"] = val
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.bar.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Returns
-------
plotly.graph_objs.bar.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
@property
def showarrow(self):
"""
Sets whether or not to show the hover label arrow/triangle
pointing to the data point.
The 'showarrow' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showarrow"]
@showarrow.setter
def showarrow(self, val):
self["showarrow"] = val
@property
def _prop_descriptions(self):
return """\
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
"""
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
showarrow=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.bar.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
showarrow
Sets whether or not to show the hover label
arrow/triangle pointing to the data point.
Returns
-------
Hoverlabel
"""
super().__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.bar.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.bar.Hoverlabel`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("align", arg, align)
self._set_property("alignsrc", arg, alignsrc)
self._set_property("bgcolor", arg, bgcolor)
self._set_property("bgcolorsrc", arg, bgcolorsrc)
self._set_property("bordercolor", arg, bordercolor)
self._set_property("bordercolorsrc", arg, bordercolorsrc)
self._set_property("font", arg, font)
self._set_property("namelength", arg, namelength)
self._set_property("namelengthsrc", arg, namelengthsrc)
self._set_property("showarrow", arg, showarrow)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Hoverlabel |
python | ApeWorX__ape | src/ape/types/private_mempool.py | {
"start": 4268,
"end": 5980
} | class ____(BaseModel):
"""
A bundle of transactions to send to the matchmaker.
"""
version: ProtocolVersion
"""
The version of the MEV-share API to use.
"""
inclusion: Inclusion
"""
Data used by block builders to check if the bundle should be considered for inclusion.
"""
body: list[Union[BundleHashItem, BundleTxItem, BundleNestedItem]]
"""
The transactions to include in the bundle.
"""
validity: Optional[Validity] = None
"""
Requirements for the bundle to be included in the block.
"""
privacy: Optional[Privacy] = None
"""
Preferences on what data should be shared about the bundle and its transactions
"""
@classmethod
def build_for_block(
cls,
block: HexInt,
max_block: Optional[HexInt] = None,
version: Optional[ProtocolVersion] = None,
body: Optional[list[Union[BundleHashItem, BundleTxItem, BundleNestedItem]]] = None,
validity: Optional[Validity] = None,
privacy: Optional[Privacy] = None,
) -> "Bundle":
return cls(
version=version or ProtocolVersion.V0_1,
inclusion=Inclusion(block=block, max_block=max_block),
body=body or [],
validity=validity,
privacy=privacy,
)
def add_tx(self, tx: HexBytes, can_revert: bool) -> "Bundle":
self.body.append(BundleTxItem(tx=tx, can_revert=can_revert))
return self
def add_hash(self, hash: HexBytes32) -> "Bundle":
self.body.append(BundleHashItem(hash=hash))
return self
def add_bundle(self, bundle: "Bundle"):
self.body.append(BundleNestedItem(bundle=bundle))
| Bundle |
python | tensorflow__tensorflow | tensorflow/python/distribute/distributed_table_test.py | {
"start": 2019,
"end": 26859
} | class ____(test.TestCase, parameterized.TestCase):
@classmethod
def setUpClass(cls):
super(DistributedTableTest, cls).setUpClass()
cls.cluster = multi_worker_test_base.create_multi_process_cluster(
num_workers=2, num_ps=3, rpc_layer="grpc")
cls.cluster_resolver = cls.cluster.cluster_resolver
@classmethod
def tearDownClass(cls):
super(DistributedTableTest, cls).tearDownClass()
cls.cluster.stop()
def make_initializer(self, init_source, vals):
if init_source == "textfile":
file = os.path.join(self.get_temp_dir(), "text_file_initializer")
with open(file, "w") as f:
f.write("\n".join(str(v) for v in vals) + "\n")
return lookup_ops.TextFileInitializer(
filename=file,
key_dtype=dtypes.int64,
key_index=lookup_ops.TextFileIndex.LINE_NUMBER,
value_dtype=dtypes.int64,
value_index=lookup_ops.TextFileIndex.WHOLE_LINE)
elif init_source == "keyvaluetensor":
keys_tensor = constant_op.constant(
list(range(len(vals))), dtype=dtypes.int64)
vals_tensor = constant_op.constant(vals, dtype=dtypes.int64)
return lookup_ops.KeyValueTensorInitializer(keys_tensor, vals_tensor)
else:
raise ValueError("Unrecognized init_source: " + init_source)
def createStaticHashTable(self,
init_source=None,
vals=None,
default_value=None,
initializer=None):
if not initializer:
initializer = self.make_initializer(init_source, vals)
return lookup_ops.StaticHashTable(
initializer=initializer, default_value=default_value)
def makeDatasetFromTensorWithoutUsingResource(self, input_context, tensor):
"""Returns a dataset made from `tensor`. To be called in a dataset_fn."""
global_batch_size = 24
batch_size = input_context.get_per_replica_batch_size(global_batch_size)
dataset = dataset_ops.DatasetV2.from_tensors(tensor).repeat().batch(
batch_size, drop_remainder=True)
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
dataset = dataset.prefetch(2) # This prefetches 2 batches per device.
return dataset
@combinations.generate(source_combination)
def testCreateDistributedTableInScope(self, source):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator_lib.ClusterCoordinator(strategy=strategy)
with strategy.scope():
lookuptable = self.createStaticHashTable(
init_source=source, vals=[0, 1, 2], default_value=-2)
self.assertIsInstance(lookuptable, ps_values.DistributedTable)
self.assertEqual(self.evaluate(lookuptable.size()), 3)
# Lookup on the coordinator.
output = lookuptable.lookup(
constant_op.constant([0, 1, -1], dtype=dtypes.int64))
self.assertAllEqual([0, 1, -2], output)
self.assertEqual(lookuptable.size(), 3)
@combinations.generate(source_combination)
def testCopyDistributedTable(self, source):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator_lib.ClusterCoordinator(strategy=strategy)
with strategy.scope():
lookuptable = self.createStaticHashTable(
init_source=source, vals=[0, 1, 2], default_value=-2)
new_table = copy.copy(lookuptable)
# No new coordinator instance or distributed tables are created.
self.assertDictEqual(lookuptable.__dict__, new_table.__dict__)
@combinations.generate(source_combination)
def testCreateLookupInDatasetFnUnderScope(self, source):
# TODO(wxinyi): Warn the user of the inefficiency of this workflow (i.e.
# creating `StaticHashTable` inside a `@tf.function`-wrapped `dataset_fn` to
# be distributed with `distribute_datasets_from_function` and
# `create_per_worker_dataset`.
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator = coordinator_lib.ClusterCoordinator(strategy=strategy)
with strategy.scope():
def dataset_fn(input_context):
some_out_of_range_tensor = constant_op.constant(10, dtype=dtypes.int64)
lookuptable = self.createStaticHashTable(
init_source=source, vals=[0, 1, 2], default_value=-2)
self.assertNotIsInstance(lookuptable, ps_values.DistributedTable)
generation_tensor = lookuptable.lookup(some_out_of_range_tensor)
dataset = self.makeDatasetFromTensorWithoutUsingResource(
input_context, generation_tensor)
return dataset
@def_function.function
def per_worker_dataset_fn():
return strategy.distribute_datasets_from_function(dataset_fn)
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
per_worker_iterator = iter(per_worker_dataset)
@def_function.function
def worker_fn(iterator):
return math_ops.reduce_sum(next(iterator))
result = []
for _ in range(10):
result.append(
coordinator.schedule(worker_fn, args=(per_worker_iterator,)))
for r in result:
returned_input = r.fetch()
self.assertAllClose(-48, returned_input)
@combinations.generate(source_combination)
def testAccessingResourceHandleInDatasetFnWithoutMap(self, source):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator = coordinator_lib.ClusterCoordinator(strategy=strategy)
with strategy.scope():
lookuptable = self.createStaticHashTable(
init_source=source, vals=[0, 1, 2], default_value=-2)
def dataset_fn(input_context):
some_out_of_range_tensor = constant_op.constant(10, dtype=dtypes.int64)
self.assertIsInstance(lookuptable, ps_values.DistributedTable)
generation_tensor = lookuptable.lookup(some_out_of_range_tensor)
dataset = self.makeDatasetFromTensorWithoutUsingResource(
input_context, generation_tensor)
return dataset
@def_function.function
def per_worker_dataset_fn():
return strategy.distribute_datasets_from_function(dataset_fn)
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
per_worker_iterator = iter(per_worker_dataset)
@def_function.function
def worker_fn(iterator):
return math_ops.reduce_sum(next(iterator))
result = []
for _ in range(10):
result.append(
coordinator.schedule(worker_fn, args=(per_worker_iterator,)))
for r in result:
returned_input = r.fetch()
self.assertAllClose(-48, returned_input)
@combinations.generate(
combinations.combine(
source=["textfile", "keyvaluetensor"],
create_datasets_under_scope=[True, False],
using_dataset_instance_not_function=[True, False],
create_per_worker_dataset_takes_instance=[True, False]))
def testCreateTableUnderScopeCombo(self, source,
create_datasets_under_scope,
using_dataset_instance_not_function,
create_per_worker_dataset_takes_instance):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator = coordinator_lib.ClusterCoordinator(strategy=strategy)
with strategy.scope():
lookup_table = self.createStaticHashTable(
init_source=source, vals=[0, 1, 2], default_value=-2)
if using_dataset_instance_not_function:
def per_worker_dataset_fn():
dataset = dataset_ops.DatasetV2.from_tensors(
constant_op.constant([0, 1, 3], dtype=dtypes.int64))
dataset = dataset.repeat().batch(24, drop_remainder=True).prefetch(2)
dataset = dataset.map(lookup_table.lookup)
return strategy.experimental_distribute_dataset(dataset)
else:
def per_worker_dataset_fn():
def dataset_fn(input_context):
batch_size = input_context.get_per_replica_batch_size(24)
dataset = dataset_ops.DatasetV2.from_tensors(
constant_op.constant([0, 1, 3], dtype=dtypes.int64))
dataset = dataset.repeat().batch(batch_size, drop_remainder=True)
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
dataset = dataset.prefetch(2) # This prefetches 2 batches per device.
dataset = dataset.map(lookup_table.lookup)
return dataset
return strategy.distribute_datasets_from_function(dataset_fn)
if create_datasets_under_scope:
with strategy.scope():
if create_per_worker_dataset_takes_instance:
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn())
else:
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
per_worker_iterator = iter(per_worker_dataset)
else:
if create_per_worker_dataset_takes_instance:
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn())
else:
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
per_worker_iterator = iter(per_worker_dataset)
@def_function.function
def worker_fn(iterator):
return math_ops.reduce_sum(next(iterator))
result = []
for _ in range(10):
result.append(
coordinator.schedule(worker_fn, args=(per_worker_iterator,)))
for r in result:
returned_input = r.fetch()
self.assertAllClose(-24, returned_input)
@combinations.generate(
combinations.combine(
source=["textfile", "keyvaluetensor"],
create_datasets_under_scope=[True, False],
using_dataset_instance_not_function=[True, False],
create_per_worker_dataset_takes_instance=[True, False]))
def testCreateTableInDatasetCombo(self, source, create_datasets_under_scope,
using_dataset_instance_not_function,
create_per_worker_dataset_takes_instance):
if using_dataset_instance_not_function and (
not create_per_worker_dataset_takes_instance):
# This is the case that uses the `experimental_distribute_dataset` API to
# distribute dataset (instead of the `distribute_datasets_from_function`
# API), and passes `create_per_worker_dataset` a function that returns
# the distributed dataset (instead of passing it the distributed dataset
# directly).
# TODO(b/201775366): evaluate whether we need to handle this case
self.skipTest("Failed to serialize the input pipeline graph")
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator = coordinator_lib.ClusterCoordinator(strategy=strategy)
if using_dataset_instance_not_function:
def per_worker_dataset_fn():
# If this line is being called under strategy.scope(), it becomes a
# DistributedTable. Interestingly, after
# `experimental_distribute_dataset` serializes the dataset on chief and
# deserializes it on workers, `lookup_table` becomes a
# RestoredDistributedTable instead of a DistributedTable. And when it’s
# `resource_handle` is being accessed on the worker, it does not detect
# a DispatchContext, so it returns the restored resource handle,
# which is also the one on the local worker. The LookupTableFindV2 ops
# is on the local worker, too.
lookup_table = self.createStaticHashTable(
init_source=source, vals=[0, 1, 2], default_value=-2)
if create_datasets_under_scope:
self.assertIsInstance(lookup_table, ps_values.DistributedTable)
dataset = dataset_ops.DatasetV2.from_tensors(
constant_op.constant([0, 1, 3], dtype=dtypes.int64))
dataset = dataset.repeat().batch(24, drop_remainder=True).prefetch(2)
dataset = dataset.map(lookup_table.lookup)
return strategy.experimental_distribute_dataset(dataset)
else:
def per_worker_dataset_fn():
def dataset_fn(input_context):
# When we're wrapping the initialization of a StaticHashTable inside a
# `dataset_fn` to be distributed with
# `distribute_datasets_from_function`, no matter it's called under
# strategy.scope() or not, this call creates a StaticHashTable on
# chief instead of a DistributedTable on chief and workers.
# And correspondingly, LookupTableFindV2 ops is on chief and there are
# send-recv communication for the lookup.
lookup_table = self.createStaticHashTable(
init_source=source, vals=[0, 1, 2], default_value=-2)
if create_datasets_under_scope:
self.assertIsInstance(lookup_table, lookup_ops.StaticHashTable)
self.assertNotIsInstance(lookup_table, ps_values.DistributedTable)
batch_size = input_context.get_per_replica_batch_size(24)
dataset = dataset_ops.DatasetV2.from_tensors(
constant_op.constant([0, 1, 3], dtype=dtypes.int64))
dataset = dataset.repeat().batch(batch_size, drop_remainder=True)
dataset = dataset.shard(input_context.num_input_pipelines,
input_context.input_pipeline_id)
dataset = dataset.prefetch(2) # This prefetches 2 batches per device.
dataset = dataset.map(lookup_table.lookup)
return dataset
return strategy.distribute_datasets_from_function(dataset_fn)
if create_datasets_under_scope:
with strategy.scope():
if create_per_worker_dataset_takes_instance:
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn())
else:
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
per_worker_iterator = iter(per_worker_dataset)
else:
if create_per_worker_dataset_takes_instance:
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn())
else:
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
per_worker_iterator = iter(per_worker_dataset)
@def_function.function
def worker_fn(iterator):
return math_ops.reduce_sum(next(iterator))
result = []
for _ in range(10):
result.append(
coordinator.schedule(worker_fn, args=(per_worker_iterator,)))
for r in result:
returned_input = r.fetch()
self.assertAllClose(-24, returned_input)
@combinations.generate(source_combination)
def testAccessingTableInStepFunction(self, source):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator = coordinator_lib.ClusterCoordinator(strategy=strategy)
with strategy.scope():
lookup_table = self.createStaticHashTable(
init_source=source, vals=[0, 1, 2], default_value=-2)
dataset = (
dataset_ops.DatasetV2.from_tensors(
constant_op.constant([0, 1, 3], dtype=dtypes.int64)).repeat().batch(
24, drop_remainder=True).prefetch(2))
dataset = dataset.map(lookup_table.lookup)
distributed_dataset = strategy.experimental_distribute_dataset(dataset)
distributed_dataset = coordinator.create_per_worker_dataset(
distributed_dataset)
@def_function.function
def worker_fn(iterator):
def replica_fn(inputs):
return math_ops.reduce_sum(lookup_table.lookup(inputs))
all_results = strategy.run(replica_fn, args=(next(iterator),))
return all_results
steps_per_epoch = 10
distributed_iterator = iter(distributed_dataset)
result = []
for _ in range(steps_per_epoch):
result.append(
coordinator.schedule(worker_fn, args=(distributed_iterator,)))
coordinator.join()
for r in result:
returned_input = r.fetch()
self.assertAllClose(-24, returned_input)
@combinations.generate(source_combination)
def testAccessingResourceHandleInDatasetFnWithMapFnDefinedOutside(
self, source):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator = coordinator_lib.ClusterCoordinator(strategy=strategy)
with strategy.scope():
lookuptable = self.createStaticHashTable(
init_source=source, vals=[0, 1, 2], default_value=-2)
def map_fn(vals):
return lookuptable.lookup(vals)
def dataset_fn(input_context):
generation_tensor = constant_op.constant([0, 1, 3], dtype=dtypes.int64)
dataset = self.makeDatasetFromTensorWithoutUsingResource(
input_context, generation_tensor)
dataset = dataset.map(map_fn)
return dataset
@def_function.function
def per_worker_dataset_fn():
return strategy.distribute_datasets_from_function(dataset_fn)
per_worker_dataset = coordinator.create_per_worker_dataset(
per_worker_dataset_fn)
per_worker_iterator = iter(per_worker_dataset)
@def_function.function
def worker_fn(iterator):
return math_ops.reduce_sum(next(iterator))
result = []
for _ in range(10):
# batch_size == 24 and each input is [0, 1, -2]
result.append(
coordinator.schedule(worker_fn, args=(per_worker_iterator,)))
for r in result:
returned_input = r.fetch()
self.assertAllClose(-24, returned_input)
class Model(module.Module):
def __init__(self, init_source, filepath):
vals = [0, 1, 2]
if init_source == "textfile":
with open(filepath, "w") as f:
f.write("\n".join(str(v) for v in vals) + "\n")
self.initializer = lookup_ops.TextFileInitializer(
filepath, dtypes.int64, lookup_ops.TextFileIndex.LINE_NUMBER,
dtypes.int64, lookup_ops.TextFileIndex.WHOLE_LINE)
else:
keys_tensor = constant_op.constant(
list(range(len(vals))), dtype=dtypes.int64)
vals_tensor = constant_op.constant(vals, dtype=dtypes.int64)
self.initializer = lookup_ops.KeyValueTensorInitializer(
keys_tensor, vals_tensor)
self.table = lookup_ops.StaticHashTable(
self.initializer, default_value=-2)
@def_function.function(
input_signature=[tensor_spec.TensorSpec(None, dtypes.int64)])
def use_table(self, x):
return self.table.lookup(x)
def verifyWorkerLocalInstance(self, coordinator, model):
# assert capturing a worker-local resource on each worker
for worker in coordinator._cluster.workers:
with coordinator_context.with_dispatch_context(worker):
captures = model.use_table.get_concrete_function().captured_inputs
resource_capture = [t for t in captures if t.dtype == dtypes.resource]
self.assertNotEmpty(resource_capture)
for capture in resource_capture:
self.assertEqual(
capture.device,
device_util.canonicalize("/CPU:0", default=worker.device_name))
@combinations.generate(source_combination)
def testInModelAndCapture(self, source):
file_path = os.path.join(self.get_temp_dir(), "text_file_initializer")
model = self.Model(source, file_path)
func_captures = model.use_table.get_concrete_function(
).graph.external_captures
self.assertLen(func_captures, 2)
self.assertTrue(
any(model.table.resource_handle is t for t in func_captures))
deferred_captures = model.use_table.get_concrete_function(
).graph.deferred_external_captures
self.assertEmpty(deferred_captures)
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator = coordinator_lib.ClusterCoordinator(strategy)
with strategy.scope():
distributed_model = self.Model("value", file_path)
func_captures = distributed_model.use_table.get_concrete_function(
).graph.external_captures
# One less external_capture, since the table handle becomes a closure in the
# deferred_external_capture
self.assertLen(func_captures, 1)
self.assertFalse(
any(model.table.resource_handle is t for t in func_captures))
deferred_captures = distributed_model.use_table.get_concrete_function(
).graph.deferred_external_captures
self.assertNotEmpty(deferred_captures)
self.verifyWorkerLocalInstance(coordinator, distributed_model)
@combinations.generate(source_combination)
def testLookupInNestedTFWhileLoop(self, source):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator = coordinator_lib.ClusterCoordinator(strategy=strategy)
file_path = os.path.join(self.get_temp_dir(), "text_file_initializer")
with strategy.scope():
model = self.Model(source, file_path)
@def_function.function
def replica_fn(batch_data):
replica_result = array_ops.zeros(shape=(), dtype=dtypes.int64)
for _ in math_ops.range(10):
replica_result += math_ops.reduce_sum(model.use_table(batch_data))
return replica_result
@def_function.function
def step_fn(iterator):
step_result = array_ops.zeros(shape=(), dtype=dtypes.int64)
for _ in math_ops.range(10):
step_result += strategy.run(replica_fn, args=(next(iterator),))
return step_result
dataset = (
dataset_ops.DatasetV2.from_tensors(
constant_op.constant([0, 1, 3], dtype=dtypes.int64)).repeat().batch(
24, drop_remainder=True).prefetch(2))
distributed_dataset = coordinator.create_per_worker_dataset(
strategy.experimental_distribute_dataset(dataset))
results = []
for _ in range(10):
results.append(
coordinator.schedule(step_fn, args=(iter(distributed_dataset),)))
coordinator.join()
for r in results:
self.assertAllClose(-2400, r.fetch())
@combinations.generate(source_combination)
def testDistributeTableSaveAndServe(self, source):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
file_path = os.path.join(self.get_temp_dir(), "text_file_initializer")
with strategy.scope():
model = self.Model(source, file_path)
model_dir = self.get_temp_dir()
tf_save.save(model, model_dir)
loaded_without_strategy = tf_load.load(model_dir)
loaded_func_captures_without_strategy = (
loaded_without_strategy.use_table.get_concrete_function().graph
.external_captures)
loaded_func_deferred_captures_without_strategy = (
loaded_without_strategy.use_table.get_concrete_function().graph
.deferred_external_captures)
self.assertLen(loaded_func_captures_without_strategy, 2)
self.assertEmpty(loaded_func_deferred_captures_without_strategy)
self.assertAllEqual(
loaded_without_strategy.use_table(
constant_op.constant([0, 1, 3], dtype=dtypes.int64)), [0, 1, -2])
@combinations.generate(source_combination)
def testDistributeTableSaveAndLoadUnderStrategy(self, source):
strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
self.cluster_resolver)
coordinator = coordinator_lib.ClusterCoordinator(strategy)
file_path = os.path.join(self.get_temp_dir(), "text_file_initializer")
with strategy.scope():
model = self.Model(source, file_path)
model_dir = self.get_temp_dir()
tf_save.save(model, model_dir)
with strategy.scope():
loaded = tf_load.load(model_dir)
loaded_func_captures = (
loaded.use_table.get_concrete_function().graph.external_captures)
loaded_func_deferred_captures = (
loaded.use_table.get_concrete_function().graph
.deferred_external_captures)
# Compared with loading without strategy, there is one less
# external_capture, since the captured table handle has been swapped to a
# closure in the deferred_external_capture
self.assertLen(loaded_func_captures, 1)
self.assertNotEmpty(loaded_func_deferred_captures)
self.assertIsInstance(loaded.table, ps_values.DistributedTable)
self.assertLen([
t for t in loaded.use_table.get_concrete_function().captured_inputs
if t.dtype == dtypes.resource
], 1)
self.verifyWorkerLocalInstance(coordinator, loaded)
if __name__ == "__main__":
v2_compat.enable_v2_behavior()
multi_process_runner.test_main()
| DistributedTableTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/sensor.py | {
"start": 3711,
"end": 3806
} | class ____(DagsterError):
"""Error when running the SensorDaemon."""
| DagsterSensorDaemonError |
python | PrefectHQ__prefect | tests/blocks/test_notifications.py | {
"start": 2405,
"end": 5708
} | class ____:
"""
Checks for behavior expected from Apprise-based notification blocks.
"""
async def test_notify_async(self, block_class: Type[AppriseNotificationBlock]):
with patch("apprise.Apprise", autospec=True) as AppriseMock:
apprise_instance_mock = AppriseMock.return_value
apprise_instance_mock.async_notify = AsyncMock()
block = block_class(url="https://example.com/notification")
await block.notify("test")
AppriseMock.assert_called_once()
apprise_instance_mock.add.assert_called_once_with(
block.url.get_secret_value()
)
apprise_instance_mock.async_notify.assert_awaited_once_with(
body="test", title="", notify_type=PREFECT_NOTIFY_TYPE_DEFAULT
)
def test_notify_sync(self, block_class: Type[AppriseNotificationBlock]):
with patch("apprise.Apprise", autospec=True) as AppriseMock:
apprise_instance_mock = AppriseMock.return_value
apprise_instance_mock.async_notify = AsyncMock()
block = block_class(url="https://example.com/notification")
@flow
def test_flow():
block.notify("test")
test_flow()
AppriseMock.assert_called_once()
apprise_instance_mock.add.assert_called_once_with(
block.url.get_secret_value()
)
apprise_instance_mock.async_notify.assert_called_once_with(
body="test", title="", notify_type=PREFECT_NOTIFY_TYPE_DEFAULT
)
def test_is_picklable(self, block_class: Type[AppriseNotificationBlock]):
block = block_class(url="https://example.com/notification")
pickled = cloudpickle.dumps(block)
unpickled = cloudpickle.loads(pickled)
assert isinstance(unpickled, block_class)
@pytest.mark.parametrize("value, reason", RESTRICTED_URLS)
async def test_notification_can_prevent_restricted_urls(
self, block_class, value: str, reason: str
):
notification = block_class(url=value, allow_private_urls=False)
with pytest.raises(ValueError, match=f"is not a valid URL.*{reason}"):
await notification.notify(subject="example", body="example")
async def test_raises_on_url_validation_failure(self, block_class):
"""
When within a raise_on_failure block, we want URL validation errors to be
wrapped and captured as NotificationErrors for reporting back to users.
"""
block = block_class(url="https://127.0.0.1/foo/bar", allow_private_urls=False)
# outside of a raise_on_failure block, we get a ValueError directly
with pytest.raises(ValueError, match="not a valid URL") as captured:
await block.notify(subject="Test", body="Test")
# inside of a raise_on_failure block, we get a NotificationError
with block.raise_on_failure():
with pytest.raises(NotificationError) as captured:
await block.notify(subject="Test", body="Test")
assert captured.value.log == (
"'https://127.0.0.1/foo/bar' is not a valid URL. It resolves to the "
"private address 127.0.0.1."
)
| TestAppriseNotificationBlock |
python | pytorch__pytorch | torch/ao/nn/intrinsic/modules/fused.py | {
"start": 683,
"end": 1250
} | class ____(_FusedModule):
r"""This is a sequential container which calls the Conv1d and ReLU modules.
During quantization this will be replaced with the corresponding fused module."""
def __init__(self, conv, relu):
assert (
type_before_parametrizations(conv) == Conv1d
and type_before_parametrizations(relu) == ReLU
), (
f"Incorrect types for input modules{type_before_parametrizations(conv)}"
f"{type_before_parametrizations(relu)}"
)
super().__init__(conv, relu)
| ConvReLU1d |
python | ray-project__ray | python/ray/_private/thirdparty/pyamdsmi/pyamdsmi.py | {
"start": 4984,
"end": 5138
} | class ____(c_int):
RSMI_SW_COMP_FIRST = 0x0
RSMI_SW_COMP_DRIVER = RSMI_SW_COMP_FIRST
RSMI_SW_COMP_LAST = RSMI_SW_COMP_DRIVER
| rsmi_sw_component_t |
python | pandas-dev__pandas | pandas/tests/tools/test_to_datetime.py | {
"start": 110077,
"end": 120518
} | class ____:
@pytest.mark.parametrize(
"date_str, expected",
[
("2011-01-01", datetime(2011, 1, 1)),
("2Q2005", datetime(2005, 4, 1)),
("2Q05", datetime(2005, 4, 1)),
("2005Q1", datetime(2005, 1, 1)),
("05Q1", datetime(2005, 1, 1)),
("2011Q3", datetime(2011, 7, 1)),
("11Q3", datetime(2011, 7, 1)),
("3Q2011", datetime(2011, 7, 1)),
("3Q11", datetime(2011, 7, 1)),
# quarterly without space
("2000Q4", datetime(2000, 10, 1)),
("00Q4", datetime(2000, 10, 1)),
("4Q2000", datetime(2000, 10, 1)),
("4Q00", datetime(2000, 10, 1)),
("2000q4", datetime(2000, 10, 1)),
("2000-Q4", datetime(2000, 10, 1)),
("00-Q4", datetime(2000, 10, 1)),
("4Q-2000", datetime(2000, 10, 1)),
("4Q-00", datetime(2000, 10, 1)),
("00q4", datetime(2000, 10, 1)),
("2005", datetime(2005, 1, 1)),
("2005-11", datetime(2005, 11, 1)),
("2005 11", datetime(2005, 11, 1)),
("11-2005", datetime(2005, 11, 1)),
("11 2005", datetime(2005, 11, 1)),
("200511", datetime(2020, 5, 11)),
("20051109", datetime(2005, 11, 9)),
("20051109 10:15", datetime(2005, 11, 9, 10, 15)),
("20051109 08H", datetime(2005, 11, 9, 8, 0)),
("2005-11-09 10:15", datetime(2005, 11, 9, 10, 15)),
("2005-11-09 08H", datetime(2005, 11, 9, 8, 0)),
("2005/11/09 10:15", datetime(2005, 11, 9, 10, 15)),
("2005/11/09 10:15:32", datetime(2005, 11, 9, 10, 15, 32)),
("2005/11/09 10:15:32 AM", datetime(2005, 11, 9, 10, 15, 32)),
("2005/11/09 10:15:32 PM", datetime(2005, 11, 9, 22, 15, 32)),
("2005/11/09 08H", datetime(2005, 11, 9, 8, 0)),
("Thu Sep 25 10:36:28 2003", datetime(2003, 9, 25, 10, 36, 28)),
("Thu Sep 25 2003", datetime(2003, 9, 25)),
("Sep 25 2003", datetime(2003, 9, 25)),
("January 1 2014", datetime(2014, 1, 1)),
# GH#10537
("2014-06", datetime(2014, 6, 1)),
("06-2014", datetime(2014, 6, 1)),
("2014-6", datetime(2014, 6, 1)),
("6-2014", datetime(2014, 6, 1)),
("20010101 12", datetime(2001, 1, 1, 12)),
("20010101 1234", datetime(2001, 1, 1, 12, 34)),
("20010101 123456", datetime(2001, 1, 1, 12, 34, 56)),
],
)
def test_parsers(self, date_str, expected, cache):
# dateutil >= 2.5.0 defaults to yearfirst=True
# https://github.com/dateutil/dateutil/issues/217
yearfirst = True
result1, reso_attrname = parsing.parse_datetime_string_with_reso(
date_str, yearfirst=yearfirst
)
reso = {
"nanosecond": "ns",
}.get(reso_attrname, "us")
result2 = to_datetime(date_str, yearfirst=yearfirst)
result3 = to_datetime([date_str], yearfirst=yearfirst)
# result5 is used below
result4 = to_datetime(
np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache
)
result6 = DatetimeIndex([date_str], yearfirst=yearfirst)
# result7 is used below
result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst)
result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst)
for res in [result1, result2]:
assert res == expected
for res in [result3, result4, result6, result8, result9]:
exp = DatetimeIndex([Timestamp(expected)]).as_unit(reso)
tm.assert_index_equal(res, exp)
# these really need to have yearfirst, but we don't support
if not yearfirst:
result5 = Timestamp(date_str)
assert result5 == expected
result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst)
assert result7 == expected
def test_na_values_with_cache(
self, cache, unique_nulls_fixture, unique_nulls_fixture2
):
# GH22305
expected = Index([NaT, NaT], dtype="datetime64[s]")
result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2], cache=cache)
tm.assert_index_equal(result, expected)
def test_parsers_nat(self):
# Test that each of several string-accepting methods return pd.NaT
result1, _ = parsing.parse_datetime_string_with_reso("NaT")
result2 = to_datetime("NaT")
result3 = Timestamp("NaT")
result4 = DatetimeIndex(["NaT"])[0]
assert result1 is NaT
assert result2 is NaT
assert result3 is NaT
assert result4 is NaT
@pytest.mark.parametrize(
"date_str, dayfirst, yearfirst, expected",
[
("10-11-12", False, False, datetime(2012, 10, 11)),
("10-11-12", True, False, datetime(2012, 11, 10)),
("10-11-12", False, True, datetime(2010, 11, 12)),
("10-11-12", True, True, datetime(2010, 12, 11)),
("20/12/21", False, False, datetime(2021, 12, 20)),
("20/12/21", True, False, datetime(2021, 12, 20)),
("20/12/21", False, True, datetime(2020, 12, 21)),
("20/12/21", True, True, datetime(2020, 12, 21)),
# GH 58859
("20201012", True, False, datetime(2020, 12, 10)),
],
)
def test_parsers_dayfirst_yearfirst(
self, cache, date_str, dayfirst, yearfirst, expected
):
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00
# bug fix in 2.5.2
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00
# OK
# 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00
# revert of bug in 2.5.2
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must be in 1..12
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00
# OK
# 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00
# str : dayfirst, yearfirst, expected
# compare with dateutil result
dateutil_result = parse(date_str, dayfirst=dayfirst, yearfirst=yearfirst)
assert dateutil_result == expected
result1, _ = parsing.parse_datetime_string_with_reso(
date_str, dayfirst=dayfirst, yearfirst=yearfirst
)
# we don't support dayfirst/yearfirst here:
if not dayfirst and not yearfirst:
result2 = Timestamp(date_str)
assert result2 == expected
result3 = to_datetime(
date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache
)
result4 = DatetimeIndex([date_str], dayfirst=dayfirst, yearfirst=yearfirst)[0]
assert result1 == expected
assert result3 == expected
assert result4 == expected
@pytest.mark.parametrize(
"date_str, exp_def",
[["10:15", datetime(1, 1, 1, 10, 15)], ["9:05", datetime(1, 1, 1, 9, 5)]],
)
def test_parsers_timestring(self, date_str, exp_def):
# must be the same as dateutil result
exp_now = parse(date_str)
result1, _ = parsing.parse_datetime_string_with_reso(date_str)
result2 = to_datetime(date_str)
result3 = to_datetime([date_str])
result4 = Timestamp(date_str)
result5 = DatetimeIndex([date_str])[0]
# parse time string return time string based on default date
# others are not, and can't be changed because it is used in
# time series plot
assert result1 == exp_def
assert result2 == exp_now
assert result3 == exp_now
assert result4 == exp_now
assert result5 == exp_now
@pytest.mark.parametrize(
"dt_string, tz, dt_string_repr",
[
(
"2013-01-01 05:45+0545",
timezone(timedelta(minutes=345)),
"Timestamp('2013-01-01 05:45:00+0545', tz='UTC+05:45')",
),
(
"2013-01-01 05:30+0530",
timezone(timedelta(minutes=330)),
"Timestamp('2013-01-01 05:30:00+0530', tz='UTC+05:30')",
),
],
)
def test_parsers_timezone_minute_offsets_roundtrip(
self, cache, dt_string, tz, dt_string_repr
):
# GH11708
base = to_datetime("2013-01-01 00:00:00", cache=cache)
base = base.tz_localize("UTC").tz_convert(tz)
dt_time = to_datetime(dt_string, cache=cache)
assert base == dt_time
assert dt_string_repr == repr(dt_time)
@pytest.fixture(params=["D", "s", "ms", "us", "ns"])
def units(request):
"""Day and some time units.
* D
* s
* ms
* us
* ns
"""
return request.param
@pytest.fixture
def julian_dates():
return date_range("2014-1-1", periods=10).to_julian_date().values
| TestDatetimeParsingWrappers |
python | langchain-ai__langchain | libs/partners/anthropic/tests/unit_tests/test_output_parsers.py | {
"start": 654,
"end": 2986
} | class ____(BaseModel):
baz: Literal["a", "b"]
def test_tools_output_parser() -> None:
output_parser = ToolsOutputParser()
expected = [
{
"name": "_Foo1",
"args": {"bar": 0},
"id": "1",
"index": 1,
"type": "tool_call",
},
{
"name": "_Foo2",
"args": {"baz": "a"},
"id": "2",
"index": 3,
"type": "tool_call",
},
]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
def test_tools_output_parser_args_only() -> None:
output_parser = ToolsOutputParser(args_only=True)
expected = [
{"bar": 0},
{"baz": "a"},
]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
expected = []
actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc]
assert expected == actual
def test_tools_output_parser_first_tool_only() -> None:
output_parser = ToolsOutputParser(first_tool_only=True)
expected: Any = {
"name": "_Foo1",
"args": {"bar": 0},
"id": "1",
"index": 1,
"type": "tool_call",
}
actual = output_parser.parse_result(_RESULT)
assert expected == actual
expected = None
actual = output_parser.parse_result([ChatGeneration(message=AIMessage(""))]) # type: ignore[misc]
assert expected == actual
def test_tools_output_parser_pydantic() -> None:
output_parser = ToolsOutputParser(pydantic_schemas=[_Foo1, _Foo2])
expected = [_Foo1(bar=0), _Foo2(baz="a")]
actual = output_parser.parse_result(_RESULT)
assert expected == actual
def test_tools_output_parser_empty_content() -> None:
class ChartType(BaseModel):
chart_type: Literal["pie", "line", "bar"]
output_parser = ToolsOutputParser(
first_tool_only=True,
pydantic_schemas=[ChartType],
)
message = AIMessage(
"",
tool_calls=[
{
"name": "ChartType",
"args": {"chart_type": "pie"},
"id": "foo",
"type": "tool_call",
},
],
)
actual = output_parser.invoke(message)
expected = ChartType(chart_type="pie")
assert expected == actual
| _Foo2 |
python | mlflow__mlflow | examples/crewai/tracing.py | {
"start": 919,
"end": 1674
} | class ____:
def city_selection_agent(self):
return Agent(
role="City Selection Expert",
goal="Select the best city based on weather, season, and prices",
backstory="An expert in analyzing travel data to pick ideal destinations",
tools=[search_tool, web_rag_tool],
verbose=True,
)
def local_expert(self):
return Agent(
role="Local Expert at this city",
goal="Provide the BEST insights about the selected city",
backstory="""A knowledgeable local guide with extensive information
about the city, it's attractions and customs""",
tools=[search_tool, web_rag_tool],
verbose=True,
)
| TripAgents |
python | getsentry__sentry | src/social_auth/backends/__init__.py | {
"start": 19136,
"end": 28273
} | class ____(OAuthAuth):
"""Base class for OAuth2 providers.
OAuth2 draft details at:
http://tools.ietf.org/html/draft-ietf-oauth-v2-10
Attributes:
AUTHORIZATION_URL Authorization service url
ACCESS_TOKEN_URL Token URL
"""
AUTHORIZATION_URL: str
ACCESS_TOKEN_URL: str
REVOKE_TOKEN_URL: str | None = None
REVOKE_TOKEN_METHOD = "POST"
RESPONSE_TYPE = "code"
REDIRECT_STATE = True
STATE_PARAMETER = True
def state_token(self):
"""Generate csrf token to include as state parameter."""
return get_random_string(32)
def get_redirect_uri(self, state=None):
"""Build redirect_uri with redirect_state parameter."""
uri = self.redirect_uri
if self.REDIRECT_STATE and state:
uri = url_add_parameters(uri, {"redirect_state": state})
return uri
def auth_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
params = {"client_id": client_id, "redirect_uri": self.get_redirect_uri(state)}
if self.STATE_PARAMETER and state:
params["state"] = state
if self.RESPONSE_TYPE:
params["response_type"] = self.RESPONSE_TYPE
return params
def auth_url(self):
"""Return redirect url"""
if self.STATE_PARAMETER or self.REDIRECT_STATE:
# Store state in session for further request validation. The state
# value is passed as state parameter (as specified in OAuth2 spec),
# but also added to redirect_uri, that way we can still verify the
# request if the provider doesn't implement the state parameter.
# Reuse token if any.
name = self.AUTH_BACKEND.name + "_state"
state = self.request.session.get(name) or self.state_token()
self.request.session[self.AUTH_BACKEND.name + "_state"] = state
else:
state = None
params = self.auth_params(state)
params.update(self.get_scope_argument())
params.update(self.auth_extra_arguments())
query_string = self._get_safe_query_string()
return self.AUTHORIZATION_URL + "?" + urlencode(params) + query_string
def _get_safe_query_string(self):
"""
Returns filtered query string without client_id parameter.
"""
query_string = self.request.META.get("QUERY_STRING", "")
if not query_string:
return ""
parsed_params = parse_qsl(query_string, keep_blank_values=True)
safe_params = []
for param_name, param_value in parsed_params:
# Remove client_id parameter
if param_name.lower() not in UNSAFE_QUERY_PARAMS:
safe_params.append((param_name, param_value))
if safe_params:
return "&" + urlencode(safe_params)
else:
return ""
def validate_state(self):
"""Validate state value. Raises exception on error, returns state
value if valid."""
if not self.STATE_PARAMETER and not self.REDIRECT_STATE:
return None
state = self.request.session.get(self.AUTH_BACKEND.name + "_state")
if state:
request_state = self.data.get("state") or self.data.get("redirect_state")
if not request_state:
raise AuthMissingParameter(self, "state")
elif not state:
raise AuthStateMissing(self, "state")
elif not constant_time_compare(request_state, state):
raise AuthStateForbidden(self)
return state
def process_error(self, data):
error = data.get("error_description") or data.get("error")
if error:
raise AuthFailed(self, error)
def auth_complete_params(self, state=None):
client_id, client_secret = self.get_key_and_secret()
return {
"grant_type": "authorization_code", # request auth code
"code": self.data.get("code", ""), # server response code
"client_id": client_id,
"client_secret": client_secret,
"redirect_uri": self.get_redirect_uri(state),
}
@classmethod
def auth_headers(cls):
return {"Content-Type": "application/x-www-form-urlencoded", "Accept": "application/json"}
def auth_complete(self, *args, **kwargs):
"""Completes logging process, must return user instance"""
self.process_error(self.data)
params = self.auth_complete_params(self.validate_state())
request = Request(
self.ACCESS_TOKEN_URL,
data=urlencode(params).encode("utf-8"),
headers=self.auth_headers(),
)
try:
response = json.loads(dsa_urlopen(request).read())
except HTTPError as e:
logger.exception(
"plugins.auth.error",
extra={"class": type(self), "status_code": e.code, "response": e.read()[:128]},
)
raise AuthUnknownError(self)
except (ValueError, KeyError):
raise AuthUnknownError(self)
self.process_error(response)
return self.do_auth(response["access_token"], response=response, *args, **kwargs)
@classmethod
def refresh_token_params(cls, token, provider):
client_id, client_secret = cls.get_key_and_secret()
return {
"refresh_token": token,
"grant_type": "refresh_token",
"client_id": client_id,
"client_secret": client_secret,
}
@classmethod
def refresh_token(cls, token, provider):
params = cls.refresh_token_params(token, provider)
response = requests.post(cls.ACCESS_TOKEN_URL, data=params, headers=cls.auth_headers())
response.raise_for_status()
return response.json()
@classmethod
def revoke_token_params(cls, token, uid):
return None
@classmethod
def revoke_token_headers(cls, token, uid):
return None
@classmethod
def process_revoke_token_response(cls, response):
return response.code == 200
@classmethod
def revoke_token(cls, token, uid):
if not cls.REVOKE_TOKEN_URL:
return
url = cls.REVOKE_TOKEN_URL.format(token=token, uid=uid)
params = cls.revoke_token_params(token, uid) or {}
headers = cls.revoke_token_headers(token, uid) or {}
data: bytes | None = None
if cls.REVOKE_TOKEN_METHOD == "GET":
url = f"{url}?{urlencode(params)}"
else:
data = urlencode(params).encode()
request = Request(url, data=data, headers=headers, method=cls.REVOKE_TOKEN_METHOD)
response = dsa_urlopen(request)
return cls.process_revoke_token_response(response)
def do_auth(self, access_token, *args, **kwargs):
"""Finish the auth process once the access_token was retrieved"""
data = self.user_data(access_token, *args, **kwargs)
response = kwargs.get("response") or {}
response.update(data or {})
kwargs.update({"auth": self, "response": response, self.AUTH_BACKEND.name: True})
return authenticate(*args, **kwargs)
# Cache for discovered backends.
BACKENDSCACHE: dict[str, type[BaseAuth]] = {}
_import_lock = threading.Lock()
def get_backends(force_load=False):
"""
Entry point to the BACKENDS cache. If BACKENDSCACHE hasn't been
populated, each of the modules referenced in
AUTHENTICATION_BACKENDS is imported and checked for a BACKENDS
definition and if enabled, added to the cache.
Previously all backends were attempted to be loaded at
import time of this module, which meant that backends that subclass
bases found in this module would not have the chance to be loaded
by the time they were added to this module's BACKENDS dict. See:
https://github.com/omab/django-social-auth/issues/204
This new approach ensures that backends are allowed to subclass from
bases in this module and still be picked up.
A force_load boolean arg is also provided so that get_backend
below can retry a requested backend that may not yet be discovered.
"""
if not BACKENDSCACHE or force_load:
with _import_lock:
for auth_backend in setting("AUTHENTICATION_BACKENDS"):
mod, cls_name = auth_backend.rsplit(".", 1)
module = __import__(mod, {}, {}, ["BACKENDS", cls_name])
backend = getattr(module, cls_name)
if issubclass(backend, SocialAuthBackend):
name = backend.name
backends = getattr(module, "BACKENDS", {})
if name in backends and backends[name].enabled():
BACKENDSCACHE[name] = backends[name]
return BACKENDSCACHE
def get_backend(name, *args, **kwargs):
get_backends()
try:
# Cached backend which has previously been discovered.
backend_cls = BACKENDSCACHE[name]
except KeyError:
return None
else:
return backend_cls(*args, **kwargs)
| BaseOAuth2 |
python | scipy__scipy | scipy/signal/tests/test_filter_design.py | {
"start": 63934,
"end": 64395
} | class ____:
def test_basic(self, xp):
b = xp.asarray([1])
a = xp.asarray([1, 2, 2, 1])
b_bp, a_bp = lp2bp(b, a, 2*math.pi*4000, 2*math.pi*2000)
xp_assert_close(b_bp, xp.asarray([1.9844e12, 0, 0, 0]), rtol=1e-6)
xp_assert_close(
a_bp,
xp.asarray([1, 2.5133e4, 2.2108e9, 3.3735e13,
1.3965e18, 1.0028e22, 2.5202e26]), rtol=1e-4
)
@make_xp_test_case(lp2bs)
| TestLp2bp |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_numeric.py | {
"start": 7861,
"end": 9718
} | class ____:
def test_valid(self) -> None:
prop0 = bcpn.Positive(Int)
assert prop0.is_valid(1)
assert prop0.is_valid(2)
assert prop0.is_valid(100)
prop1 = bcpn.Positive(Float)
assert prop1.is_valid(1)
assert prop1.is_valid(1.1)
def test_invalid(self) -> None:
prop = bcpn.Positive(Int)
assert not prop.is_valid(None)
assert not prop.is_valid(True)
assert not prop.is_valid(False)
assert not prop.is_valid(-1)
assert not prop.is_valid(0)
assert not prop.is_valid(0.0)
assert not prop.is_valid(1.0)
assert not prop.is_valid(1.0+1.0j)
assert not prop.is_valid("")
assert not prop.is_valid(())
assert not prop.is_valid([])
assert not prop.is_valid({})
assert not prop.is_valid(_TestHasProps())
assert not prop.is_valid(_TestModel())
assert not prop.is_valid(-100)
assert not prop.is_valid(-0.001)
def test_has_ref(self) -> None:
prop = bcpn.Positive(Int)
assert not prop.has_ref
def test_str(self) -> None:
prop0 = bcpn.Positive(Int)
assert str(prop0) == "Positive(Int)"
prop1 = bcpn.Positive(Float)
assert str(prop1) == "Positive(Float)"
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
Test___all__ = verify_all(bcpn, ALL)
| Test_PositiveInt |
python | PyCQA__pycodestyle | tests/test_E101.py | {
"start": 139,
"end": 574
} | class ____(unittest.TestCase):
def test_E101(self):
errors = errors_from_src(
'if True:\n'
'\tprint(1) # tabs\n'
' print(2) # spaces\n'
)
if sys.version_info >= (3, 12): # pragma: >=3.12 cover
self.assertEqual(errors, ['W191:2:1', 'E901:3:28'])
else: # pragma: <3.12 cover
self.assertEqual(errors, ['W191:2:1', 'E101:3:1'])
| E101Test |
python | walkccc__LeetCode | solutions/1702. Maximum Binary String After Change/1702.py | {
"start": 0,
"end": 490
} | class ____:
def maximumBinaryString(self, binary: str) -> str:
# e.g. binary = '100110'
# Do Operation 2 -> '100011'
# Do Operation 1 -> '111011'
# So, the index of the only '0' is prefixOnes + zeros - 1.
zeros = binary.count('0')
prefixOnes = binary.find('0')
# Make the entire string as 1s.
ans = ['1'] * len(binary)
# Make the only '0' if necessary.
if prefixOnes != -1:
ans[prefixOnes + zeros - 1] = '0'
return ''.join(ans)
| Solution |
python | astropy__astropy | astropy/units/tests/test_quantity_non_ufuncs.py | {
"start": 26077,
"end": 28428
} | class ____(InvariantUnitTestSetup):
def test_max(self):
self.check(np.max)
def test_min(self):
self.check(np.min)
def test_amax(self):
self.check(np.amax)
def test_amin(self):
self.check(np.amin)
def test_sum(self):
self.check(np.sum)
def test_cumsum(self):
self.check(np.cumsum)
@pytest.mark.skipif(NUMPY_LT_2_1, reason="np.cumulative_sum is new in NumPy 2.1")
def test_cumulative_sum(self):
self.check(np.cumulative_sum, axis=1)
def test_any(self):
with pytest.raises(TypeError):
np.any(self.q)
def test_all(self):
with pytest.raises(TypeError):
np.all(self.q)
@pytest.mark.skipif(not NUMPY_LT_2_0, reason="np.sometrue is removed in NumPy 2.0")
@pytest.mark.filterwarnings("ignore:`sometrue` is deprecated as of NumPy 1.25.0")
def test_sometrue(self):
with pytest.raises(TypeError):
np.sometrue(self.q) # noqa: NPY003, NPY201
@pytest.mark.skipif(not NUMPY_LT_2_0, reason="np.alltrue is removed in NumPy 2.0")
@pytest.mark.filterwarnings("ignore:`alltrue` is deprecated as of NumPy 1.25.0")
def test_alltrue(self):
with pytest.raises(TypeError):
np.alltrue(self.q) # noqa: NPY003, NPY201
def test_prod(self):
with pytest.raises(u.UnitsError):
np.prod(self.q)
@pytest.mark.skipif(not NUMPY_LT_2_0, reason="np.product is removed in NumPy 2.0")
@pytest.mark.filterwarnings("ignore:`product` is deprecated as of NumPy 1.25.0")
def test_product(self):
with pytest.raises(u.UnitsError):
np.product(self.q) # noqa: NPY003, NPY201
def test_cumprod(self):
with pytest.raises(u.UnitsError):
np.cumprod(self.q)
@pytest.mark.skipif(
not NUMPY_LT_2_0, reason="np.cumproduct is removed in NumPy 2.0"
)
@pytest.mark.filterwarnings("ignore:`cumproduct` is deprecated as of NumPy 1.25.0")
def test_cumproduct(self):
with pytest.raises(u.UnitsError):
np.cumproduct(self.q) # noqa: NPY003, NPY201
@pytest.mark.skipif(NUMPY_LT_2_1, reason="np.cumulative_prod is new in NumPy 2.1")
def test_cumulative_prod(self):
with pytest.raises(u.UnitsError):
np.cumulative_prod(self.q, axis=1)
| TestUfuncReductions |
python | doocs__leetcode | solution/1500-1599/1578.Minimum Time to Make Rope Colorful/Solution.py | {
"start": 0,
"end": 469
} | class ____:
def minCost(self, colors: str, neededTime: List[int]) -> int:
ans = i = 0
n = len(colors)
while i < n:
j = i
s = mx = 0
while j < n and colors[j] == colors[i]:
s += neededTime[j]
if mx < neededTime[j]:
mx = neededTime[j]
j += 1
if j - i > 1:
ans += s - mx
i = j
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/util/tf_stack.py | {
"start": 3623,
"end": 3723
} | class ____(StackTraceFilter):
def get_filtered_filenames(self):
return EMPTY_SET
| SentinelFilter |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/containers.py | {
"start": 5064,
"end": 6316
} | class ____(Container):
"""
The common parts of `VSplit` and `HSplit`.
"""
def __init__(
self,
children: Sequence[AnyContainer],
window_too_small: Container | None = None,
padding: AnyDimension = Dimension.exact(0),
padding_char: str | None = None,
padding_style: str = "",
width: AnyDimension = None,
height: AnyDimension = None,
z_index: int | None = None,
modal: bool = False,
key_bindings: KeyBindingsBase | None = None,
style: str | Callable[[], str] = "",
) -> None:
self.children = [to_container(c) for c in children]
self.window_too_small = window_too_small or _window_too_small()
self.padding = padding
self.padding_char = padding_char
self.padding_style = padding_style
self.width = width
self.height = height
self.z_index = z_index
self.modal = modal
self.key_bindings = key_bindings
self.style = style
def is_modal(self) -> bool:
return self.modal
def get_key_bindings(self) -> KeyBindingsBase | None:
return self.key_bindings
def get_children(self) -> list[Container]:
return self.children
| _Split |
python | django__django | tests/filtered_relation/models.py | {
"start": 1445,
"end": 1979
} | class ____(models.Model):
NEW = "new"
STOPPED = "stopped"
STATES = (
(NEW, "New"),
(STOPPED, "Stopped"),
)
borrower = models.ForeignKey(
Borrower,
models.CASCADE,
related_name="reservations",
related_query_name="reservation",
)
book = models.ForeignKey(
Book,
models.CASCADE,
related_name="reservations",
related_query_name="reservation",
)
state = models.CharField(max_length=7, choices=STATES, default=NEW)
| Reservation |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 8764,
"end": 9100
} | class ____(MPTTModel):
name = models.CharField(max_length=50, null=True)
parent = TreeForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
class MPTTMeta:
order_insertion_by = ["name"]
def __str__(self):
return self.name
| NullableOrderedInsertionModel |
python | sqlalchemy__sqlalchemy | test/sql/test_operators.py | {
"start": 107863,
"end": 110084
} | class ____(fixtures.TestBase, testing.AssertsCompiledSQL):
__dialect__ = "default"
table1 = table("mytable", column("myid", String), column("name", String))
def test_match_1(self):
self.assert_compile(
self.table1.c.myid.match("somstr"),
"mytable.myid MATCH ?",
dialect=sqlite.dialect(),
)
def test_match_2(self):
self.assert_compile(
self.table1.c.myid.match("somstr"),
"MATCH (mytable.myid) AGAINST (%s IN BOOLEAN MODE)",
dialect=mysql.dialect(),
)
def test_match_3(self):
self.assert_compile(
self.table1.c.myid.match("somstr"),
"CONTAINS (mytable.myid, :myid_1)",
dialect=mssql.dialect(),
)
def test_match_4(self):
self.assert_compile(
self.table1.c.myid.match("somstr"),
"mytable.myid @@ plainto_tsquery(%(myid_1)s)",
dialect=postgresql.dialect(),
)
def test_match_5(self):
self.assert_compile(
self.table1.c.myid.match("somstr"),
"CONTAINS (mytable.myid, :myid_1)",
dialect=oracle.dialect(),
)
def test_match_is_now_matchtype(self):
expr = self.table1.c.myid.match("somstr")
assert expr.type._type_affinity is MatchType()._type_affinity
assert isinstance(expr.type, MatchType)
def test_boolean_inversion_postgresql(self):
self.assert_compile(
~self.table1.c.myid.match("somstr"),
"NOT mytable.myid @@ plainto_tsquery(%(myid_1)s)",
dialect=postgresql.dialect(),
)
def test_boolean_inversion_mysql(self):
# because mysql doesn't have native boolean
self.assert_compile(
~self.table1.c.myid.match("somstr"),
"NOT MATCH (mytable.myid) AGAINST (%s IN BOOLEAN MODE)",
dialect=mysql.dialect(),
)
def test_boolean_inversion_mssql(self):
# because mssql doesn't have native boolean
self.assert_compile(
~self.table1.c.myid.match("somstr"),
"NOT CONTAINS (mytable.myid, :myid_1)",
dialect=mssql.dialect(),
)
| MatchTest |
python | walkccc__LeetCode | solutions/3540. Minimum Time to Visit All Houses/3540.py | {
"start": 2,
"end": 501
} | class ____:
def minTotalTime(
self,
forward: list[int],
backward: list[int],
queries: list[int]
) -> int:
summ = sum(backward)
ans = 0
pos = 0
prefixF = [0] + list(itertools.accumulate(forward))
prefixB = list(itertools.accumulate(backward)) + [0]
for q in queries:
r = int(q < pos) * prefixF[-1] + prefixF[q] - prefixF[pos]
l = int(q > pos) * summ + prefixB[pos] - prefixB[q]
ans += min(l, r)
pos = q
return ans
| Solution |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_michigan_zip.py | {
"start": 747,
"end": 1751
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_michigan_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_michigan_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidMichiganZip |
python | streamlit__streamlit | lib/streamlit/testing/v1/element_tree.py | {
"start": 26228,
"end": 28116
} | class ____(Widget):
"""A representation of ``st.number_input``."""
_value: Number | None | InitialValue
proto: NumberInputProto = field(repr=False)
label: str
min: Number | None
max: Number | None
step: Number
help: str
form_id: str
def __init__(self, proto: NumberInputProto, root: ElementTree) -> None:
super().__init__(proto, root)
self._value = InitialValue()
self.type = "number_input"
self.min = proto.min if proto.has_min else None
self.max = proto.max if proto.has_max else None
def set_value(self, v: Number | None) -> NumberInput:
"""Set the value of the ``st.number_input`` widget."""
self._value = v
return self
@property
def _widget_state(self) -> WidgetState:
ws = WidgetState()
ws.id = self.id
if self.value is not None:
ws.double_value = self.value
return ws
@property
def value(self) -> Number | None:
"""Get the current value of the ``st.number_input`` widget."""
if not isinstance(self._value, InitialValue):
return self._value
state = self.root.session_state
assert state
# Awkward to do this with `cast`
return state[self.id] # type: ignore
def increment(self) -> NumberInput:
"""Increment the ``st.number_input`` widget as if the user clicked "+"."""
if self.value is None:
return self
v = min(self.value + self.step, self.max or float("inf"))
return self.set_value(v)
def decrement(self) -> NumberInput:
"""Decrement the ``st.number_input`` widget as if the user clicked "-"."""
if self.value is None:
return self
v = max(self.value - self.step, self.min or float("-inf"))
return self.set_value(v)
@dataclass(repr=False)
| NumberInput |
python | facebookresearch__faiss | tests/test_refine.py | {
"start": 279,
"end": 2161
} | class ____(unittest.TestCase):
def do_test(self, factory_string, metric_type=faiss.METRIC_L2):
ds = datasets.SyntheticDataset(32, 1000, 200, 20)
index = faiss.index_factory(32, factory_string, metric_type)
index.train(ds.get_train())
index.add(ds.get_database())
xq = ds.get_queries()
Dref, Iref = index.search(xq, 10)
for is_FlatCodesDistanceComputer in False, True:
if not is_FlatCodesDistanceComputer:
dc = index.get_distance_computer()
else:
if not isinstance(index, faiss.IndexFlatCodes):
continue
dc = index.get_FlatCodesDistanceComputer()
self.assertTrue(dc.this.own())
for q in range(ds.nq):
dc.set_query(faiss.swig_ptr(xq[q]))
for j in range(10):
ref_dis = Dref[q, j]
new_dis = dc(int(Iref[q, j]))
np.testing.assert_almost_equal(
new_dis, ref_dis, decimal=5)
def test_distance_computer_PQ(self):
self.do_test("PQ8np")
def test_distance_computer_SQ(self):
self.do_test("SQ8")
def test_distance_computer_SQ6(self):
self.do_test("SQ6")
def test_distance_computer_PQbit6(self):
self.do_test("PQ8x6np")
def test_distance_computer_PQbit6_ip(self):
self.do_test("PQ8x6np", faiss.METRIC_INNER_PRODUCT)
def test_distance_computer_VT(self):
self.do_test("PCA20,SQ8")
def test_distance_computer_AQ_decompress(self):
self.do_test("RQ3x4") # test decompress path
def test_distance_computer_AQ_LUT(self):
self.do_test("RQ3x4_Nqint8") # test LUT path
def test_distance_computer_AQ_LUT_IP(self):
self.do_test("RQ3x4_Nqint8", faiss.METRIC_INNER_PRODUCT)
| TestDistanceComputer |
python | anthropics__anthropic-sdk-python | tests/lib/streaming/test_beta_messages.py | {
"start": 8400,
"end": 15187
} | class ____:
@pytest.mark.asyncio
@pytest.mark.respx(base_url=base_url)
async def test_basic_response(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(
return_value=httpx.Response(200, content=to_async_iter(get_response("basic_response.txt")))
)
async with async_client.beta.messages.stream(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="claude-opus-4-0",
) as stream:
assert isinstance(cast(Any, stream), BetaAsyncMessageStream)
assert_basic_response([event async for event in stream], await stream.get_final_message())
@pytest.mark.asyncio
@pytest.mark.respx(base_url=base_url)
async def test_context_manager(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(
return_value=httpx.Response(200, content=to_async_iter(get_response("basic_response.txt")))
)
async with async_client.beta.messages.stream(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="claude-3-opus-latest",
) as stream:
assert not stream.response.is_closed
# response should be closed even if the body isn't read
assert stream.response.is_closed
@pytest.mark.asyncio
@pytest.mark.respx(base_url=base_url)
async def test_deprecated_model_warning_stream(self, respx_mock: MockRouter) -> None:
for deprecated_model in DEPRECATED_MODELS:
respx_mock.post("/v1/messages").mock(
return_value=httpx.Response(200, content=to_async_iter(get_response("basic_response.txt")))
)
with pytest.warns(DeprecationWarning, match=f"The model '{deprecated_model}' is deprecated"):
async with async_client.beta.messages.stream(
max_tokens=1024,
messages=[{"role": "user", "content": "Hello"}],
model=deprecated_model,
) as stream:
# Consume the stream to ensure the warning is triggered
await stream.get_final_message()
@pytest.mark.asyncio
@pytest.mark.respx(base_url=base_url)
async def test_tool_use(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(
return_value=httpx.Response(200, content=to_async_iter(get_response("tool_use_response.txt")))
)
async with async_client.beta.messages.stream(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="claude-sonnet-4-20250514",
) as stream:
assert isinstance(cast(Any, stream), BetaAsyncMessageStream)
assert_tool_use_response([event async for event in stream], await stream.get_final_message())
@pytest.mark.asyncio
@pytest.mark.respx(base_url=base_url)
async def test_incomplete_response(self, respx_mock: MockRouter) -> None:
respx_mock.post("/v1/messages").mock(
return_value=httpx.Response(
200, content=to_async_iter(get_response("incomplete_partial_json_response.txt"))
)
)
async with async_client.beta.messages.stream(
max_tokens=1024,
messages=[
{
"role": "user",
"content": "Say hello there!",
}
],
model="claude-sonnet-4-20250514",
) as stream:
assert isinstance(cast(Any, stream), BetaAsyncMessageStream)
assert_incomplete_partial_input_response(
[event async for event in stream], await stream.get_final_message()
)
@pytest.mark.parametrize("sync", [True, False], ids=["sync", "async"])
def test_stream_method_definition_in_sync(sync: bool) -> None:
client: Anthropic | AsyncAnthropic = sync_client if sync else async_client
sig = inspect.signature(client.beta.messages.stream)
generated_sig = inspect.signature(client.beta.messages.create)
errors: list[str] = []
for name, generated_param in generated_sig.parameters.items():
if name == "stream":
# intentionally excluded
continue
if name == "output_format":
continue
custom_param = sig.parameters.get(name)
if not custom_param:
errors.append(f"the `{name}` param is missing")
continue
if custom_param.annotation != generated_param.annotation:
errors.append(
f"types for the `{name}` param are do not match; generated={repr(generated_param.annotation)} custom={repr(custom_param.annotation)}"
)
continue
if errors:
raise AssertionError(
f"{len(errors)} errors encountered with the {'sync' if sync else 'async'} client `messages.stream()` method:\n\n"
+ "\n\n".join(errors)
)
# go through all the ContentBlock types to make sure the type alias is up to date
# with any type that has an input property of type object
def test_tracks_tool_input_type_alias_is_up_to_date() -> None:
# only run this on Pydantic v2
if PYDANTIC_V1:
pytest.skip("This test is only applicable for Pydantic v2")
from typing import get_args
from pydantic import BaseModel
from anthropic.types.beta.beta_content_block import BetaContentBlock
# Get the content block union type
content_block_union = get_args(BetaContentBlock)[0]
# Get all types from BetaContentBlock union
content_block_types = get_args(content_block_union)
# Types that should have an input property
types_with_input: Set[Any] = set()
# Check each type to see if it has an input property in its model_fields
for block_type in content_block_types:
if issubclass(block_type, BaseModel) and "input" in block_type.model_fields:
types_with_input.add(block_type)
# Get the types included in TRACKS_TOOL_INPUT
tracked_types = TRACKS_TOOL_INPUT
# Make sure all types with input are tracked
for block_type in types_with_input:
assert block_type in tracked_types, (
f"ContentBlock type {block_type.__name__} has an input property, "
f"but is not included in TRACKS_TOOL_INPUT. You probably need to update the TRACKS_TOOL_INPUT type alias."
)
| TestAsyncMessages |
python | pypa__hatch | tests/env/plugin/test_interface.py | {
"start": 68342,
"end": 69598
} | class ____:
def test_unset(self, isolation, isolated_data_dir, platform, global_application):
config = {"project": {"name": "my_app", "version": "0.0.1"}}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
assert environment.get_env_var_option("foo") == ""
def test_set(self, isolation, isolated_data_dir, platform, global_application):
config = {"project": {"name": "my_app", "version": "0.0.1"}}
project = Project(isolation, config=config)
environment = MockEnvironment(
isolation,
project.metadata,
"default",
project.config.envs["default"],
{},
isolated_data_dir,
isolated_data_dir,
platform,
0,
global_application,
)
with EnvVars({"HATCH_ENV_TYPE_MOCK_FOO": "bar"}):
assert environment.get_env_var_option("foo") == "bar"
| TestEnvVarOption |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/engine/reflection.py | {
"start": 5041,
"end": 5374
} | class ____(Flag):
"""Enumerator that indicates which scope to use when calling
the ``get_multi`` methods.
"""
DEFAULT = auto()
"Include default scope"
TEMPORARY = auto()
"Include only temp scope"
ANY = DEFAULT | TEMPORARY
"Include both default and temp scope"
@inspection._self_inspects
| ObjectScope |
python | huggingface__transformers | tests/quantization/bnb/test_4bit.py | {
"start": 30952,
"end": 31340
} | class ____(unittest.TestCase):
def test_set_load_in_8_bit(self):
quantization_config = BitsAndBytesConfig(load_in_4bit=True)
with self.assertRaisesRegex(ValueError, "load_in_4bit and load_in_8bit are both True"):
quantization_config.load_in_8bit = True
@require_bitsandbytes
@require_accelerate
@slow
@apply_skip_if_not_implemented
| Bnb4BitTestBasicConfigTest |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/eks.py | {
"start": 31556,
"end": 38092
} | class ____(AwsBaseOperator[EksHook]):
"""
Deletes the Amazon EKS Cluster control plane and all nodegroups attached to it.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EksDeleteClusterOperator`
:param cluster_name: The name of the Amazon EKS Cluster to delete. (templated)
:param force_delete_compute: If True, will delete any attached resources. (templated)
Defaults to False.
:param wait_for_completion: If True, waits for operator to complete. (default: False) (templated)
:param aws_conn_id: The Airflow connection used for AWS credentials. (templated)
If this is None or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then the default boto3 configuration would be used (and must be
maintained on each worker node).
:param region: Which AWS region the connection should use. (templated)
If this is None or empty then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check cluster state
:param waiter_max_attempts: The maximum number of attempts to check cluster state
:param deferrable: If True, the operator will wait asynchronously for the cluster to be deleted.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
"""
aws_hook_class = EksHook
template_fields: Sequence[str] = aws_template_fields(
"cluster_name", "force_delete_compute", "wait_for_completion"
)
def __init__(
self,
cluster_name: str,
force_delete_compute: bool = False,
region: str | None = None,
wait_for_completion: bool = False,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
waiter_delay: int = 30,
waiter_max_attempts: int = 40,
**kwargs,
) -> None:
self.cluster_name = cluster_name
self.force_delete_compute = force_delete_compute
if deferrable:
wait_for_completion = False
self.wait_for_completion = wait_for_completion
self.deferrable = deferrable
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
if region is not None:
warnings.warn(
message="Parameter `region` is deprecated. Use the parameter `region_name` instead",
category=AirflowProviderDeprecationWarning,
stacklevel=2,
)
kwargs["region_name"] = region
super().__init__(**kwargs)
def execute(self, context: Context):
if self.deferrable:
self.defer(
trigger=EksDeleteClusterTrigger(
cluster_name=self.cluster_name,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
force_delete_compute=self.force_delete_compute,
),
method_name="execute_complete",
timeout=timedelta(seconds=self.waiter_delay * self.waiter_max_attempts),
)
elif self.force_delete_compute:
self.delete_any_nodegroups()
self.delete_any_fargate_profiles()
self.hook.delete_cluster(name=self.cluster_name)
if self.wait_for_completion:
self.log.info("Waiting for cluster to delete. This will take some time.")
self.hook.conn.get_waiter("cluster_deleted").wait(name=self.cluster_name)
def delete_any_nodegroups(self) -> None:
"""
Delete all Amazon EKS managed node groups for a provided Amazon EKS Cluster.
Amazon EKS managed node groups can be deleted in parallel, so we can send all
delete commands in bulk and move on once the count of nodegroups is zero.
"""
nodegroups = self.hook.list_nodegroups(clusterName=self.cluster_name)
if nodegroups:
self.log.info(CAN_NOT_DELETE_MSG.format(compute=NODEGROUP_FULL_NAME, count=len(nodegroups)))
for group in nodegroups:
self.hook.delete_nodegroup(clusterName=self.cluster_name, nodegroupName=group)
# Note this is a custom waiter so we're using hook.get_waiter(), not hook.conn.get_waiter().
self.log.info("Waiting for all nodegroups to delete. This will take some time.")
self.hook.get_waiter("all_nodegroups_deleted").wait(clusterName=self.cluster_name)
self.log.info(SUCCESS_MSG.format(compute=NODEGROUP_FULL_NAME))
def delete_any_fargate_profiles(self) -> None:
"""
Delete all EKS Fargate profiles for a provided Amazon EKS Cluster.
EKS Fargate profiles must be deleted one at a time, so we must wait
for one to be deleted before sending the next delete command.
"""
fargate_profiles = self.hook.list_fargate_profiles(clusterName=self.cluster_name)
if fargate_profiles:
self.log.info(CAN_NOT_DELETE_MSG.format(compute=FARGATE_FULL_NAME, count=len(fargate_profiles)))
self.log.info("Waiting for Fargate profiles to delete. This will take some time.")
for profile in fargate_profiles:
# The API will return a (cluster) ResourceInUseException if you try
# to delete Fargate profiles in parallel the way we can with nodegroups,
# so each must be deleted sequentially
self.hook.delete_fargate_profile(clusterName=self.cluster_name, fargateProfileName=profile)
self.hook.conn.get_waiter("fargate_profile_deleted").wait(
clusterName=self.cluster_name, fargateProfileName=profile
)
self.log.info(SUCCESS_MSG.format(compute=FARGATE_FULL_NAME))
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] == "success":
self.log.info("Cluster deleted successfully.")
| EksDeleteClusterOperator |
python | pexpect__pexpect | tests/test_socket_fd.py | {
"start": 1183,
"end": 2279
} | class ____(test_socket.ExpectTestCase):
""" duplicate of test_socket, but using fdpexpect rather than socket_expect """
def spawn(self, socket, timeout=30, use_poll=False):
return fdpexpect.fdspawn(socket.fileno(), timeout=timeout, use_poll=use_poll)
def test_not_int(self):
with self.assertRaises(pexpect.ExceptionPexpect):
session = fdpexpect.fdspawn('bogus', timeout=10)
def test_not_file_descriptor(self):
with self.assertRaises(pexpect.ExceptionPexpect):
session = fdpexpect.fdspawn(-1, timeout=10)
def test_fileobj(self):
sock = socket.socket(self.af, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
session = fdpexpect.fdspawn(sock, timeout=10) # Should get the fileno from the socket
session.expect(self.prompt1)
session.close()
assert not session.isalive()
session.close() # Smoketest - should be able to call this again
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(ExpectTestCase)
| ExpectTestCase |
python | getsentry__sentry | src/sentry/analytics/events/user_signup.py | {
"start": 255,
"end": 398
} | class ____(UserSignUpEvent):
pass
analytics.register(UserSignUpEvent)
analytics.register(RelocationUserSignUpEvent)
| RelocationUserSignUpEvent |
python | sphinx-doc__sphinx | sphinx/domains/c/_ast.py | {
"start": 58866,
"end": 59673
} | class ____(ASTBase):
def __init__(self, name: ASTNestedName) -> None:
self.name = name
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTEnum):
return NotImplemented
return self.name == other.name
def __hash__(self) -> int:
return hash(self.name)
def get_id(self, version: int, objectType: str, symbol: Symbol) -> str:
return symbol.get_full_nested_name().get_id(version)
def _stringify(self, transform: StringifyTransform) -> str:
return transform(self.name)
def describe_signature(
self, signode: TextElement, mode: str, env: BuildEnvironment, symbol: Symbol
) -> None:
verify_description_mode(mode)
self.name.describe_signature(signode, mode, env, symbol=symbol)
| ASTEnum |
python | getsentry__sentry | src/sentry/integrations/msteams/unlink_identity.py | {
"start": 899,
"end": 1814
} | class ____(MsTeamsIdentityLinkageView, UnlinkIdentityView):
def get_success_template_and_context(
self, params: Mapping[str, Any], integration: Integration | None
) -> tuple[str, dict[str, Any]]:
return "sentry/integrations/msteams/unlinked.html", {}
@property
def confirmation_template(self) -> str:
return "sentry/integrations/msteams/unlink-identity.html"
@property
def no_identity_template(self) -> str | None:
return "sentry/integrations/msteams/no-identity.html"
@property
def filter_by_user_id(self) -> bool:
return True
def notify_on_success(
self, external_id: str, params: Mapping[str, Any], integration: Integration | None
) -> None:
client = get_preinstall_client(params["service_url"])
card = build_unlinked_card()
client.send_card(params["conversation_id"], card)
| MsTeamsUnlinkIdentityView |
python | pytorch__pytorch | test/jit/fixtures_srcs/fixtures_src.py | {
"start": 1310,
"end": 1464
} | class ____(torch.nn.Module):
def forward(self, x):
out = torch.zeros_like(x)
return torch._C._nn.gelu(x, out=out)
| TestVersionedGeluOutV9 |
python | bokeh__bokeh | src/bokeh/sphinxext/_internal/bokeh_palette_group.py | {
"start": 2172,
"end": 3052
} | class ____(nodes.General, nodes.Element):
@staticmethod
def visit_html(visitor, node):
visitor.body.append('<div class="container-fluid"><div class="row">')
group = getattr(bp, node["group"], None)
if not isinstance(group, dict):
group_name = node["group"]
raise SphinxError(f"invalid palette group name {group_name}")
names = sorted(group)
for name in names:
palettes = group[name]
# arbitrary cutoff here, idea is to not show large (e.g. 256 length) palettes
numbers = [x for x in sorted(palettes) if x < 30]
html = PALETTE_GROUP_DETAIL.render(name=name, numbers=numbers, palettes=palettes)
visitor.body.append(html)
visitor.body.append("</div></div>")
raise nodes.SkipNode
html = visit_html.__func__, None
| bokeh_palette_group |
python | wandb__wandb | wandb/sdk/artifacts/_generated/upsert_registry.py | {
"start": 367,
"end": 555
} | class ____(GQLResult):
inserted: Optional[bool]
project: Optional[RegistryFragment]
UpsertRegistry.model_rebuild()
UpsertRegistryUpsertModel.model_rebuild()
| UpsertRegistryUpsertModel |
python | ray-project__ray | python/ray/tests/test_ray_event_export_task_events.py | {
"start": 52844,
"end": 67701
} | class ____:
def __init__(self):
time.sleep(1)
raise Exception("actor creation error")
def task(self):
pass
actor = Actor.remote()
wait_for_condition(lambda: ray.util.state.list_actors(filters=[("class_name", "=", "Actor")])[0]["state"] == "DEAD")
ray.get(actor.task.options().remote())
"""
def validate_events(events: json):
(
driver_script_job_id,
driver_task_id,
) = get_job_id_and_driver_script_task_id_from_events(
events, preserve_proto_field_name
)
driver_task_definition_received = False
actor_creation_task_definition_received = False
actor_task_definition_received = False
for event in events:
if preserve_proto_field_name:
if event["event_type"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(event, preserve_proto_field_name)
if event["task_definition_event"]["task_type"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
else:
assert (
event["task_definition_event"]["task_type"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["task_definition_event"][
"task_id"
]
assert actor_creation_task_id is not None
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_name"]
== "__init__"
)
assert (
event["task_definition_event"]["task_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["task_definition_event"]["task_name"]
== "Actor.__init__"
)
assert event["task_definition_event"][
"required_resources"
] == {"CPU": 1.0}
assert (
event["task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert (
event["task_definition_event"]["job_id"]
== driver_script_job_id
)
assert event["task_definition_event"]["task_attempt"] == 0
assert (
event["task_definition_event"]["language"] == "PYTHON"
)
elif event["event_type"] == "ACTOR_TASK_DEFINITION_EVENT":
actor_task_definition_received = True
actor_task_id = event["actor_task_definition_event"]["task_id"]
assert actor_task_id is not None
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["module_name"]
== "__main__"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["class_name"]
== "Actor"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["function_name"]
== "task"
)
assert (
event["actor_task_definition_event"]["actor_func"][
"python_function_descriptor"
]["function_hash"]
is not None
)
assert (
event["actor_task_definition_event"]["actor_task_name"]
== "Actor.task"
)
assert (
event["actor_task_definition_event"]["required_resources"]
== {}
)
assert (
event["actor_task_definition_event"]["job_id"]
== driver_script_job_id
)
assert (
event["actor_task_definition_event"]["parent_task_id"]
== driver_task_id
)
assert event["actor_task_definition_event"]["task_attempt"] == 0
assert (
event["actor_task_definition_event"]["language"] == "PYTHON"
)
else:
assert event["event_type"] == "TASK_LIFECYCLE_EVENT"
else:
if event["eventType"] == "TASK_DEFINITION_EVENT":
check_task_event_base_fields(event, preserve_proto_field_name)
if event["taskDefinitionEvent"]["taskType"] == "DRIVER_TASK":
driver_task_definition_received = True
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
else:
assert (
event["taskDefinitionEvent"]["taskType"]
== "ACTOR_CREATION_TASK"
)
actor_creation_task_definition_received = True
actor_creation_task_id = event["taskDefinitionEvent"][
"taskId"
]
assert actor_creation_task_id is not None
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "__init__"
)
assert (
event["taskDefinitionEvent"]["taskFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["taskDefinitionEvent"]["taskName"]
== "Actor.__init__"
)
assert event["taskDefinitionEvent"][
"requiredResources"
] == {"CPU": 1.0}
assert (
event["taskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert (
event["taskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert event["taskDefinitionEvent"]["taskAttempt"] == 0
assert event["taskDefinitionEvent"]["language"] == "PYTHON"
elif event["eventType"] == "ACTOR_TASK_DEFINITION_EVENT":
actor_task_definition_received = True
actor_task_id = event["actorTaskDefinitionEvent"]["taskId"]
assert actor_task_id is not None
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["moduleName"]
== "__main__"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["className"]
== "Actor"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["functionName"]
== "task"
)
assert (
event["actorTaskDefinitionEvent"]["actorFunc"][
"pythonFunctionDescriptor"
]["functionHash"]
is not None
)
assert (
event["actorTaskDefinitionEvent"]["actorTaskName"]
== "Actor.task"
)
assert (
event["actorTaskDefinitionEvent"]["requiredResources"] == {}
)
assert (
event["actorTaskDefinitionEvent"]["jobId"]
== driver_script_job_id
)
assert (
event["actorTaskDefinitionEvent"]["parentTaskId"]
== driver_task_id
)
assert event["actorTaskDefinitionEvent"]["taskAttempt"] == 0
assert event["actorTaskDefinitionEvent"]["language"] == "PYTHON"
else:
assert event["eventType"] == "TASK_LIFECYCLE_EVENT"
assert driver_task_definition_received
assert actor_creation_task_definition_received
assert actor_task_definition_received
expected_driver_task_states = {"RUNNING", "FINISHED"}
expected_actor_creation_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"RUNNING",
"FAILED",
}
expected_actor_task_states = {
"PENDING_ARGS_AVAIL",
"PENDING_NODE_ASSIGNMENT",
"FAILED",
}
expected_task_id_states_dict = {
(driver_task_id, 0): expected_driver_task_states,
(actor_creation_task_id, 0): expected_actor_creation_task_states,
(actor_task_id, 0): expected_actor_task_states,
}
if preserve_proto_field_name:
expected_task_id_error_info_dict = {
(actor_creation_task_id, 0): {
"error_type": "TASK_EXECUTION_EXCEPTION",
"error_message": "CreationTaskError: Exception raised from an actor init method.",
},
(actor_task_id, 0): {
"error_type": "ACTOR_DIED",
"error_message": "ray.exceptions.ActorDiedError: The actor died because of an error raised in its creation task",
},
}
else:
expected_task_id_error_info_dict = {
(actor_creation_task_id, 0): {
"errorType": "TASK_EXECUTION_EXCEPTION",
"errorMessage": "CreationTaskError: Exception raised from an actor init method.",
},
(actor_task_id, 0): {
"errorType": "ACTOR_DIED",
"errorMessage": "ray.exceptions.ActorDiedError: The actor died because of an error raised in its creation task",
},
}
check_task_lifecycle_event_states_and_error_info(
events,
expected_task_id_states_dict,
expected_task_id_error_info_dict,
preserve_proto_field_name,
)
run_driver_script_and_wait_for_events(
script, httpserver, ray_start_cluster_head_with_env_vars, validate_events
)
@_cluster_with_aggregator_target
def test_actor_creation_canceled(
self,
ray_start_cluster_head_with_env_vars,
httpserver,
preserve_proto_field_name,
):
script = """
import ray
ray.init()
@ray.remote(num_cpus=2)
| Actor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/self9.py | {
"start": 172,
"end": 207
} | class ____:
a: list[Self]
| ParentA |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.