language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1264631,
"end": 1266115
} | class ____(Stream):
"""
DerivedStream schema wrapper.
Parameters
----------
stream : dict, :class:`Stream`, :class:`EventStream`, :class:`MergedStream`, :class:`DerivedStream`
between : Sequence[dict, :class:`Stream`, :class:`EventStream`, :class:`MergedStream`, :class:`DerivedStream`]
consume : bool
debounce : float
filter : str, :class:`Expr`, Sequence[str, :class:`Expr`]
markname : str
marktype : :class:`MarkType`, Literal['arc', 'area', 'image', 'group', 'line', 'path', 'rect', 'rule', 'shape', 'symbol', 'text', 'trail']
throttle : float
"""
_schema = {"$ref": "#/definitions/DerivedStream"}
def __init__(
self,
stream: Optional[SchemaBase | Map] = Undefined,
between: Optional[Sequence[SchemaBase | Map]] = Undefined,
consume: Optional[bool] = Undefined,
debounce: Optional[float] = Undefined,
filter: Optional[str | SchemaBase | Sequence[str | SchemaBase]] = Undefined,
markname: Optional[str] = Undefined,
marktype: Optional[SchemaBase | MarkType_T] = Undefined,
throttle: Optional[float] = Undefined,
**kwds,
):
super().__init__(
stream=stream,
between=between,
consume=consume,
debounce=debounce,
filter=filter,
markname=markname,
marktype=marktype,
throttle=throttle,
**kwds,
)
| DerivedStream |
python | tensorflow__tensorflow | tensorflow/python/feature_column/feature_column_test.py | {
"start": 110431,
"end": 113823
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_retrieving_input(self):
features = {'a': [0.]}
input_layer = InputLayer(fc._numeric_column('a'))
inputs = self.evaluate(input_layer(features))
self.assertAllClose([[0.]], inputs)
def test_reuses_variables(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc._categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
input_layer = InputLayer([embedding_column])
features = {'a': sparse_input}
inputs = input_layer(features)
variables = input_layer.variables
# Sanity check: test that the inputs are correct.
self.assertAllEqual([[1, 0], [0, 1], [1, 1]], inputs)
# Check that only one variable was created.
self.assertEqual(1, len(variables))
# Check that invoking input_layer on the same features does not create
# additional variables
_ = input_layer(features)
self.assertEqual(1, len(variables))
self.assertIs(variables[0], input_layer.variables[0])
def test_feature_column_input_layer_gradient(self):
with context.eager_mode():
sparse_input = sparse_tensor.SparseTensor(
indices=((0, 0), (1, 0), (2, 0)),
values=(0, 1, 2),
dense_shape=(3, 3))
# Create feature columns (categorical and embedding).
categorical_column = fc._categorical_column_with_identity(
key='a', num_buckets=3)
embedding_dimension = 2
def _embedding_column_initializer(shape, dtype, partition_info):
del shape # unused
del dtype # unused
del partition_info # unused
embedding_values = (
(1, 0), # id 0
(0, 1), # id 1
(1, 1)) # id 2
return embedding_values
embedding_column = fc._embedding_column(
categorical_column,
dimension=embedding_dimension,
initializer=_embedding_column_initializer)
input_layer = InputLayer([embedding_column])
features = {'a': sparse_input}
def scale_matrix():
matrix = input_layer(features)
return 2 * matrix
# Sanity check: Verify that scale_matrix returns the correct output.
self.assertAllEqual([[2, 0], [0, 2], [2, 2]], scale_matrix())
# Check that the returned gradient is correct.
grad_function = backprop.implicit_grad(scale_matrix)
grads_and_vars = grad_function()
indexed_slice = grads_and_vars[0][0]
gradient = grads_and_vars[0][0].values
self.assertAllEqual([0, 1, 2], indexed_slice.indices)
self.assertAllEqual([[2, 2], [2, 2], [2, 2]], gradient)
| InputLayerTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 140811,
"end": 141200
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(
sgqlc.types.non_null(SponsorOrderField), graphql_name="field"
)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| SponsorOrder |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_new_hampshire_zip.py | {
"start": 1790,
"end": 4171
} | class ____(ColumnMapExpectation):
"""Expect values in this column to be valid New Hampshire zipcodes.
See https://pypi.org/project/zipcodes/ for more information.
"""
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = [
{
"data": {
"valid_new_hampshire_zip": ["03031", "03752", "03884", "03896"],
"invalid_new_hampshire_zip": ["-10000", "1234", "99999", "25487"],
},
"tests": [
{
"title": "basic_positive_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "valid_new_hampshire_zip"},
"out": {"success": True},
},
{
"title": "basic_negative_test",
"exact_match_out": False,
"include_in_gallery": True,
"in": {"column": "invalid_new_hampshire_zip"},
"out": {"success": False},
},
],
}
]
# This is the id string of the Metric used by this Expectation.
# For most Expectations, it will be the same as the `condition_metric_name` defined in your Metric class above.
map_metric = "column_values.valid_new_hampshire_zip"
# This is a list of parameter names that can affect whether the Expectation evaluates to True or False
success_keys = ("mostly",)
# This dictionary contains default values for any parameters that should have default values
default_kwarg_values = {}
# This object contains metadata for display in the public Gallery
library_metadata = {
"maturity": "experimental", # "experimental", "beta", or "production"
"tags": [
"hackathon",
"typed-entities",
], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@luismdiaz01",
"@derekma73", # Don't forget to add your github handle here!
],
"requirements": ["zipcodes"],
}
if __name__ == "__main__":
ExpectColumnValuesToBeValidNewHampshireZip().print_diagnostic_checklist()
| ExpectColumnValuesToBeValidNewHampshireZip |
python | kamyu104__LeetCode-Solutions | Python/find-duplicate-subtrees.py | {
"start": 810,
"end": 1514
} | class ____(object):
def findDuplicateSubtrees(self, root):
"""
:type root: TreeNode
:rtype: List[TreeNode]
"""
def postOrderTraversal(node, lookup, result):
if not node:
return ""
s = "(" + postOrderTraversal(node.left, lookup, result) + \
str(node.val) + \
postOrderTraversal(node.right, lookup, result) + \
")"
if lookup[s] == 1:
result.append(node)
lookup[s] += 1
return s
lookup = collections.defaultdict(int)
result = []
postOrderTraversal(root, lookup, result)
return result
| Solution2 |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_trace_meta.py | {
"start": 5071,
"end": 8590
} | class ____(OrganizationEventsTraceEndpointBase, UptimeResultEAPTestCase):
url_name = "sentry-api-0-organization-trace-meta"
FEATURES = ["organizations:trace-spans-format"]
def create_uptime_check(self, trace_id=None, **kwargs):
defaults = {
"trace_id": trace_id or self.trace_id,
"scheduled_check_time": self.day_ago,
}
defaults.update(kwargs)
return self.create_eap_uptime_result(**defaults)
def test_trace_meta_without_uptime_param(self) -> None:
"""Test that uptime_checks field is NOT present when include_uptime is not set"""
self.load_trace(is_eap=True)
uptime_result = self.create_uptime_check()
self.store_uptime_results([uptime_result])
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
data={"project": -1},
format="json",
)
assert response.status_code == 200
data = response.data
assert "uptime_checks" not in data
assert data["errors"] == 0
assert data["performance_issues"] == 2
assert data["span_count"] == 19
def test_trace_meta_with_uptime_param(self) -> None:
"""Test that uptime_checks shows correct count when include_uptime=1"""
self.load_trace(is_eap=True)
uptime_results = [
self.create_uptime_check(check_status="success"),
self.create_uptime_check(check_status="failure"),
self.create_uptime_check(check_status="success"),
]
self.store_uptime_results(uptime_results)
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
data={"project": "-1", "include_uptime": "1"},
format="json",
)
assert response.status_code == 200
data = response.data
assert "uptime_checks" in data
assert data["uptime_checks"] == 3
assert data["errors"] == 0
assert data["performance_issues"] == 2
assert data["span_count"] == 19
def test_trace_meta_no_uptime_results(self) -> None:
"""Test that uptime_checks is 0 when there are no uptime results"""
self.load_trace(is_eap=True)
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
data={"project": "-1", "include_uptime": "1"},
format="json",
)
assert response.status_code == 200
data = response.data
assert "uptime_checks" in data
assert data["uptime_checks"] == 0
assert data["errors"] == 0
assert data["performance_issues"] == 2
assert data["span_count"] == 19
def test_trace_meta_different_trace_id(self) -> None:
"""Test that uptime results from different traces are not counted"""
self.load_trace(is_eap=True)
other_trace_id = uuid4().hex
uptime_result = self.create_uptime_check(trace_id=other_trace_id)
self.store_uptime_results([uptime_result])
with self.feature(self.FEATURES):
response = self.client.get(
self.url,
data={"project": "-1", "include_uptime": "1"},
format="json",
)
assert response.status_code == 200
data = response.data
assert "uptime_checks" in data
assert data["uptime_checks"] == 0
| OrganizationTraceMetaUptimeTest |
python | kamyu104__LeetCode-Solutions | Python/largest-submatrix-with-rearrangements.py | {
"start": 37,
"end": 559
} | class ____(object):
def largestSubmatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: int
"""
for c in xrange(len(matrix[0])):
h = 0
for r in xrange(len(matrix)):
h = h+1 if matrix[r][c] == 1 else 0
matrix[r][c] = h
result = 0
for row in matrix:
row.sort()
for c in xrange(len(row)):
result = max(result, (len(row)-c) * row[c])
return result
| Solution |
python | spyder-ide__spyder | spyder/plugins/editor/widgets/status.py | {
"start": 1287,
"end": 1680
} | class ____(StatusBarWidget):
"""Status bar widget for the current file encoding."""
ID = "encoding_status"
def update_encoding(self, encoding):
"""Update encoding of current file."""
value = str(encoding).upper()
self.set_value(value)
def get_tooltip(self):
"""Return localized tool tip for widget."""
return _("Encoding")
| EncodingStatus |
python | has2k1__plotnine | plotnine/_utils/registry.py | {
"start": 1953,
"end": 3321
} | class ____(type):
"""
Create a class that registers subclasses and the Hierarchy
The class has gets two properties:
1. `_registry` a dictionary of all the subclasses of the
base class. The keys are the names of the classes and
the values are the class objects.
2. `_hierarchy` a dictionary (default) that holds the
inheritance hierarchy of each class. Each key is a class
and the value is a list of classes. The first name in the
list is that of the key class.
The goal of the `_hierarchy` object to facilitate the
lookup of themeable properties taking into consideration the
inheritance hierarchy. For example if `strip_text_x` inherits
from `strip_text` which inherits from `text`, then if a property
of `strip_text_x` is requested, the lookup should fallback to
the other two if `strip_text_x` is not present or is missing
the requested property.
"""
def __init__(cls, name, bases, namespace):
if not hasattr(cls, "_registry"):
cls._registry = {}
cls._hierarchy = defaultdict(list)
else:
cls._registry[name] = cls
cls._hierarchy[name].append(name)
for base in bases:
for base2 in base.mro()[:-2]:
cls._hierarchy[base2.__name__].append(name)
| RegistryHierarchyMeta |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/paramSpec13.py | {
"start": 752,
"end": 1733
} | class ____(Generic[_P, _R]):
def __init__(self, func: Callable[_P, _R]) -> None: ...
def __call__(self, *args: _P.args, **kwargs: _P.kwargs) -> _R: ...
def remote(self, *args: _P.args, **kwargs: _P.kwargs) -> RemoteResponse[_R]: ...
r1 = RemoteFunction(func2)
reveal_type(r1, expected_text="RemoteFunction[(a: str, b: list[int]), str]")
v2 = r1("hi", [])
reveal_type(v2, expected_text="str")
v3 = r1.remote("hi", [])
reveal_type(v3, expected_text="RemoteResponse[str]")
# This should generate an error
r1(1, [])
# This should generate an error
r1("hi")
# This should generate an error
r1.remote(1, [])
# This should generate an error because 'int' is not assignable
# to ParamSpec _P.
A = RemoteFunction[int, int]
def remote(func: Callable[_P, _R]) -> RemoteFunction[_P, _R]: ...
v4 = remote(func2)
reveal_type(v4, expected_text="RemoteFunction[(a: str, b: list[int]), str]")
Coro = Coroutine[Any, Any, _T]
CoroFunc = Callable[_P, Coro[_T]]
| RemoteFunction |
python | doocs__leetcode | lcof/面试题11. 旋转数组的最小数字/Solution2.py | {
"start": 0,
"end": 433
} | class ____:
def minArray(self, numbers: List[int]) -> int:
l, r = 0, len(numbers) - 1
while l < r:
if numbers[l] < numbers[r]:
return numbers[l]
mid = (l + r) >> 1
if numbers[mid] > numbers[l]:
l = mid + 1
elif numbers[mid] < numbers[l]:
r = mid
else:
l += 1
return numbers[l]
| Solution |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/used_solid.py | {
"start": 232,
"end": 516
} | class ____(graphene.ObjectType):
class Meta:
description = """An invocation of a solid within a repo."""
name = "NodeInvocationSite"
pipeline = graphene.NonNull(GraphenePipeline)
solidHandle = graphene.NonNull(GrapheneSolidHandle)
| GrapheneNodeInvocationSite |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 76558,
"end": 77197
} | class ____(torch.nn.Module):
def __init__(self, qengine):
super().__init__()
self.sub1 = LinearReluModel()
self.sub2 = TwoLayerLinearModel()
self.fc3 = QuantWrapper(torch.nn.Linear(5, 5).to(dtype=torch.float))
self.fc3.qconfig = default_qconfig
self.sub2.fc1 = QuantWrapper(self.sub2.fc1)
if qengine == "fbgemm":
self.sub2.fc1.qconfig = default_per_channel_qconfig
else:
self.sub2.fc1.qconfig = default_qconfig
def forward(self, x):
x = self.sub1(x)
x = self.sub2(x)
x = self.fc3(x)
return x
| AnnotatedNestedModel |
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 3116,
"end": 3264
} | class ____(scale_color_hue):
"""
Qualitative color scale with evenly spaced hues
"""
_aesthetics = ["fill"]
@dataclass
| scale_fill_hue |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/sqltypes.py | {
"start": 122265,
"end": 122343
} | class ____(Time):
"""The SQL TIME type."""
__visit_name__ = "TIME"
| TIME |
python | PrefectHQ__prefect | tests/cli/transfer/test_concurrency_limits.py | {
"start": 508,
"end": 14007
} | class ____:
async def test_construct_creates_new_instance(
self, transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse
):
"""Test that construct creates a new MigratableGlobalConcurrencyLimit instance."""
migratable = await MigratableGlobalConcurrencyLimit.construct(
transfer_global_concurrency_limit
)
assert isinstance(migratable, MigratableGlobalConcurrencyLimit)
assert (
migratable.source_global_concurrency_limit
== transfer_global_concurrency_limit
)
assert migratable.source_id == transfer_global_concurrency_limit.id
assert migratable.destination_global_concurrency_limit is None
assert migratable.destination_id is None
async def test_construct_returns_cached_instance(
self, transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse
):
"""Test that construct returns cached instance for same ID."""
# Clear any existing instances
MigratableGlobalConcurrencyLimit._instances.clear()
# Create first instance
migratable1 = await MigratableGlobalConcurrencyLimit.construct(
transfer_global_concurrency_limit
)
# Create second instance with same limit
migratable2 = await MigratableGlobalConcurrencyLimit.construct(
transfer_global_concurrency_limit
)
# Should be the same instance
assert migratable1 is migratable2
assert len(MigratableGlobalConcurrencyLimit._instances) == 1
async def test_construct_different_limits_create_different_instances(
self, session: AsyncSession
):
"""Test that different concurrency limits create different instances."""
from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse
from prefect.server import models, schemas
# Create two different concurrency limits
orm_limit1 = await models.concurrency_limits_v2.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimitV2(
name=f"test-limit-1-{uuid.uuid4()}",
limit=3,
active=True,
active_slots=0,
),
)
orm_limit2 = await models.concurrency_limits_v2.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimitV2(
name=f"test-limit-2-{uuid.uuid4()}",
limit=10,
active=False,
active_slots=5,
),
)
await session.commit()
# Convert to client schema objects
limit1 = GlobalConcurrencyLimitResponse(
id=orm_limit1.id,
name=orm_limit1.name,
limit=orm_limit1.limit,
active=orm_limit1.active,
active_slots=orm_limit1.active_slots,
slot_decay_per_second=orm_limit1.slot_decay_per_second,
created=orm_limit1.created,
updated=orm_limit1.updated,
)
limit2 = GlobalConcurrencyLimitResponse(
id=orm_limit2.id,
name=orm_limit2.name,
limit=orm_limit2.limit,
active=orm_limit2.active,
active_slots=orm_limit2.active_slots,
slot_decay_per_second=orm_limit2.slot_decay_per_second,
created=orm_limit2.created,
updated=orm_limit2.updated,
)
# Clear any existing instances
MigratableGlobalConcurrencyLimit._instances.clear()
migratable1 = await MigratableGlobalConcurrencyLimit.construct(limit1)
migratable2 = await MigratableGlobalConcurrencyLimit.construct(limit2)
assert migratable1 is not migratable2
assert len(MigratableGlobalConcurrencyLimit._instances) == 2
assert migratable1.source_id != migratable2.source_id
async def test_get_instance_returns_cached_instance(
self, transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse
):
"""Test that get_instance returns cached instance."""
# Clear any existing instances
MigratableGlobalConcurrencyLimit._instances.clear()
# Create instance
migratable = await MigratableGlobalConcurrencyLimit.construct(
transfer_global_concurrency_limit
)
# Retrieve instance
retrieved = await MigratableGlobalConcurrencyLimit.get_instance(
transfer_global_concurrency_limit.id
)
assert retrieved is migratable
async def test_get_instance_returns_none_for_unknown_id(self):
"""Test that get_instance returns None for unknown ID."""
# Clear any existing instances
MigratableGlobalConcurrencyLimit._instances.clear()
unknown_id = uuid.uuid4()
retrieved = await MigratableGlobalConcurrencyLimit.get_instance(unknown_id)
assert retrieved is None
async def test_get_dependencies_returns_empty_list(
self, transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse
):
"""Test that get_dependencies returns empty list (concurrency limits have no dependencies)."""
migratable = await MigratableGlobalConcurrencyLimit.construct(
transfer_global_concurrency_limit
)
dependencies = await migratable.get_dependencies()
assert dependencies == []
@patch("prefect.cli.transfer._migratable_resources.concurrency_limits.get_client")
async def test_migrate_success(
self,
mock_get_client: MagicMock,
transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse,
):
"""Test successful concurrency limit migration."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock successful creation and read
destination_limit = GlobalConcurrencyLimitResponse(
id=uuid.uuid4(),
name=transfer_global_concurrency_limit.name,
limit=transfer_global_concurrency_limit.limit,
active=transfer_global_concurrency_limit.active,
active_slots=transfer_global_concurrency_limit.active_slots,
slot_decay_per_second=transfer_global_concurrency_limit.slot_decay_per_second,
created=transfer_global_concurrency_limit.created,
updated=transfer_global_concurrency_limit.updated,
)
mock_client.create_global_concurrency_limit.return_value = (
None # This method doesn't return the object
)
mock_client.read_global_concurrency_limit_by_name.return_value = (
destination_limit
)
migratable = await MigratableGlobalConcurrencyLimit.construct(
transfer_global_concurrency_limit
)
await migratable.migrate()
# Verify client was called correctly
mock_client.create_global_concurrency_limit.assert_called_once_with(
concurrency_limit=GlobalConcurrencyLimitCreate(
name=transfer_global_concurrency_limit.name,
limit=transfer_global_concurrency_limit.limit,
active=transfer_global_concurrency_limit.active,
active_slots=transfer_global_concurrency_limit.active_slots,
)
)
mock_client.read_global_concurrency_limit_by_name.assert_called_once_with(
transfer_global_concurrency_limit.name
)
# Verify destination_global_concurrency_limit is set
assert migratable.destination_global_concurrency_limit == destination_limit
assert migratable.destination_id == destination_limit.id
@patch("prefect.cli.transfer._migratable_resources.concurrency_limits.get_client")
async def test_migrate_already_exists_raises_transfer_skipped(
self,
mock_get_client: MagicMock,
transfer_global_concurrency_limit: GlobalConcurrencyLimitResponse,
):
"""Test migration when concurrency limit already exists raises TransferSkipped."""
# Mock the client
mock_client = AsyncMock()
mock_get_client.return_value.__aenter__.return_value = mock_client
# Mock ObjectAlreadyExists exception on create
mock_http_exc = Exception("Conflict")
mock_client.create_global_concurrency_limit.side_effect = ObjectAlreadyExists(
mock_http_exc
)
# Mock successful read of existing limit
existing_limit = GlobalConcurrencyLimitResponse(
id=uuid.uuid4(),
name=transfer_global_concurrency_limit.name,
limit=10, # Different limit to show it reads existing
active=False, # Different active state
active_slots=2,
slot_decay_per_second=1.5,
created=transfer_global_concurrency_limit.created,
updated=transfer_global_concurrency_limit.updated,
)
mock_client.read_global_concurrency_limit_by_name.return_value = existing_limit
migratable = await MigratableGlobalConcurrencyLimit.construct(
transfer_global_concurrency_limit
)
# Should raise TransferSkipped
with pytest.raises(TransferSkipped, match="Already exists"):
await migratable.migrate()
# Verify client calls
mock_client.create_global_concurrency_limit.assert_called_once()
mock_client.read_global_concurrency_limit_by_name.assert_called_once_with(
transfer_global_concurrency_limit.name
)
# Verify destination_global_concurrency_limit is still set to the existing limit
assert migratable.destination_global_concurrency_limit == existing_limit
assert migratable.destination_id == existing_limit.id
@pytest.mark.parametrize(
"active,active_slots,limit",
[
(True, 0, 5),
(False, 2, 10),
(True, 8, 8), # At capacity
],
)
async def test_concurrency_limit_with_different_states(
self, session: AsyncSession, active: bool, active_slots: int, limit: int
):
"""Test concurrency limits with different active states."""
from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse
from prefect.server import models, schemas
# Clear instances before test
MigratableGlobalConcurrencyLimit._instances.clear()
orm_limit = await models.concurrency_limits_v2.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimitV2(
name=f"test-limit-{uuid.uuid4()}",
limit=limit,
active=active,
active_slots=active_slots,
),
)
await session.commit()
# Convert to client schema object
client_limit = GlobalConcurrencyLimitResponse(
id=orm_limit.id,
name=orm_limit.name,
limit=orm_limit.limit,
active=orm_limit.active,
active_slots=orm_limit.active_slots,
slot_decay_per_second=orm_limit.slot_decay_per_second,
created=orm_limit.created,
updated=orm_limit.updated,
)
# Test construction works with different states
migratable = await MigratableGlobalConcurrencyLimit.construct(client_limit)
assert migratable.source_global_concurrency_limit.active == active
assert migratable.source_global_concurrency_limit.active_slots == active_slots
assert migratable.source_global_concurrency_limit.limit == limit
@pytest.mark.parametrize(
"name_prefix,limit,active_slots",
[
("zero-limit", 0, 0),
("large-limit", 1000000, 0),
("single-limit", 1, 1),
],
)
async def test_concurrency_limit_with_edge_case_values(
self, session: AsyncSession, name_prefix: str, limit: int, active_slots: int
):
"""Test concurrency limits with edge case values."""
from prefect.client.schemas.responses import GlobalConcurrencyLimitResponse
from prefect.server import models, schemas
# Clear instances before test
MigratableGlobalConcurrencyLimit._instances.clear()
orm_limit = await models.concurrency_limits_v2.create_concurrency_limit(
session=session,
concurrency_limit=schemas.core.ConcurrencyLimitV2(
name=f"{name_prefix}-{uuid.uuid4()}",
limit=limit,
active=True,
active_slots=active_slots,
),
)
await session.commit()
# Convert to client schema object
client_limit = GlobalConcurrencyLimitResponse(
id=orm_limit.id,
name=orm_limit.name,
limit=orm_limit.limit,
active=orm_limit.active,
active_slots=orm_limit.active_slots,
slot_decay_per_second=orm_limit.slot_decay_per_second,
created=orm_limit.created,
updated=orm_limit.updated,
)
# Test construction works with edge case values
migratable = await MigratableGlobalConcurrencyLimit.construct(client_limit)
assert migratable.source_global_concurrency_limit.limit == limit
assert migratable.source_global_concurrency_limit.active_slots == active_slots
| TestMigratableGlobalConcurrencyLimit |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/shortcuts/progress_bar/formatters.py | {
"start": 913,
"end": 1281
} | class ____(metaclass=ABCMeta):
"""
Base class for any formatter.
"""
@abstractmethod
def format(
self,
progress_bar: ProgressBar,
progress: ProgressBarCounter[object],
width: int,
) -> AnyFormattedText:
pass
def get_width(self, progress_bar: ProgressBar) -> AnyDimension:
return D()
| Formatter |
python | python-pillow__Pillow | Tests/test_shell_injection.py | {
"start": 570,
"end": 2122
} | class ____:
def assert_save_filename_check(
self,
tmp_path: Path,
src_img: Image.Image,
save_func: Callable[[Image.Image, IO[bytes], str | bytes], None],
) -> None:
for filename in test_filenames:
dest_file = str(tmp_path / filename)
save_func(src_img, BytesIO(), dest_file)
# If file can't be opened, shell injection probably occurred
with Image.open(dest_file) as im:
im.load()
@pytest.mark.skipif(not djpeg_available(), reason="djpeg not available")
def test_load_djpeg_filename(self, tmp_path: Path) -> None:
for filename in test_filenames:
src_file = tmp_path / filename
shutil.copy(TEST_JPG, src_file)
with Image.open(src_file) as im:
assert isinstance(im, JpegImagePlugin.JpegImageFile)
im.load_djpeg()
@pytest.mark.skipif(not netpbm_available(), reason="Netpbm not available")
def test_save_netpbm_filename_bmp_mode(self, tmp_path: Path) -> None:
with Image.open(TEST_GIF) as im:
im = im.convert("RGB")
self.assert_save_filename_check(tmp_path, im, GifImagePlugin._save_netpbm)
@pytest.mark.skipif(not netpbm_available(), reason="Netpbm not available")
def test_save_netpbm_filename_l_mode(self, tmp_path: Path) -> None:
with Image.open(TEST_GIF) as im:
im = im.convert("L")
self.assert_save_filename_check(tmp_path, im, GifImagePlugin._save_netpbm)
| TestShellInjection |
python | doocs__leetcode | solution/1300-1399/1314.Matrix Block Sum/Solution.py | {
"start": 0,
"end": 705
} | class ____:
def matrixBlockSum(self, mat: List[List[int]], k: int) -> List[List[int]]:
m, n = len(mat), len(mat[0])
s = [[0] * (n + 1) for _ in range(m + 1)]
for i, row in enumerate(mat, 1):
for j, x in enumerate(row, 1):
s[i][j] = s[i - 1][j] + s[i][j - 1] - s[i - 1][j - 1] + x
ans = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
x1, y1 = max(i - k, 0), max(j - k, 0)
x2, y2 = min(m - 1, i + k), min(n - 1, j + k)
ans[i][j] = (
s[x2 + 1][y2 + 1] - s[x1][y2 + 1] - s[x2 + 1][y1] + s[x1][y1]
)
return ans
| Solution |
python | pypa__pipenv | pipenv/patched/pip/_internal/cli/parser.py | {
"start": 4606,
"end": 5256
} | class ____(optparse.OptionParser):
def insert_option_group(
self, idx: int, *args: Any, **kwargs: Any
) -> optparse.OptionGroup:
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
@property
def option_list_all(self) -> List[optparse.Option]:
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
| CustomOptionParser |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 179957,
"end": 183116
} | class ____(Request):
"""
Indicates that task has failed
:param force: Allows forcing state change even if transition is not supported
:type force: bool
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "failed"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"force": {
"default": False,
"description": "Allows forcing state change even if transition is not supported",
"type": ["boolean", "null"],
},
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self,
task: str,
force: Optional[bool] = False,
status_reason: Optional[str] = None,
status_message: Optional[str] = None,
**kwargs: Any
) -> None:
super(FailedRequest, self).__init__(**kwargs)
self.force = force
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("force")
def force(self) -> Optional[bool]:
return self._property_force
@force.setter
def force(self, value: Optional[bool]) -> None:
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| FailedRequest |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/errors.py | {
"start": 9548,
"end": 9656
} | class ____(HypothesisException):
"""Signal that the example matches condition. Internal use only."""
| Found |
python | getsentry__sentry | fixtures/page_objects/dashboard_detail.py | {
"start": 381,
"end": 3983
} | class ____(BasePage):
def __init__(self, browser, client, *, organization: Organization, dashboard: Dashboard):
super().__init__(browser)
self.client = client
self.organization = organization
self.dashboard = dashboard
def wait_until_loaded(self):
self.browser.wait_until_not('[data-test-id="events-request-loading"]')
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_not('[data-test-id="loading-placeholder"]')
self.browser.wait_until_not(".loading")
def visit_default_overview(self):
self.browser.get(f"/organizations/{self.organization.slug}/dashboard/default-overview/")
self.wait_until_loaded()
self.browser.driver.execute_script("window.scrollTo(0, document.body.scrollHeight)")
self.wait_until_loaded()
def visit_create_dashboard(self):
self.browser.get(f"/organizations/{self.organization.slug}/dashboards/new/")
self.wait_until_loaded()
def visit_dashboard_detail(self):
self.browser.get(f"/organizations/{self.organization.slug}/dashboard/{self.dashboard.id}/")
self.wait_until_loaded()
def enter_edit_state(self):
button = self.browser.element('[data-test-id="dashboard-edit"]')
self.browser.wait_until_clickable('[data-test-id="dashboard-edit"]')
button.click()
self.wait_until_loaded()
def click_dashboard_add_widget_button(self):
button = self.browser.element('[data-test-id="widget-add"]')
# HACK: Use JavaScript to execute click to avoid click intercepted issues
self.browser.driver.execute_script("arguments[0].click()", button)
self.wait_until_loaded()
def click_dashboard_header_add_widget_button(self):
button = self.browser.element('[data-test-id="add-widget-library"]')
self.browser.wait_until_clickable('[data-test-id="add-widget-library"]')
button.click()
self.wait_until_loaded()
def click_cancel_button(self):
button = self.browser.element('[data-test-id="dashboard-cancel"]')
self.browser.wait_until_clickable('[data-test-id="dashboard-cancel"]')
button.click()
self.wait_until_loaded()
def add_widget_through_dashboard(self, widget_title):
self.click_dashboard_add_widget_button()
title_input = self.browser.element(WIDGET_TITLE_FIELD)
title_input.clear()
title_input.send_keys(widget_title)
button = self.browser.element('[aria-label="Add Widget"]')
button.click()
self.wait_until_loaded()
def save_dashboard(self):
button = self.browser.element('[data-test-id="dashboard-commit"]')
self.browser.wait_until_clickable('[data-test-id="dashboard-commit"]')
button.click()
# This is a kind of hack.
# After we click the button, an API call is made and we want to wait
# until the API call finishes. Since the loading indicator isn't used
# we can't rely on self.wait_until_loaded(). The UI shows a
# success toast, however if a previous step of a test shows a success
# toast, a wait_until([data-test-id="toast-success"]) will return
# immediately due to the previous toast still being in the DOM.
# Since clicking the save dasboard button is removed once the API
# call is complete, we can wait for that as a signal
# that the API is complete.
self.browser.wait_until_not('[data-test-id="dashboard-commit"]')
self.wait_until_loaded()
| DashboardDetailPage |
python | django-compressor__django-compressor | compressor/base.py | {
"start": 852,
"end": 15891
} | class ____:
"""
Base compressor object to be subclassed for content type
depending implementations details.
"""
output_mimetypes = {}
def __init__(
self,
resource_kind,
content=None,
output_prefix=None,
context=None,
filters=None,
log=None,
verbosity=1,
*args,
**kwargs
):
if filters is None:
self.filters = settings.COMPRESS_FILTERS[resource_kind]
else:
self.filters = filters
if output_prefix is None:
self.output_prefix = resource_kind
else:
self.output_prefix = output_prefix
self.content = content or "" # rendered contents of {% compress %} tag
self.output_dir = settings.COMPRESS_OUTPUT_DIR.strip("/")
self.charset = settings.DEFAULT_CHARSET
self.split_content = []
self.context = context or {}
self.resource_kind = resource_kind
self.extra_context = {}
self.precompiler_mimetypes = dict(settings.COMPRESS_PRECOMPILERS)
self.finders = staticfiles.finders
self._storage = None
self.log = log
self.verbosity = verbosity
def copy(self, **kwargs):
keywords = dict(
content=self.content,
context=self.context,
output_prefix=self.output_prefix,
filters=self.filters,
)
keywords.update(kwargs)
return self.__class__(self.resource_kind, **keywords)
@cached_property
def storage(self):
from compressor.storage import default_storage
return default_storage
def split_contents(self):
"""
To be implemented in a subclass, should return an
iterable with four values: kind, value, basename, element
"""
raise NotImplementedError
def get_template_name(self, mode):
"""
Returns the template path for the given mode.
"""
try:
template = getattr(self, "template_name_%s" % mode)
if template:
return template
except AttributeError:
pass
return "compressor/%s_%s.html" % (self.resource_kind, mode)
def get_basename(self, url):
"""
Takes full path to a static file (eg. "/static/css/style.css") and
returns path with storage's base url removed (eg. "css/style.css").
"""
try:
base_url = self.storage.base_url
except AttributeError:
base_url = settings.COMPRESS_URL
# Cast ``base_url`` to a string to allow it to be
# a string-alike object to e.g. add ``SCRIPT_NAME``
# WSGI param as a *path prefix* to the output URL.
# See https://code.djangoproject.com/ticket/25598.
base_url = str(base_url)
if not url.startswith(base_url):
raise UncompressableFileError(
"'%s' isn't accessible via "
"COMPRESS_URL ('%s') and can't be "
"compressed" % (url, base_url)
)
basename = url.replace(base_url, "", 1)
# drop the querystring, which is used for non-compressed cache-busting.
return basename.split("?", 1)[0]
def get_filepath(self, content, basename=None):
"""
Returns file path for an output file based on contents.
Returned path is relative to compressor storage's base url, for
example "CACHE/css/58a8c0714e59.css".
When `basename` argument is provided then file name (without extension)
will be used as a part of returned file name, for example:
get_filepath(content, "my_file.css") -> 'CACHE/css/my_file.58a8c0714e59.css'
"""
parts = []
if basename:
filename = os.path.split(basename)[1]
parts.append(os.path.splitext(filename)[0])
parts.extend([get_hexdigest(content, 12), self.resource_kind])
return os.path.join(self.output_dir, self.output_prefix, ".".join(parts))
def get_filename(self, basename):
"""
Returns full path to a file, for example:
get_filename('css/one.css') -> '/full/path/to/static/css/one.css'
"""
filename = None
# First try finding the file using the storage class.
# This is skipped in DEBUG mode as files might be outdated in
# compressor's final destination (COMPRESS_ROOT) during development
if not settings.DEBUG:
try:
# call path first so remote storages don't make it to exists,
# which would cause network I/O
if self.log and self.verbosity >= 2:
self.log.write("Looking for '{}' in storage\n".format(basename))
filename = self.storage.path(basename)
if not self.storage.exists(basename):
filename = None
except NotImplementedError:
# remote storages don't implement path, access the file locally
if self.log and self.verbosity >= 2:
self.log.write(
"Remote storages don't implement path, looking for the file locally\n"
)
if compressor_file_storage.exists(basename):
filename = compressor_file_storage.path(basename)
# secondly try to find it with staticfiles
if not filename and self.finders:
if self.log and self.verbosity >= 2:
if not settings.DEBUG:
self.log.write(
"'{}' was not found in storage, using static finders\n".format(
basename
)
)
else:
self.log.write("Using static finders for '{}'\n".format(basename))
filename = self.finders.find(url2pathname(basename))
if filename:
return filename
# or just raise an exception as the last resort
raise UncompressableFileError(
"'%s' could not be found in the COMPRESS_ROOT '%s'%s"
% (
basename,
settings.COMPRESS_ROOT,
self.finders and " or with staticfiles." or ".",
)
)
def get_filecontent(self, filename, charset):
"""
Reads file contents using given `charset` and returns it as text.
"""
if charset == "utf-8":
# Removes BOM
charset = "utf-8-sig"
with codecs.open(filename, "r", charset) as fd:
try:
return fd.read()
except IOError as e:
raise UncompressableFileError(
"IOError while processing " "'%s': %s" % (filename, e)
)
except UnicodeDecodeError as e:
raise UncompressableFileError(
"UnicodeDecodeError while "
"processing '%s' with "
"charset %s: %s" % (filename, charset, e)
)
@cached_property
def parser(self):
return get_class(settings.COMPRESS_PARSER)(self.content)
@cached_property
def cached_filters(self):
return [get_class(filter_cls) for filter_cls in self.filters]
@cached_property
def mtimes(self):
return [
str(get_mtime(value))
for kind, value, basename, elem in self.split_contents()
if kind == SOURCE_FILE
]
@cached_property
def cachekey(self):
return get_hexdigest(
"".join([self.content] + self.mtimes).encode(self.charset), 12
)
def hunks(self, forced=False):
"""
The heart of content parsing, iterates over the
list of split contents and looks at its kind
to decide what to do with it. Should yield a
bunch of precompiled and/or rendered hunks.
"""
enabled = settings.COMPRESS_ENABLED or forced
for kind, value, basename, elem in self.split_contents():
precompiled = False
attribs = self.parser.elem_attribs(elem)
charset = attribs.get("charset", self.charset)
options = {
"method": METHOD_INPUT,
"elem": elem,
"kind": kind,
"basename": basename,
"charset": charset,
}
if kind == SOURCE_FILE:
options = dict(options, filename=value)
value = self.get_filecontent(value, charset)
if self.precompiler_mimetypes:
precompiled, value = self.precompile(value, **options)
if enabled:
yield self.filter(value, self.cached_filters, **options)
elif precompiled:
for filter_cls in self.cached_filters:
if filter_cls.run_with_compression_disabled:
value = self.filter(value, [filter_cls], **options)
yield self.handle_output(kind, value, forced=True, basename=basename)
else:
yield self.parser.elem_str(elem)
def filter_output(self, content):
"""
Passes the concatenated content to the 'output' methods
of the compressor filters.
"""
return self.filter(content, self.cached_filters, method=METHOD_OUTPUT)
def filter_input(self, forced=False):
"""
Passes each hunk (file or code) to the 'input' methods
of the compressor filters.
"""
content = []
for hunk in self.hunks(forced):
content.append(hunk)
return content
def precompile(
self, content, kind=None, elem=None, filename=None, charset=None, **kwargs
):
"""
Processes file using a pre compiler.
This is the place where files like coffee script are processed.
"""
if not kind:
return False, content
attrs = self.parser.elem_attribs(elem)
mimetype = attrs.get("type", None)
if mimetype is None:
return False, content
filter_or_command = self.precompiler_mimetypes.get(mimetype)
if filter_or_command is None:
if mimetype in self.output_mimetypes:
return False, content
raise CompressorError(
"Couldn't find any precompiler in "
"COMPRESS_PRECOMPILERS setting for "
"mimetype '%s'." % mimetype
)
mod_name, cls_name = get_mod_func(filter_or_command)
try:
mod = import_module(mod_name)
except (ImportError, TypeError):
filter = CachedCompilerFilter(
content=content,
filter_type=self.resource_kind,
filename=filename,
charset=charset,
command=filter_or_command,
mimetype=mimetype,
)
return True, filter.input(**kwargs)
try:
precompiler_class = getattr(mod, cls_name)
except AttributeError:
raise FilterDoesNotExist('Could not find "%s".' % filter_or_command)
filter = precompiler_class(
content,
attrs=attrs,
filter_type=self.resource_kind,
charset=charset,
filename=filename,
)
return True, filter.input(**kwargs)
def filter(self, content, filters, method, **kwargs):
for filter_cls in filters:
filter_func = getattr(
filter_cls(content, filter_type=self.resource_kind), method
)
try:
if callable(filter_func):
content = filter_func(**kwargs)
except NotImplementedError:
pass
return content
def output(self, mode="file", forced=False, basename=None):
"""
The general output method, override in subclass if you need to do
any custom modification. Calls other mode specific methods or simply
returns the content directly.
"""
output = "\n".join(self.filter_input(forced))
if not output:
return ""
if settings.COMPRESS_ENABLED or forced:
filtered_output = self.filter_output(output)
return self.handle_output(mode, filtered_output, forced, basename)
return output
def handle_output(self, mode, content, forced, basename=None):
# Then check for the appropriate output method and call it
output_func = getattr(self, "output_%s" % mode, None)
if callable(output_func):
return output_func(mode, content, forced, basename)
# Total failure, raise a general exception
raise CompressorError("Couldn't find output method for mode '%s'" % mode)
def output_file(self, mode, content, forced=False, basename=None):
"""
The output method that saves the content to a file and renders
the appropriate template with the file's URL.
"""
new_filepath = self.get_filepath(content, basename=basename)
if not self.storage.exists(new_filepath) or forced:
self.storage.save(new_filepath, ContentFile(content.encode(self.charset)))
url = mark_safe(self.storage.url(new_filepath))
return self.render_output(mode, {"url": url})
def output_inline(self, mode, content, forced=False, basename=None):
"""
The output method that directly returns the content for inline
display.
"""
return self.render_output(mode, {"content": content})
def output_preload(self, mode, content, forced=False, basename=None):
"""
The output method that returns <link> with rel="preload" and
proper href attribute for given file.
"""
return self.output_file(mode, content, forced, basename)
def render_output(self, mode, context=None):
"""
Renders the compressor output with the appropriate template for
the given mode and template context.
"""
# Just in case someone renders the compressor outside
# the usual template rendering cycle
if "compressed" not in self.context:
self.context["compressed"] = {}
self.context["compressed"].update(context or {})
self.context["compressed"].update(self.extra_context)
if hasattr(self.context, "flatten"):
# Passing Contexts to Template.render is deprecated since Django 1.8.
final_context = self.context.flatten()
else:
final_context = self.context
post_compress.send(
sender=self.__class__,
type=self.resource_kind,
mode=mode,
context=final_context,
)
template_name = self.get_template_name(mode)
return render_to_string(template_name, context=final_context)
| Compressor |
python | numba__numba | numba/tests/test_errorhandling.py | {
"start": 14139,
"end": 14985
} | class ____(SerialMixin, TestCase):
"""Checks that the way errors are captured.
"""
def test_error_in_overload(self):
def bar(x):
pass
@overload(bar)
def ol_bar(x):
x.some_invalid_attr # doesn't exist!
def impl(x):
pass
return impl
with warnings.catch_warnings():
# Suppress error going into stdout
warnings.simplefilter("ignore",
errors.NumbaPendingDeprecationWarning)
with self.assertRaises(AttributeError) as raises:
@njit('void(int64)')
def foo(x):
bar(x)
expected = "object has no attribute 'some_invalid_attr'"
self.assertIn(expected, str(raises.exception))
| TestCapturedErrorHandling |
python | faif__python-patterns | patterns/structural/facade.py | {
"start": 1739,
"end": 1937
} | class ____:
"""
Simple solid state drive representation.
"""
def read(self, lba: str, size: str) -> str:
return f"Some data from sector {lba} with size {size}"
| SolidStateDrive |
python | mlflow__mlflow | mlflow/genai/scorers/builtin_scorers.py | {
"start": 50245,
"end": 56865
} | class ____(BuiltInScorer):
"""
Equivalence compares outputs against expected outputs for semantic equivalence.
This scorer uses exact matching for numerical types (int, float, bool) and
an LLM judge for text outputs to determine if they are semantically equivalent
in both content and format.
You can invoke the scorer directly with a single input for testing, or pass it to
`mlflow.genai.evaluate` or `mlflow.genai.optimize_prompts` for evaluation.
Args:
name: The name of the scorer. Defaults to "equivalence".
model: {{ model }}
Example (direct usage):
.. code-block:: python
import mlflow
from mlflow.genai.scorers import Equivalence
# Numerical equivalence
assessment = Equivalence()(
outputs=42,
expectations={"expected_response": 42},
)
print(assessment) # value: ategoricalRating.YES, rationale: 'Exact numerical match'
# Text equivalence
assessment = Equivalence()(
outputs="The capital is Paris",
expectations={"expected_response": "Paris is the capital"},
)
print(assessment) # value: CategoricalRating.YES (semantically equivalent)
Example (with evaluate):
.. code-block:: python
import mlflow
from mlflow.genai.scorers import Equivalence
data = [
{
"outputs": "The capital is Paris",
"expectations": {"expected_response": "Paris"},
}
]
result = mlflow.genai.evaluate(data=data, scorers=[Equivalence()])
"""
name: str = "equivalence"
model: str | None = None
required_columns: set[str] = {"outputs"}
description: str = "Compare outputs against expected outputs for semantic equivalence."
@property
def instructions(self) -> str:
"""Get the instructions of what this scorer evaluates."""
return EQUIVALENCE_PROMPT_INSTRUCTIONS
def validate_columns(self, columns: set[str]) -> None:
super().validate_columns(columns)
if "expectations/expected_response" not in columns:
raise MissingColumnsException(self.name, {"expectations/expected_response"})
def get_input_fields(self) -> list[JudgeField]:
"""
Get the input fields for the Equivalence scorer.
Returns:
List of JudgeField objects defining the input fields.
"""
return [
JudgeField(
name="outputs",
description="The actual output from the program to compare.",
),
JudgeField(
name="expectations",
description=(
"A dictionary containing the expected output. Must contain an "
"'expected_response' key with the expected value, e.g. "
"{'expected_response': 'Paris'}."
),
),
]
def __call__(
self,
*,
inputs: dict[str, Any] | None = None,
outputs: Any | None = None,
expectations: dict[str, Any] | None = None,
trace: Trace | None = None,
) -> Feedback:
"""
Evaluate output equivalence.
This scorer can be used in two ways:
1. Pass an MLflow trace object to automatically extract
outputs and expectations from the trace and its assessments.
2. Directly provide outputs and expectations to evaluate.
Args:
inputs: A dictionary of input data (optional, not used in evaluation).
outputs: The actual output to compare. Optional when trace is provided.
expectations: A dictionary containing the expected output. Must contain an
'expected_response' key. Optional when trace is provided.
trace: MLflow trace object containing the execution to evaluate. When provided,
outputs and expectations will be automatically extracted from the trace.
Returns:
Feedback object with 'yes'/'no' value and rationale
"""
from mlflow.genai.judges.builtin import _sanitize_feedback
from mlflow.genai.judges.prompts.equivalence import (
EQUIVALENCE_FEEDBACK_NAME,
get_prompt,
)
# Use resolve_scorer_fields to extract fields from trace if provided
fields = resolve_scorer_fields(
trace,
self,
inputs,
outputs,
expectations,
model=self.model,
extract_expectations=True,
)
_validate_required_fields(fields, self, "Equivalence scorer")
# Validate that expected_response is present
if not fields.expectations or fields.expectations.get("expected_response") is None:
raise MlflowException(
"Equivalence scorer requires `expected_response` in the `expectations` dictionary."
)
# Extract the expected response
expected_output = fields.expectations.get("expected_response")
actual_output = fields.outputs
# Handle exact match for numerical types
if isinstance(actual_output, (int, float, bool)) and isinstance(
expected_output, (int, float, bool)
):
if math.isclose(actual_output, expected_output):
return Feedback(
name=self.name,
value=CategoricalRating.YES,
rationale="Exact numerical match",
)
else:
return Feedback(
name=self.name,
value=CategoricalRating.NO,
rationale=f"Values do not match: {actual_output} != {expected_output}",
)
# Convert to strings for comparison
outputs_str = str(actual_output)
expectations_str = str(expected_output)
# Use exact match first
if outputs_str == expectations_str:
return Feedback(
name=self.name,
value=CategoricalRating.YES,
rationale="Exact string match",
)
# Use LLM judge for semantic equivalence
model = self.model or get_default_model()
assessment_name = self.name or EQUIVALENCE_FEEDBACK_NAME
prompt = get_prompt(
output=outputs_str,
expected_output=expectations_str,
)
feedback = invoke_judge_model(model, prompt, assessment_name=assessment_name)
return _sanitize_feedback(feedback)
| Equivalence |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 355305,
"end": 355701
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("client_mutation_id", "invitation")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
invitation = sgqlc.types.Field(
"EnterpriseAdministratorInvitation", graphql_name="invitation"
)
| InviteEnterpriseAdminPayload |
python | readthedocs__readthedocs.org | dockerfiles/settings/web.py | {
"start": 49,
"end": 379
} | class ____(DockerBaseSettings):
DONT_HIT_DB = False
# Router is useful from webs only because they have access to the database.
# Builders will use the same queue that was assigned the first time on retry
CELERY_ROUTES = ("readthedocs.builds.tasks.TaskRouter",)
WebDevSettings.load_settings(__name__)
| WebDevSettings |
python | huggingface__transformers | src/transformers/models/gpt_neox/tokenization_gpt_neox.py | {
"start": 1067,
"end": 7403
} | class ____(TokenizersBackend):
"""
Construct a GPT-NeoX-20B tokenizer (backed by HuggingFace's tokenizers library). Based on byte-level
Byte-Pair-Encoding.
This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will
be encoded differently whether it is at the beginning of the sentence (without space) or not:
```python
>>> from transformers import GPTNeoXTokenizer
>>> tokenizer = GPTNeoXTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
>>> tokenizer("Hello world")["input_ids"]
[15496, 995]
>>> tokenizer(" Hello world")["input_ids"]
[18435, 995]
```
You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since
the model was not pretrained this way, it might yield a decrease in performance.
<Tip>
When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.
</Tip>
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
vocab_file (`str`, *optional*):
Path to the vocabulary file.
merges_file (`str`, *optional*):
Path to the merges file.
tokenizer_file (`str`, *optional*):
Path to a tokenizers JSON file containing the serialization of a tokenizer.
errors (`str`, *optional*, defaults to `"replace"`):
Paradigm to follow when decoding bytes to UTF-8. See
[bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
token instead.
bos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The beginning of sequence token.
eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
The end of sequence token.
pad_token (`str`, *optional*, defaults to `"<|padding|>"`):
Token for padding a sequence.
add_prefix_space (`bool`, *optional*, defaults to `False`):
Whether or not to add an initial space to the input. This allows to treat the leading word just as any
other word. (GPTNeoX tokenizer detect beginning of words by the preceding space).
add_bos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add a `bos_token` at the start of sequences.
add_eos_token (`bool`, *optional*, defaults to `False`):
Whether or not to add an `eos_token` at the end of sequences.
trim_offsets (`bool`, *optional*, defaults to `True`):
Whether or not the post-processing step should trim offsets to avoid including whitespaces.
vocab (`dict`, *optional*):
Custom vocabulary dictionary. If not provided, vocabulary is loaded from vocab_file.
merges (`list`, *optional*):
Custom merges list. If not provided, merges are loaded from merges_file.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
errors: str = "replace",
unk_token: str = "<|endoftext|>",
bos_token: str = "<|endoftext|>",
eos_token: str = "<|endoftext|>",
pad_token: str = "<|padding|>",
add_bos_token: bool = False,
add_eos_token: bool = False,
add_prefix_space: bool = False,
trim_offsets: bool = True,
vocab: Optional[dict] = None,
merges: Optional[list] = None,
**kwargs,
):
self._add_bos_token = add_bos_token
self._add_eos_token = add_eos_token
self.add_prefix_space = add_prefix_space
self.trim_offsets = trim_offsets
if vocab is not None:
self._vocab = (
{token: idx for idx, (token, _score) in enumerate(vocab)} if isinstance(vocab, list) else vocab
)
else:
self._vocab = {
str(unk_token): 0,
str(pad_token): 1,
}
if merges is not None:
self._merges = merges
else:
self._merges = []
self._tokenizer = Tokenizer(
BPE(
vocab=self._vocab,
merges=self._merges,
dropout=None,
continuing_subword_prefix="",
end_of_word_suffix="",
fuse_unk=False,
)
)
self._tokenizer.normalizer = normalizers.NFC()
self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(
add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
)
self._tokenizer.decoder = decoders.ByteLevel(add_prefix_space=False, trim_offsets=True)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
errors=errors,
unk_token=unk_token,
bos_token=bos_token,
eos_token=eos_token,
pad_token=pad_token,
add_bos_token=add_bos_token,
add_eos_token=add_eos_token,
add_prefix_space=add_prefix_space,
trim_offsets=trim_offsets,
**kwargs,
)
self.update_post_processor()
def _post_init(self):
"""Post-initialization to ensure tokenizer settings are applied correctly."""
# Re-apply settings to ensure they're correct after loading from pretrained
self._tokenizer.normalizer = normalizers.NFC()
self._tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(
add_prefix_space=self.add_prefix_space, trim_offsets=self.trim_offsets
)
self._tokenizer.decoder = decoders.ByteLevel(add_prefix_space=False, trim_offsets=True)
# Call parent to handle AddedToken properties
super()._post_init()
# Update post processor with current bos/eos settings
self.update_post_processor()
__all__ = ["GPTNeoXTokenizer"]
| GPTNeoXTokenizer |
python | django__django | tests/test_client_regress/tests.py | {
"start": 51558,
"end": 51697
} | class ____:
def __init__(self, filename):
self.name = filename
def read(self):
return b"TEST_FILE_CONTENT"
| DummyFile |
python | PyCQA__pylint | tests/functional/ext/docparams/raise/missing_raises_doc_required_exc_inheritance.py | {
"start": 152,
"end": 830
} | class ____(CustomError):
pass
def test_find_missing_raise_for_parent(): # [missing-raises-doc]
"""This is a docstring.
Raises:
CustomError: Never
"""
raise NameError("hi")
def test_no_missing_raise_for_child_builtin():
"""This is a docstring.
Raises:
Exception: Never
"""
raise ValueError("hi")
def test_no_missing_raise_for_child_custom():
"""This is a docstring.
Raises:
NameError: Never
"""
raise CustomError("hi")
def test_no_missing_raise_for_child_custom_nested():
"""This is a docstring.
Raises:
NameError: Never
"""
raise CustomChildError("hi")
| CustomChildError |
python | realpython__materials | inheritance-and-composition/choosing/hr.py | {
"start": 1240,
"end": 1458
} | class ____(PayrollPolicy):
def __init__(self, hour_rate):
super().__init__()
self.hour_rate = hour_rate
def calculate_payroll(self):
return self.hours_worked * self.hour_rate
| HourlyPolicy |
python | pypa__warehouse | warehouse/legacy/api/xmlrpc/cache/fncache.py | {
"start": 243,
"end": 3085
} | class ____:
"""
Redis backed LRU cache for functions which return an object which
can survive orjson.dumps() and orjson.loads() intact
"""
def __init__(self, conn, name="lru", expires=None, metric_reporter=None):
"""
conn: Redis Connection Object
name: Prefix for all keys in the cache
expires: Default expiration
metric_reporter: Object implementing an `increment(<string>)` method
"""
self.conn = conn
self.name = name
self.expires = expires if expires else DEFAULT_EXPIRES
if callable(getattr(metric_reporter, "increment", None)):
self.metric_reporter = metric_reporter
else:
self.metric_reporter = StubMetricReporter()
def format_key(self, func_name, tag):
if tag is not None and tag != "None":
return ":".join([self.name, tag, func_name])
return ":".join([self.name, "tag", func_name])
def get(self, func_name, key, tag):
try:
value = self.conn.hget(self.format_key(func_name, tag), str(key))
except (redis.exceptions.RedisError, redis.exceptions.ConnectionError):
self.metric_reporter.increment(f"{self.name}.cache.error")
return None
if value:
self.metric_reporter.increment(f"{self.name}.cache.hit")
value = orjson.loads(value)
return value
def add(self, func_name, key, value, tag, expires):
try:
self.metric_reporter.increment(f"{self.name}.cache.miss")
pipeline = self.conn.pipeline()
pipeline.hset(
self.format_key(func_name, tag), str(key), orjson.dumps(value)
)
ttl = expires if expires else self.expires
pipeline.expire(self.format_key(func_name, tag), ttl)
pipeline.execute()
return value
except (redis.exceptions.RedisError, redis.exceptions.ConnectionError):
self.metric_reporter.increment(f"{self.name}.cache.error")
return value
def purge(self, tag):
try:
keys = self.conn.scan_iter(":".join([self.name, tag, "*"]), count=1000)
pipeline = self.conn.pipeline()
for key in keys:
pipeline.delete(key)
pipeline.execute()
self.metric_reporter.increment(f"{self.name}.cache.purge")
except (redis.exceptions.RedisError, redis.exceptions.ConnectionError):
self.metric_reporter.increment(f"{self.name}.cache.error")
raise CacheError()
def fetch(self, func, args, kwargs, key, tag, expires):
return self.get(func.__name__, str(key), str(tag)) or self.add(
func.__name__, str(key), func(*args, **kwargs), str(tag), expires
)
| RedisLru |
python | ray-project__ray | python/ray/_private/thirdparty/pyamdsmi/pyamdsmi.py | {
"start": 5328,
"end": 5489
} | class ____(Structure):
_fields_ = [('transfer_rate', rsmi_frequencies_t),
('lanes', c_uint32 * RSMI_MAX_NUM_FREQUENCIES)]
| rsmi_pcie_bandwidth_t |
python | pypa__pip | src/pip/_vendor/cachecontrol/controller.py | {
"start": 1269,
"end": 19101
} | class ____:
"""An interface to see if request should cached or not."""
def __init__(
self,
cache: BaseCache | None = None,
cache_etags: bool = True,
serializer: Serializer | None = None,
status_codes: Collection[int] | None = None,
):
self.cache = DictCache() if cache is None else cache
self.cache_etags = cache_etags
self.serializer = serializer or Serializer()
self.cacheable_status_codes = status_codes or (200, 203, 300, 301, 308)
@classmethod
def _urlnorm(cls, uri: str) -> str:
"""Normalize the URL to create a safe key for the cache"""
(scheme, authority, path, query, fragment) = parse_uri(uri)
if not scheme or not authority:
raise Exception("Only absolute URIs are allowed. uri = %s" % uri)
scheme = scheme.lower()
authority = authority.lower()
if not path:
path = "/"
# Could do syntax based normalization of the URI before
# computing the digest. See Section 6.2.2 of Std 66.
request_uri = query and "?".join([path, query]) or path
defrag_uri = scheme + "://" + authority + request_uri
return defrag_uri
@classmethod
def cache_url(cls, uri: str) -> str:
return cls._urlnorm(uri)
def parse_cache_control(self, headers: Mapping[str, str]) -> dict[str, int | None]:
known_directives = {
# https://tools.ietf.org/html/rfc7234#section-5.2
"max-age": (int, True),
"max-stale": (int, False),
"min-fresh": (int, True),
"no-cache": (None, False),
"no-store": (None, False),
"no-transform": (None, False),
"only-if-cached": (None, False),
"must-revalidate": (None, False),
"public": (None, False),
"private": (None, False),
"proxy-revalidate": (None, False),
"s-maxage": (int, True),
}
cc_headers = headers.get("cache-control", headers.get("Cache-Control", ""))
retval: dict[str, int | None] = {}
for cc_directive in cc_headers.split(","):
if not cc_directive.strip():
continue
parts = cc_directive.split("=", 1)
directive = parts[0].strip()
try:
typ, required = known_directives[directive]
except KeyError:
logger.debug("Ignoring unknown cache-control directive: %s", directive)
continue
if not typ or not required:
retval[directive] = None
if typ:
try:
retval[directive] = typ(parts[1].strip())
except IndexError:
if required:
logger.debug(
"Missing value for cache-control " "directive: %s",
directive,
)
except ValueError:
logger.debug(
"Invalid value for cache-control directive " "%s, must be %s",
directive,
typ.__name__,
)
return retval
def _load_from_cache(self, request: PreparedRequest) -> HTTPResponse | None:
"""
Load a cached response, or return None if it's not available.
"""
# We do not support caching of partial content: so if the request contains a
# Range header then we don't want to load anything from the cache.
if "Range" in request.headers:
return None
cache_url = request.url
assert cache_url is not None
cache_data = self.cache.get(cache_url)
if cache_data is None:
logger.debug("No cache entry available")
return None
if isinstance(self.cache, SeparateBodyBaseCache):
body_file = self.cache.get_body(cache_url)
else:
body_file = None
result = self.serializer.loads(request, cache_data, body_file)
if result is None:
logger.warning("Cache entry deserialization failed, entry ignored")
return result
def cached_request(self, request: PreparedRequest) -> HTTPResponse | Literal[False]:
"""
Return a cached response if it exists in the cache, otherwise
return False.
"""
assert request.url is not None
cache_url = self.cache_url(request.url)
logger.debug('Looking up "%s" in the cache', cache_url)
cc = self.parse_cache_control(request.headers)
# Bail out if the request insists on fresh data
if "no-cache" in cc:
logger.debug('Request header has "no-cache", cache bypassed')
return False
if "max-age" in cc and cc["max-age"] == 0:
logger.debug('Request header has "max_age" as 0, cache bypassed')
return False
# Check whether we can load the response from the cache:
resp = self._load_from_cache(request)
if not resp:
return False
# If we have a cached permanent redirect, return it immediately. We
# don't need to test our response for other headers b/c it is
# intrinsically "cacheable" as it is Permanent.
#
# See:
# https://tools.ietf.org/html/rfc7231#section-6.4.2
#
# Client can try to refresh the value by repeating the request
# with cache busting headers as usual (ie no-cache).
if int(resp.status) in PERMANENT_REDIRECT_STATUSES:
msg = (
"Returning cached permanent redirect response "
"(ignoring date and etag information)"
)
logger.debug(msg)
return resp
headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(resp.headers)
if not headers or "date" not in headers:
if "etag" not in headers:
# Without date or etag, the cached response can never be used
# and should be deleted.
logger.debug("Purging cached response: no date or etag")
self.cache.delete(cache_url)
logger.debug("Ignoring cached response: no date")
return False
now = time.time()
time_tuple = parsedate_tz(headers["date"])
assert time_tuple is not None
date = calendar.timegm(time_tuple[:6])
current_age = max(0, now - date)
logger.debug("Current age based on date: %i", current_age)
# TODO: There is an assumption that the result will be a
# urllib3 response object. This may not be best since we
# could probably avoid instantiating or constructing the
# response until we know we need it.
resp_cc = self.parse_cache_control(headers)
# determine freshness
freshness_lifetime = 0
# Check the max-age pragma in the cache control header
max_age = resp_cc.get("max-age")
if max_age is not None:
freshness_lifetime = max_age
logger.debug("Freshness lifetime from max-age: %i", freshness_lifetime)
# If there isn't a max-age, check for an expires header
elif "expires" in headers:
expires = parsedate_tz(headers["expires"])
if expires is not None:
expire_time = calendar.timegm(expires[:6]) - date
freshness_lifetime = max(0, expire_time)
logger.debug("Freshness lifetime from expires: %i", freshness_lifetime)
# Determine if we are setting freshness limit in the
# request. Note, this overrides what was in the response.
max_age = cc.get("max-age")
if max_age is not None:
freshness_lifetime = max_age
logger.debug(
"Freshness lifetime from request max-age: %i", freshness_lifetime
)
min_fresh = cc.get("min-fresh")
if min_fresh is not None:
# adjust our current age by our min fresh
current_age += min_fresh
logger.debug("Adjusted current age from min-fresh: %i", current_age)
# Return entry if it is fresh enough
if freshness_lifetime > current_age:
logger.debug('The response is "fresh", returning cached response')
logger.debug("%i > %i", freshness_lifetime, current_age)
return resp
# we're not fresh. If we don't have an Etag, clear it out
if "etag" not in headers:
logger.debug('The cached response is "stale" with no etag, purging')
self.cache.delete(cache_url)
# return the original handler
return False
def conditional_headers(self, request: PreparedRequest) -> dict[str, str]:
resp = self._load_from_cache(request)
new_headers = {}
if resp:
headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(resp.headers)
if "etag" in headers:
new_headers["If-None-Match"] = headers["ETag"]
if "last-modified" in headers:
new_headers["If-Modified-Since"] = headers["Last-Modified"]
return new_headers
def _cache_set(
self,
cache_url: str,
request: PreparedRequest,
response: HTTPResponse,
body: bytes | None = None,
expires_time: int | None = None,
) -> None:
"""
Store the data in the cache.
"""
if isinstance(self.cache, SeparateBodyBaseCache):
# We pass in the body separately; just put a placeholder empty
# string in the metadata.
self.cache.set(
cache_url,
self.serializer.dumps(request, response, b""),
expires=expires_time,
)
# body is None can happen when, for example, we're only updating
# headers, as is the case in update_cached_response().
if body is not None:
self.cache.set_body(cache_url, body)
else:
self.cache.set(
cache_url,
self.serializer.dumps(request, response, body),
expires=expires_time,
)
def cache_response(
self,
request: PreparedRequest,
response_or_ref: HTTPResponse | weakref.ReferenceType[HTTPResponse],
body: bytes | None = None,
status_codes: Collection[int] | None = None,
) -> None:
"""
Algorithm for caching requests.
This assumes a requests Response object.
"""
if isinstance(response_or_ref, weakref.ReferenceType):
response = response_or_ref()
if response is None:
# The weakref can be None only in case the user used streamed request
# and did not consume or close it, and holds no reference to requests.Response.
# In such case, we don't want to cache the response.
return
else:
response = response_or_ref
# From httplib2: Don't cache 206's since we aren't going to
# handle byte range requests
cacheable_status_codes = status_codes or self.cacheable_status_codes
if response.status not in cacheable_status_codes:
logger.debug(
"Status code %s not in %s", response.status, cacheable_status_codes
)
return
response_headers: CaseInsensitiveDict[str] = CaseInsensitiveDict(
response.headers
)
if "date" in response_headers:
time_tuple = parsedate_tz(response_headers["date"])
assert time_tuple is not None
date = calendar.timegm(time_tuple[:6])
else:
date = 0
# If we've been given a body, our response has a Content-Length, that
# Content-Length is valid then we can check to see if the body we've
# been given matches the expected size, and if it doesn't we'll just
# skip trying to cache it.
if (
body is not None
and "content-length" in response_headers
and response_headers["content-length"].isdigit()
and int(response_headers["content-length"]) != len(body)
):
return
cc_req = self.parse_cache_control(request.headers)
cc = self.parse_cache_control(response_headers)
assert request.url is not None
cache_url = self.cache_url(request.url)
logger.debug('Updating cache with response from "%s"', cache_url)
# Delete it from the cache if we happen to have it stored there
no_store = False
if "no-store" in cc:
no_store = True
logger.debug('Response header has "no-store"')
if "no-store" in cc_req:
no_store = True
logger.debug('Request header has "no-store"')
if no_store and self.cache.get(cache_url):
logger.debug('Purging existing cache entry to honor "no-store"')
self.cache.delete(cache_url)
if no_store:
return
# https://tools.ietf.org/html/rfc7234#section-4.1:
# A Vary header field-value of "*" always fails to match.
# Storing such a response leads to a deserialization warning
# during cache lookup and is not allowed to ever be served,
# so storing it can be avoided.
if "*" in response_headers.get("vary", ""):
logger.debug('Response header has "Vary: *"')
return
# If we've been given an etag, then keep the response
if self.cache_etags and "etag" in response_headers:
expires_time = 0
if response_headers.get("expires"):
expires = parsedate_tz(response_headers["expires"])
if expires is not None:
expires_time = calendar.timegm(expires[:6]) - date
expires_time = max(expires_time, 14 * 86400)
logger.debug(f"etag object cached for {expires_time} seconds")
logger.debug("Caching due to etag")
self._cache_set(cache_url, request, response, body, expires_time)
# Add to the cache any permanent redirects. We do this before looking
# that the Date headers.
elif int(response.status) in PERMANENT_REDIRECT_STATUSES:
logger.debug("Caching permanent redirect")
self._cache_set(cache_url, request, response, b"")
# Add to the cache if the response headers demand it. If there
# is no date header then we can't do anything about expiring
# the cache.
elif "date" in response_headers:
time_tuple = parsedate_tz(response_headers["date"])
assert time_tuple is not None
date = calendar.timegm(time_tuple[:6])
# cache when there is a max-age > 0
max_age = cc.get("max-age")
if max_age is not None and max_age > 0:
logger.debug("Caching b/c date exists and max-age > 0")
expires_time = max_age
self._cache_set(
cache_url,
request,
response,
body,
expires_time,
)
# If the request can expire, it means we should cache it
# in the meantime.
elif "expires" in response_headers:
if response_headers["expires"]:
expires = parsedate_tz(response_headers["expires"])
if expires is not None:
expires_time = calendar.timegm(expires[:6]) - date
else:
expires_time = None
logger.debug(
"Caching b/c of expires header. expires in {} seconds".format(
expires_time
)
)
self._cache_set(
cache_url,
request,
response,
body,
expires_time,
)
def update_cached_response(
self, request: PreparedRequest, response: HTTPResponse
) -> HTTPResponse:
"""On a 304 we will get a new set of headers that we want to
update our cached value with, assuming we have one.
This should only ever be called when we've sent an ETag and
gotten a 304 as the response.
"""
assert request.url is not None
cache_url = self.cache_url(request.url)
cached_response = self._load_from_cache(request)
if not cached_response:
# we didn't have a cached response
return response
# Lets update our headers with the headers from the new request:
# http://tools.ietf.org/html/draft-ietf-httpbis-p4-conditional-26#section-4.1
#
# The server isn't supposed to send headers that would make
# the cached body invalid. But... just in case, we'll be sure
# to strip out ones we know that might be problmatic due to
# typical assumptions.
excluded_headers = ["content-length"]
cached_response.headers.update(
{
k: v
for k, v in response.headers.items()
if k.lower() not in excluded_headers
}
)
# we want a 200 b/c we have content via the cache
cached_response.status = 200
# update our cache
self._cache_set(cache_url, request, cached_response)
return cached_response
| CacheController |
python | kamyu104__LeetCode-Solutions | Python/maximum-genetic-difference-query.py | {
"start": 930,
"end": 2209
} | class ____(object):
def maxGeneticDifference(self, parents, queries):
"""
:type parents: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
def iter_dfs(adj, qs, trie, result):
stk = [(1, adj[-1][0])]
while stk:
step, node = stk.pop()
if step == 1:
trie.insert(node, 1)
for i, val in qs[node]:
result[i] = trie.query(val)
stk.append((2, node))
for child in reversed(adj[node]):
stk.append((1, child))
elif step == 2:
trie.insert(node, -1)
adj = collections.defaultdict(list)
for node, parent in enumerate(parents):
adj[parent].append(node)
qs = collections.defaultdict(list)
max_val = len(parents)-1
for i, (node, val) in enumerate(queries):
qs[node].append((i, val))
max_val = max(max_val, val)
result = [0]*len(queries)
iter_dfs(adj, qs, Trie(max_val.bit_length()), result)
return result
# Time: O(nlogk + mlogk), k is max(max(vals), n-1)
# Space: O(n + logk)
import collections
| Solution |
python | doocs__leetcode | solution/1300-1399/1367.Linked List in Binary Tree/Solution.py | {
"start": 343,
"end": 888
} | class ____:
def isSubPath(self, head: Optional[ListNode], root: Optional[TreeNode]) -> bool:
def dfs(head, root):
if head is None:
return True
if root is None or root.val != head.val:
return False
return dfs(head.next, root.left) or dfs(head.next, root.right)
if root is None:
return False
return (
dfs(head, root)
or self.isSubPath(head, root.left)
or self.isSubPath(head, root.right)
)
| Solution |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pycodestyle/E731.py | {
"start": 2250,
"end": 3127
} | class ____:
# OK
filter: Callable[[str], bool] = lambda _: True
# Regression tests for:
# * https://github.com/astral-sh/ruff/issues/7720
x = lambda: """
a
b
"""
# * https://github.com/astral-sh/ruff/issues/10277
at_least_one_million = lambda _: _ >= 1_000_000
x = lambda: (
# comment
5 + 10
)
x = lambda: (
# comment
y := 10
)
# https://github.com/astral-sh/ruff/issues/18475
foo_tooltip = (
lambda x, data: f"\nfoo: {data['foo'][int(x)]}"
if data["foo"] is not None
else ""
)
foo_tooltip = (
lambda x, data: f"\nfoo: {data['foo'][int(x)]}" +
more
)
# https://github.com/astral-sh/ruff/issues/20097
def scope():
from collections.abc import Callable
from typing import ParamSpec
P = ParamSpec("P")
f1: Callable[P, str] = lambda x: str(x)
f2: Callable[..., str] = lambda x: str(x)
| FilterDataclass |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 24926,
"end": 26657
} | class ____(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
def do(self, a, b, tags):
c = asarray(a) # a might be a matrix
if 'size-0' in tags:
assert_raises(LinAlgError, linalg.cond, c)
return
# +-2 norms
s = linalg.svd(c, compute_uv=False)
assert_almost_equal(
linalg.cond(a), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 2), s[..., 0] / s[..., -1],
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -2), s[..., -1] / s[..., 0],
single_decimal=5, double_decimal=11)
# Other norms
cinv = np.linalg.inv(c)
assert_almost_equal(
linalg.cond(a, 1),
abs(c).sum(-2).max(-1) * abs(cinv).sum(-2).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -1),
abs(c).sum(-2).min(-1) * abs(cinv).sum(-2).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, np.inf),
abs(c).sum(-1).max(-1) * abs(cinv).sum(-1).max(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, -np.inf),
abs(c).sum(-1).min(-1) * abs(cinv).sum(-1).min(-1),
single_decimal=5, double_decimal=11)
assert_almost_equal(
linalg.cond(a, 'fro'),
np.sqrt((abs(c)**2).sum(-1).sum(-1)
* (abs(cinv)**2).sum(-1).sum(-1)),
single_decimal=5, double_decimal=11)
| CondCases |
python | getsentry__sentry | tests/sentry/auth/authenticators/test_sms.py | {
"start": 408,
"end": 4997
} | class ____(TestCase):
def setUp(self) -> None:
self.user = self.create_user(email="test@example.com", is_superuser=False)
@responses.activate
def test_activate(self) -> None:
request = HttpRequest()
request.user = self.user
request.META["REMOTE_ADDR"] = "127.0.0.1"
responses.add(
responses.POST,
"https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages.json",
json={
"account_sid": "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"api_version": "2010-04-01",
"body": "Hi there!",
"date_created": "Thu, 30 Jul 2015 20:12:31 +0000",
"date_sent": "Thu, 30 Jul 2015 20:12:33 +0000",
"date_updated": "Thu, 30 Jul 2015 20:12:33 +0000",
"direction": "outbound-api",
"error_code": None,
"error_message": None,
"from": "+15551231234",
"messaging_service_sid": None,
"num_media": "0",
"num_segments": "1",
"price": None,
"price_unit": None,
"sid": "SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"status": "sent",
"subresource_uris": {
"media": "/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages/SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Media.json"
},
"to": "+12345678901",
"uri": "/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages/SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json",
},
)
interface = SmsInterface()
interface.phone_number = "2345678901"
with self.options({"sms.twilio-account": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}):
rv = interface.activate(request)
assert (
rv.message
== "A confirmation code was sent to <strong>(***) ***-**01</strong>. It is valid for 45 seconds."
)
@responses.activate
def test_ratelimit_exception(self) -> None:
request = HttpRequest()
request.user = self.user
request.META["REMOTE_ADDR"] = "127.0.0.1"
responses.add(
responses.POST,
"https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages.json",
json={
"account_sid": "ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"api_version": "2010-04-01",
"body": "Hi there!",
"date_created": "Thu, 30 Jul 2015 20:12:31 +0000",
"date_sent": "Thu, 30 Jul 2015 20:12:33 +0000",
"date_updated": "Thu, 30 Jul 2015 20:12:33 +0000",
"direction": "outbound-api",
"error_code": None,
"error_message": None,
"from": "+15551231234",
"messaging_service_sid": None,
"num_media": "0",
"num_segments": "1",
"price": None,
"price_unit": None,
"sid": "SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX",
"status": "sent",
"subresource_uris": {
"media": "/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages/SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Media.json"
},
"to": "+12345678901",
"uri": "/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Messages/SMXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX.json",
},
)
interface = SmsInterface()
interface.phone_number = "2345678901"
with freeze_time(datetime.datetime.now()):
with self.options({"sms.twilio-account": "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"}):
with pytest.raises(SMSRateLimitExceeded):
for _ in range(4):
rv = interface.activate(request)
interface.phone_number = "2345678900"
rv = interface.activate(request)
assert (
rv.message
== "A confirmation code was sent to <strong>(***) ***-**00</strong>. It is valid for 45 seconds."
)
def test_invalid_phone_number(self) -> None:
with pytest.raises(InvalidPhoneNumber):
phone_number_as_e164("+15555555555")
def test_valid_phone_number(self) -> None:
formatted_number = phone_number_as_e164("2345678900")
assert "+12345678900" == formatted_number
| SmsInterfaceTest |
python | google__jax | tests/mock_gpu_test.py | {
"start": 1006,
"end": 2314
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["gpu"]):
self.skipTest("Mocking devices only works on the GPU backend.")
@jtu.skip_under_pytest("Test must run in an isolated process")
def testMockDeviceCount(self):
self.assertEqual(jax.device_count(), jax.local_device_count() * NUM_SHARDS)
@jtu.skip_under_pytest("Test must run in an isolated process")
def testMockWithSharding(self):
mesh = jax.sharding.Mesh(jax.devices(), ('x',))
@partial(
jax.jit,
in_shardings=NamedSharding(mesh, P('x',)),
out_shardings=NamedSharding(mesh, P('x',)),
)
def f(x, y):
z = x @ y
return z @ y
shape = (1024, 1024)
x = jnp.arange(math.prod(shape)).reshape(shape).astype(np.float32)
y = x + 1
f_lowered = f.lower(x, y)
hlo = f_lowered.compiler_ir()
mocked_count = NUM_SHARDS * jax.local_device_count()
if config.use_shardy_partitioner.value:
self.assertIn(
'sharding = #sdy.sharding<@mesh, [{"x"}, {}]>}',
str(hlo)
)
else:
self.assertIn(
f'sharding = "{{devices=[{mocked_count},1]<=[{mocked_count}]}}"',
str(hlo)
)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| MockGPUTest |
python | pyodide__pyodide | tools/backport.py | {
"start": 3793,
"end": 4507
} | class ____:
"""A changelog entry, represented as a list of strings.
An entry is started by a line beginning with `-`. It ends when there is a
line starting with `##` (begins a new version) `###` (begins a new
subsection), a blank line (begins a new paragraph) or `-` (begins a new
entry).
This is nearly the same thing as its content.
"""
content: list[str] = field(default_factory=list)
def get_text(self) -> str:
if self.content:
return "\n".join(self.content) + "\n"
return ""
def __bool__(self) -> bool:
return bool(self.content)
def append(self, line: str) -> None:
self.content.append(line)
@dataclass
| ChangelogEntry |
python | huggingface__transformers | tests/models/falcon_mamba/test_modeling_falcon_mamba.py | {
"start": 9463,
"end": 16631
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (FalconMambaModel, FalconMambaForCausalLM) if is_torch_available() else ()
has_attentions = False # FalconMamba does not support attentions
test_missing_keys = False
pipeline_model_mapping = (
{"feature-extraction": FalconMambaModel, "text-generation": FalconMambaForCausalLM}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = FalconMambaModelTester(self)
self.config_tester = ConfigTester(
self, config_class=FalconMambaConfig, n_embd=37, common_properties=["hidden_size", "num_hidden_layers"]
)
def _check_past_key_values_for_generate(self, batch_size, past_key_values, seq_length, config):
self.assertIsInstance(past_key_values, FalconMambaCache)
conv_shape = (batch_size, config.intermediate_size, config.conv_kernel)
ssm_shape = (batch_size, config.intermediate_size, config.state_size)
self.assertTrue(config.num_hidden_layers, len(past_key_values.conv_states))
for idx in range(len(past_key_values.conv_states)):
self.assertEqual(past_key_values.conv_states[idx].shape, conv_shape)
self.assertEqual(past_key_values.ssm_states[idx].shape, ssm_shape)
def assertInterval(self, member, container, msg=None):
r"""
Simple utility function to check if a member is inside an interval.
"""
if isinstance(member, torch.Tensor):
max_value, min_value = member.max().item(), member.min().item()
elif isinstance(member, (list, tuple)):
max_value, min_value = max(member), min(member)
if not isinstance(container, list):
raise TypeError("container should be a list or tuple")
elif len(container) != 2:
raise ValueError("container should have 2 elements")
expected_min, expected_max = container
is_inside_interval = (min_value >= expected_min) and (max_value <= expected_max)
if not is_inside_interval:
standardMsg = f"{safe_repr(member)} not found in {safe_repr(container)}"
self.fail(self._formatMessage(msg, standardMsg))
def test_config(self):
self.config_tester.run_common_tests()
def test_falcon_mamba_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_falcon_mamba_model(*config_and_inputs)
def test_falcon_mamba_lm_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_causal_lm(*config_and_inputs)
def test_state_equivalency(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_state_equivalency(*config_and_inputs)
def test_falcon_mamba_cached_slow_forward_and_backwards(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_falcon_mamba_cached_slow_forward_and_backwards(*config_and_inputs)
def test_falcon_mamba_lm_head_forward_and_backwards(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_falcon_mamba_lm_head_forward_and_backwards(*config_and_inputs)
@slow
# Ignore copy
def test_model_from_pretrained(self):
model = FalconMambaModel.from_pretrained("tiiuae/falcon-mamba-7b", dtype=torch.float16)
self.assertIsNotNone(model)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, FalconMambaCache): # MODIFIED PART START
recursive_check(tuple_object.conv_states, dict_object.conv_states)
recursive_check(tuple_object.ssm_states, dict_object.ssm_states)
elif isinstance(tuple_object, (list, tuple)): # MODIFIED PART END
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif isinstance(tuple_object, dict):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values(), dict_object.values()
):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(tuple_object, dict_object, atol=1e-5),
msg=(
"Tuple and dict output are not equal. Difference:"
f" {torch.max(torch.abs(tuple_object - dict_object))}. Tuple has `nan`:"
f" {torch.isnan(tuple_object).any()} and `inf`: {torch.isinf(tuple_object)}. Dict has"
f" `nan`: {torch.isnan(dict_object).any()} and `inf`: {torch.isinf(dict_object)}."
),
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
@unittest.skip("Mamba models do not support DDP.")
def test_multi_gpu_data_parallel_forward(self):
pass
@require_torch
@require_torch_accelerator
@slow
| FalconMambaModelTest |
python | apache__airflow | providers/microsoft/azure/tests/unit/microsoft/azure/operators/test_asb.py | {
"start": 1996,
"end": 4134
} | class ____:
@pytest.mark.parametrize(
("mock_dl_msg_expiration", "mock_batched_operation"),
[
(True, True),
(True, False),
(False, True),
(False, False),
],
)
def test_init(self, mock_dl_msg_expiration, mock_batched_operation):
"""
Test init by creating AzureServiceBusCreateQueueOperator with task id,
queue_name and asserting with value
"""
asb_create_queue_operator = AzureServiceBusCreateQueueOperator(
task_id="asb_create_queue",
queue_name=QUEUE_NAME,
max_delivery_count=10,
dead_lettering_on_message_expiration=mock_dl_msg_expiration,
enable_batched_operations=mock_batched_operation,
)
assert asb_create_queue_operator.task_id == "asb_create_queue"
assert asb_create_queue_operator.queue_name == QUEUE_NAME
assert asb_create_queue_operator.max_delivery_count == 10
assert asb_create_queue_operator.dead_lettering_on_message_expiration is mock_dl_msg_expiration
assert asb_create_queue_operator.enable_batched_operations is mock_batched_operation
@mock.patch("airflow.providers.microsoft.azure.hooks.asb.AdminClientHook.get_conn")
def test_create_queue(self, mock_get_conn):
"""
Test AzureServiceBusCreateQueueOperator passed with the queue name,
mocking the connection details, hook create_queue function
"""
asb_create_queue_operator = AzureServiceBusCreateQueueOperator(
task_id="asb_create_queue_operator",
queue_name=QUEUE_NAME,
max_delivery_count=10,
dead_lettering_on_message_expiration=True,
enable_batched_operations=True,
)
asb_create_queue_operator.execute(None)
mock_get_conn.return_value.__enter__.return_value.create_queue.assert_called_once_with(
QUEUE_NAME,
max_delivery_count=10,
dead_lettering_on_message_expiration=True,
enable_batched_operations=True,
)
| TestAzureServiceBusCreateQueueOperator |
python | facebook__pyre-check | tools/generate_taint_models/get_REST_api_sources.py | {
"start": 568,
"end": 1824
} | class ____(ModelGenerator[CallableModel]):
annotations: AnnotationSpecification
whitelisted_parameters: WhitelistSpecification
def __init__(
self,
django_urls: DjangoUrls,
annotations: Optional[AnnotationSpecification] = None,
whitelisted_parameters: Optional[WhitelistSpecification] = None,
whitelisted_views: Optional[List[str]] = None,
) -> None:
self.django_urls: DjangoUrls = django_urls
self.annotations = annotations or default_entrypoint_taint
self.whitelisted_parameters = whitelisted_parameters or WhitelistSpecification(
parameter_name={"self"}, parameter_type={"HttpRequest"}
)
self.whitelisted_views: List[str] = whitelisted_views or []
def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:
return get_all_views(self.django_urls)
def compute_models(
self, functions_to_model: Iterable[Callable[..., object]]
) -> Iterable[CallableModel]:
return taint_callable_functions(
functions_to_model,
whitelisted_views=self.whitelisted_views,
annotations=self.annotations,
whitelist=self.whitelisted_parameters,
)
| RESTApiSourceGenerator |
python | PyCQA__pylint | tests/functional/r/redefined/redefined_slots.py | {
"start": 601,
"end": 698
} | class ____:
"""Class defining the `i`, `j` & `k` slots"""
__slots__ = ("i", "j", "k")
| Base2 |
python | pytest-dev__pytest | testing/io/test_pprint.py | {
"start": 418,
"end": 462
} | class ____:
pass
@dataclass
| EmptyDataclass |
python | redis__redis-py | tests/test_connect.py | {
"start": 3099,
"end": 4820
} | class ____ override the initialization of the socket_timeout parameter.
"""
def test_unix_socket_with_timeout():
conn = UnixDomainSocketConnection(socket_timeout=1000)
# Check if the base class defaults were taken over.
assert conn.db == 0
# Verify if the timeout and the path is set correctly.
assert conn.socket_timeout == 1000
assert conn.path == ""
@pytest.mark.ssl
@pytest.mark.skipif(not ssl.HAS_TLSv1_3, reason="requires TLSv1.3")
def test_tcp_ssl_version_mismatch(tcp_address):
host, port = tcp_address
certfile, keyfile, _ = get_tls_certificates(cert_type=CertificateType.server)
conn = SSLConnection(
host=host,
port=port,
client_name=_CLIENT_NAME,
ssl_ca_certs=certfile,
socket_timeout=3,
ssl_min_version=ssl.TLSVersion.TLSv1_3,
)
with pytest.raises(RedisError):
_assert_connect(
conn,
tcp_address,
certfile=certfile,
keyfile=keyfile,
maximum_ssl_version=ssl.TLSVersion.TLSv1_2,
)
def _assert_connect(conn, server_address, **tcp_kw):
if isinstance(server_address, str):
if not _RedisUDSServer:
pytest.skip("Unix domain sockets are not supported on this platform")
server = _RedisUDSServer(server_address, _RedisRequestHandler)
else:
server = _RedisTCPServer(server_address, _RedisRequestHandler, **tcp_kw)
with server as aserver:
t = threading.Thread(target=aserver.serve_forever)
t.start()
try:
aserver.wait_online()
conn.connect()
conn.disconnect()
finally:
aserver.stop()
t.join(timeout=5)
| did |
python | weaviate__weaviate-python-client | weaviate/collections/classes/grpc.py | {
"start": 11896,
"end": 12472
} | class ____: # can't be a Pydantic model because of validation issues parsing numpy, pd, pl arrays/series
vector: NearVectorInputType
distance: Optional[float]
certainty: Optional[float]
def __init__(
self,
*,
vector: NearVectorInputType,
distance: Optional[float] = None,
certainty: Optional[float] = None,
) -> None:
self.vector = vector
self.distance = distance
self.certainty = certainty
HybridVectorType = Union[NearVectorInputType, _HybridNearText, _HybridNearVector]
| _HybridNearVector |
python | google__python-fire | fire/test_components_py3.py | {
"start": 1039,
"end": 1222
} | class ____:
def double(self, *, count):
return count * 2
def triple(self, *, count):
return count * 3
def with_default(self, *, x="x"):
print("x: " + x)
| KeywordOnly |
python | scrapy__scrapy | tests/test_pipelines.py | {
"start": 2203,
"end": 2404
} | class ____(Spider):
name = "itemspider"
async def start(self):
yield Request(self.mockserver.url("/status?n=200"))
def parse(self, response):
return {"field": 42}
| ItemSpider |
python | huggingface__transformers | tests/utils/test_core_model_loading.py | {
"start": 6871,
"end": 7032
} | class ____(nn.Module):
def __init__(self):
super().__init__()
self.self_attn = DummySelfAttn()
self.experts = DummyExperts()
| DummyLayer |
python | huggingface__transformers | src/transformers/models/recurrent_gemma/modeling_recurrent_gemma.py | {
"start": 19954,
"end": 22706
} | class ____(nn.Module):
"""Griffin and Hawk's recurrent block."""
def __init__(self, config):
super().__init__()
self.lru_width = config.lru_width
self.hidden_size = config.hidden_size
self.linear_y = nn.Linear(in_features=config.hidden_size, out_features=config.lru_width)
self.linear_x = nn.Linear(in_features=config.hidden_size, out_features=config.lru_width)
self.linear_out = nn.Linear(in_features=config.lru_width, out_features=config.hidden_size)
self.conv1d_width = config.conv1d_width
self.conv_1d = nn.Conv1d(
config.lru_width,
config.lru_width,
kernel_size=config.conv1d_width,
groups=config.lru_width,
padding=config.conv1d_width - 1,
)
self.rg_lru = RecurrentGemmaRglru(config)
self.act_fn = ACT2FN[config.hidden_activation]
self.conv1d_state = None
def forward(
self,
input_states: torch.Tensor,
position_ids: torch.Tensor,
attention_mask: torch.Tensor,
cache_position: torch.Tensor,
use_cache: bool = True,
) -> tuple[torch.Tensor, dict[str, torch.Tensor]]:
_, seq_len, _ = input_states.shape
y_branch = self.linear_y(input_states)
y_branch = self.act_fn(y_branch)
x_branch = self.linear_x(input_states)
x_branch = x_branch.transpose(1, 2)
if use_cache:
if cache_position.shape[0] != 1: # prefill
self.conv1d_state = nn.functional.pad(x_branch, (self.conv1d_width - x_branch.shape[-1] - 1, 0))
x_branch = self.conv_1d(x_branch)[..., :seq_len]
else: # decoding
conv_state = torch.cat((self.conv1d_state, x_branch), -1)
x_branch = torch.sum(conv_state * self.conv_1d.weight[:, 0, :], dim=-1) + self.conv_1d.bias
x_branch = x_branch.unsqueeze(-1)
self.conv1d_state = conv_state[:, :, 1:]
else:
x_branch = self.conv_1d(x_branch)[..., :seq_len]
x_branch = self.rg_lru(x_branch.transpose(1, 2), position_ids)
hidden_states = x_branch * y_branch
hidden_states = self.linear_out(hidden_states)
return hidden_states
def _setup_cache(self, batch, device, dtype):
# recurrent_states always computed in full precision
self.rg_lru.recurrent_states = torch.zeros((batch, self.lru_width), device=device, dtype=torch.float32)
self.conv1d_state = torch.zeros((batch, self.hidden_size, self.conv1d_width - 1), device=device, dtype=dtype)
TEMPORAL_BLOCK_CLASSES = {"recurrent": RecurrentGemmaRecurrentBlock, "attention": RecurrentGemmaSdpaAttention}
| RecurrentGemmaRecurrentBlock |
python | scrapy__scrapy | scrapy/core/downloader/contextfactory.py | {
"start": 3988,
"end": 5228
} | class ____(ScrapyClientContextFactory):
"""
Twisted-recommended context factory for web clients.
Quoting the documentation of the :class:`~twisted.web.client.Agent` class:
The default is to use a
:class:`~twisted.web.client.BrowserLikePolicyForHTTPS`, so unless you
have special requirements you can leave this as-is.
:meth:`creatorForNetloc` is the same as
:class:`~twisted.web.client.BrowserLikePolicyForHTTPS` except this context
factory allows setting the TLS/SSL method to use.
The default OpenSSL method is ``TLS_METHOD`` (also called
``SSLv23_METHOD``) which allows TLS protocol negotiation.
"""
def creatorForNetloc(self, hostname: bytes, port: int) -> ClientTLSOptions:
# trustRoot set to platformTrust() will use the platform's root CAs.
#
# This means that a website like https://www.cacert.org will be rejected
# by default, since CAcert.org CA certificate is seldom shipped.
return optionsForClientTLS(
hostname=hostname.decode("ascii"),
trustRoot=platformTrust(),
extraCertificateOptions={"method": self._ssl_method},
)
@implementer(IPolicyForHTTPS)
| BrowserLikeContextFactory |
python | openai__openai-python | src/openai/types/chat/chat_completion_custom_tool_param.py | {
"start": 1412,
"end": 1638
} | class ____(TypedDict, total=False):
custom: Required[Custom]
"""Properties of the custom tool."""
type: Required[Literal["custom"]]
"""The type of the custom tool. Always `custom`."""
| ChatCompletionCustomToolParam |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 60748,
"end": 63035
} | class ____(BaseModel):
"""
TaskInstance serializer for responses.
"""
id: Annotated[str, Field(title="Id")]
task_id: Annotated[str, Field(title="Task Id")]
dag_id: Annotated[str, Field(title="Dag Id")]
dag_run_id: Annotated[str, Field(title="Dag Run Id")]
map_index: Annotated[int, Field(title="Map Index")]
logical_date: Annotated[datetime | None, Field(title="Logical Date")] = None
run_after: Annotated[datetime, Field(title="Run After")]
start_date: Annotated[datetime | None, Field(title="Start Date")] = None
end_date: Annotated[datetime | None, Field(title="End Date")] = None
duration: Annotated[float | None, Field(title="Duration")] = None
state: TaskInstanceState | None = None
try_number: Annotated[int, Field(title="Try Number")]
max_tries: Annotated[int, Field(title="Max Tries")]
task_display_name: Annotated[str, Field(title="Task Display Name")]
dag_display_name: Annotated[str, Field(title="Dag Display Name")]
hostname: Annotated[str | None, Field(title="Hostname")] = None
unixname: Annotated[str | None, Field(title="Unixname")] = None
pool: Annotated[str, Field(title="Pool")]
pool_slots: Annotated[int, Field(title="Pool Slots")]
queue: Annotated[str | None, Field(title="Queue")] = None
priority_weight: Annotated[int | None, Field(title="Priority Weight")] = None
operator: Annotated[str | None, Field(title="Operator")] = None
operator_name: Annotated[str | None, Field(title="Operator Name")] = None
queued_when: Annotated[datetime | None, Field(title="Queued When")] = None
scheduled_when: Annotated[datetime | None, Field(title="Scheduled When")] = None
pid: Annotated[int | None, Field(title="Pid")] = None
executor: Annotated[str | None, Field(title="Executor")] = None
executor_config: Annotated[str, Field(title="Executor Config")]
note: Annotated[str | None, Field(title="Note")] = None
rendered_map_index: Annotated[str | None, Field(title="Rendered Map Index")] = None
rendered_fields: Annotated[dict[str, Any] | None, Field(title="Rendered Fields")] = None
trigger: TriggerResponse | None = None
triggerer_job: JobResponse | None = None
dag_version: DagVersionResponse | None = None
| TaskInstanceResponse |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_http_method.py | {
"start": 917,
"end": 1918
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_http_method"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_http_method(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidHttpMethod |
python | encode__django-rest-framework | rest_framework/utils/serializer_helpers.py | {
"start": 4943,
"end": 5769
} | class ____(MutableMapping):
"""
This dict-like object is used to store fields on a serializer.
This ensures that whenever fields are added to the serializer we call
`field.bind()` so that the `field_name` and `parent` attributes
can be set correctly.
"""
def __init__(self, serializer):
self.serializer = serializer
self.fields = {}
def __setitem__(self, key, field):
self.fields[key] = field
field.bind(field_name=key, parent=self.serializer)
def __getitem__(self, key):
return self.fields[key]
def __delitem__(self, key):
del self.fields[key]
def __iter__(self):
return iter(self.fields)
def __len__(self):
return len(self.fields)
def __repr__(self):
return dict.__repr__(self.fields)
| BindingDict |
python | TheAlgorithms__Python | ciphers/bifid.py | {
"start": 420,
"end": 3584
} | class ____:
def __init__(self) -> None:
self.SQUARE = np.array(SQUARE)
def letter_to_numbers(self, letter: str) -> np.ndarray:
"""
Return the pair of numbers that represents the given letter in the
polybius square
>>> np.array_equal(BifidCipher().letter_to_numbers('a'), [1,1])
True
>>> np.array_equal(BifidCipher().letter_to_numbers('u'), [4,5])
True
"""
index1, index2 = np.where(letter == self.SQUARE)
indexes = np.concatenate([index1 + 1, index2 + 1])
return indexes
def numbers_to_letter(self, index1: int, index2: int) -> str:
"""
Return the letter corresponding to the position [index1, index2] in
the polybius square
>>> BifidCipher().numbers_to_letter(4, 5) == "u"
True
>>> BifidCipher().numbers_to_letter(1, 1) == "a"
True
"""
letter = self.SQUARE[index1 - 1, index2 - 1]
return letter
def encode(self, message: str) -> str:
"""
Return the encoded version of message according to the polybius cipher
>>> BifidCipher().encode('testmessage') == 'qtltbdxrxlk'
True
>>> BifidCipher().encode('Test Message') == 'qtltbdxrxlk'
True
>>> BifidCipher().encode('test j') == BifidCipher().encode('test i')
True
"""
message = message.lower()
message = message.replace(" ", "")
message = message.replace("j", "i")
first_step = np.empty((2, len(message)))
for letter_index in range(len(message)):
numbers = self.letter_to_numbers(message[letter_index])
first_step[0, letter_index] = numbers[0]
first_step[1, letter_index] = numbers[1]
second_step = first_step.reshape(2 * len(message))
encoded_message = ""
for numbers_index in range(len(message)):
index1 = int(second_step[numbers_index * 2])
index2 = int(second_step[(numbers_index * 2) + 1])
letter = self.numbers_to_letter(index1, index2)
encoded_message = encoded_message + letter
return encoded_message
def decode(self, message: str) -> str:
"""
Return the decoded version of message according to the polybius cipher
>>> BifidCipher().decode('qtltbdxrxlk') == 'testmessage'
True
"""
message = message.lower()
message.replace(" ", "")
first_step = np.empty(2 * len(message))
for letter_index in range(len(message)):
numbers = self.letter_to_numbers(message[letter_index])
first_step[letter_index * 2] = numbers[0]
first_step[letter_index * 2 + 1] = numbers[1]
second_step = first_step.reshape((2, len(message)))
decoded_message = ""
for numbers_index in range(len(message)):
index1 = int(second_step[0, numbers_index])
index2 = int(second_step[1, numbers_index])
letter = self.numbers_to_letter(index1, index2)
decoded_message = decoded_message + letter
return decoded_message
| BifidCipher |
python | keras-team__keras | keras/src/ops/numpy.py | {
"start": 215633,
"end": 216700
} | class ____(Operation):
def call(self, x1, x2):
return backend.numpy.multiply(x1, x2)
def compute_output_spec(self, x1, x2):
x1_shape = getattr(x1, "shape", [])
x2_shape = getattr(x2, "shape", [])
output_shape = broadcast_shapes(x1_shape, x2_shape)
x1_sparse = getattr(x1, "sparse", True)
x2_sparse = getattr(x2, "sparse", True)
output_sparse = x1_sparse or x2_sparse
dtype = dtypes.result_type(
getattr(x1, "dtype", type(x1)),
getattr(x2, "dtype", type(x2)),
)
return KerasTensor(output_shape, dtype=dtype, sparse=output_sparse)
@keras_export(["keras.ops.multiply", "keras.ops.numpy.multiply"])
def multiply(x1, x2):
"""Multiply arguments element-wise.
Args:
x1: First input tensor.
x2: Second input tensor.
Returns:
Output tensor, element-wise product of `x1` and `x2`.
"""
if any_symbolic_tensors((x1, x2)):
return Multiply().symbolic_call(x1, x2)
return backend.numpy.multiply(x1, x2)
| Multiply |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 32236,
"end": 32529
} | class ____(
MixinNoReferrerWhenDowngrade, TestRefererMiddleware
):
settings = {
"REFERRER_POLICY": "scrapy.spidermiddlewares.referer.OriginWhenCrossOriginPolicy"
}
resp_headers = {"Referrer-Policy": POLICY_NO_REFERRER_WHEN_DOWNGRADE.title()}
| TestPolicyHeaderPrecedence003 |
python | gevent__gevent | src/greentest/3.13/test_queue.py | {
"start": 22224,
"end": 22294
} | class ____(QueueTest, unittest.TestCase):
queue = c_queue
| CQueueTest |
python | google__jax | tests/pallas/tpu_pallas_test.py | {
"start": 61315,
"end": 88384
} | class ____(PallasBaseTest):
@parameterized.parameters([
dict(shape=shape, dty=dty)
for shape, dty in itertools.product(
[(4, 2, 9), (1, 1025), (1024, 1024)], [jnp.float32, jnp.int32]
)
])
def test_double_replicated_reduction(self, shape, dty):
def body(o_ref):
x = jnp.full(shape, 2.0, dtype=dty)
reduction = jnp.sum(x, axis=None)
bcast = jnp.full((vregs_in_block * 1024,), reduction)
o_ref[:] = bcast
vregs_in_block = 2
total_vregs = 4
data_size = total_vregs * 1024
block_size = vregs_in_block * 1024
@jax.jit
def reduce():
return self.pallas_call(
body,
out_shape=jax.ShapeDtypeStruct((data_size,), dty),
in_specs=[],
out_specs=pl.BlockSpec((block_size,), lambda i: i),
grid= data_size // block_size,
)()
x = jnp.full(shape, 2.0, dtype=dty)
z = jax.block_until_ready(reduce())
reduce_value = jnp.sum(jnp.full(shape, x), dtype=dty)
np.testing.assert_allclose(z, reduce_value)
if not jtu.if_cloud_tpu_at_least(2025, 10, 12):
self.skipTest(
'New CompilerParams shape_invariant_numerics was added on Oct 12,'
' 2025'
)
@jax.jit
def reduce_with_shape_invariant_numerics():
return self.pallas_call(
body,
out_shape=jax.ShapeDtypeStruct((data_size,), dty),
in_specs=[],
out_specs=pl.BlockSpec((block_size,), lambda i: i),
grid=data_size // block_size,
compiler_params=pltpu.CompilerParams(shape_invariant_numerics=True),
)()
np.testing.assert_allclose(
jax.block_until_ready(reduce_with_shape_invariant_numerics()),
reduce_value,
)
def test_scalar_any_input(self):
if not jtu.is_device_tpu_at_least(4):
self.skipTest("Needs a newer TPU")
def kernel(src, dst, sem):
pltpu.async_copy(src, dst, sem).wait()
def run(src):
return pl.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct(src.shape, jnp.float32),
in_specs=[pl.BlockSpec(memory_space=pltpu.ANY)],
scratch_shapes=[pltpu.SemaphoreType.DMA],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
)(src)
x = jnp.full((1,), 3.1415, dtype=jnp.float32)
np.testing.assert_array_equal(run(x), x)
def test_sum_in_smem(self):
def kernel(x, out):
a = jnp.array(0, dtype=jnp.int32)
for i in range(4):
for j in range(4):
out[i, j] = a.astype(out.dtype)
a += x[i, j].astype(jnp.int32)
x = jnp.ones((4, 4), jnp.int16)
spec = pl.BlockSpec(memory_space=pltpu.SMEM)
y = pl.pallas_call(kernel, in_specs=[spec], out_specs=spec, out_shape=x)(x)
np.testing.assert_array_equal(
y, jnp.arange(16, dtype=jnp.int32).reshape(4, 4)
)
@parameterized.parameters([
dict(
m=m,
replicated=replicated,
reduced_dims=reduced_dims,
dty=dty,
reduce_func=reduce_func,
)
for m, replicated, reduced_dims, dty, reduce_func in itertools.product(
[128, 256],
[(True, True), (False, True), (True, False)],
[(0, 1), (0,), (1,)],
[jnp.float32, jnp.int32],
[jnp.sum, jnp.max, jnp.min],
)
])
def test_replicated_broadcast_reduction(
self, m, replicated, reduced_dims, dty, reduce_func
):
# TODO(b/395579834): Remove this skip later.
if (
dty == jnp.int32
and 1 in reduced_dims
):
self.skipTest('Requires libtpu built after 2025-09-01')
if not jtu.is_device_tpu_at_least(4) and len(replicated) == 2:
self.skipTest(
'Brodcast in both sublanes and lanes not supported on this hardware'
)
in_shape = (1 if replicated[0] else m, 1 if replicated[1] else m)
red_shape = [m, m]
for d in reduced_dims:
red_shape[d] = 1
def body(x_ref, o_ref):
x = x_ref[:]
dilated_x = jnp.broadcast_to(x, (m, m))
reduced = reduce_func(dilated_x, axis=reduced_dims).reshape(red_shape)
o_ref[:] = reduced
@jax.jit
def reduce(x):
return self.pallas_call(
body,
out_shape=jax.ShapeDtypeStruct(red_shape, dty),
in_specs=[pl.BlockSpec(in_shape)],
out_specs=pl.BlockSpec(red_shape),
grid=1,
)(x)
x = jnp.full(in_shape, 2.0, dtype=dty)
y = jax.block_until_ready(reduce(x))
dilated_x = jnp.broadcast_to(x, (m, m))
expected = reduce_func(dilated_x, axis=reduced_dims).reshape(red_shape)
np.testing.assert_allclose(y, expected)
if not jtu.if_cloud_tpu_at_least(2025, 10, 12):
self.skipTest(
'New CompilerParams shape_invariant_numerics was added on Oct 12,'
' 2025'
)
@jax.jit
def reduce_with_shape_invariant_numerics(x):
return self.pallas_call(
body,
out_shape=jax.ShapeDtypeStruct(red_shape, dty),
in_specs=[pl.BlockSpec(in_shape)],
out_specs=pl.BlockSpec(red_shape),
grid=1,
compiler_params=pltpu.CompilerParams(shape_invariant_numerics=True),
)(x)
np.testing.assert_allclose(
jax.block_until_ready(reduce_with_shape_invariant_numerics(x)), expected
)
def test_cost_analysis(self):
def kernel(x, y):
y[:] = x[:]
x = jnp.arange(1024.).reshape(8, 128)
f = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
cost_estimate=pl.CostEstimate(
flops=1234, transcendentals=21, bytes_accessed=12345
),
)
analysis_result = jax.jit(f).lower(x).compile().cost_analysis()
self.assertEqual(analysis_result['flops'], 1234)
self.assertEqual(analysis_result['transcendentals'], 21)
self.assertEqual(analysis_result['bytes accessed'], 12345)
def test_cost_analysis_vmap(self):
def kernel(x, y):
y[:] = x[:]
batch_size = 3
x = jnp.arange(batch_size * 1024.).reshape(batch_size, 8, 128)
f = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
cost_estimate=pl.CostEstimate(
flops=1234, transcendentals=21, bytes_accessed=12345
),
)
f = jax.vmap(f)
analysis_result = jax.jit(f).lower(x).compile().cost_analysis()
self.assertEqual(analysis_result['flops'], batch_size * 1234)
self.assertEqual(analysis_result['transcendentals'], batch_size * 21)
self.assertEqual(analysis_result['bytes accessed'], batch_size * 12345)
def test_cost_analysis_vmap_symbolic_batch_size(self):
# When exporting a module with a symbolic batch size, the cost analysis
# should be stripped from the tpu_custom_call because we can't accurately
# scale it by the dynamic batch size.
def kernel(x, y):
y[:] = x[:]
flops = 1234
transcendentals = 21
bytes_accessed = 12345
f = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((8, 128), jnp.float32),
cost_estimate=pl.CostEstimate(
flops=flops,
transcendentals=transcendentals,
bytes_accessed=bytes_accessed,
),
)
f = jax.vmap(f)
batch_size = 3
x = jnp.arange(batch_size * 1024.0).reshape(batch_size, 8, 128)
exported_module = pl.lower_as_mlir(jax.jit(f), x, dynamic_shapes=True)
self.assertIn('tpu_custom_call', str(exported_module))
self.assertIn('cost_estimate', str(exported_module))
# The exported module string encodes " as \22.
self.assertIn(f'flops\\22: {batch_size * flops}', str(exported_module))
self.assertIn(
f'transcendentals\\22: {batch_size * transcendentals}',
str(exported_module),
)
self.assertIn(
f'bytes_accessed\\22: {batch_size * bytes_accessed}',
str(exported_module),
)
x_shape = jax.ShapeDtypeStruct(
jax.export.symbolic_shape('b, 8, 128'), jnp.float32
)
exported_module = pl.lower_as_mlir(jax.jit(f), x_shape, dynamic_shapes=True)
# Assert that the cost analysis is not present in the serialized module.
self.assertIn('tpu_custom_call', str(exported_module))
self.assertNotIn('cost_estimate', str(exported_module))
self.assertNotIn('flops', str(exported_module))
self.assertNotIn('transcendentals', str(exported_module))
self.assertNotIn('bytes_accessed', str(exported_module))
def test_vmem_limit(self):
shape = (128, 128)
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...]
x = jnp.arange(np.prod(shape), dtype=np.float32).reshape(shape)
with self.assertRaises(jax.errors.JaxRuntimeError):
self.pallas_call(
kernel,
out_shape=x,
compiler_params=pltpu.CompilerParams(vmem_limit_bytes=256),
)(x)
self.pallas_call(
kernel,
out_shape=x,
compiler_params=pltpu.CompilerParams(vmem_limit_bytes=int(2**18)),
)(x)
@parameterized.parameters([
pl.Buffered(1),
pl.Buffered(2),
])
def test_vmem_oom_error_message_basics(self, pmode: pl.Buffered):
if not jtu.if_cloud_tpu_at_least(2025, 11, 12):
self.skipTest('Support added on Nov 12, 2025')
if jtu.is_device_tpu(version=5, variant='e') or jtu.is_device_tpu(
version=6, variant='e'
):
block_shape = (4096 // pmode.buffer_count, 8192)
elif jtu.is_device_tpu(version=5, variant='p'):
block_shape = (1024, 8192)
else:
self.skipTest('Unsupported TPU variant')
grid = (2, 2)
shape = (grid[0] * block_shape[0], grid[1] * block_shape[1])
def kernel(x_ref, o_ref):
o_ref[...] = x_ref[...]
x = jnp.arange(np.prod(shape), dtype=np.float32).reshape(shape)
out_shape = jax.ShapeDtypeStruct(shape, x.dtype)
def index_map(i, j):
return (i * block_shape[0], j * block_shape[1])
spec = pl.BlockSpec(
block_shape=block_shape, index_map=index_map, pipeline_mode=pmode
)
with self.assertRaises(jax.errors.JaxRuntimeError) as cm:
self.pallas_call(
kernel,
out_shape=out_shape,
grid=grid,
in_specs=[spec],
out_specs=spec,
)(x)
error_message = str(cm.exception)
self.assertIn(
'input window allocation for operator input 0',
error_message,
)
self.assertIn(
'output window allocation for operator output 0',
error_message,
)
self.assertIn(
f'The window shape is f32[{block_shape[0]},{block_shape[1]}], while the'
f' full shape is f32[{shape[0]},{shape[1]}].',
error_message,
)
if jtu.if_cloud_tpu_at_least(2025, 11, 5):
self.assertIn(
'This allocation is single buffered.'
if pmode.buffer_count == 1
else 'This allocation has 2 buffering levels',
error_message,
)
def test_vmem_oom_error_message_dynamic_grid_scalar_prefetch_and_vmem_scratch(
self,
):
if jax.device_count() > 1:
self.skipTest("Test only works with a single device.")
if not jtu.if_cloud_tpu_at_least(2025, 10, 14):
self.skipTest('Support added on Oct 14, 2025')
def body(s_ref, x_hbm_ref, o_hbm_ref, vmem_scratch_ref):
del s_ref, vmem_scratch_ref
o_hbm_ref[...] = x_hbm_ref[...]
s = jnp.array([5.0], jnp.float32)
if jtu.is_device_tpu(version=5, variant='e') or jtu.is_device_tpu(
version=6, variant='e'
):
x_shape = (4096, 8192)
elif jtu.is_device_tpu(version=5, variant='p'):
x_shape = (1024, 8192)
else:
x_shape = (512, 8192)
scratch_shape = (x_shape[0] // 4, 8192)
x = jnp.arange(x_shape[0] * x_shape[1], dtype=jnp.float32).reshape(x_shape)
out_shape = jax.ShapeDtypeStruct(x_shape, jnp.float32)
@jax.jit
def run(num_grid, s, x):
return pl.pallas_call(
body,
out_shape=out_shape,
# use dynamic grid, scalar prefetch, and scratch input.
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=1,
grid=(num_grid,),
in_specs=[pl.BlockSpec()],
out_specs=pl.BlockSpec(),
scratch_shapes=[pltpu.VMEM(scratch_shape, jnp.float32)],
),
)(s, x)
with self.assertRaises(jax.errors.JaxRuntimeError) as cm:
run(4, s, x)
error_message = str(cm.exception)
self.assertIn(
'input window allocation for operator input 1',
error_message,
)
self.assertIn(
'output window allocation for operator output 0',
error_message,
)
def test_automatic_single_buffering(self,):
if self.INTERPRET:
self.skipTest('OOM tests need us to compile the kernels')
if not jtu.if_cloud_tpu_at_least(2025, 11, 12):
self.skipTest('Support added on Oct 14, 2025')
def body(*_):
pass # We only want to compile the kernel.
window_mib = 10
if jtu.is_device_tpu_at_least(6):
window_mib = 20
x = jax.ShapeDtypeStruct((100 * 1024 * 1024,), jnp.int8)
x_small = jax.ShapeDtypeStruct((window_mib * 1024 * 1024,), jnp.int8)
# Should recognize that the block specs only load a single window.
self.pallas_call(body, grid=(4,), out_shape=x_small).lower().compile()
# Should recognize that the block specs only load a single window, as it
# only depends on the 1-sized grid dim
self.pallas_call(
body, grid=(4, 1), out_shape=x,
out_specs=pl.BlockSpec((window_mib * 1024 * 1024,), lambda i, j: (j,))
).lower().compile()
self.pallas_call(
body, grid=(1, 4), out_shape=x,
out_specs=pl.BlockSpec((window_mib * 1024 * 1024,), lambda i, j: (i,))
).lower().compile()
# Should OOM, as now we are extracting different windows
with self.assertRaisesRegex(
jax.errors.JaxRuntimeError, '(Ran out of memory)|(exceed memory)'
):
self.pallas_call(
body, grid=(4, 1), out_shape=x,
out_specs=pl.BlockSpec((window_mib * 1024 * 1024,), lambda i, j: (j + i,))
).lower().compile()
# Explicitly setting single-buffering should fix it, though.
self.pallas_call(
body, grid=(4, 1), out_shape=x,
out_specs=pl.BlockSpec((window_mib * 1024 * 1024,),lambda i, j: (j + i,),
pipeline_mode=pl.Buffered(1))
).lower().compile()
# Add unused scalar prefetch args to make sure we don't incorrectly consider
# them to be unused grid indices.
scalar = jnp.array([0], jnp.int32)
with self.assertRaisesRegex(
jax.errors.JaxRuntimeError, '(Ran out of memory)|(exceed memory)'
):
self.pallas_call(
body,
out_shape=x,
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=2,
grid=(4, 1),
out_specs=pl.BlockSpec(
(window_mib * 1024 * 1024,),
lambda i, j, *_: (j + i,),
),
),
).lower(scalar, scalar).compile()
def test_allow_input_fusion(self):
shape = (3, 128, 128)
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...]
def f(x, y):
z = jax.numpy.add(x, y)
return self.pallas_call(
kernel,
grid=(3,),
in_specs=[pl.BlockSpec((1, 128, 128), lambda i: (i, 0, 0))],
out_specs=pl.BlockSpec((1, 128, 128), lambda i: (i, 0, 0)),
out_shape=x,
compiler_params=pltpu.CompilerParams(allow_input_fusion=[True]),
)(z)
x = jnp.arange(np.prod(shape), dtype=np.float32).reshape(shape)
y = jnp.arange(np.prod(shape), dtype=np.float32).reshape(shape)
out = f(x, y)
expected = x + y
np.testing.assert_array_equal(out, expected)
compiled = jax.jit(f).lower(x, y).compile().as_text()
assert re.search(r'fusion.*kind=kCustom.*fused_computation', compiled)
def test_set_internal_scratch_size(self):
shape = (128, 128)
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...]
x = np.arange(np.prod(shape), dtype=np.float32).reshape(shape)
requested_bytes = 128 * 4
with self.assertRaisesRegex(
Exception,
f'Requested internal scratch size {requested_bytes} needs to be at'
' least',
):
self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct(shape, jnp.float32),
compiler_params=pltpu.CompilerParams(
internal_scratch_in_bytes=requested_bytes,
),
)(x)
@parameterized.product(
dtype=[jnp.bfloat16, jnp.float32],
axis=[1, -1],
)
def test_pltpu_repeat(self, dtype, axis):
def test_kernel(x_ref, o_ref):
x = x_ref[...]
o_ref[...] = pltpu.repeat(x, 2, axis=axis)
@jax.jit
def test(x: jax.Array) -> jax.Array:
return pl.pallas_call(
test_kernel,
out_shape=jax.ShapeDtypeStruct([x.shape[0], x.shape[1] * 2], x.dtype),
)(x)
x = jnp.arange(2048, dtype=dtype).reshape((8, 256))
y = test(x)
np.testing.assert_array_equal(y, jnp.concatenate([x, x], axis=1))
def test_mixed_precision_dot(self):
if not jtu.is_device_tpu_at_least(5):
self.skipTest('float8_e4m3b11fnuz not supported on TPU generations <= 4')
if jtu.is_device_tpu(7, 'x'):
self.skipTest('float8_e4m3b11fnuz not supported on TPU v7x')
def kernel(x_ref, w_ref, o_ref):
o_ref[:] = jax.lax.dot_general(
x_ref[:],
w_ref[:],
dimension_numbers=(((1,), (0,)), ((), ())),
preferred_element_type=jnp.float32,
)
x = jnp.ones((64, 128), dtype=jnp.bfloat16)
w = jnp.full((128, 128), jnp.nan, jnp.float8_e4m3b11fnuz)
run = pl.pallas_call(kernel, jax.ShapeDtypeStruct((64, 128), jnp.float32))
run = jax.named_call(run, name='run')
run = jax.jit(run)
expected = jax.lax.dot_general(
x,
w,
dimension_numbers=(((1,), (0,)), ((), ())),
preferred_element_type=jnp.float32,
)
jax_nans = jnp.isnan(expected).sum()
mosaic_nans = jnp.isnan(run(x, w)).sum()
self.assertEqual(jax_nans, mosaic_nans)
@parameterized.product(
in_dtype=[
jnp.int8,
jnp.int16,
jnp.int32,
jnp.float8_e5m2,
jnp.float8_e4m3fn,
jnp.float8_e4m3b11fnuz,
jnp.bfloat16,
jnp.float32,
],
out_dtype=[
jnp.int8,
jnp.int16,
jnp.int32,
jnp.float32,
],
)
def test_scalar_casting(self, in_dtype, out_dtype):
def kernel(x_ref, o_ref):
o_ref[0] = x_ref[0].astype(out_dtype)
x = jnp.asarray([7], dtype=in_dtype)
if jnp.issubdtype(in_dtype, jnp.signedinteger):
x *= -1
y = pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
out_shape=jax.ShapeDtypeStruct(x.shape, out_dtype),
)(x)
self.assertEqual(y, x.astype(out_dtype))
@parameterized.product(in_dtype=[jnp.int4, jnp.int8, jnp.int16, jnp.int32])
def test_scalar_load_upcast(self, in_dtype):
if in_dtype == jnp.int4 and not jtu.is_device_tpu_at_least(4):
self.skipTest("Triggers an XLA bug") # TODO(b/413602952)
def kernel(x_ref, o_ref):
o_ref[0, 0] = x_ref[0, 0].astype(o_ref.dtype)
x = jnp.asarray([[-1]], dtype=in_dtype)
y = pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
out_shape=jax.ShapeDtypeStruct((1, 1), jnp.int32),
)(x)
self.assertEqual(y, x.astype(jnp.int32))
@parameterized.product(in_dtype=[jnp.int4, jnp.int8, jnp.int16, jnp.int32])
def test_scalar_indirect_load(self, in_dtype):
def kernel(x_ref, o_ref):
o_ref[0, 0] = x_ref[0, x_ref[0, 0].astype(jnp.int32)].astype(o_ref.dtype)
if in_dtype == jnp.int4 and not jtu.is_device_tpu_at_least(4):
self.skipTest("Triggers an XLA bug") # TODO(b/413602952)
x = jnp.asarray([[3, 0, 0, 1]], dtype=in_dtype)
y = pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
out_shape=jax.ShapeDtypeStruct((1, 1), jnp.int32),
)(x)
self.assertEqual(y, x[0, x[0, 0]].astype(jnp.int32)[None, None])
def test_masked_store(self):
shape = (16, 256)
mask_shape = (10, 130)
mask_start = (4, 5)
dtype = jnp.float32
def body(scalar_ref, x_ref, o_ref):
o_ref[...] = jnp.full(shape, -1, dtype=dtype)
b0, b1 = scalar_ref[0], scalar_ref[1]
e0, e1 = b0 + mask_shape[0], b1 + mask_shape[1]
iota0 = lax.broadcasted_iota(jnp.int32, shape, 0)
iota1 = lax.broadcasted_iota(jnp.int32, shape, 1)
mask0 = jnp.logical_and(b0 <= iota0, iota0 < e0)
mask1 = jnp.logical_and(b1 <= iota1, iota1 < e1)
pltpu.store(o_ref, x_ref[...], mask=jnp.logical_and(mask0, mask1))
s = jnp.array(mask_start, jnp.int32)
x = jnp.arange(np.prod(shape), dtype=dtype).reshape(shape)
out = pl.pallas_call(
body,
out_shape=jax.ShapeDtypeStruct(shape, dtype),
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=1,
),
)(s, x)
slices = tuple(slice(b, b + l) for b, l in zip(mask_start, mask_shape))
expected = jnp.full(shape, -1, dtype=dtype)
expected = expected.at[slices].set(x[slices])
np.testing.assert_array_equal(out, expected)
def test_custom_vjp(self):
@jax.custom_vjp
def f(x):
return jnp.tanh(x)
def f_fwd(x):
return jnp.tanh(x) * 2, ()
def f_bwd(_, g):
return (g * 2,)
f.defvjp(f_fwd, f_bwd)
def kernel(x_ref, dy_ref, y_ref, y_p_ref, dx_ref):
x = x_ref[...]
y_ref[...] = f(x)
y_p, f_vjp = jax.vjp(f, x)
y_p_ref[...] = y_p
dx_ref[...] = f_vjp(dy_ref[...])[0]
x = jax.random.normal(jax.random.key(0), (8, 128), dtype=jnp.float32)
dy = jax.random.normal(jax.random.key(1), (8, 128), dtype=jnp.float32)
y, y_p, dx = pl.pallas_call(
kernel,
out_shape=(
jax.ShapeDtypeStruct((8, 128), jnp.float32),
jax.ShapeDtypeStruct((8, 128), jnp.float32),
jax.ShapeDtypeStruct((8, 128), jnp.float32),
),
)(x, dy)
np.testing.assert_array_equal(y, f(x))
np.testing.assert_array_equal(y_p, f(x) * 2)
np.testing.assert_array_equal(dx, dy * 2)
@parameterized.parameters([
jnp.int4,
jnp.int8,
jnp.int16,
jnp.int32,
jnp.uint4,
jnp.uint8,
jnp.uint16,
jnp.uint32,
])
def test_scalar_integer_addition(self, dtype):
def kernel(x_ref, y_ref):
y_ref[0] = x_ref[0] + x_ref[0]
if not jtu.if_cloud_tpu_at_least(2025, 9, 13):
self.skipTest('Scalar integer addition support was added on Sep 13, 2025')
x = jnp.asarray([3], dtype=dtype)
if dtype in [jnp.int32, jnp.uint32]:
y = pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
out_shape=jax.ShapeDtypeStruct(x.shape, dtype),
)(x)
np.testing.assert_array_equal(y, x + x)
else:
with self.assertRaisesRegex(
error_handling.MosaicError,
'Not implemented: Only i32 addition is supported.',
):
_ = pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
out_shape=jax.ShapeDtypeStruct(x.shape, dtype),
)(x)
@parameterized.parameters([
jnp.int4,
jnp.int8,
jnp.int16,
jnp.int32,
jnp.uint4,
jnp.uint8,
jnp.uint16,
jnp.uint32,
])
def test_vector_integer_addition(self, dtype):
def kernel(x_ref, y_ref):
y_ref[...] = x_ref[...] + x_ref[...]
if not jtu.if_cloud_tpu_at_least(2025, 9, 15):
self.skipTest('Descriptive message was added on Sep 15, 2025')
x = jnp.full((128, 16), 7, dtype=dtype)
if dtype in [jnp.int32, jnp.uint32, jnp.int16, jnp.uint16]:
y = pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.VMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.VMEM),
out_shape=jax.ShapeDtypeStruct(x.shape, dtype),
)(x)
np.testing.assert_array_equal(y, x + x)
else:
with self.assertRaisesRegex(
error_handling.MosaicError,
'Not implemented: Only vector<i16> and vector<i32>'
):
_ = pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.VMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.VMEM),
out_shape=jax.ShapeDtypeStruct(x.shape, dtype),
)(x)
@parameterized.parameters([
jnp.int32,
jnp.uint32,
jnp.float32,
])
def test_max_operation(self, dtype):
def kernel(x_ref, y_ref):
y_ref[0] = jnp.maximum(x_ref[0], x_ref[1])
if not jtu.if_cloud_tpu_at_least(2025, 9, 20):
self.skipTest('Support added on Sep 20, 2025')
x = jnp.asarray([242, 87], dtype=dtype)
y = pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
out_shape=jax.ShapeDtypeStruct(x.shape, dtype),
)(x)
np.testing.assert_array_equal(y[0], jnp.maximum(x[0], x[1]))
@parameterized.parameters([
jnp.int32,
jnp.uint32,
jnp.float32,
])
def test_min_operation(self, dtype):
def kernel(x_ref, y_ref):
y_ref[0] = jnp.minimum(x_ref[0], x_ref[1])
if not jtu.if_cloud_tpu_at_least(2025, 9, 20):
self.skipTest('Support added on Sep 20, 2025')
x = jnp.asarray([242, 87], dtype=dtype)
y = pl.pallas_call(
kernel,
in_specs=[pl.BlockSpec(memory_space=pltpu.SMEM)],
out_specs=pl.BlockSpec(memory_space=pltpu.SMEM),
out_shape=jax.ShapeDtypeStruct(x.shape, dtype),
)(x)
np.testing.assert_array_equal(y[0], jnp.minimum(x[0], x[1]))
@parameterized.parameters([
jnp.int32,
jnp.uint32,
jnp.int16,
jnp.uint16,
jnp.int8,
jnp.uint8,
jnp.int4,
jnp.uint4,
jnp.float32,
jnp.bfloat16,
])
def test_bool_select_operation(self, dtype):
def kernel(condlist, choicelist, out_ref):
out_ref[...] = jnp.where(condlist[...], choicelist[...], 0)
if not jtu.if_cloud_tpu_at_least(2025, 10, 15):
self.skipTest('Support added on Oct 15, 2025')
if dtype in [jnp.int4, jnp.uint4] and not jtu.is_device_tpu_at_least(4):
self.skipTest('i4 is not supported on TPU generations <= 3')
shape = (8, 128)
condlist = jax.random.bernoulli(jax.random.key(0), 0.5, shape)
choicelist = jnp.arange(shape[0]*shape[1], dtype=dtype).reshape(shape)
z = self.pallas_call(
kernel,
out_shape=jax.ShapeDtypeStruct((shape[0],shape[1]), dtype=dtype),
)(condlist, choicelist)
np.testing.assert_array_equal(z, jnp.where(condlist, choicelist, 0))
| PallasCallTest |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_gtk4cairo.py | {
"start": 1045,
"end": 1125
} | class ____(_BackendGTK4):
FigureCanvas = FigureCanvasGTK4Cairo
| _BackendGTK4Cairo |
python | python-openxml__python-docx | src/docx/image/jpeg.py | {
"start": 4950,
"end": 8194
} | class ____:
"""Service class that knows how to find the next JFIF marker in a stream."""
def __init__(self, stream):
super(_MarkerFinder, self).__init__()
self._stream = stream
@classmethod
def from_stream(cls, stream):
"""Return a |_MarkerFinder| instance to find JFIF markers in `stream`."""
return cls(stream)
def next(self, start):
"""Return a (marker_code, segment_offset) 2-tuple identifying and locating the
first marker in `stream` occuring after offset `start`.
The returned `segment_offset` points to the position immediately following the
2-byte marker code, the start of the marker segment, for those markers that have
a segment.
"""
position = start
while True:
# skip over any non-\xFF bytes
position = self._offset_of_next_ff_byte(start=position)
# skip over any \xFF padding bytes
position, byte_ = self._next_non_ff_byte(start=position + 1)
# 'FF 00' sequence is not a marker, start over if found
if byte_ == b"\x00":
continue
# this is a marker, gather return values and break out of scan
marker_code, segment_offset = byte_, position + 1
break
return marker_code, segment_offset
def _next_non_ff_byte(self, start):
"""Return an offset, byte 2-tuple for the next byte in `stream` that is not
'\xff', starting with the byte at offset `start`.
If the byte at offset `start` is not '\xff', `start` and the returned `offset`
will be the same.
"""
self._stream.seek(start)
byte_ = self._read_byte()
while byte_ == b"\xff":
byte_ = self._read_byte()
offset_of_non_ff_byte = self._stream.tell() - 1
return offset_of_non_ff_byte, byte_
def _offset_of_next_ff_byte(self, start):
"""Return the offset of the next '\xff' byte in `stream` starting with the byte
at offset `start`.
Returns `start` if the byte at that offset is a hex 255; it does not necessarily
advance in the stream.
"""
self._stream.seek(start)
byte_ = self._read_byte()
while byte_ != b"\xff":
byte_ = self._read_byte()
offset_of_ff_byte = self._stream.tell() - 1
return offset_of_ff_byte
def _read_byte(self):
"""Return the next byte read from stream.
Raise Exception if stream is at end of file.
"""
byte_ = self._stream.read(1)
if not byte_: # pragma: no cover
raise Exception("unexpected end of file")
return byte_
def _MarkerFactory(marker_code, stream, offset):
"""Return |_Marker| or subclass instance appropriate for marker at `offset` in
`stream` having `marker_code`."""
if marker_code == JPEG_MARKER_CODE.APP0:
marker_cls = _App0Marker
elif marker_code == JPEG_MARKER_CODE.APP1:
marker_cls = _App1Marker
elif marker_code in JPEG_MARKER_CODE.SOF_MARKER_CODES:
marker_cls = _SofMarker
else:
marker_cls = _Marker
return marker_cls.from_stream(stream, marker_code, offset)
| _MarkerFinder |
python | walkccc__LeetCode | solutions/1696. Jump Game VI/1696.py | {
"start": 0,
"end": 568
} | class ____:
def maxResult(self, nums: list[int], k: int) -> int:
# Stores dp[i] within the bounds.
maxQ = collections.deque([0])
# dp[i] := the maximum score to consider nums[0..i]
dp = [0] * len(nums)
dp[0] = nums[0]
for i in range(1, len(nums)):
# Pop the index if it's out-of-bounds.
if maxQ[0] + k < i:
maxQ.popleft()
dp[i] = dp[maxQ[0]] + nums[i]
# Pop indices that won't be chosen in the future.
while maxQ and dp[maxQ[-1]] <= dp[i]:
maxQ.pop()
maxQ.append(i)
return dp[-1]
| Solution |
python | kamyu104__LeetCode-Solutions | Python/minimum-cost-to-change-the-final-value-of-expression.py | {
"start": 29,
"end": 1302
} | class ____(object):
def minOperationsToFlip(self, expression):
"""
:type expression: str
:rtype: int
"""
def compute(operands, operators):
right, left = operands.pop(), operands.pop()
operands.append(ops[operators.pop()](left, right))
ops = {'&':lambda x, y: [min(x[0], y[0]), min(x[1]+y[1], min(x[1], y[1])+1)],
'|':lambda x, y: [min(x[0]+y[0], min(x[0], y[0])+1), min(x[1], y[1])]}
precedence = {'&':0, '|':0}
operands, operators = [], []
for c in expression:
if c.isdigit():
operands.append([int(c != '0'), int(c != '1')])
elif c == '(':
operators.append(c)
elif c == ')':
while operators[-1] != '(':
compute(operands, operators)
operators.pop()
elif c in precedence:
while operators and operators[-1] in precedence and \
precedence[operators[-1]] >= precedence[c]:
compute(operands, operators)
operators.append(c)
while operators:
compute(operands, operators)
return max(operands[-1])
# Time: O(n)
# Space: O(n)
| Solution |
python | pytorch__pytorch | test/inductor/test_caching.py | {
"start": 51984,
"end": 55433
} | class ____(TestMixin, TestCase):
def test_lru_cache(self) -> None:
"""Test that the LRU cache decorator works correctly with various argument types.
Verifies that the _lru_cache decorator properly caches function results
and handles different types of arguments including integers, floats, strings,
and keyword arguments. Tests that cached calls return identical results
to non-cached calls with proper argument preservation.
"""
@utils._lru_cache
def foo(*args, **kwargs):
return args, kwargs
self.assertEqual(
foo(0),
(
(0,),
{},
),
)
self.assertEqual(
foo(0.0),
(
(0.0,),
{},
),
)
self.assertEqual(
foo("foo"),
(
("foo",),
{},
),
)
self.assertEqual(
foo("foo", bar="bar"),
(
("foo",),
{"bar": "bar"},
),
)
@parametrize("pickle_able", [True, False])
def test_try_pickle_key(self, pickle_able: bool) -> None:
"""Test that cache key pickling works correctly and raises appropriate exceptions.
Verifies that the _try_pickle_key function successfully pickles serializable
cache keys and raises KeyPicklingError for non-serializable keys like lambda
functions. Tests both the successful pickling path and error handling.
"""
if pickle_able:
key: str = self.random_string
self.assertEqual(pickle.loads(utils._try_pickle_key(key)), key)
else:
with self.assertRaises(exceptions.KeyPicklingError):
_ = utils._try_pickle_key(lambda: None)
@parametrize("pickle_able", [True, False])
def test_try_pickle_value(self, pickle_able: bool) -> None:
"""Test that cache value pickling works correctly and raises appropriate exceptions.
Verifies that the _try_pickle_value function successfully pickles serializable
cache values and raises ValuePicklingError for non-serializable values like
lambda functions. Tests both successful pickling and proper error handling.
"""
if pickle_able:
value: str = self.random_string
self.assertEqual(pickle.loads(utils._try_pickle_value(value)), value)
else:
with self.assertRaises(exceptions.ValuePicklingError):
_ = utils._try_pickle_value(lambda: None)
@parametrize("unpickle_able", [True, False])
def test_try_unpickle_value(self, unpickle_able: bool) -> None:
"""Test that cache value unpickling works correctly and raises appropriate exceptions.
Verifies that the _try_unpickle_value function successfully unpickles valid
pickled data and raises ValueUnPicklingError for invalid data like None.
Tests both successful unpickling and proper error handling for corrupted data.
"""
if unpickle_able:
value: str = self.random_string
self.assertEqual(utils._try_unpickle_value(pickle.dumps(value)), value)
else:
with self.assertRaises(exceptions.ValueUnPicklingError):
_ = utils._try_unpickle_value(b"foo")
if __name__ == "__main__":
run_tests()
| UtilsTest |
python | Netflix__metaflow | metaflow/runtime.py | {
"start": 63924,
"end": 83587
} | class ____(object):
clone_pathspec_mapping = {}
def __init__(
self,
flow_datastore,
flow,
step,
run_id,
metadata,
environment,
entrypoint,
event_logger,
monitor,
input_paths=None,
may_clone=False,
clone_run_id=None,
clone_only=False,
reentrant=False,
origin_ds_set=None,
decos=None,
logger=None,
# Anything below this is passed as part of kwargs
split_index=None,
ubf_context=None,
ubf_iter=None,
join_type=None,
task_id=None,
resume_identifier=None,
pathspec_index=None,
):
self.step = step
self.flow = flow
self.flow_name = flow.name
self.run_id = run_id
self.task_id = None
self._path = None
self.input_paths = input_paths
self.split_index = split_index
self.ubf_context = ubf_context
self.ubf_iter = ubf_iter
self.decos = [] if decos is None else decos
self.entrypoint = entrypoint
self.environment = environment
self.environment_type = self.environment.TYPE
self.clone_run_id = clone_run_id
self.clone_origin = None
self.origin_ds_set = origin_ds_set
self.metadata = metadata
self.event_logger = event_logger
self.monitor = monitor
self._logger = logger
self.retries = 0
self.user_code_retries = 0
self.error_retries = 0
self.tags = metadata.sticky_tags
self.event_logger_type = self.event_logger.TYPE
self.monitor_type = monitor.TYPE
self.metadata_type = metadata.TYPE
self.datastore_type = flow_datastore.TYPE
self._flow_datastore = flow_datastore
self.datastore_sysroot = flow_datastore.datastore_root
self._results_ds = None
# Only used in clone-only resume.
self._is_resume_leader = None
self._resume_done = None
self._resume_identifier = resume_identifier
origin = None
if clone_run_id and may_clone:
origin = self._find_origin_task(clone_run_id, join_type, pathspec_index)
if origin and origin["_task_ok"]:
# At this point, we know we are going to clone
self._is_cloned = True
task_id_exists_already = False
task_completed = False
if reentrant:
# A re-entrant clone basically allows multiple concurrent processes
# to perform the clone at the same time to the same new run id. Let's
# assume two processes A and B both simultaneously calling
# `resume --reentrant --run-id XX`.
# We want to guarantee that:
# - All incomplete tasks are cloned exactly once.
# To achieve this, we will select a resume leader and let it clone the
# entire execution graph. This ensures that we only write once to the
# datastore and metadata.
#
# We use the cloned _parameter task's task-id as the "key" to synchronize
# on. We try to "register" this new task-id (or rather the full pathspec
# <run>/<step>/<taskid>) with the metadata service which will indicate
# if we actually registered it or if it existed already. If we did manage
# to register it (_parameter task), we are the "elected resume leader"
# in essence and proceed to clone. If we didn't, we just wait to make
# sure the entire clone execution is fully done (ie: the clone is finished).
if task_id is not None:
# Sanity check -- this should never happen. We cannot allow
# for explicit task-ids because in the reentrant case, we use the
# cloned task's id as the new task's id.
raise MetaflowInternalError(
"Reentrant clone-only resume does not allow for explicit task-id"
)
if resume_identifier:
self.log(
"Resume identifier is %s." % resume_identifier,
system_msg=True,
)
else:
raise MetaflowInternalError(
"Reentrant clone-only resume needs a resume identifier."
)
# We will use the same task_id as the original task
# to use it effectively as a synchronization key
clone_task_id = origin.task_id
# Make sure the task-id is a non-integer to not clash with task ids
# assigned by the metadata provider. If this is an integer, we
# add some string to it. It doesn't matter what we add as long as it is
# consistent.
try:
clone_task_int = int(clone_task_id)
clone_task_id = "resume-%d" % clone_task_int
except ValueError:
pass
# If _get_task_id returns True it means the task already existed, so
# we wait for it.
task_id_exists_already = self._get_task_id(clone_task_id)
# We may not have access to task datastore on first resume attempt, but
# on later resume attempt, we should check if the resume task is complete
# or not. This is to fix the issue where the resume leader was killed
# unexpectedly during cloning and never mark task complete.
try:
task_completed = self.results["_task_ok"]
except DataException as e:
pass
else:
self._get_task_id(task_id)
# Store the mapping from current_pathspec -> origin_pathspec which
# will be useful for looking up origin_ds_set in find_origin_task.
self.clone_pathspec_mapping[self._path] = origin.pathspec
if self.step == "_parameters":
# In the _parameters task, we need to resolve who is the resume leader.
self._is_resume_leader = False
resume_leader = None
if task_id_exists_already:
# If the task id already exists, we need to check if current task is the resume leader in previous attempt.
ds = self._flow_datastore.get_task_datastore(
self.run_id, self.step, self.task_id
)
if not ds["_task_ok"]:
raise MetaflowInternalError(
"Externally cloned _parameters task did not succeed"
)
# Check if we should be the resume leader (maybe from previous attempt).
# To avoid the edge case where the resume leader is selected but has not
# yet written the _resume_leader metadata, we will wait for a few seconds.
# We will wait for resume leader for at most 3 times.
for _ in range(3):
if ds.has_metadata("_resume_leader", add_attempt=False):
resume_leader = ds.load_metadata(
["_resume_leader"], add_attempt=False
)["_resume_leader"]
self._is_resume_leader = resume_leader == resume_identifier
else:
self.log(
"Waiting for resume leader to be selected. Sleeping ...",
system_msg=True,
)
time.sleep(3)
else:
# If the task id does not exist, current task is the resume leader.
resume_leader = resume_identifier
self._is_resume_leader = True
if reentrant:
if resume_leader:
self.log(
"Resume leader is %s." % resume_leader,
system_msg=True,
)
else:
raise MetaflowInternalError(
"Can not determine the resume leader in distributed resume mode."
)
if self._is_resume_leader:
if reentrant:
self.log(
"Selected as the reentrant clone leader.",
system_msg=True,
)
# Clone in place without relying on run_queue.
self.new_attempt()
self._ds.clone(origin)
# Set the resume leader be the task that calls the resume (first task to clone _parameters task).
# We will always set resume leader regardless whether we are in distributed resume case or not.
if resume_identifier:
self._ds.save_metadata(
{"_resume_leader": resume_identifier}, add_attempt=False
)
self._ds.done()
else:
# Wait for the resume leader to complete
while True:
ds = self._flow_datastore.get_task_datastore(
self.run_id, self.step, self.task_id
)
# Check if resume is complete. Resume leader will write the done file.
self._resume_done = ds.has_metadata(
"_resume_done", add_attempt=False
)
if self._resume_done:
break
self.log(
"Waiting for resume leader to complete. Sleeping for %ds..."
% RESUME_POLL_SECONDS,
system_msg=True,
)
time.sleep(RESUME_POLL_SECONDS)
self.log(
"_parameters clone completed by resume leader", system_msg=True
)
else:
# Only leader can reach non-parameter steps in resume.
# Store the origin pathspec in clone_origin so this can be run
# as a task by the runtime.
self.clone_origin = origin.pathspec
# Save a call to creating the results_ds since its same as origin.
self._results_ds = origin
# If the task is already completed in new run, we don't need to clone it.
self._should_skip_cloning = task_completed
if self._should_skip_cloning:
self.log(
"Skipping cloning of previously run task %s"
% self.clone_origin,
system_msg=True,
)
else:
self.log(
"Cloning previously run task %s" % self.clone_origin,
system_msg=True,
)
else:
self._is_cloned = False
if clone_only:
# We are done -- we don't proceed to create new task-ids
return
self._get_task_id(task_id)
# Open the output datastore only if the task is not being cloned.
if not self._is_cloned:
self.new_attempt()
for deco in decos:
deco.runtime_task_created(
self._ds,
task_id,
split_index,
input_paths,
self._is_cloned,
ubf_context,
)
# determine the number of retries of this task
user_code_retries, error_retries = deco.step_task_retry_count()
if user_code_retries is None and error_retries is None:
# This signals the runtime that the task doesn't want any
# retries indifferent to other decorator opinions.
# NOTE: This is needed since we don't statically disallow
# specifying `@retry` in combination with decorators which
# implement `unbounded_foreach` semantics. This allows for
# ergonomic user invocation of `--with retry`; instead
# choosing to specially handle this way in the runtime.
self.user_code_retries = None
self.error_retries = None
if (
self.user_code_retries is not None
and self.error_retries is not None
):
self.user_code_retries = max(
self.user_code_retries, user_code_retries
)
self.error_retries = max(self.error_retries, error_retries)
if self.user_code_retries is None and self.error_retries is None:
self.user_code_retries = 0
self.error_retries = 0
def new_attempt(self):
self._ds = self._flow_datastore.get_task_datastore(
self.run_id, self.step, self.task_id, attempt=self.retries, mode="w"
)
self._ds.init_task()
def log(self, msg, system_msg=False, pid=None, timestamp=True):
if pid:
prefix = "[%s (pid %s)] " % (self._path, pid)
else:
prefix = "[%s] " % self._path
self._logger(msg, head=prefix, system_msg=system_msg, timestamp=timestamp)
sys.stdout.flush()
def is_resume_leader(self):
assert (
self.step == "_parameters"
), "Only _parameters step can check resume leader."
return self._is_resume_leader
def resume_done(self):
assert (
self.step == "_parameters"
), "Only _parameters step can check wheather resume is complete."
return self._resume_done
def mark_resume_done(self):
assert (
self.step == "_parameters"
), "Only _parameters step can mark resume as done."
assert self.is_resume_leader(), "Only resume leader can mark resume as done."
# Mark the resume as done. This is called at the end of the resume flow and after
# the _parameters step was successfully cloned, so we need to 'dangerously' save
# this done file, but the risk should be minimal.
self._ds._dangerous_save_metadata_post_done(
{"_resume_done": True}, add_attempt=False
)
def _get_task_id(self, task_id):
already_existed = True
tags = []
if self.ubf_context == UBF_CONTROL:
tags = [CONTROL_TASK_TAG]
# Register Metaflow tasks.
if task_id is None:
task_id = str(
self.metadata.new_task_id(self.run_id, self.step, sys_tags=tags)
)
already_existed = False
else:
# task_id is preset only by persist_constants().
already_existed = not self.metadata.register_task_id(
self.run_id,
self.step,
task_id,
0,
sys_tags=tags,
)
self.task_id = task_id
self._path = "%s/%s/%s" % (self.run_id, self.step, self.task_id)
return already_existed
def _find_origin_task(self, clone_run_id, join_type, pathspec_index=None):
if pathspec_index:
origin = self.origin_ds_set.get_with_pathspec_index(pathspec_index)
return origin
elif self.step == "_parameters":
pathspec = "%s/_parameters[]" % clone_run_id
origin = self.origin_ds_set.get_with_pathspec_index(pathspec)
if origin is None:
# This is just for usability: We could rerun the whole flow
# if an unknown clone_run_id is provided but probably this is
# not what the user intended, so raise a warning
raise MetaflowException(
"Resume could not find run id *%s*" % clone_run_id
)
else:
return origin
else:
# all inputs must have the same foreach stack, so we can safely
# pick the first one
parent_pathspec = self.input_paths[0]
origin_parent_pathspec = self.clone_pathspec_mapping[parent_pathspec]
parent = self.origin_ds_set.get_with_pathspec(origin_parent_pathspec)
# Parent should be non-None since only clone the child if the parent
# was successfully cloned.
foreach_stack = parent["_foreach_stack"]
if join_type == "foreach":
# foreach-join pops the topmost index
index = ",".join(str(s.index) for s in foreach_stack[:-1])
elif self.split_index or self.ubf_context == UBF_CONTROL:
# foreach-split pushes a new index
index = ",".join(
[str(s.index) for s in foreach_stack] + [str(self.split_index)]
)
else:
# all other transitions keep the parent's foreach stack intact
index = ",".join(str(s.index) for s in foreach_stack)
pathspec = "%s/%s[%s]" % (clone_run_id, self.step, index)
return self.origin_ds_set.get_with_pathspec_index(pathspec)
@property
def path(self):
return self._path
@property
def results(self):
if self._results_ds:
return self._results_ds
else:
self._results_ds = self._flow_datastore.get_task_datastore(
self.run_id, self.step, self.task_id
)
return self._results_ds
@property
def task_index(self):
_, task_index = self.results.pathspec_index.split("/")
return task_index
@property
def finished_id(self):
# note: id is not available before the task has finished.
# Index already identifies the task within the foreach and loop.
# We will remove foreach value so that it is easier to
# identify siblings within a foreach.
foreach_stack_tuple = tuple(
[s._replace(value=0) for s in self.results["_foreach_stack"]]
)
# _iteration_stack requires a fallback, as it does not exist for runs before v2.17.4
iteration_stack_tuple = tuple(self.results.get("_iteration_stack", []))
return (self.step, foreach_stack_tuple, iteration_stack_tuple)
@property
def is_cloned(self):
return self._is_cloned
@property
def should_skip_cloning(self):
return self._should_skip_cloning
def persist(self, flow):
# this is used to persist parameters before the start step
flow._task_ok = flow._success = True
flow._foreach_stack = []
self._ds.persist(flow)
self._ds.done()
def save_logs(self, logtype_to_logs):
self._ds.save_logs(RUNTIME_LOG_SOURCE, logtype_to_logs)
def save_metadata(self, name, metadata):
self._ds.save_metadata({name: metadata})
def __str__(self):
return " ".join(self._args)
| Task |
python | spyder-ide__spyder | external-deps/qtconsole/qtconsole/tests/test_jupyter_widget.py | {
"start": 250,
"end": 5207
} | class ____(unittest.TestCase):
@classmethod
def setUpClass(cls):
""" Create the application for the test case.
"""
cls._app = QtWidgets.QApplication.instance()
if cls._app is None:
cls._app = QtWidgets.QApplication([])
cls._app.setQuitOnLastWindowClosed(False)
@classmethod
def tearDownClass(cls):
""" Exit the application.
"""
QtWidgets.QApplication.quit()
def test_stylesheet_changed(self):
""" Test changing stylesheets.
"""
w = JupyterWidget(kind='rich')
# By default, the background is light. White text is rendered as black
self.assertEqual(w._ansi_processor.get_color(15).name(), '#000000')
# Color code 40
self.assertEqual(w._ansi_processor.get_color(40).name(), '#00d700')
# Change to a dark colorscheme. White text is rendered as white
w.syntax_style = 'monokai'
self.assertEqual(w._ansi_processor.get_color(15).name(), '#ffffff')
# Color code 40 with monokai
self.assertEqual(w._ansi_processor.get_color(40).name(), '#00d700')
@pytest.mark.skipif(not sys.platform.startswith('linux'),
reason="Works only on Linux")
def test_other_output(self):
""" Test displaying output from other clients.
"""
w = JupyterWidget(kind='rich')
w._append_plain_text('Header\n')
w._show_interpreter_prompt(1)
w.other_output_prefix = '[other] '
w.syntax_style = 'default'
msg = dict(
execution_count=1,
code='a = 1 + 1\nb = range(10)',
)
w._append_custom(w._insert_other_input, msg, before_prompt=True)
control = w._control
document = control.document()
self.assertEqual(document.blockCount(), 6)
self.assertEqual(document.toPlainText(), (
'Header\n'
'\n'
'[other] In [1]: a = 1 + 1\n'
' ...: b = range(10)\n'
'\n'
'In [2]: '
))
# Check proper syntax highlighting.
# This changes with every Qt6 release, that's why we don't test it on it.
if not QT6:
html = (
'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN" "http://www.w3.org/TR/REC-html40/strict.dtd">\n'
'<html><head><meta name="qrichtext" content="1" /><style type="text/css">\n'
'p, li { white-space: pre-wrap; }\n'
'</style></head><body style=" font-family:\'Monospace\'; font-size:9pt; font-weight:400; font-style:normal;">\n'
'<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;">Header</p>\n'
'<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>\n'
'<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#000080;">[other] In [</span><span style=" font-weight:600; color:#000080;">1</span><span style=" color:#000080;">]:</span> a = 1 + 1</p>\n'
'<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#000080;">\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0\xa0...:</span> b = range(10)</p>\n'
'<p style="-qt-paragraph-type:empty; margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><br /></p>\n'
'<p style=" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;"><span style=" color:#000080;">In [</span><span style=" font-weight:600; color:#000080;">2</span><span style=" color:#000080;">]:</span> </p></body></html>'
)
self.assertEqual(document.toHtml(), html)
def test_copy_paste_prompt(self):
"""Test copy/paste removes partial and full prompts."""
w = JupyterWidget(kind='rich')
w._show_interpreter_prompt(1)
control = w._control
code = " if True:\n print('a')"
w._set_input_buffer(code)
assert code not in control.toPlainText()
cursor = w._get_prompt_cursor()
pos = cursor.position()
cursor.setPosition(pos - 3)
cursor.movePosition(QtGui.QTextCursor.End,
QtGui.QTextCursor.KeepAnchor)
control.setTextCursor(cursor)
control.hasFocus = lambda: True
w.copy()
clipboard = QtWidgets.QApplication.clipboard()
assert clipboard.text() == code
w.paste()
expected = "In [1]: if True:\n ...: print('a')"
assert expected in control.toPlainText()
| TestJupyterWidget |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 96973,
"end": 97545
} | class ____(Structure):
_fields_ = [("id", c_uint),
("isP2pSupported", c_uint),
("sliceCount", c_uint),
("instanceCount", c_uint),
("multiprocessorCount", c_uint),
("copyEngineCount", c_uint),
("decoderCount", c_uint),
("encoderCount", c_uint),
("jpegCount", c_uint),
("ofaCount", c_uint),
("memorySizeMB", c_ulonglong),
]
nvmlGpuInstanceProfileInfo_v2 = 0x02000098
| c_nvmlGpuInstanceProfileInfo_t |
python | ray-project__ray | release/ray_release/exception.py | {
"start": 2702,
"end": 2800
} | class ____(EnvironmentSetupError):
exit_code = ExitCode.LOCAL_ENV_SETUP_ERROR
| LocalEnvSetupError |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_kinesis.py | {
"start": 919,
"end": 1082
} | class ____:
@mock_aws
def test_get_conn(self):
hook = KinesisHook(aws_conn_id="aws_default")
assert hook.get_conn() is not None
| TestKinesisHook |
python | numpy__numpy | numpy/polynomial/tests/test_chebyshev.py | {
"start": 13070,
"end": 16230
} | class ____:
def test_chebfit(self):
def f(x):
return x * (x - 1) * (x - 2)
def f2(x):
return x**4 + x**2 + 1
# Test exceptions
assert_raises(ValueError, cheb.chebfit, [1], [1], -1)
assert_raises(TypeError, cheb.chebfit, [[1]], [1], 0)
assert_raises(TypeError, cheb.chebfit, [], [1], 0)
assert_raises(TypeError, cheb.chebfit, [1], [[[1]]], 0)
assert_raises(TypeError, cheb.chebfit, [1, 2], [1], 0)
assert_raises(TypeError, cheb.chebfit, [1], [1, 2], 0)
assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[[1]])
assert_raises(TypeError, cheb.chebfit, [1], [1], 0, w=[1, 1])
assert_raises(ValueError, cheb.chebfit, [1], [1], [-1,])
assert_raises(ValueError, cheb.chebfit, [1], [1], [2, -1, 6])
assert_raises(TypeError, cheb.chebfit, [1], [1], [])
# Test fit
x = np.linspace(0, 2)
y = f(x)
#
coef3 = cheb.chebfit(x, y, 3)
assert_equal(len(coef3), 4)
assert_almost_equal(cheb.chebval(x, coef3), y)
coef3 = cheb.chebfit(x, y, [0, 1, 2, 3])
assert_equal(len(coef3), 4)
assert_almost_equal(cheb.chebval(x, coef3), y)
#
coef4 = cheb.chebfit(x, y, 4)
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
coef4 = cheb.chebfit(x, y, [0, 1, 2, 3, 4])
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
# check things still work if deg is not in strict increasing
coef4 = cheb.chebfit(x, y, [2, 3, 4, 1, 0])
assert_equal(len(coef4), 5)
assert_almost_equal(cheb.chebval(x, coef4), y)
#
coef2d = cheb.chebfit(x, np.array([y, y]).T, 3)
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
coef2d = cheb.chebfit(x, np.array([y, y]).T, [0, 1, 2, 3])
assert_almost_equal(coef2d, np.array([coef3, coef3]).T)
# test weighting
w = np.zeros_like(x)
yw = y.copy()
w[1::2] = 1
y[0::2] = 0
wcoef3 = cheb.chebfit(x, yw, 3, w=w)
assert_almost_equal(wcoef3, coef3)
wcoef3 = cheb.chebfit(x, yw, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef3, coef3)
#
wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, 3, w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
wcoef2d = cheb.chebfit(x, np.array([yw, yw]).T, [0, 1, 2, 3], w=w)
assert_almost_equal(wcoef2d, np.array([coef3, coef3]).T)
# test scaling with complex values x points whose square
# is zero when summed.
x = [1, 1j, -1, -1j]
assert_almost_equal(cheb.chebfit(x, x, 1), [0, 1])
assert_almost_equal(cheb.chebfit(x, x, [0, 1]), [0, 1])
# test fitting only even polynomials
x = np.linspace(-1, 1)
y = f2(x)
coef1 = cheb.chebfit(x, y, 4)
assert_almost_equal(cheb.chebval(x, coef1), y)
coef2 = cheb.chebfit(x, y, [0, 2, 4])
assert_almost_equal(cheb.chebval(x, coef2), y)
assert_almost_equal(coef1, coef2)
| TestFitting |
python | huggingface__transformers | src/transformers/models/glm46v/processing_glm46v.py | {
"start": 1857,
"end": 14428
} | class ____(ProcessorMixin):
r"""
Constructs a GLM-4V processor which wraps a GLM-4V image processor and a GLM-4 tokenizer into a single processor.
[`~Glm46VProcessor.__call__`] and [`~Glm46VProcessor.decode`] for more information.
Args:
image_processor ([`Glm46VProcessor`], *optional*):
The image processor is a required input.
tokenizer ([`PreTrainedTokenizerFast`], *optional*):
The tokenizer is a required input.
video_processor ([`Glm46VVideoProcessor`], *optional*):
The video processor is a required input.
chat_template (`str`, *optional*): A Jinja template which will be used to convert lists of messages
in a chat into a tokenizable string.
"""
def __init__(self, image_processor=None, tokenizer=None, video_processor=None, chat_template=None, **kwargs):
self.image_token = "<|image|>" if not hasattr(tokenizer, "image_token") else tokenizer.image_token
self.video_token = "<|video|>" if not hasattr(tokenizer, "video_token") else tokenizer.video_token
self.image_token_id = (
tokenizer.image_token_id
if getattr(tokenizer, "image_token_id", None)
else tokenizer.convert_tokens_to_ids(self.image_token)
)
self.video_token_id = (
tokenizer.video_token_id
if getattr(tokenizer, "video_token_id", None)
else tokenizer.convert_tokens_to_ids(self.video_token)
)
super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template)
def __call__(
self,
images: Optional[ImageInput] = None,
text: Union[TextInput, PreTokenizedInput, list[TextInput], list[PreTokenizedInput]] = None,
videos: Optional[VideoInput] = None,
**kwargs: Unpack[Glm46VProcessorKwargs],
) -> BatchFeature:
"""
Main method to prepare for the model one or several sequences(s) and image(s). This method forwards the `text`
and `kwargs` arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode
the text.
Args:
images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
tensor. Both channels-first and channels-last formats are supported.
text (`str`, `List[str]`, `List[List[str]]`):
The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
(pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
`is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
videos (`np.ndarray`, `torch.Tensor`, `List[np.ndarray]`, `List[torch.Tensor]`):
The image or batch of videos to be prepared. Each video can be a 4D NumPy array or PyTorch
tensor, or a nested list of 3D frames. Both channels-first and channels-last formats are supported.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors of a particular framework. Acceptable values are:
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return NumPy `np.ndarray` objects.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **pixel_values_videos** -- Pixel values of videos to be fed to a model. Returned when `videos` is not `None`.
- **image_grid_thw** -- List of image 3D grid in LLM. Returned when `images` is not `None`.
- **video_grid_thw** -- List of video 3D grid in LLM. Returned when `videos` is not `None`.
"""
output_kwargs = self._merge_kwargs(
Glm46VProcessorKwargs,
tokenizer_init_kwargs=self.tokenizer.init_kwargs,
**kwargs,
)
if images is not None:
image_inputs = self.image_processor(images=images, **output_kwargs["images_kwargs"])
image_grid_thw = image_inputs["image_grid_thw"]
else:
image_inputs = {}
image_grid_thw = None
if videos is not None:
videos_inputs = self.video_processor(videos=videos, **output_kwargs["videos_kwargs"])
# If user has not requested video metadata, pop it
if not kwargs.get("return_metadata"):
video_metadata = videos_inputs.pop("video_metadata")
else:
video_metadata = videos_inputs["video_metadata"]
video_grid_thw = videos_inputs["video_grid_thw"]
else:
videos_inputs = {}
video_grid_thw = None
if not isinstance(text, list):
text = [text]
text = text.copy() # below lines change text in-place
if image_grid_thw is not None:
merge_length = self.image_processor.merge_size**2
index = 0
for i in range(len(text)):
while self.image_token in text[i]:
num_image_tokens = image_grid_thw[index].prod() // merge_length
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
if video_grid_thw is not None:
merge_length = self.video_processor.merge_size**2
video_index = 0
for i in range(len(text)):
while self.video_token in text[i]:
num_frames = video_grid_thw[video_index][0]
video_structure = ""
metadata = video_metadata[video_index]
if metadata.fps is None:
logger.warning_once(
"SmolVLM requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
"Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
"Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
)
metadata.fps = 24 if metadata.fps is None else metadata.fps
timestamps = metadata.timestamps[::2] # mrope
unique_timestamps = []
for idx in range(0, len(timestamps)):
unique_timestamps.append(timestamps[idx])
selected_timestamps = unique_timestamps[:num_frames]
while len(selected_timestamps) < num_frames:
selected_timestamps.append(selected_timestamps[-1] if selected_timestamps else 0)
for frame_idx in range(num_frames):
timestamp_sec = selected_timestamps[frame_idx]
frame_structure = self.replace_frame_token_id(timestamp_sec)
video_structure += frame_structure
text[i] = text[i].replace(self.video_token, video_structure, 1)
num_image_tokens = (
video_grid_thw[video_index].prod() // merge_length // video_grid_thw[video_index][0]
)
for frame_idx in range(num_frames):
if self.image_token in text[i]:
text[i] = text[i].replace(self.image_token, "<|placeholder|>" * num_image_tokens, 1)
video_index += 1
text[i] = text[i].replace("<|placeholder|>", self.image_token)
return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)
return_mm_token_type_ids = output_kwargs["text_kwargs"].pop("return_mm_token_type_ids", False)
text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
self._check_special_mm_tokens(text, text_inputs, modalities=["image", "video"])
if return_mm_token_type_ids:
array_ids = np.array(text_inputs["input_ids"])
mm_token_type_ids = np.zeros_like(text_inputs["input_ids"])
mm_token_type_ids[array_ids == self.image_token_id] = 1
text_inputs["mm_token_type_ids"] = mm_token_type_ids.tolist()
return BatchFeature(data={**text_inputs, **image_inputs, **videos_inputs}, tensor_type=return_tensors)
def _get_num_multimodal_tokens(self, image_sizes=None, video_sizes=None, **kwargs):
"""
Computes the number of placeholder tokens needed for multimodal inputs with the given sizes.
Args:
image_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (height, width) per each image.
video_sizes (`list[list[int]]`, *optional*):
The input sizes formatted as (num_frames, height, width) per each video.
Returns:
`MultiModalData`: A `MultiModalData` object holding number of tokens per each of the provided
input modalities, along with other useful data.
"""
vision_data = {}
if image_sizes is not None:
images_kwargs = Glm46VProcessorKwargs._defaults.get("images_kwargs", {})
images_kwargs.update(kwargs)
merge_size = images_kwargs.get("merge_size", None) or self.image_processor.merge_size
num_image_patches = [
self.image_processor.get_number_of_image_patches(*image_size, images_kwargs)
for image_size in image_sizes
]
num_image_tokens = [(num_patches // merge_size**2) for num_patches in num_image_patches]
vision_data.update({"num_image_tokens": num_image_tokens, "num_image_patches": num_image_patches})
if video_sizes is not None:
videos_kwargs = Glm46VProcessorKwargs._defaults.get("videos_kwargs", {})
videos_kwargs.update(kwargs)
num_video_patches = [
self.video_processor.get_number_of_video_patches(*video_size, videos_kwargs)
for video_size in video_sizes
]
num_video_tokens = [(num_patches // merge_size**2) for num_patches in num_video_patches]
vision_data["num_video_tokens"] = num_video_tokens
return MultiModalData(**vision_data)
def post_process_image_text_to_text(
self, generated_outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False, **kwargs
):
"""
Post-process the output of the model to decode the text.
Args:
generated_outputs (`torch.Tensor` or `np.ndarray`):
The output of the model `generate` function. The output is expected to be a tensor of shape `(batch_size, sequence_length)`
or `(sequence_length,)`.
skip_special_tokens (`bool`, *optional*, defaults to `True`):
Whether or not to remove special tokens in the output. Argument passed to the tokenizer's `batch_decode` method.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the tokenization spaces. Argument passed to the tokenizer's `batch_decode` method.
**kwargs:
Additional arguments to be passed to the tokenizer's `batch_decode method`.
Returns:
`list[str]`: The decoded text.
"""
return self.tokenizer.batch_decode(
generated_outputs,
skip_special_tokens=skip_special_tokens,
clean_up_tokenization_spaces=clean_up_tokenization_spaces,
**kwargs,
)
def replace_frame_token_id(self, timestamp_sec):
return f"<|begin_of_image|>{self.image_token}<|end_of_image|>{timestamp_sec:.1f} seconds"
__all__ = ["Glm46VProcessor"]
| Glm46VProcessor |
python | pytorch__pytorch | torch/nn/modules/conv.py | {
"start": 57724,
"end": 60832
} | class ____(LazyModuleMixin):
groups: int
transposed: bool
in_channels: int
out_channels: int
kernel_size: tuple[int, ...]
weight: UninitializedParameter
bias: UninitializedParameter
def reset_parameters(self) -> None:
# has_uninitialized_params is defined in parent class and it is using a protocol on self
if not self.has_uninitialized_params() and self.in_channels != 0: # type: ignore[misc]
# "type:ignore[..]" is required because mypy thinks that "reset_parameters" is undefined
# in super class. Turns out that it is defined in _ConvND which is inherited by any class
# that also inherits _LazyConvXdMixin
super().reset_parameters() # type: ignore[misc]
# Signature of "initialize_parameters" is incompatible with the definition in supertype LazyModuleMixin
def initialize_parameters(self, input: Tensor, *args, **kwargs) -> None: # type: ignore[override]
# defined by parent class but using a protocol
if self.has_uninitialized_params(): # type: ignore[misc]
self.in_channels = self._get_in_channels(input)
if self.in_channels % self.groups != 0:
raise ValueError("in_channels must be divisible by groups")
assert isinstance(self.weight, UninitializedParameter)
if self.transposed:
self.weight.materialize(
(
self.in_channels,
self.out_channels // self.groups,
*self.kernel_size,
)
)
else:
self.weight.materialize(
(
self.out_channels,
self.in_channels // self.groups,
*self.kernel_size,
)
)
if self.bias is not None:
assert isinstance(self.bias, UninitializedParameter)
self.bias.materialize((self.out_channels,))
self.reset_parameters()
# Function to extract in_channels from first input.
def _get_in_channels(self, input: Tensor) -> int:
num_spatial_dims = self._get_num_spatial_dims()
num_dims_no_batch = num_spatial_dims + 1 # +1 for channels dim
num_dims_batch = num_dims_no_batch + 1
if input.dim() not in (num_dims_no_batch, num_dims_batch):
raise RuntimeError(
f"Expected {num_dims_no_batch}D (unbatched) or {num_dims_batch}D (batched) input "
f"to {self.__class__.__name__}, but "
f"got input of size: {input.shape}"
)
return input.shape[1] if input.dim() == num_dims_batch else input.shape[0]
# Function to return the number of spatial dims expected for inputs to the module.
# This is expected to be implemented by subclasses.
def _get_num_spatial_dims(self) -> int:
raise NotImplementedError
# LazyConv1d defines weight as a Tensor but derived class defines it as UninitializeParameter
| _LazyConvXdMixin |
python | pydantic__pydantic | tests/test_construction.py | {
"start": 8967,
"end": 16740
} | class ____(BaseModel, extra='allow'):
pass
def test_copy_deep_extra(copy_method):
class Foo(BaseModel, extra='allow'):
pass
m = Foo(extra=[])
assert copy_method(m).extra is m.extra
assert copy_method(m, deep=True).extra == m.extra
assert copy_method(m, deep=True).extra is not m.extra
def test_copy_set_fields(ModelTwo, copy_method):
m = ModelTwo(a=24, d=Model(a='12'))
m2 = copy_method(m)
assert m.model_dump(exclude_unset=True) == {'a': 24.0, 'd': {'a': 12}}
assert m.model_dump(exclude_unset=True) == m2.model_dump(exclude_unset=True)
def test_simple_pickle():
m = Model(a='24')
b = pickle.dumps(m)
m2 = pickle.loads(b)
assert m.a == m2.a == 24
assert m.b == m2.b == 10
assert m == m2
assert m is not m2
assert tuple(m) == (('a', 24.0), ('b', 10))
assert tuple(m2) == (('a', 24.0), ('b', 10))
def test_recursive_pickle(create_module):
@create_module
def module():
from pydantic import BaseModel, PrivateAttr
class PickleModel(BaseModel):
a: float
b: int = 10
class PickleModelTwo(BaseModel):
_foo_ = PrivateAttr({'private'})
a: float
b: int = 10
c: str = 'foobar'
d: PickleModel
m = module.PickleModelTwo(a=24, d=module.PickleModel(a='123.45'))
m2 = pickle.loads(pickle.dumps(m))
assert m == m2
assert m.d.a == 123.45
assert m2.d.a == 123.45
assert m._foo_ == m2._foo_
def test_pickle_undefined(create_module):
@create_module
def module():
from pydantic import BaseModel, PrivateAttr
class PickleModel(BaseModel):
a: float
b: int = 10
class PickleModelTwo(BaseModel):
_foo_ = PrivateAttr({'private'})
a: float
b: int = 10
c: str = 'foobar'
d: PickleModel
m = module.PickleModelTwo(a=24, d=module.PickleModel(a='123.45'))
m2 = pickle.loads(pickle.dumps(m))
assert m2._foo_ == {'private'}
m._foo_ = PydanticUndefined
m3 = pickle.loads(pickle.dumps(m))
assert not hasattr(m3, '_foo_')
def test_copy_undefined(ModelTwo, copy_method):
m = ModelTwo(a=24, d=Model(a='123.45'))
m2 = copy_method(m)
assert m2._foo_ == {'private'}
m._foo_ = PydanticUndefined
m3 = copy_method(m)
assert not hasattr(m3, '_foo_')
def test_immutable_copy_with_frozen(copy_method):
class Model(BaseModel):
model_config = ConfigDict(frozen=True)
a: int
b: int
m = Model(a=40, b=10)
assert m == copy_method(m)
assert repr(m) == 'Model(a=40, b=10)'
m2 = copy_method(m, update={'b': 12})
assert repr(m2) == 'Model(a=40, b=12)'
with pytest.raises(ValidationError):
m2.b = 13
def test_pickle_fields_set():
m = Model(a=24)
assert m.model_dump(exclude_unset=True) == {'a': 24}
m2 = pickle.loads(pickle.dumps(m))
assert m2.model_dump(exclude_unset=True) == {'a': 24}
def test_pickle_preserves_extra():
m = ExtraModel(a=24)
assert m.model_extra == {'a': 24}
m2 = pickle.loads(pickle.dumps(m))
assert m2.model_extra == {'a': 24}
def test_copy_update_exclude():
class SubModel(BaseModel):
a: str
b: str
class Model(BaseModel):
c: str
d: SubModel
m = Model(c='ex', d=dict(a='ax', b='bx'))
assert m.model_dump() == {'c': 'ex', 'd': {'a': 'ax', 'b': 'bx'}}
assert deprecated_copy(m, exclude={'c'}).model_dump() == {'d': {'a': 'ax', 'b': 'bx'}}
with pytest.warns(
UserWarning,
match=r"Expected `str` - serialized value may not be as expected \[field_name='c', input_value=42, input_type=int\]",
):
assert deprecated_copy(m, exclude={'c'}, update={'c': 42}).model_dump() == {
'c': 42,
'd': {'a': 'ax', 'b': 'bx'},
}
with pytest.warns(
PydanticDeprecatedSince20,
match='The private method `_calculate_keys` will be removed and should no longer be used.',
):
assert m._calculate_keys(exclude={'x': ...}, include=None, exclude_unset=False) == {'c', 'd'}
assert m._calculate_keys(exclude={'x': ...}, include=None, exclude_unset=False, update={'c': 42}) == {'d'}
def test_shallow_copy_modify(copy_method):
class X(BaseModel):
val: int
deep: Any
x = X(val=1, deep={'deep_thing': [1, 2]})
y = copy_method(x)
y.val = 2
y.deep['deep_thing'].append(3)
assert x.val == 1
assert y.val == 2
# deep['deep_thing'] gets modified
assert x.deep['deep_thing'] == [1, 2, 3]
assert y.deep['deep_thing'] == [1, 2, 3]
def test_construct_default_factory():
class Model(BaseModel):
foo: list[int] = Field(default_factory=list)
bar: str = 'Baz'
m = Model.model_construct()
assert m.foo == []
assert m.bar == 'Baz'
@pytest.mark.thread_unsafe(reason='`pytest.warns()` is thread unsafe')
def test_copy_with_excluded_fields():
class User(BaseModel):
name: str
age: int
dob: str
user = User(name='test_user', age=23, dob='01/01/2000')
user_copy = deprecated_copy(user, exclude={'dob': ...})
assert 'dob' in user.model_fields_set
assert 'dob' not in user_copy.model_fields_set
def test_dunder_copy(ModelTwo):
m = ModelTwo(a=24, d=Model(a='12'))
m2 = m.__copy__()
assert m is not m2
assert m.a == m2.a == 24
assert isinstance(m2.d, Model)
assert m.d is m2.d
assert m.d.a == m2.d.a == 12
m.a = 12
assert m.a != m2.a
def test_dunder_deepcopy(ModelTwo):
m = ModelTwo(a=24, d=Model(a='12'))
m2 = m.__copy__()
assert m is not m2
assert m.a == m2.a == 24
assert isinstance(m2.d, Model)
assert m.d is m2.d
assert m.d.a == m2.d.a == 12
m.a = 12
assert m.a != m2.a
def test_model_copy(ModelTwo):
m = ModelTwo(a=24, d=Model(a='12'))
m2 = m.__copy__()
assert m is not m2
assert m.a == m2.a == 24
assert isinstance(m2.d, Model)
assert m.d is m2.d
assert m.d.a == m2.d.a == 12
m.a = 12
assert m.a != m2.a
def test_pydantic_extra():
class Model(BaseModel):
model_config = dict(extra='allow')
x: int
m = Model.model_construct(x=1, y=2)
assert m.__pydantic_extra__ == {'y': 2}
def test_retain_order_of_fields():
class MyModel(BaseModel):
a: str = 'a'
b: str
m = MyModel.model_construct(b='b')
assert m.model_dump_json() == '{"a":"a","b":"b"}'
def test_initialize_with_private_attr():
class MyModel(BaseModel):
_a: str
m = MyModel.model_construct(_a='a')
assert m._a == 'a'
assert '_a' in m.__pydantic_private__
def test_model_construct_with_alias_choices() -> None:
class MyModel(BaseModel):
a: str = Field(validation_alias=AliasChoices('aaa', 'AAA'))
assert MyModel.model_construct(a='a_value').a == 'a_value'
assert MyModel.model_construct(aaa='a_value').a == 'a_value'
assert MyModel.model_construct(AAA='a_value').a == 'a_value'
def test_model_construct_with_alias_path() -> None:
class MyModel(BaseModel):
a: str = Field(validation_alias=AliasPath('aaa', 'AAA'))
assert MyModel.model_construct(a='a_value').a == 'a_value'
assert MyModel.model_construct(aaa={'AAA': 'a_value'}).a == 'a_value'
def test_model_construct_with_alias_choices_and_path() -> None:
class MyModel(BaseModel):
a: str = Field(validation_alias=AliasChoices('aaa', AliasPath('AAA', 'aaa')))
assert MyModel.model_construct(a='a_value').a == 'a_value'
assert MyModel.model_construct(aaa='a_value').a == 'a_value'
assert MyModel.model_construct(AAA={'aaa': 'a_value'}).a == 'a_value'
| ExtraModel |
python | huggingface__transformers | tests/models/pegasus/test_modeling_pegasus.py | {
"start": 8683,
"end": 12641
} | class ____(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (PegasusModel, PegasusForConditionalGeneration) if is_torch_available() else ()
pipeline_model_mapping = (
{
"feature-extraction": PegasusModel,
"summarization": PegasusForConditionalGeneration,
"text-generation": PegasusForCausalLM,
"text2text-generation": PegasusForConditionalGeneration,
"translation": PegasusForConditionalGeneration,
}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_resize_position_embeddings = True
test_missing_keys = False
def setUp(self):
self.model_tester = PegasusModelTester(self)
self.config_tester = ConfigTester(self, config_class=PegasusConfig)
def test_config(self):
self.config_tester.run_common_tests()
def test_save_load_strict(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
model = model_class(config)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
self.assertEqual(info["missing_keys"], set())
def test_decoder_model_past_with_large_inputs(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
def test_encoder_decoder_model_standalone(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
@require_torch_fp16
def test_generate_fp16(self):
config, input_dict = self.model_tester.prepare_config_and_inputs()
input_ids = input_dict["input_ids"]
attention_mask = input_ids.ne(1).to(torch_device)
model = PegasusForConditionalGeneration(config).eval().to(torch_device)
model.half()
model.generate(input_ids, attention_mask=attention_mask)
model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant(self):
pass
@unittest.skip(
reason="This architecture seem to not compute gradients properly when using GC, check: https://github.com/huggingface/transformers/pull/27124"
)
def test_training_gradient_checkpointing_use_reentrant_false(self):
pass
def assert_tensors_close(a, b, atol=1e-12, prefix=""):
"""If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error."""
if a is None and b is None:
return True
try:
if torch.allclose(a, b, atol=atol):
return True
raise Exception
except Exception:
pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
if a.numel() > 100:
msg = f"tensor values are {pct_different:.1%} percent different."
else:
msg = f"{a} != {b}"
if prefix:
msg = prefix + ": " + msg
raise AssertionError(msg)
def _long_tensor(tok_lst):
return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
@require_torch
@require_sentencepiece
@require_tokenizers
| PegasusModelTest |
python | sympy__sympy | sympy/stats/crv_types.py | {
"start": 3452,
"end": 5588
} | class ____(SingleContinuousDistribution):
_argnames = ('pdf',)
def __new__(cls, pdf, set=Interval(-oo, oo)):
return Basic.__new__(cls, pdf, set)
@property
def set(self):
return self.args[1]
@staticmethod
def check(pdf, set):
x = Dummy('x')
val = integrate(pdf(x), (x, set))
_value_check(Eq(val, 1) != S.false, "The pdf on the given set is incorrect.")
def ContinuousRV(symbol, density, set=Interval(-oo, oo), **kwargs):
"""
Create a Continuous Random Variable given the following:
Parameters
==========
symbol : Symbol
Represents name of the random variable.
density : Expression containing symbol
Represents probability density function.
set : set/Interval
Represents the region where the pdf is valid, by default is real line.
check : bool
If True, it will check whether the given density
integrates to 1 over the given set. If False, it
will not perform this check. Default is False.
Returns
=======
RandomSymbol
Many common continuous random variable types are already implemented.
This function should be necessary only very rarely.
Examples
========
>>> from sympy import Symbol, sqrt, exp, pi
>>> from sympy.stats import ContinuousRV, P, E
>>> x = Symbol("x")
>>> pdf = sqrt(2)*exp(-x**2/2)/(2*sqrt(pi)) # Normal distribution
>>> X = ContinuousRV(x, pdf)
>>> E(X)
0
>>> P(X>0)
1/2
"""
pdf = Piecewise((density, set.as_relational(symbol)), (0, True))
pdf = Lambda(symbol, pdf)
# have a default of False while `rv` should have a default of True
kwargs['check'] = kwargs.pop('check', False)
return rv(symbol.name, ContinuousDistributionHandmade, (pdf, set), **kwargs)
########################################
# Continuous Probability Distributions #
########################################
#-------------------------------------------------------------------------------
# Arcsin distribution ----------------------------------------------------------
| ContinuousDistributionHandmade |
python | getsentry__sentry | src/sentry/profiles/flamegraph.py | {
"start": 1059,
"end": 1147
} | class ____(TypedDict):
project_id: int
profile_id: str
| TransactionProfileCandidate |
python | numpy__numpy | numpy/ma/core.py | {
"start": 27045,
"end": 27438
} | class ____:
"""
DomainGreater(v)(x) is True where x <= v.
"""
def __init__(self, critical_value):
"DomainGreater(v)(x) = true where x <= v"
self.critical_value = critical_value
def __call__(self, x):
"Executes the call behavior."
with np.errstate(invalid='ignore'):
return umath.less_equal(x, self.critical_value)
| _DomainGreater |
python | automl__auto-sklearn | autosklearn/pipeline/components/regression/libsvm_svr.py | {
"start": 641,
"end": 6771
} | class ____(AutoSklearnRegressionAlgorithm):
def __init__(
self,
kernel,
C,
epsilon,
tol,
shrinking,
gamma=0.1,
degree=3,
coef0=0.0,
verbose=False,
max_iter=-1,
random_state=None,
):
self.kernel = kernel
self.C = C
self.epsilon = epsilon
self.tol = tol
self.shrinking = shrinking
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
self.estimator = None
def fit(self, X, y):
import sklearn.svm
# Calculate the size of the kernel cache (in MB) for sklearn's LibSVM.
# The cache size is calculated as 2/3 of the available memory
# (which is calculated as the memory limit minus the used memory)
try:
# Retrieve memory limits imposed on the process
soft, hard = resource.getrlimit(resource.RLIMIT_AS)
if soft > 0:
# Convert limit to units of megabytes
soft /= 1024 * 1024
# Retrieve memory used by this process
maxrss = resource.getrusage(resource.RUSAGE_SELF)[2] / 1024
# In MacOS, the MaxRSS output of resource.getrusage in bytes;
# on other platforms, it's in kilobytes
if sys.platform == "darwin":
maxrss = maxrss / 1024
cache_size = (soft - maxrss) / 1.5
if cache_size < 0:
cache_size = 200
else:
cache_size = 200
except Exception:
cache_size = 200
self.C = float(self.C)
self.epsilon = float(self.epsilon)
self.tol = float(self.tol)
self.shrinking = check_for_bool(self.shrinking)
self.degree = int(self.degree)
self.gamma = float(self.gamma)
if check_none(self.coef0):
self.coef0 = 0.0
else:
self.coef0 = float(self.coef0)
self.verbose = int(self.verbose)
self.max_iter = int(self.max_iter)
self.estimator = sklearn.svm.SVR(
kernel=self.kernel,
C=self.C,
epsilon=self.epsilon,
tol=self.tol,
shrinking=self.shrinking,
degree=self.degree,
gamma=self.gamma,
coef0=self.coef0,
cache_size=cache_size,
verbose=self.verbose,
max_iter=self.max_iter,
)
self.scaler = sklearn.preprocessing.StandardScaler(copy=True)
# Convert y to be at least 2d for the scaler
# [1,1,1] -> [[1], [1], [1]]
if y.ndim == 1:
y = y.reshape((-1, 1))
y_scaled = self.scaler.fit_transform(y)
# Flatten: [[0], [0], [0]] -> [0, 0, 0]
if y_scaled.ndim == 2 and y_scaled.shape[1] == 1:
y_scaled = y_scaled.flatten()
self.estimator.fit(X, y_scaled)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
if self.scaler is None:
raise NotImplementedError
y_pred = self.estimator.predict(X)
inverse = self.scaler.inverse_transform(y_pred)
# Flatten: [[0], [0], [0]] -> [0, 0, 0]
if inverse.ndim == 2 and inverse.shape[1] == 1:
inverse = inverse.flatten()
return inverse
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "SVR",
"name": "Support Vector Regression",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": False,
"prefers_data_normalized": True,
"is_deterministic": True,
"input": (SPARSE, DENSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
C = UniformFloatHyperparameter(
name="C", lower=0.03125, upper=32768, log=True, default_value=1.0
)
# Random Guess
epsilon = UniformFloatHyperparameter(
name="epsilon", lower=0.001, upper=1, default_value=0.1, log=True
)
kernel = CategoricalHyperparameter(
name="kernel",
choices=["linear", "poly", "rbf", "sigmoid"],
default_value="rbf",
)
degree = UniformIntegerHyperparameter(
name="degree", lower=2, upper=5, default_value=3
)
gamma = UniformFloatHyperparameter(
name="gamma", lower=3.0517578125e-05, upper=8, log=True, default_value=0.1
)
# TODO this is totally ad-hoc
coef0 = UniformFloatHyperparameter(
name="coef0", lower=-1, upper=1, default_value=0
)
# probability is no hyperparameter, but an argument to the SVM algo
shrinking = CategoricalHyperparameter(
name="shrinking", choices=["True", "False"], default_value="True"
)
tol = UniformFloatHyperparameter(
name="tol", lower=1e-5, upper=1e-1, default_value=1e-3, log=True
)
max_iter = UnParametrizedHyperparameter("max_iter", -1)
cs = ConfigurationSpace()
cs.add_hyperparameters(
[C, kernel, degree, gamma, coef0, shrinking, tol, max_iter, epsilon]
)
degree_depends_on_poly = EqualsCondition(degree, kernel, "poly")
gamma_depends_on_kernel = InCondition(
child=gamma, parent=kernel, values=("poly", "rbf")
)
coef0_depends_on_kernel = InCondition(
child=coef0, parent=kernel, values=("poly", "sigmoid")
)
cs.add_conditions(
[degree_depends_on_poly, gamma_depends_on_kernel, coef0_depends_on_kernel]
)
return cs
| LibSVM_SVR |
python | ray-project__ray | python/ray/data/tests/unit/test_datatype.py | {
"start": 8755,
"end": 10109
} | class ____:
"""Test type inference from values."""
@pytest.mark.parametrize(
"numpy_value,expected_dtype",
[
(np.array([1, 2, 3], dtype="int32"), np.dtype("int32")),
(np.array([1.0, 2.0], dtype="float64"), np.dtype("float64")),
(np.int64(42), np.dtype("int64")),
(np.float32(3.14), np.dtype("float32")),
],
)
def test_infer_dtype_numpy_values(self, numpy_value, expected_dtype):
"""Test inference of NumPy arrays and scalars."""
dt = DataType.infer_dtype(numpy_value)
assert dt.is_numpy_type()
assert dt._physical_dtype == expected_dtype
# Removed test_infer_dtype_pyarrow_scalar - no longer works with current implementation
@pytest.mark.parametrize(
"python_value",
[
42, # int
3.14, # float
"hello", # str
True, # bool
[1, 2, 3], # list
],
)
def test_infer_dtype_python_values_arrow_success(self, python_value):
"""Test inference of Python values that Arrow can handle."""
dt = DataType.infer_dtype(python_value)
# Should infer to Arrow type for basic Python values
assert dt.is_arrow_type()
# Removed test_infer_dtype_fallback_to_python_type - no longer supported
| TestDataTypeInference |
python | kamyu104__LeetCode-Solutions | Python/sort-the-people.py | {
"start": 40,
"end": 354
} | class ____(object):
def sortPeople(self, names, heights):
"""
:type names: List[str]
:type heights: List[int]
:rtype: List[str]
"""
order = range(len(names))
order.sort(key=lambda x: heights[x], reverse=True)
return [names[i] for i in order]
| Solution |
python | keras-team__keras | keras/src/trainers/trainer_test.py | {
"start": 10890,
"end": 99954
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_metric_tracking(self):
class ModelWithMetric(Trainer, layers.Dense):
def __init__(self, units):
layers.Dense.__init__(
self,
units=units,
use_bias=False,
kernel_initializer=initializers.Ones(),
)
Trainer.__init__(self)
self.my_metric = metrics.MeanSquaredError(name="my_metric")
model = ModelWithMetric(units=3)
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
x = np.ones((2, 4))
y = np.zeros((2, 3))
# Fit the model to make sure compile_metrics are built
model.fit(x, y, batch_size=2, epochs=1)
# The model should have 3 metrics: loss_tracker, compile_metrics,
# my_metric.
self.assertEqual(len(model.metrics), 3)
self.assertEqual(model.metrics[0], model._loss_tracker)
self.assertEqual(model.metrics[1], model._compile_metrics)
self.assertEqual(model.metrics[2], model.my_metric)
# All metrics should have their weights created
self.assertEqual(len(model._loss_tracker.variables), 2)
self.assertEqual(len(model._compile_metrics.variables), 2)
self.assertEqual(len(model.my_metric.variables), 2)
# And those weights are tracked at the model level
self.assertEqual(len(model.metrics_variables), 6)
self.assertLen(model.non_trainable_variables, 0)
# Models with only weighted_metrics should have the same 3 metrics
model_weighted = ModelWithMetric(units=3)
model_weighted.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
weighted_metrics=[metrics.MeanSquaredError()],
)
model_weighted.fit(
x,
y,
batch_size=2,
epochs=1,
sample_weight=np.ones(2),
)
self.assertEqual(len(model_weighted.metrics), 3)
def test_nested_trainer_metrics(self):
# https://github.com/keras-team/keras/issues/20188
model = ExampleModel(units=3)
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
self.assertLen(model.metrics, 2)
self.assertEqual(model.metrics[0], model._loss_tracker)
self.assertEqual(model.metrics[1], model._compile_metrics)
inputs = keras.Input((4,))
outputs = model(inputs)
outputs = layers.Dense(8)(outputs)
new_model = models.Model(inputs, outputs)
new_model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
self.assertLen(new_model.metrics, 2)
self.assertEqual(new_model.metrics[0], new_model._loss_tracker)
self.assertEqual(new_model.metrics[1], new_model._compile_metrics)
def test_nested_trainer_metrics_without_compile(self):
model = ExampleModel(units=3)
self.assertLen(model.metrics, 0)
inputs = keras.Input((4,))
outputs = model(inputs)
outputs = layers.Dense(8)(outputs)
new_model = models.Model(inputs, outputs)
new_model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
self.assertLen(new_model.metrics, 2)
self.assertEqual(new_model.metrics[0], new_model._loss_tracker)
self.assertEqual(new_model.metrics[1], new_model._compile_metrics)
def test_multiple_compiles(self):
# https://github.com/keras-team/keras/issues/20474
model1 = ExampleModel(units=3)
model2 = ExampleModel(units=3)
model1.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
# Combine these 2 models into `combined`.
inputs = keras.Input(shape=(4,))
x = model1(inputs)
outputs = model2(x)
combined = models.Model(inputs, outputs)
combined.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
self.assertLen(model1.metrics, 2)
self.assertIsNotNone(model1._loss_tracker)
self.assertEqual(model1.metrics[0], model1._loss_tracker)
self.assertEqual(model1.metrics[1], model1._compile_metrics)
# `combined.metrics` will not include `model1.metrics`.
self.assertLen(combined.metrics, 2)
self.assertIsNotNone(combined._loss_tracker)
self.assertEqual(combined.metrics[0], combined._loss_tracker)
self.assertEqual(combined.metrics[1], combined._compile_metrics)
@pytest.mark.skipif(
backend.backend() != "torch",
reason="torch backend runs in eager mode for jit_compile='auto'",
)
def test_compile_eager_vs_jit_torch(self):
model = ExampleModel(units=3)
model.compile(jit_compile="auto")
# torch trainer en/disables torch.compile only based on the value of
# model.jit_compile (not model.run_eagerly)
self.assertFalse(model.run_eagerly)
self.assertFalse(model.jit_compile)
@parameterized.named_parameters(
[
("eager", True, False, False),
("graph_fn", False, False, False),
("jit", False, True, False),
("steps_per_epoch_eager", True, False, True),
("steps_per_epoch_graph_fn", False, False, True),
("steps_per_epoch_jit", False, True, True),
]
)
@pytest.mark.requires_trainable_backend
def test_fit_flow(self, run_eagerly, jit_compile, use_steps_per_epoch):
if not run_eagerly and not jit_compile and use_steps_per_epoch:
if False and backend.backend() == "tensorflow":
self.skipTest(
"TODO: Graph mode without XLA in TF backend leads to "
"unexpected logs, need further checks."
)
if jit_compile and backend.backend() == "torch":
self.skipTest(
"TODO: compilation with torch backend leads to "
"unexpected logs, need further checks."
)
model = ExampleModel(units=3)
epochs = 3
batch_size = 20
steps_per_epoch = 7
dataset_size = batch_size * (steps_per_epoch - 2)
x = np.ones((dataset_size, 4))
y = np.zeros((dataset_size, 3))
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=run_eagerly,
jit_compile=jit_compile,
)
history = model.fit(
x,
y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch if use_steps_per_epoch else None,
epochs=epochs,
)
history = history.history
self.assertIn("loss", history)
self.assertIn("mean_squared_error", history)
self.assertAllClose(
history["mean_squared_error"],
[14.5, 11.5, 8.5],
atol=1.0, # TODO: results vary across backends
)
@parameterized.named_parameters(
[
{
"testcase_name": "np_array",
"dataset_type": "np_array",
"fit_kwargs": {"batch_size": 5},
},
{
"testcase_name": "native_array",
"dataset_type": "native_array",
"fit_kwargs": {"batch_size": 5},
},
{
"testcase_name": "py_dataset",
"dataset_type": "py_dataset",
},
{
"testcase_name": "py_dataset_cw",
"dataset_type": "py_dataset",
"fit_kwargs": {"class_weight": {0: 1, 1: 2}},
},
{
"testcase_name": "py_dataset_infinite",
"dataset_type": "py_dataset",
"dataset_kwargs": {"infinite": True},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "py_dataset_infinite_cw",
"dataset_type": "py_dataset",
"dataset_kwargs": {"infinite": True},
"fit_kwargs": {
"steps_per_epoch": 20,
"class_weight": {0: 1, 1: 2},
},
},
{
"testcase_name": "py_dataset_multithreading",
"dataset_type": "py_dataset",
"dataset_kwargs": {"workers": 2},
},
{
"testcase_name": "py_dataset_multithreading_cw",
"dataset_type": "py_dataset",
"dataset_kwargs": {"workers": 2},
"fit_kwargs": {"class_weight": {0: 1, 1: 2}},
},
{
"testcase_name": "py_dataset_multithreading_infinite",
"dataset_type": "py_dataset",
"dataset_kwargs": {"infinite": True, "workers": 2},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "py_dataset_multiprocessing",
"dataset_type": "py_dataset",
"dataset_kwargs": {"workers": 2, "use_multiprocessing": True},
},
{
"testcase_name": "py_dataset_multiprocessing_cw",
"dataset_type": "py_dataset",
"dataset_kwargs": {"workers": 2, "use_multiprocessing": True},
"fit_kwargs": {"class_weight": {0: 1, 1: 2}},
},
{
"testcase_name": "py_dataset_multiprocessing_infinite",
"dataset_type": "py_dataset",
"dataset_kwargs": {
"infinite": True,
"workers": 2,
"use_multiprocessing": True,
},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "tf_dataset",
"dataset_type": "tf_dataset",
},
{
"testcase_name": "tf_dataset_infinite",
"dataset_type": "tf_dataset",
"dataset_kwargs": {"infinite": True},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "torch_dataloader_tensor",
"dataset_type": "torch_dataloader",
},
{
"testcase_name": "torch_dataloader_iterable",
"dataset_type": "torch_dataloader",
"dataset_kwargs": {"iterable": True, "has_len": False},
},
{
"testcase_name": "torch_dataloader_iterable_with_len",
"dataset_type": "torch_dataloader",
"dataset_kwargs": {"iterable": True, "has_len": True},
},
{
"testcase_name": "generator",
"dataset_type": "generator",
},
{
"testcase_name": "generator_infinite",
"dataset_type": "generator",
"dataset_kwargs": {"infinite": True},
"fit_kwargs": {"steps_per_epoch": 20},
},
{
"testcase_name": "grain_datast",
"dataset_type": "grain_datast",
"dataset_kwargs": {"has_len": False},
},
{
"testcase_name": "grain_datast_with_len",
"dataset_type": "grain_datast",
"dataset_kwargs": {"has_len": True},
},
{
"testcase_name": "grain_dataloader",
"dataset_type": "grain_datast",
"dataset_kwargs": {"use_dataloader": True},
},
]
)
@pytest.mark.requires_trainable_backend
def test_fit_with_data_adapter(
self, dataset_type, dataset_kwargs={}, fit_kwargs={}
):
jit_compile = True
if (
dataset_kwargs.get("use_multiprocessing", False)
and backend.backend() == "jax"
):
pytest.skip("Multiprocessing not supported with JAX backend")
if dataset_type == "grain_datast" and backend.backend() == "torch":
# Grain datasets are not supported with torch + jit_compile.
jit_compile = False
model = ExampleModel(units=3)
optimizer = optimizers.Adagrad()
model.compile(
optimizer=optimizer,
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
jit_compile=jit_compile,
)
x, y = create_dataset(dataset_type, dataset_kwargs)
model.fit(x, y, epochs=3, **fit_kwargs)
@parameterized.named_parameters(
[
("eager", True, False, False),
("graph_fn", False, False, False),
("jit", False, True, False),
("steps_per_epoch_eager", True, False, True),
("steps_per_epoch_graph_fn", False, False, True),
("steps_per_epoch_jit", False, True, True),
]
)
@pytest.mark.requires_trainable_backend
def test_fit_with_val_split(
self, run_eagerly, jit_compile, use_steps_per_epoch
):
if not run_eagerly and not jit_compile and use_steps_per_epoch:
if backend.backend() == "tensorflow":
self.skipTest(
"TODO: Graph mode without XLA in TF backend leads to "
"unexpected logs, need further checks."
)
model = ExampleModel(units=3)
epochs = 3
batch_size = 20
steps_per_epoch = 7
dataset_size = batch_size * (steps_per_epoch - 2)
x = np.ones((dataset_size, 4))
y = np.zeros((dataset_size, 3))
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=run_eagerly,
jit_compile=jit_compile,
)
history = model.fit(
x,
y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch if use_steps_per_epoch else None,
epochs=epochs,
validation_split=0.2,
)
history = history.history
self.assertIn("loss", history)
self.assertIn("val_loss", history)
# Test with backend-native tensors.
x = ops.ones((dataset_size, 4))
y = ops.zeros((dataset_size, 3))
history = model.fit(
x,
y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch if use_steps_per_epoch else None,
epochs=epochs,
validation_split=0.2,
)
history = history.history
self.assertIn("loss", history)
self.assertIn("val_loss", history)
@pytest.mark.requires_trainable_backend
def test_fit_with_custom_train_step(self):
if backend.backend() == "jax":
model = JaxCustomTrainTestStepModel(units=3)
else:
model = CustomTrainTestStepModel(units=3)
x = np.ones((100, 4))
y = np.zeros((100, 3))
batch_size = 16
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
history = model.fit(x, y, batch_size=batch_size)
history = history.history
self.assertIn("loss", history)
self.assertIn("mean_squared_error", history)
self.assertAllClose(history["my_custom_metric"], 10.0)
@parameterized.named_parameters(
named_product(
generator_type=["tf", "jax", "scipy"], mode=["eager", "graph"]
)
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_fit_sparse(self, generator_type, mode):
model = ExampleModel(units=3)
optimizer = optimizers.Adagrad()
model.compile(
optimizer=optimizer,
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=(mode == "eager"),
jit_compile=False,
)
dataset = sparse_generator(generator_type)
sparse_variable_updates = False
def mock_optimizer_assign(variable, value):
nonlocal sparse_variable_updates
if value.__class__.__name__ == "IndexedSlices":
sparse_variable_updates = True
with mock.patch.object(
optimizer, "assign_sub", autospec=True
) as optimizer_assign_sub:
optimizer_assign_sub.side_effect = mock_optimizer_assign
model.fit(dataset)
# JAX does not produce sparse gradients the way we use it.
if backend.backend() != "jax":
# Verify tensors did not get densified along the way.
self.assertTrue(sparse_variable_updates)
@parameterized.named_parameters(
[
("eager", True, False),
("graph_fn", False, False),
("jit", False, True),
]
)
def test_evaluate_flow(self, run_eagerly, jit_compile):
model = ExampleModel(units=3)
x = np.ones((100, 4))
y = np.zeros((100, 3))
batch_size = 16
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=run_eagerly,
jit_compile=jit_compile,
)
output = model.evaluate(x, y, batch_size=batch_size)
self.assertAllClose(output, [16.0, 16.0])
output = model.evaluate(x, y, batch_size=batch_size, return_dict=True)
self.assertIsInstance(output, dict)
self.assertIn("loss", output)
self.assertIn("mean_squared_error", output)
self.assertAllClose(output["mean_squared_error"], 16.0)
@parameterized.named_parameters([("flat", False), ("dict", True)])
@pytest.mark.requires_trainable_backend
def test_evaluate_with_custom_test_step(self, return_dict):
if backend.backend() == "jax":
model = JaxCustomTrainTestStepModel(units=3)
else:
model = CustomTrainTestStepModel(units=3)
x = np.ones((100, 4))
y = np.zeros((100, 3))
batch_size = 16
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
output = model.evaluate(
x, y, batch_size=batch_size, return_dict=return_dict
)
self.assertLen(output, 3)
if return_dict:
self.assertAllClose(output["my_custom_metric"], 5.0)
else:
self.assertAllClose(output[-1], 5.0) # Custom metrics go last.
@parameterized.named_parameters(
named_product(
generator_type=["tf", "jax", "scipy"], mode=["eager", "graph"]
)
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_evaluate_sparse(self, generator_type, mode):
model = ExampleModel(units=3)
model.compile(
optimizer=optimizers.Adagrad(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=(mode == "eager"),
jit_compile=False,
)
dataset = sparse_generator(generator_type)
model.evaluate(dataset)
@parameterized.named_parameters(
[
("eager", True, False),
("graph_fn", False, False),
("jit", False, True),
]
)
def test_predict_flow(self, run_eagerly, jit_compile):
# Test basic example
model = ExampleModel(units=3)
model.run_eagerly = run_eagerly
model.jit_compile = jit_compile
x = np.ones((100, 4))
batch_size = 16
outputs = model.predict(x, batch_size=batch_size)
self.assertAllClose(outputs, 4 * np.ones((100, 3)))
@parameterized.named_parameters(
[
("eager", True, False),
("graph_fn", False, False),
("jit", False, True),
]
)
def test_predict_flow_struct(self, run_eagerly, jit_compile):
# Test with input/output structs
model = StructModel(units=3)
model.run_eagerly = run_eagerly
model.jit_compile = jit_compile
x = {
"x_one": np.ones((100, 4)),
"x_two": np.ones((100, 4)),
}
batch_size = 16
outputs = model.predict(x, batch_size=batch_size)
self.assertIsInstance(outputs, dict)
self.assertEqual(len(outputs), 2)
self.assertAllClose(outputs["y_one"], 4 * np.ones((100, 3)))
self.assertAllClose(outputs["y_two"], 4 * np.ones((100, 3)))
@parameterized.named_parameters(
named_product(
generator_type=["tf", "jax", "scipy"], mode=["eager", "graph"]
)
)
@pytest.mark.skipif(
not backend.SUPPORTS_SPARSE_TENSORS,
reason="Backend does not support sparse tensors.",
)
def test_predict_sparse(self, generator_type, mode):
model = ExampleModel(units=3)
model.compile(
optimizer=optimizers.Adagrad(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=(mode == "eager"),
jit_compile=False,
)
dataset = sparse_generator(generator_type)
dataset_size = sum(
[batch[1].shape[0] for batch in sparse_generator(generator_type)]
)
y = model.predict(dataset)
self.assertEqual(len(y), dataset_size)
@pytest.mark.skipif(
backend.backend() != "jax",
reason="Memory optimization is only implemented in JAX",
)
def test_fit_eval_flow_for_jax_model_weights(self):
model = ExampleModel(units=3)
epochs = 3
batch_size = 20
steps_per_epoch = 7
dataset_size = batch_size * (steps_per_epoch - 2)
x = np.ones((dataset_size, 4))
y = np.zeros((dataset_size, 3))
class ModelWeightCheck(Callback):
def __init__(self):
super().__init__()
# Note that we access model via self._model since self.model
# will trigger a sync of the jax training state back to the model.
def on_train_batch_end(self, batch, logs=None):
for v in self._model.trainable_variables:
assert v._value is None
for v in self._model.non_trainable_variables:
assert v._value is None
for v in self._model.optimizer.variables:
assert v._value is None
for v in self._model.metrics_variables:
assert v._value is None
def on_test_batch_end(self, batch, logs=None):
for v in self._model.non_trainable_variables:
assert v._value is None
for v in self._model.metrics_variables:
assert v._value is None
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
model.fit(
x,
y,
batch_size=batch_size,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
callbacks=[ModelWeightCheck()],
)
model.evaluate(
x,
y,
batch_size=batch_size,
callbacks=[ModelWeightCheck()],
)
@parameterized.named_parameters(
named_product(
steps_per_execution=[3, 101], mode=["eager", "non_jit", "jit"]
)
)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() == "torch",
reason="`steps_per_execution` not implemented for torch yet",
)
def test_steps_per_execution_steps_count(self, steps_per_execution, mode):
data_size = 100
batch_size = 16
epochs = 2
x = np.ones((data_size, 4))
y = np.ones((data_size, 1))
model = ExampleModel(units=1)
model.compile(
loss="mse",
optimizer="sgd",
steps_per_execution=steps_per_execution,
run_eagerly=(mode == "eager"),
jit_compile=(mode == "jit"),
)
step_count = StepCount(steps_per_execution)
history = model.fit(
x=x,
y=y,
batch_size=batch_size,
epochs=epochs,
callbacks=[step_count],
verbose=0,
)
self.assertEqual(
step_count.begin_count,
1 + (data_size - 1) // (steps_per_execution * batch_size),
)
self.assertEqual(step_count.end_count, step_count.begin_count)
self.assertEqual(step_count.epoch_begin_count, epochs)
self.assertEqual(
step_count.epoch_end_count, step_count.epoch_begin_count
)
model_2 = ExampleModel(units=1)
model_2.compile(
loss="mse",
optimizer="sgd",
steps_per_execution=1,
run_eagerly=(mode == "eager"),
jit_compile=(mode == "jit"),
)
history_2 = model_2.fit(
x=x, y=y, batch_size=batch_size, epochs=epochs, verbose=0
)
self.assertAllClose(history.history["loss"], history_2.history["loss"])
self.assertAllClose(model.get_weights(), model_2.get_weights())
self.assertAllClose(
model.predict(x, batch_size=batch_size),
model_2.predict(x, batch_size=batch_size),
)
self.assertAllClose(model.evaluate(x, y), model_2.evaluate(x, y))
@parameterized.named_parameters(
named_product(steps_per_execution=[3, 8, 32])
)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="`unrolled_steps_per_execution` is only "
"available with the tensorflow backend.",
)
def test_steps_per_execution_unrolled_steps_steps_count(
self, steps_per_execution
):
data_size = 100
batch_size = 16
epochs = 2
unrolled_steps_per_execution = 8
x = np.ones((data_size, 4))
y = np.ones((data_size, 1))
model = ExampleModel(units=1)
model.compile(
loss="mse",
optimizer="sgd",
steps_per_execution=steps_per_execution,
jit_compile=True,
)
step_count = StepCount(steps_per_execution)
model.unrolled_steps_per_execution = unrolled_steps_per_execution
history = model.fit(
x=x,
y=y,
batch_size=batch_size,
epochs=epochs,
callbacks=[step_count],
verbose=0,
)
self.assertEqual(
step_count.begin_count,
1 + (data_size - 1) // (steps_per_execution * batch_size),
)
self.assertEqual(step_count.end_count, step_count.begin_count)
self.assertEqual(step_count.epoch_begin_count, epochs)
self.assertEqual(
step_count.epoch_end_count, step_count.epoch_begin_count
)
model_2 = ExampleModel(units=1)
model_2.compile(
loss="mse",
optimizer="sgd",
steps_per_execution=steps_per_execution,
jit_compile=True,
)
model_2.unrolled_steps_per_execution = 1
history_2 = model_2.fit(
x=x, y=y, batch_size=batch_size, epochs=epochs, verbose=0
)
self.assertAllClose(history.history["loss"], history_2.history["loss"])
self.assertAllClose(model.get_weights(), model_2.get_weights())
self.assertAllClose(
model.predict(x, batch_size=batch_size),
model_2.predict(x, batch_size=batch_size),
)
self.assertAllClose(model.evaluate(x, y), model_2.evaluate(x, y))
@parameterized.named_parameters(
named_product(
steps_per_execution=[1, 50], mode=["eager", "non_jit", "jit"]
)
)
def test_predict_preserve_order(self, steps_per_execution, mode):
if steps_per_execution > 1 and backend.backend() == "torch":
self.skipTest("`steps_per_execution` not implemented for torch yet")
def generate_uneven_batches():
batch_sizes = [2, 3, 4]
def gen_i():
for i in range(100):
yield i
iterator = iter(gen_i())
j = 0
while True:
batch_size = batch_sizes[j % len(batch_sizes)]
try:
batch = np.array(
[next(iterator) for _ in range(batch_size)]
)
except StopIteration:
break
j += 1
yield batch
from keras.src.utils.module_utils import tensorflow as tf
dataset = tf.data.Dataset.from_generator(
generate_uneven_batches,
output_signature=tf.TensorSpec((None,), dtype=tf.int32),
)
x = keras.layers.Input(shape=())
y = keras.layers.Identity()(x)
model = keras.Model(x, y)
model.compile(
loss="mse",
optimizer="sgd",
steps_per_execution=steps_per_execution,
run_eagerly=(mode == "eager"),
jit_compile=(mode == "jit"),
)
preds = model.predict(x=dataset, verbose=0)
self.assertAllEqual(preds, np.arange(len(preds), dtype=np.float32))
@parameterized.named_parameters(
named_product(
steps_per_execution=[1, 50], mode=["eager", "non_jit", "jit"]
)
)
def test_predict_generator(self, steps_per_execution, mode):
if steps_per_execution > 1 and backend.backend() == "torch":
self.skipTest("`steps_per_execution` not implemented for torch yet")
batch_size = 2
def generate_batches():
def gen_i():
for i in range(10):
yield i
iterator = iter(gen_i())
j = 0
while True:
try:
batch = np.array(
[next(iterator) for _ in range(batch_size)]
)
except StopIteration:
break
j += 1
yield (batch,)
model = keras.Sequential(
[keras.layers.InputLayer(shape=()), keras.layers.Identity()]
)
model.compile(
loss="mse",
optimizer="sgd",
steps_per_execution=steps_per_execution,
run_eagerly=(mode == "eager"),
jit_compile=(mode == "jit"),
)
preds = model.predict(x=generate_batches(), verbose=0)
self.assertAllEqual(
preds, np.concatenate(list(generate_batches()), axis=1)[0]
)
@parameterized.named_parameters(
named_product(
steps_per_execution=[3, 101], mode=["eager", "non_jit", "jit"]
)
)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() == "torch",
reason="`steps_per_execution` not implemented for torch yet",
)
def test_steps_per_execution_steps_count_unknown_dataset_size(
self, steps_per_execution, mode
):
data_size = 100
batch_size = 16
epochs = 2
def data_generator():
x = np.ones((data_size, 4), dtype=np.float32)
y = np.ones((data_size, 1), dtype=np.float32)
for _x, _y in zip(x, y):
yield _x, _y
import tensorflow as tf
dataset = tf.data.Dataset.from_generator(
data_generator,
output_signature=(
tf.TensorSpec(shape=(4,), dtype=tf.float32),
tf.TensorSpec(shape=(1,), dtype=tf.float32),
),
)
dataset = dataset.batch(batch_size)
model = ExampleModel(units=1)
model.compile(
loss="mse",
optimizer="sgd",
steps_per_execution=steps_per_execution,
run_eagerly=(mode == "eager"),
jit_compile=(mode == "jit"),
)
step_count = StepCount(steps_per_execution)
history = model.fit(
dataset,
epochs=epochs,
callbacks=[step_count],
verbose=0,
)
batch_count = 1 + (data_size - 1) // (steps_per_execution * batch_size)
self.assertGreaterEqual(step_count.begin_count, batch_count)
self.assertEqual(step_count.end_count, batch_count)
self.assertEqual(step_count.epoch_begin_count, epochs)
self.assertEqual(
step_count.epoch_end_count, step_count.epoch_begin_count
)
model_2 = ExampleModel(units=1)
model_2.compile(
loss="mse",
optimizer="sgd",
steps_per_execution=1,
run_eagerly=(mode == "eager"),
jit_compile=(mode == "jit"),
)
history_2 = model_2.fit(dataset, epochs=epochs, verbose=0)
self.assertAllClose(history.history["loss"], history_2.history["loss"])
self.assertAllClose(model.get_weights(), model_2.get_weights())
self.assertAllClose(
model.predict(dataset),
model_2.predict(dataset),
)
self.assertAllClose(model.evaluate(dataset), model_2.evaluate(dataset))
@parameterized.named_parameters(
named_product(
steps_per_epoch_test=[
"match_one_epoch",
"match_multi_epoch",
"not_match_too_low",
"not_match_but_high_enough",
],
mode=["eager", "non_jit", "jit"],
)
)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() == "torch",
reason="`steps_per_execution` not implemented for torch yet",
)
def test_steps_per_execution_steps_per_epoch(
self, steps_per_epoch_test, mode
):
batch_size = 8
epochs = 2
steps_per_execution = 2
num_batches = 5 * steps_per_execution
data_size = num_batches * batch_size
if steps_per_epoch_test == "match_one_epoch":
steps_per_epoch = num_batches
elif steps_per_epoch_test == "match_multi_epoch":
steps_per_epoch = num_batches // steps_per_execution
elif steps_per_epoch_test == "not_match_too_low":
steps_per_epoch = num_batches - steps_per_execution
elif steps_per_epoch_test == "not_match_but_high_enough":
steps_per_epoch = num_batches + steps_per_execution
x = np.ones((data_size, 4))
y = np.ones((data_size, 1))
model = ExampleModel(units=1)
model.compile(
loss="mse",
optimizer="sgd",
metrics=[EpochAgnosticMeanSquaredError()],
steps_per_execution=steps_per_execution,
run_eagerly=(mode == "eager"),
jit_compile=(mode == "jit"),
)
step_observer = StepObserver()
model.fit(
x=x,
y=y,
batch_size=batch_size,
epochs=epochs,
steps_per_epoch=steps_per_epoch,
callbacks=[step_observer],
verbose=0,
)
if steps_per_epoch_test != "not_match_too_low":
training_batch_count = (
epochs
* min(steps_per_epoch, num_batches)
// steps_per_execution
)
else:
complete_epochs = (num_batches // steps_per_execution) // (
steps_per_epoch // steps_per_execution
)
remaining_steps = (num_batches // steps_per_execution) % (
steps_per_epoch // steps_per_execution
)
steps_cycles = [
complete_epochs * steps_per_epoch // steps_per_execution,
remaining_steps,
] * epochs
steps_per_epochs = steps_cycles[:epochs]
training_batch_count = sum(steps_per_epochs)
self.assertEqual(step_observer.begin_count, training_batch_count)
self.assertEqual(step_observer.end_count, step_observer.begin_count)
self.assertEqual(step_observer.epoch_begin_count, epochs)
self.assertEqual(
step_observer.epoch_end_count, step_observer.epoch_begin_count
)
if steps_per_epoch_test != "not_match_too_low":
model_2 = ExampleModel(units=1)
model_2.compile(
loss="mse",
optimizer="sgd",
metrics=[EpochAgnosticMeanSquaredError()],
steps_per_execution=1,
run_eagerly=(mode == "eager"),
jit_compile=(mode == "jit"),
)
step_observer_2 = StepObserver()
if steps_per_epoch_test in (
"not_match_but_high_enough",
"match_one_epoch",
):
model_2_epochs = epochs
else:
model_2_epochs = 1
model_2.fit(
x=x,
y=y,
batch_size=batch_size,
epochs=model_2_epochs,
callbacks=[step_observer_2],
verbose=0,
)
losses = step_observer.batch_loss_history
losses_2 = step_observer_2.batch_loss_history[
steps_per_execution - 1 :: steps_per_execution
]
self.assertAllClose(losses, losses_2)
self.assertAllClose(model.get_weights(), model_2.get_weights())
self.assertAllClose(
model.predict(x, batch_size=batch_size),
model_2.predict(x, batch_size=batch_size),
)
self.assertAllClose(model.evaluate(x, y), model_2.evaluate(x, y))
    @parameterized.named_parameters(
        named_product(
            steps_per_epoch_test=[
                "match_one_epoch",
                "match_multi_epoch",
                "not_match_too_low",
                "not_match_but_high_enough",
            ],
            mode=["eager", "non_jit", "jit"],
        )
    )
    @pytest.mark.requires_trainable_backend
    def test_steps_per_epoch(self, steps_per_epoch_test, mode):
        """Verify `steps_per_epoch` batch accounting against a reference run.

        Covers four shapes of `steps_per_epoch` relative to the data (exact
        one-epoch match, an integer fraction of the data, fewer than the
        data provides, and more than the data provides).  The callback's
        observed batch/epoch counts must match the expected arithmetic, and
        where the schedule is reproducible with plain epochs, a second
        model trained without `steps_per_epoch` must produce identical
        per-batch losses, weights, predictions and evaluation results.
        """
        batch_size = 8
        epochs = 4
        num_batches = 10
        data_size = num_batches * batch_size
        if steps_per_epoch_test == "match_one_epoch":
            steps_per_epoch = num_batches
        elif steps_per_epoch_test == "match_multi_epoch":
            steps_per_epoch = num_batches // (epochs // 2)
        elif steps_per_epoch_test == "not_match_too_low":
            steps_per_epoch = num_batches - 1
        elif steps_per_epoch_test == "not_match_but_high_enough":
            steps_per_epoch = num_batches + 1
        x = np.ones((data_size, 4))
        y = np.ones((data_size, 1))
        model = ExampleModel(units=1)
        model.compile(
            loss="mse",
            optimizer="sgd",
            metrics=[EpochAgnosticMeanSquaredError()],
            run_eagerly=(mode == "eager"),
            jit_compile=(mode == "jit"),
        )
        step_observer = StepObserver()
        model.fit(
            x=x,
            y=y,
            batch_size=batch_size,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            callbacks=[step_observer],
            verbose=0,
        )
        if steps_per_epoch_test != "not_match_too_low":
            training_batch_count = epochs * min(steps_per_epoch, num_batches)
        else:
            # steps_per_epoch < num_batches: epochs appear to alternate
            # between full `steps_per_epoch` runs and the leftover batches
            # of the data (the iterator resumes across epochs) — this
            # mirrors the expected fit() behavior; see assertions below.
            complete_epochs = num_batches // steps_per_epoch
            remaining_steps = num_batches % steps_per_epoch
            steps_cycles = [
                complete_epochs * steps_per_epoch,
                remaining_steps,
            ] * epochs
            steps_per_epochs = steps_cycles[:epochs]
            training_batch_count = sum(steps_per_epochs)
        self.assertEqual(step_observer.begin_count, training_batch_count)
        self.assertEqual(step_observer.end_count, step_observer.begin_count)
        self.assertEqual(step_observer.epoch_begin_count, epochs)
        self.assertEqual(
            step_observer.epoch_end_count, step_observer.epoch_begin_count
        )
        if steps_per_epoch_test != "not_match_too_low":
            # Reference model: same data, no steps_per_epoch; results must
            # match batch-for-batch.
            model_2 = ExampleModel(units=1)
            model_2.compile(
                loss="mse",
                optimizer="sgd",
                metrics=[EpochAgnosticMeanSquaredError()],
                steps_per_execution=1,
                run_eagerly=(mode == "eager"),
                jit_compile=(mode == "jit"),
            )
            step_observer_2 = StepObserver()
            if steps_per_epoch_test in (
                "not_match_but_high_enough",
                "match_one_epoch",
            ):
                model_2_epochs = epochs
            elif steps_per_epoch_test == "match_multi_epoch":
                # Each plain epoch spans several steps_per_epoch "epochs".
                model_2_epochs = epochs // (num_batches // steps_per_epoch)
            else:
                model_2_epochs = 1
            model_2.fit(
                x=x,
                y=y,
                batch_size=batch_size,
                epochs=model_2_epochs,
                callbacks=[step_observer_2],
                verbose=0,
            )
            losses = step_observer.batch_loss_history
            losses_2 = step_observer_2.batch_loss_history
            self.assertAllClose(losses, losses_2)
            self.assertAllClose(model.get_weights(), model_2.get_weights())
            self.assertAllClose(
                model.predict(x, batch_size=batch_size),
                model_2.predict(x, batch_size=batch_size),
            )
            self.assertAllClose(model.evaluate(x, y), model_2.evaluate(x, y))
    @pytest.mark.requires_trainable_backend
    def test_max_epochs_and_steps(self):
        """`config.set_max_epochs` / `set_max_steps_per_epoch` cap training.

        A baseline fit runs all requested epochs and batches; with the
        global caps set to 2 epochs and 3 steps, the observed counts drop
        to 2 and 6 respectively.  The caps are reset in a `finally` block
        so other tests are unaffected.
        """
        batch_size = 8
        epochs = 4
        num_batches = 10
        data_size = num_batches * batch_size
        x, y = np.ones((data_size, 4)), np.ones((data_size, 1))
        model = ExampleModel(units=1)
        model.compile(
            loss="mse",
            optimizer="sgd",
            metrics=[EpochAgnosticMeanSquaredError()],
        )
        step_observer = StepObserver()
        model.fit(
            x=x,
            y=y,
            batch_size=batch_size,
            epochs=epochs,
            callbacks=[step_observer],
            verbose=0,
        )
        # Uncapped baseline: every batch of every epoch runs.
        self.assertEqual(step_observer.epoch_begin_count, epochs)
        self.assertEqual(step_observer.begin_count, num_batches * epochs)
        try:
            config.set_max_epochs(2)
            config.set_max_steps_per_epoch(3)
            step_observer = StepObserver()
            model.fit(
                x=x,
                y=y,
                batch_size=batch_size,
                epochs=epochs,
                callbacks=[step_observer],
                verbose=0,
            )
            # Capped: 2 epochs x 3 steps despite requesting 4 x 10.
            self.assertEqual(step_observer.epoch_begin_count, 2)
            self.assertEqual(step_observer.begin_count, 6)
        finally:
            # Always restore the global config so later tests see defaults.
            config.set_max_epochs(None)
            config.set_max_steps_per_epoch(None)
    @parameterized.named_parameters(
        named_product(
            steps_per_epoch_test=[
                "match",
                "not_match_too_low",
                "not_match_but_high_enough",
            ],
            mode=["eager", "non_jit", "jit"],
        )
    )
    @pytest.mark.requires_trainable_backend
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="`steps_per_execution` not implemented for torch yet",
    )
    def test_steps_per_execution_steps_per_epoch_unknown_data_size(
        self, steps_per_epoch_test, mode
    ):
        """`steps_per_execution` + `steps_per_epoch` on an unsized dataset.

        The dataset comes from a generator, so its cardinality is unknown
        to fit().  Checks the observed batch counts against the expected
        arithmetic and, where reproducible, compares batch losses, weights,
        predictions and evaluation against a reference model trained with
        `steps_per_execution=1`.
        """
        batch_size = 8
        epochs = 2
        steps_per_execution = 2
        num_batches = 5 * epochs * steps_per_execution
        data_size = num_batches * batch_size
        if steps_per_epoch_test == "match":
            steps_per_epoch = num_batches // epochs
        elif steps_per_epoch_test == "not_match_too_low":
            steps_per_epoch = num_batches - steps_per_execution
        elif steps_per_epoch_test == "not_match_but_high_enough":
            steps_per_epoch = num_batches + steps_per_execution
        def data_generator():
            # Yields one (x, y) sample at a time so the resulting
            # tf.data.Dataset has unknown cardinality.
            x = np.ones((data_size, 4), dtype=np.float32)
            y = np.ones((data_size, 1), dtype=np.float32)
            for _x, _y in zip(x, y):
                yield _x, _y
        import tensorflow as tf
        dataset = tf.data.Dataset.from_generator(
            data_generator,
            output_signature=(
                tf.TensorSpec(shape=(4,), dtype=tf.float32),
                tf.TensorSpec(shape=(1,), dtype=tf.float32),
            ),
        )
        dataset = dataset.batch(batch_size)
        model = ExampleModel(units=1)
        model.compile(
            loss="mse",
            optimizer="sgd",
            metrics=[EpochAgnosticMeanSquaredError()],
            steps_per_execution=steps_per_execution,
            run_eagerly=(mode == "eager"),
            jit_compile=(mode == "jit"),
        )
        step_observer = StepObserver()
        model.fit(
            dataset,
            epochs=epochs,
            steps_per_epoch=steps_per_epoch,
            callbacks=[step_observer],
            verbose=0,
        )
        if steps_per_epoch_test != "not_match_too_low":
            training_batch_count = (
                epochs
                * min(steps_per_epoch, num_batches)
                // steps_per_execution
            )
        else:
            # steps_per_epoch < num_batches: same alternating full/leftover
            # epoch pattern as in test_steps_per_epoch, scaled down by
            # steps_per_execution since callbacks fire once per execution.
            complete_epochs = (num_batches // steps_per_execution) // (
                steps_per_epoch // steps_per_execution
            )
            remaining_steps = (num_batches // steps_per_execution) % (
                steps_per_epoch // steps_per_execution
            )
            steps_cycles = [
                complete_epochs * steps_per_epoch // steps_per_execution,
                remaining_steps,
            ] * epochs
            steps_per_epochs = steps_cycles[:epochs]
            training_batch_count = sum(steps_per_epochs)
        # begin may fire once more than end when the dataset runs out
        # mid-execution, hence >= for begin but == for end.
        self.assertGreaterEqual(step_observer.begin_count, training_batch_count)
        self.assertEqual(step_observer.end_count, training_batch_count)
        self.assertEqual(step_observer.epoch_begin_count, epochs)
        self.assertEqual(
            step_observer.epoch_end_count, step_observer.epoch_begin_count
        )
        if steps_per_epoch_test != "not_match_too_low":
            model_2 = ExampleModel(units=1)
            model_2.compile(
                loss="mse",
                optimizer="sgd",
                metrics=[EpochAgnosticMeanSquaredError()],
                steps_per_execution=1,
                run_eagerly=(mode == "eager"),
                jit_compile=(mode == "jit"),
            )
            step_observer_2 = StepObserver()
            if steps_per_epoch_test == "not_match_but_high_enough":
                model_2_epochs = epochs
            else:
                model_2_epochs = 1
            model_2.fit(
                dataset,
                epochs=model_2_epochs,
                callbacks=[step_observer_2],
                verbose=0,
            )
            losses = step_observer.batch_loss_history
            # The reference logs one loss per batch; keep every
            # steps_per_execution-th entry to align with the grouped runs.
            losses_2 = step_observer_2.batch_loss_history[
                steps_per_execution - 1 :: steps_per_execution
            ]
            self.assertAllClose(losses, losses_2)
            self.assertAllClose(model.get_weights(), model_2.get_weights())
            self.assertAllClose(
                model.predict(dataset), model_2.predict(dataset)
            )
            self.assertAllClose(
                model.evaluate(dataset), model_2.evaluate(dataset)
            )
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="`steps_per_execution` not implemented for torch yet",
    )
    def test_steps_per_execution_steps_count_without_training(self):
        """predict/evaluate callbacks fire at `steps_per_execution` strides.

        With 100 samples at batch_size=16 (7 batches) and
        steps_per_execution=3, batch-begin callbacks fire at indices
        0, 3 and 6 — three times total.
        """
        class StepCount(Callback):
            def __init__(self):
                super().__init__()
                self.test_count = 0
                self.predict_count = 0
                # Expected batch indices at which the callbacks fire.
                self.batches = [0, 3, 6]
            def on_test_batch_begin(self, batch, logs=None):
                assert batch == self.batches[self.test_count]
                self.test_count += 1
            def on_predict_batch_begin(self, batch, logs=None):
                assert batch == self.batches[self.predict_count]
                self.predict_count += 1
        x = np.ones((100, 4))
        y = np.ones((100, 1))
        batch_size = 16
        model = ExampleModel(units=1)
        model.compile(loss="mse", steps_per_execution=3)
        step_count = StepCount()
        model.predict(x, batch_size=batch_size, callbacks=[step_count])
        self.assertEqual(step_count.predict_count, 3)
        model.evaluate(x, y, batch_size=batch_size, callbacks=[step_count])
        self.assertEqual(step_count.test_count, 3)
@pytest.mark.requires_trainable_backend
def test_fit_with_different_batch_size_same_loss(self):
x = np.random.rand(100, 4)
y = np.ones((100, 1))
model = ExampleModel(units=1)
model.trainable = False
model.compile(loss="mse")
loss1 = model.fit(x, y, batch_size=80).history["loss"]
loss2 = model.fit(x, y, batch_size=100).history["loss"]
self.assertAllClose(loss1, loss2)
def test_evaluate_with_different_batch_size_same_loss(self):
x = np.random.rand(100, 4)
y = np.ones((100, 1))
model = ExampleModel(units=1)
model.compile(loss="mse")
loss1 = model.evaluate(x, y, batch_size=80)
loss2 = model.evaluate(x, y, batch_size=100)
self.assertAllClose(loss1, loss2)
@pytest.mark.requires_trainable_backend
def test_adds_loss_scaling_optimizer(self):
model = TrainingTestingLayer(dtype="mixed_float16")
model.compile(optimizer="rmsprop", loss="mse")
x = np.ones((128, 1))
y = np.zeros((128, 1))
model.fit(x, y, batch_size=32)
self.assertIsInstance(model.optimizer, optimizers.LossScaleOptimizer)
model = TrainingTestingLayer(dtype="mixed_float16")
model.compile(optimizer="rmsprop", loss="mse", auto_scale_loss=False)
x = np.ones((128, 1))
y = np.zeros((128, 1))
model.fit(x, y, batch_size=32)
self.assertIsInstance(model.optimizer, RMSprop)
model = TrainingTestingLayer(dtype="mixed_bfloat16")
model.compile(optimizer="rmsprop", loss="mse")
x = np.ones((128, 1))
y = np.zeros((128, 1))
model.fit(x, y, batch_size=32)
self.assertIsInstance(model.optimizer, RMSprop)
    @pytest.mark.requires_trainable_backend
    @pytest.mark.skipif(
        backend.backend() == "torch",
        reason="half precision unsupported on torch CPU.",
    )
    def test_loss_scaling_prevents_underflow(self):
        """Auto loss scaling rescues float16 gradients from underflow.

        A 15-layer sigmoid chain shrinks gradients until they underflow in
        float16.  Without loss scaling the first layer's gradient is zero
        (its kernel never moves despite a huge learning rate); with
        `auto_scale_loss=True` the gradient survives and the kernel updates.
        """
        class DeepModel(Trainer, layers.Layer):
            def __init__(self):
                layers.Layer.__init__(self, dtype="mixed_float16")
                Trainer.__init__(self)
                self.layers = []
                for _ in range(15):
                    # Sigmoid has a small gradient, will eventually underflow.
                    self.layers.append(
                        layers.Dense(
                            1,
                            use_bias=False,
                            kernel_initializer="ones",
                            activation="sigmoid",
                            dtype="mixed_float16",
                        )
                    )
            def call(self, x):
                for layer in self.layers:
                    x = layer(x)
                return x
        loss = losses.MeanSquaredError()
        # Blow up any gradient updates, so underflow is obvious.
        optimizer = optimizers.SGD(learning_rate=1e9)
        model = DeepModel()
        model.compile(optimizer, loss=loss, auto_scale_loss=False)
        model.fit(np.ones((1, 1)), np.ones((1, 1)), batch_size=1)
        first_kernel = model.layers[0].kernel
        # Without autoscaling, the first dense will not update.
        self.assertEqual(first_kernel, np.ones_like(first_kernel))
        # Blow up any gradient updates, so underflow is obvious.
        optimizer = optimizers.SGD(learning_rate=1e9)
        model = DeepModel()
        model.compile(optimizer, loss=loss, auto_scale_loss=True)
        model.fit(np.ones((1, 1)), np.ones((1, 1)), batch_size=1)
        first_kernel = model.layers[0].kernel
        # With autoscaling, the first dense will update.
        self.assertNotEqual(first_kernel, np.ones_like(first_kernel))
@pytest.mark.requires_trainable_backend
def test_training_arg(self):
model = TrainingTestingLayer()
model.compile(optimizer="rmsprop", loss="mse")
x = np.ones((128, 1))
y = np.zeros((128, 1))
history = model.fit(x, y, batch_size=32)
self.assertAllClose(history.history["loss"], [1.0])
val_loss = model.evaluate(x, y, batch_size=32)
self.assertAllClose(val_loss, 0.0)
preds = model.predict(x)
self.assertAllClose(preds, np.zeros((128, 1)))
    @parameterized.named_parameters(
        [
            ("eager", True, False),
            ("graph_fn", False, False),
            ("jit", False, True),
        ]
    )
    @pytest.mark.requires_trainable_backend
    def test_on_batch_methods(self, run_eagerly, jit_compile):
        """train/test/predict_on_batch return correct types and values.

        The hard-coded expected values form a fixed trajectory: each
        `train_on_batch` call updates the weights, so the order of the
        calls below must not change.  Also exercises `sample_weight`,
        `class_weight` and the `return_dict` flag.
        """
        if backend.backend() == "torch" and jit_compile:
            self.skipTest(
                "test_on_batch with jit_compile=True not supported in torch "
                "backend yet."
            )
        model = ExampleModel(units=3)
        x = np.ones((100, 4))
        y = np.zeros((100, 3))
        # Per-sample weights 0.0 .. 1.98, mean ~1.
        sw = np.arange(100).reshape((100,)).astype("float32") / 50.0
        model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
            run_eagerly=run_eagerly,
            jit_compile=jit_compile,
        )
        logs = model.train_on_batch(x, y)
        self.assertIsInstance(logs, list)
        self.assertEqual(len(logs), 2)
        self.assertAlmostEqual(logs[0], 16.0)
        logs = model.train_on_batch(x, y, return_dict=True)
        self.assertIsInstance(logs, dict)
        self.assertEqual(len(logs), 2)
        self.assertAlmostEqual(logs["loss"], 15.579)
        logs = model.test_on_batch(x, y)
        self.assertIsInstance(logs, list)
        self.assertEqual(len(logs), 2)
        self.assertAlmostEqual(logs[0], 15.173)
        logs = model.test_on_batch(x, y, return_dict=True)
        self.assertIsInstance(logs, dict)
        self.assertEqual(len(logs), 2)
        self.assertAlmostEqual(logs["loss"], 14.97)
        output = model.predict_on_batch(x)
        self.assertIsInstance(output, np.ndarray)
        self.assertAllClose(output[0], np.array([3.789511, 3.789511, 3.789511]))
        # With sample weights
        logs = model.train_on_batch(x, y, sw)
        self.assertAlmostEqual(logs[0], 14.819)
        logs = model.test_on_batch(x, y, sw)
        self.assertAlmostEqual(logs[0], 14.595)
        output = model.predict_on_batch(x)
        self.assertAllClose(output[0], np.array([3.689468, 3.689468, 3.689468]))
        # With class weights
        logs = model.train_on_batch(x, y, class_weight={1: 0.3, 0: 0.2})
        self.assertAlmostEqual(logs[0], 12.899)
@parameterized.named_parameters(
[
("eager", True, False),
("graph_fn", False, False),
("jit", False, True),
]
)
def test_on_batch_methods_without_training(self, run_eagerly, jit_compile):
if backend.backend() == "torch" and jit_compile:
self.skipTest(
"test_on_batch with jit_compile=True not supported in torch "
"backend yet."
)
model = ExampleModel(units=3)
x = np.ones((100, 4))
y = np.zeros((100, 3))
model.compile(
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
run_eagerly=run_eagerly,
jit_compile=jit_compile,
)
output = model.predict_on_batch(x)
self.assertIsInstance(output, np.ndarray)
self.assertAllClose(output[0], np.array([4.0, 4.0, 4.0]))
logs = model.test_on_batch(x, y)
self.assertIsInstance(logs, list)
self.assertEqual(len(logs), 2)
self.assertAlmostEqual(logs[0], 16.0)
logs = model.test_on_batch(x, y, return_dict=True)
self.assertIsInstance(logs, dict)
self.assertEqual(len(logs), 2)
self.assertAlmostEqual(logs["loss"], 16.0)
def test_nested_input_predict(self):
# https://github.com/keras-team/keras/issues/325
class TupleInputModel(keras.Model):
def call(self, inputs):
a, b = inputs
return a + b
model = TupleInputModel()
x1, x2 = np.random.rand(2, 3, 4)
out = model.predict((x1, x2))
self.assertEqual(out.shape, (3, 4))
class DictInputModel(keras.Model):
def call(self, inputs):
return inputs["a"] + inputs["b"]
model = DictInputModel()
x1, x2 = np.random.rand(2, 3, 4)
out = model.predict({"a": x1, "b": x2})
self.assertEqual(out.shape, (3, 4))
@pytest.mark.requires_trainable_backend
def test_for_eval_epoch_iterator(self):
model = ExampleModel(units=3)
model.compile(
optimizer="adam", loss="mse", metrics=["mean_absolute_error"]
)
x = np.ones((16, 4))
y = np.zeros((16, 3))
x_test = np.ones((16, 4))
y_test = np.zeros((16, 3))
model.fit(
x,
y,
batch_size=4,
validation_data=(x_test, y_test),
)
assert getattr(model, "_eval_epoch_iterator", None) is None
# Try model.fit with reshaped validation_data
# This will throw an exception which is intended
try:
model.fit(
x,
y,
batch_size=4,
validation_data=(
x_test.reshape((-1, 16, 4)),
y_test.reshape((-1, 16, 3)),
),
)
except:
pass
# Try model.fit with correct validation_data this should work.
# After successful training `_eval_epoch_iterator` should be None
model.fit(
x,
y,
batch_size=4,
validation_data=(x_test, y_test),
)
assert getattr(model, "_eval_epoch_iterator", None) is None
    @pytest.mark.requires_trainable_backend
    def test_callback_methods_keys(self):
        """Each callback hook receives exactly the expected log keys.

        `*_begin` hooks get empty logs; `*_end` hooks get the compiled
        loss/metric keys (plus `val_` variants at epoch/train end), and
        predict-batch end gets only `outputs`.
        """
        class CustomCallback(Callback):
            def on_train_begin(self, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == []
            def on_train_end(self, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == [
                    "loss",
                    "mean_absolute_error",
                    "val_loss",
                    "val_mean_absolute_error",
                ]
            def on_epoch_begin(self, epoch, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == []
            def on_epoch_end(self, epoch, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == [
                    "loss",
                    "mean_absolute_error",
                    "val_loss",
                    "val_mean_absolute_error",
                ]
            def on_test_begin(self, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == []
            def on_test_end(self, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == ["loss", "mean_absolute_error"]
            def on_predict_begin(self, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == []
            def on_predict_end(self, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == []
            def on_train_batch_begin(self, batch, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == []
            def on_train_batch_end(self, batch, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == ["loss", "mean_absolute_error"]
            def on_test_batch_begin(self, batch, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == []
            def on_test_batch_end(self, batch, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == ["loss", "mean_absolute_error"]
            def on_predict_batch_begin(self, batch, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == []
            def on_predict_batch_end(self, batch, logs=None):
                keys = sorted(list(logs.keys()))
                assert keys == ["outputs"]
        model = ExampleModel(units=3)
        model.compile(
            optimizer="adam", loss="mse", metrics=["mean_absolute_error"]
        )
        x = np.ones((16, 4))
        y = np.zeros((16, 3))
        x_test = np.ones((16, 4))
        y_test = np.zeros((16, 3))
        # fit() with validation exercises the train/test/epoch hooks;
        # evaluate() and predict() exercise the remaining ones.
        model.fit(
            x,
            y,
            callbacks=[CustomCallback()],
            batch_size=4,
            validation_data=(x_test, y_test),
        )
        model.evaluate(x_test, y_test, batch_size=4)
        model.predict(x_test, batch_size=4)
@pytest.mark.requires_trainable_backend
def test_internal_only_loss(self):
class LossLayer(layers.Layer):
def call(self, x):
self.add_loss(ops.sum(x))
return x
model = keras.Sequential(
[
layers.Dense(2),
LossLayer(),
layers.Dense(1),
]
)
model.compile(optimizer="adam")
x = np.ones((16, 2))
y = np.zeros((16, 1))
model.fit(x, y, batch_size=4)
def get_layer(self):
class ExampleLayer(keras.Layer):
def call(self, x):
return x * 2
return ExampleLayer
def get_model(self):
class ExampleModel(keras.Model):
def call(self, x):
return x * 2
return ExampleModel
def get_functional(self):
ExampleLayer = self.get_layer()
class ExampleFunctional(keras.src.Functional):
def __init__(self, input_shape=(None,)):
inputs = keras.Input(input_shape)
outputs = ExampleLayer()(inputs)
super().__init__(inputs=inputs, outputs=outputs)
return ExampleFunctional
    @parameterized.named_parameters(
        [
            {
                "testcase_name": "model",
                "model_class": "get_model",
            },
            {
                "testcase_name": "layer",
                "model_class": "get_layer",
            },
            {
                "testcase_name": "functional",
                "model_class": "get_functional",
            },
        ]
    )
    @pytest.mark.requires_trainable_backend
    @pytest.mark.skipif(
        keras.backend.backend() != "tensorflow",
        reason="Only tensorflow supports raggeds",
    )
    def test_trainer_with_raggeds(self, model_class):
        """RaggedTensors survive call/fit/predict without densification.

        Parameterized over a subclassed Model, a bare Layer and a
        Functional model (built via the `get_*` factory methods on this
        class); also re-checks the model wrapped in a Sequential.
        """
        from keras.src.utils.module_utils import tensorflow as tf
        def loss_fn(y, y_pred, sample_weight=None):
            # Constant loss: training result is irrelevant, we only check
            # that ragged structures pass through the machinery.
            return 0
        model = getattr(self, model_class)()()
        x = tf.ragged.constant([[1], [2, 3]])
        # test forward pass
        y = model(x)
        self.assertEqual(type(y), tf.RaggedTensor)
        # test training
        if model_class in ["get_model", "get_functional"]:
            model.compile(optimizer="adam", loss=loss_fn)
            model.fit(x, x)
            y = model.predict(x)
            self.assertEqual(type(y), tf.RaggedTensor)
        # test if everything works with the sequential model
        model = keras.Sequential([model])
        model.compile(optimizer="adam", loss=loss_fn)
        model.fit(x, x)
        y = model.predict(x)
        self.assertEqual(type(y), tf.RaggedTensor)
    def test_predict_dropout(self):
        """Dropout applied at predict time varies across batches/calls.

        The two halves of one predict() call and two predict_on_batch()
        calls must differ, proving the dropout RNG advances between
        batches rather than being reset.
        """
        # Test that `predict` with a dropout op
        # has nondeterministic behavior across batches.
        inputs = layers.Input((20,))
        outputs = layers.Dropout(0.5, seed=1337)(inputs, training=True)
        model = keras.Model(inputs, outputs)
        out1 = model.predict(np.ones((4, 20)), batch_size=2)
        # Nonzero difference between the two size-2 batches.
        self.assertGreater(5, np.sum(np.abs(out1[:2, :] - out1[2:4, :])))
        out2 = model.predict_on_batch(np.ones((2, 20)))
        out3 = model.predict_on_batch(np.ones((2, 20)))
        self.assertGreater(5, np.sum(np.abs(out2 - out3)))
@pytest.mark.requires_trainable_backend
def test_recompile(self):
model = ExampleModel(units=3)
model.compile(
optimizer="sgd", loss="mse", metrics=["mean_squared_error"]
)
history_1 = model.fit(np.ones((3, 2)), np.ones((3, 3))).history
eval_out_1 = model.evaluate(
np.ones((3, 2)), np.ones((3, 3)), return_dict=True
)
model.compile(
optimizer="sgd", loss="mse", metrics=["mean_absolute_error"]
)
history_2 = model.fit(np.ones((3, 2)), np.ones((3, 3))).history
eval_out_2 = model.evaluate(
np.ones((3, 2)), np.ones((3, 3)), return_dict=True
)
self.assertEqual(
sorted(list(history_1.keys())), ["loss", "mean_squared_error"]
)
self.assertEqual(
sorted(list(eval_out_1.keys())), ["loss", "mean_squared_error"]
)
self.assertEqual(
sorted(list(history_2.keys())), ["loss", "mean_absolute_error"]
)
self.assertEqual(
sorted(list(eval_out_2.keys())), ["loss", "mean_absolute_error"]
)
def test_evaluate_return_list_respect_metrics_order(self):
def metrics_zero(y_true, y_pred):
return 0.0
def metrics_one(y_true, y_pred):
return 1.0
model = ExampleModel(units=3)
model.compile(
optimizer="sgd", loss="mse", metrics=[metrics_zero, metrics_one]
)
eval_out = model.evaluate(np.ones((3, 2)), np.ones((3, 3)))
self.assertLen(eval_out, 3)
self.assertEqual(eval_out[1], 0.0)
self.assertEqual(eval_out[2], 1.0)
model.compile(
optimizer="sgd", loss="mse", metrics=[metrics_one, metrics_zero]
)
eval_out = model.evaluate(np.ones((3, 2)), np.ones((3, 3)))
self.assertLen(eval_out, 3)
self.assertEqual(eval_out[1], 1.0)
self.assertEqual(eval_out[2], 0.0)
    @pytest.mark.requires_trainable_backend
    def test_nested_inputs(self):
        """All trainer entry points accept list-structured inputs.

        Exercises call, fit, train_on_batch, evaluate, test_on_batch,
        predict and predict_on_batch on a two-input model.  The hard-coded
        loss values form a fixed trajectory — each training call updates
        the weights, so the call order below must not change.
        """
        model = ListInputModel(units=2)
        out = model([np.ones((3, 2)), np.ones((3, 3))])
        self.assertEqual(tuple(out.shape), (3, 2))
        model.compile(optimizer="sgd", loss="mse", metrics=["mse"])
        history = model.fit(
            [np.ones((3, 2)), np.ones((3, 3))], np.ones((3, 2))
        ).history
        self.assertAllClose(history["loss"], 16.0)
        train_out = model.train_on_batch(
            [np.ones((3, 2)), np.ones((3, 3))], np.ones((3, 2))
        )
        self.assertAllClose(train_out[0], 15.2200)
        eval_out = model.evaluate(
            [np.ones((3, 2)), np.ones((3, 3))], np.ones((3, 2))
        )
        self.assertAllClose(eval_out[0], 13.0321)
        eval_out = model.test_on_batch(
            [np.ones((3, 2)), np.ones((3, 3))], np.ones((3, 2))
        )
        self.assertAllClose(eval_out[0], 13.0321)
        predict_out = model.predict([np.ones((3, 2)), np.ones((3, 3))])
        self.assertEqual(predict_out.shape, (3, 2))
        predict_out = model.predict_on_batch([np.ones((3, 2)), np.ones((3, 3))])
        self.assertEqual(predict_out.shape, (3, 2))
@pytest.mark.requires_trainable_backend
def test_validation_data_infinite_generator(self):
# Test that you can pass an infinite generator to `validation_data`
# arg of fit() as well as a `validation_steps` argument and that
# validation only runs for the correct number of steps.
model = ExampleModel(units=3)
model.compile(optimizer="sgd", loss="mse", metrics=["mse"])
class Recorder(keras.callbacks.Callback):
def __init__(self):
self.train_counter = 0
self.val_counter = 0
def on_train_batch_end(self, *args, **kwargs):
self.train_counter += 1
def on_test_batch_end(self, *args, **kwargs):
self.val_counter += 1
def infinite_gen():
while True:
yield np.ones((2, 2)), np.ones((2, 3))
recorder = Recorder()
model.fit(
infinite_gen(),
validation_data=infinite_gen(),
steps_per_epoch=3,
validation_steps=4,
epochs=1,
shuffle=False,
callbacks=[recorder],
)
self.assertEqual(recorder.train_counter, 3)
self.assertEqual(recorder.val_counter, 4)
    @parameterized.named_parameters(
        [
            ("fit", "fit", "training", "train"),
            ("evaluate", "evaluate", "evaluating", "test"),
            ("predict", "predict", "predicting", "predict"),
        ]
    )
    @pytest.mark.requires_trainable_backend
    def test_stop_loop(self, method, method_gerund, on_end_name):
        """Setting `model.stop_<training|evaluating|predicting>` in a
        callback halts the corresponding loop on an infinite generator."""
        model = ExampleModel(units=3)
        model.compile(optimizer="sgd", loss="mse", metrics=["mse"])
        class Stopper(keras.callbacks.Callback):
            def __init__(self, stop_count):
                self.stop_count = stop_count
                self.counter = 0
                # Dynamically wire batch_end to the right hook
                # (on_train/test/predict_batch_end) for this parameterization.
                setattr(self, f"on_{on_end_name}_batch_end", self.batch_end)
            def batch_end(self, *args, **kwargs):
                self.counter += 1
                if self.counter == self.stop_count:
                    # e.g. model.stop_training = True
                    setattr(self.model, f"stop_{method_gerund}", True)
        def infinite_gen():
            while True:
                x = np.ones((2, 2))
                y = np.ones((2, 3))
                # predict() takes inputs only; fit/evaluate take (x, y).
                yield (x,) if method == "predict" else (x, y)
        stop_count = 5
        stopper = Stopper(stop_count)
        getattr(model, method)(
            infinite_gen(),
            callbacks=[stopper],
        )
        # The loop must have stopped exactly at the requested batch.
        self.assertEqual(stopper.counter, stop_count)
@pytest.mark.requires_trainable_backend
def test_constraints_are_applied(self):
model = models.Sequential(
[layers.Dense(2, kernel_constraint="non_neg")]
)
x = np.ones((2, 3))
y = np.ones((2, 2))
model.compile(optimizer="rmsprop", loss="mse")
model.fit(x, y)
self.assertGreaterEqual(
np.min(backend.convert_to_numpy(model.layers[0].kernel)), 0.0
)
@pytest.mark.requires_trainable_backend
def test_rng_updated_during_predict(self):
class TestTimeDropout(layers.Layer):
def __init__(self):
super().__init__()
self.random_generator = keras.random.SeedGenerator()
def call(self, x):
return keras.random.dropout(
x, rate=0.5, seed=self.random_generator
)
inputs = layers.Input((20,))
outputs = TestTimeDropout()(inputs)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop", loss="mse")
x = np.ones((32, 20))
out_1 = model.predict(x)
out_2 = model.predict(x)
self.assertGreater(np.mean(np.abs(out_1 - out_2)), 0.01)
    @pytest.mark.requires_trainable_backend
    def test_callbacks_can_update_state_at_batch_boundary(self):
        """Callback-driven variable updates take effect per batch.

        A callback increments model-owned counter variables in the
        train/test/predict batch-end hooks (run_eagerly=True so the
        assignments are not traced away); the variable values must match
        the eager Python-side counters.
        """
        class CounterModel(keras.Model):
            def __init__(self):
                super().__init__()
                self.train_counter = self.add_weight(
                    shape=(),
                    initializer="zeros",
                )
                self.test_counter = self.add_weight(
                    shape=(),
                    initializer="zeros",
                )
                self.predict_counter = self.add_weight(
                    shape=(),
                    initializer="zeros",
                )
                self.dense = layers.Dense(3)
            def call(self, x):
                return self.dense(x)
        class CounterCallback(keras.callbacks.Callback):
            def __init__(self):
                self.eager_call_counter_train = 0
                self.eager_call_counter_test = 0
                self.eager_call_counter_predict = 0
            def on_train_batch_end(self, *args, **kwargs):
                self.model.train_counter.assign_add(1)
                self.eager_call_counter_train += 1
            def on_test_batch_end(self, *args, **kwargs):
                self.model.test_counter.assign_add(1)
                self.eager_call_counter_test += 1
            def on_predict_batch_end(self, *args, **kwargs):
                self.model.predict_counter.assign_add(1)
                self.eager_call_counter_predict += 1
        model = CounterModel()
        model.compile(
            optimizer="sgd", loss="mse", metrics=["mse"], run_eagerly=True
        )
        cbk = CounterCallback()
        model.fit(
            np.ones((4, 3)),
            np.ones((4, 3)),
            callbacks=[cbk],
            epochs=3,
            batch_size=1,
            verbose=0,
            validation_data=(np.ones((2, 3)), np.ones((2, 3))),
        )
        # 3 epochs x 4 train batches; 3 epochs x 2 validation batches.
        self.assertAlmostEqual(cbk.eager_call_counter_train, 12)
        self.assertAlmostEqual(model.train_counter.numpy(), 12)
        self.assertAlmostEqual(cbk.eager_call_counter_test, 6)
        self.assertAlmostEqual(model.test_counter.numpy(), 6)
        model.predict(
            np.ones((4, 3)),
            callbacks=[cbk],
            batch_size=1,
        )
        # 4 predict batches of size 1.
        self.assertAlmostEqual(cbk.eager_call_counter_predict, 4)
        self.assertAlmostEqual(model.predict_counter.numpy(), 4)
    @pytest.mark.requires_trainable_backend
    def test_metric_update_in_compute_loss(self):
        """Metrics updated inside an overridden `compute_loss` are logged.

        The override also checks it receives `training=True` during fit()
        (outside the symbolic-build scope).
        """
        # Captured so the nested class can call unittest assertions.
        test_self = self
        class MyModel(keras.Model):
            def __init__(self):
                super().__init__()
                self.custom_metric = keras.metrics.Mean(name="custom")
                self.dense = keras.layers.Dense(2)
            def call(self, x):
                return self.dense(x)
            def compute_loss(
                self,
                x=None,
                y=None,
                y_pred=None,
                sample_weight=None,
                training=True,
            ):
                if not in_symbolic_scope():
                    test_self.assertTrue(training)
                loss = super().compute_loss(
                    x, y, y_pred, sample_weight, training
                )
                # Track 4x the loss so the logged "custom" value is easy to
                # verify against "loss".
                self.custom_metric.update_state(loss * 4)
                return loss
        model = MyModel()
        model.compile(optimizer="sgd", loss="mse")
        x = np.ones((32, 4))
        y = np.ones((32, 2)) * 2
        history = model.fit(x, y)
        self.assertAlmostEqual(
            history.history["custom"][0], history.history["loss"][0] * 4
        )
    @pytest.mark.requires_trainable_backend
    def test_fwd_pass_loss_presence_in_compute_loss(self):
        """Forward-pass losses (activity regularizers) are visible in
        `self.losses` from within an overridden `compute_loss`."""
        # Captured so the nested class can call unittest assertions.
        test_self = self
        class MyModel(keras.Model):
            def __init__(self):
                super().__init__()
                self.custom_metric = keras.metrics.Mean(name="custom")
                self.dense = keras.layers.Dense(2, activity_regularizer="l2")
            def call(self, x):
                return self.dense(x)
            def compute_loss(
                self,
                x=None,
                y=None,
                y_pred=None,
                sample_weight=None,
                training=True,
            ):
                if not in_symbolic_scope():
                    test_self.assertTrue(training)
                loss = super().compute_loss(
                    x, y, y_pred, sample_weight, training
                )
                # Record the regularization losses; must be nonzero below.
                self.custom_metric.update_state(sum(self.losses))
                return loss
        model = MyModel()
        model.compile(optimizer="sgd", loss="mse")
        x = np.ones((32, 4))
        y = np.ones((32, 2)) * 2
        history = model.fit(x, y)
        self.assertGreater(history.history["custom"][0], 0.0)
    @pytest.mark.requires_trainable_backend
    def test_evaluate_with_custom_compute_loss(self):
        """An overridden `compute_loss` is used by evaluate(), receiving
        `training=False`, and its metric updates appear in the logs."""
        # Captured so the nested class can call unittest assertions.
        test_self = self
        class MyModel(keras.Model):
            def __init__(self):
                super().__init__()
                self.custom_metric = keras.metrics.Mean(name="custom")
                self.dense = keras.layers.Dense(2, activity_regularizer="l2")
            def call(self, x):
                return self.dense(x)
            def compute_loss(
                self,
                x=None,
                y=None,
                y_pred=None,
                sample_weight=None,
                training=True,
            ):
                if not in_symbolic_scope():
                    # evaluate() must pass training=False.
                    test_self.assertFalse(training)
                loss = super().compute_loss(
                    x, y, y_pred, sample_weight, training
                )
                self.custom_metric.update_state(loss * 4)
                return loss
        model = MyModel()
        model.compile(optimizer="sgd", loss="mse")
        x = np.ones((32, 4))
        y = np.ones((32, 2)) * 2
        logs = model.evaluate(x, y, return_dict=True)
        self.assertAlmostEqual(logs["custom"], logs["loss"] * 4)
    @pytest.mark.requires_trainable_backend
    def test_compute_loss_no_training_backwards_compatibility(self):
        """A legacy `compute_loss` override WITHOUT the `training` kwarg
        still works for both evaluate() and fit()."""
        class MyModel(keras.Model):
            def __init__(self):
                super().__init__()
                self.custom_metric = keras.metrics.Mean(name="custom")
                self.dense = keras.layers.Dense(2, activity_regularizer="l2")
            def call(self, x):
                return self.dense(x)
            def compute_loss(
                self,
                x=None,
                y=None,
                y_pred=None,
                sample_weight=None,
            ):
                # Old-style signature: no `training` parameter.
                loss = super().compute_loss(x, y, y_pred, sample_weight)
                self.custom_metric.update_state(loss * 4)
                return loss
        model = MyModel()
        model.compile(optimizer="sgd", loss="mse")
        x = np.ones((32, 4))
        y = np.ones((32, 2)) * 2
        logs = model.evaluate(x, y, return_dict=True)
        self.assertAlmostEqual(logs["custom"], logs["loss"] * 4)
        history = model.fit(x, y)
        self.assertAlmostEqual(
            history.history["custom"][0], history.history["loss"][0] * 4
        )
    @pytest.mark.requires_trainable_backend
    def test_loss_weights(self):
        """`loss_weights` scales the loss for scalar, dict and list forms.

        Three structurally equivalent setups (single output with a scalar
        weight, dict outputs with per-name weights, list outputs with
        positional weights) are each trained for three epochs and checked
        against hard-coded loss trajectories.
        """
        epochs = 3
        batch_size = 20
        dataset_size = batch_size * 2
        # Single output case.
        model = ExampleModel(units=3)
        model.compile(
            optimizer=optimizers.SGD(),
            loss=losses.MeanSquaredError(),
            metrics=[metrics.MeanSquaredError()],
            loss_weights=0.2,
        )
        x = np.ones((dataset_size, 4))
        y = np.zeros((dataset_size, 3))
        history = model.fit(
            x,
            y,
            batch_size=batch_size,
            epochs=epochs,
        )
        history = history.history
        self.assertIn("loss", history)
        self.assertAllClose(
            history["loss"],
            [3.182979, 3.115617, 3.049681],
            atol=1e-3,
        )
        # Dict output case.
        model = StructModel(units=3)
        model.compile(
            optimizer=optimizers.SGD(),
            loss={
                "y_one": losses.MeanSquaredError(),
                "y_two": losses.MeanSquaredError(),
            },
            metrics={
                "y_one": metrics.MeanSquaredError(),
                "y_two": metrics.MeanSquaredError(),
            },
            loss_weights={"y_one": 0.1, "y_two": 0.2},
        )
        x1 = np.ones((dataset_size, 4))
        x2 = np.ones((dataset_size, 4))
        y1 = np.zeros((dataset_size, 3))
        y2 = np.zeros((dataset_size, 3))
        history = model.fit(
            {"x_one": x1, "x_two": x2},
            {"y_one": y1, "y_two": y2},
            batch_size=batch_size,
            epochs=epochs,
        )
        history = history.history
        self.assertIn("loss", history)
        self.assertAllClose(
            history["loss"],
            [4.778718, 4.694403, 4.611693],
            atol=1e-3,
        )
        # List output case.  Same weights (0.1, 0.2) as the dict case, so
        # the expected trajectory is identical.
        model = ListOutputModel(units=3)
        model.compile(
            optimizer=optimizers.SGD(),
            loss=[losses.MeanSquaredError(), losses.MeanSquaredError()],
            metrics=[metrics.MeanSquaredError(), metrics.MeanSquaredError()],
            loss_weights=[0.1, 0.2],
        )
        x = np.ones((dataset_size, 4))
        y1 = np.zeros((dataset_size, 3))
        y2 = np.zeros((dataset_size, 3))
        history = model.fit(
            x,
            [y1, y2],
            batch_size=batch_size,
            epochs=epochs,
        )
        history = history.history
        self.assertIn("loss", history)
        self.assertAllClose(
            history["loss"],
            [4.778718, 4.694403, 4.611693],
            atol=1e-3,
        )
@pytest.mark.requires_trainable_backend
def test_partial_loss_partial_label(self):
inputs = keras.Input((2,))
x = keras.layers.Dense(3, kernel_initializer="ones")(inputs)
partial_model = keras.Model(inputs, [x, x, x])
partial_model.compile(loss=["mse", None, None])
full_model = keras.Model(inputs, [x, x, x])
full_model.compile(loss=["mse", "mse", "mse"])
eval_x = np.ones((32, 2))
eval_y = np.ones((32, 3))
partial_logs = partial_model.evaluate(eval_x, eval_y, return_dict=True)
logs = full_model.evaluate(eval_x, [eval_y] * 3, return_dict=True)
self.assertAlmostEqual(partial_logs["loss"] * 3, logs["loss"])
def test_symbolic_build(self):
class ExampleModelWithTrainingArgs(Trainer, layers.Layer):
def __init__(self, units):
layers.Layer.__init__(self)
Trainer.__init__(self)
self.dense = layers.Dense(units)
self.bn = layers.BatchNormalization(axis=-1)
def build(self, input_shape):
self.dense.build(input_shape)
input_shape = self.dense.compute_output_shape(input_shape)
self.bn.build(input_shape)
def call(self, x, training=None):
outputs = self.bn(self.dense(x), training=training)
return [outputs, outputs]
model = ExampleModelWithTrainingArgs(units=3)
model.compile(
optimizer=optimizers.SGD(),
loss=[losses.MeanSquaredError(), losses.MeanSquaredError()],
metrics=[metrics.MeanSquaredError(), metrics.MeanSquaredError()],
)
x = np.ones((4, 4))
y = np.zeros((4, 3))
model(x) # Eager call to build model weights
ref_weights = model.get_weights()
# Before `_symbolic_build`
self.assertTrue(model.built)
self.assertFalse(model._compile_metrics.built)
self.assertFalse(model._compile_loss.built)
self.assertLen(model._compile_loss.metrics, 0)
self.assertLen(model.metrics, 2)
model._symbolic_build(data_batch=(x, (y, y)))
weights = model.get_weights()
# Ensure weights are intact
self.assertEqual(len(weights), len(ref_weights))
for w, ref_w in zip(weights, ref_weights):
self.assertAllClose(w, ref_w)
# Ensure `built`
self.assertTrue(model.built)
self.assertTrue(model._compile_metrics.built)
self.assertTrue(model._compile_loss.built)
# Ensure the len of metrics (original metrics + loss trackers)
self.assertLen(model._compile_metrics.metrics, 2)
self.assertLen(model._compile_loss.metrics, 2)
self.assertLen(model.metrics, 4)
# Ensure no values in metrics
for v in model._compile_metrics.variables:
self.assertAllClose(v, 0.0)
for v in model._compile_loss.variables:
self.assertAllClose(v, 0.0)
@pytest.mark.skipif(
backend.backend() != "tensorflow",
reason="This test is only applicable to TensorFlow.",
)
@pytest.mark.requires_trainable_backend
def test_jit_compile_with_tf_determinism(self):
from tensorflow.python.framework.config import disable_op_determinism
from tensorflow.python.framework.config import enable_op_determinism
enable_op_determinism()
model = ExampleModel(units=3)
model.compile(
optimizer=optimizers.SGD(),
loss=losses.MeanSquaredError(),
metrics=[metrics.MeanSquaredError()],
)
self.assertFalse(model.jit_compile)
disable_op_determinism()
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() == "torch",
reason="`steps_per_execution` not implemented for torch yet",
)
def test_retracing(self):
x = np.ones((100, 4))
y = np.ones((100, 1))
input = keras.Input(shape=[4])
output = keras.layers.Dense(1, activation="relu")(input)
tracing_count = [0]
class TracingCounterModel(keras.Model):
def train_step(self, *args):
tracing_count[0] = tracing_count[0] + 1
return super().train_step(*args)
model = TracingCounterModel(inputs=input, outputs=output)
model.compile(
loss="mse",
optimizer="adam",
steps_per_execution=20,
)
epochs = 1
model.fit(
x=x,
y=y,
batch_size=1,
epochs=epochs,
verbose=0,
)
self.assertLessEqual(tracing_count[0], 2)
@pytest.mark.requires_trainable_backend
@pytest.mark.skipif(
backend.backend() == "torch",
reason="`steps_per_execution` not implemented for torch yet",
)
@pytest.mark.skipif(
backend.backend() == "tensorflow",
reason="`predict_function` with `steps_per_execution` is not "
"optimized for tensorflow yet",
)
def test_retracing_predict(self):
x = np.ones((100, 4))
input = keras.Input(shape=[4])
output = keras.layers.Dense(1, activation="relu")(input)
tracing_count = [0]
class TracingCounterModel(keras.Model):
def predict_step(self, *args):
tracing_count[0] = tracing_count[0] + 1
return super().predict_step(*args)
model = TracingCounterModel(inputs=input, outputs=output)
model.compile(
loss="mse",
optimizer="adam",
steps_per_execution=20,
)
model.predict(
x=x,
batch_size=1,
verbose=0,
)
self.assertLessEqual(tracing_count[0], 2)
| TestTrainer |
python | ray-project__ray | python/ray/autoscaler/v2/schema.py | {
"start": 6960,
"end": 12681
} | class ____:
"""
AutoscalerInstance represents an instance that's managed by the autoscaler.
This includes two states:
1. the instance manager state: information of the underlying cloud instance.
2. the ray node state, e.g. resources, ray node status.
The two states are linked by the cloud instance id, which should be set
when the ray node is started.
"""
# The cloud instance id. It could be None if the instance hasn't been assigned
# a cloud instance id, e.g. the instance is still in QUEUED or REQUESTED status.
cloud_instance_id: Optional[str] = None
# The ray node state status. It could be None when no ray node is running
# or has run on the cloud instance: for example, ray is still being installed
# or the instance manager hasn't had a cloud instance assigned (e.g. QUEUED,
# REQUESTED).
ray_node: Optional[NodeState] = None
# The instance manager instance state. It would be None when the ray_node is not
# None.
# It could be None iff:
# 1. There's a ray node, but the instance manager hasn't discovered the
# cloud instance that's running this ray process yet. This could happen since
# the instance manager only discovers instances periodically.
#
# 2. There was a ray node running on the cloud instance, which was already stopped
# and removed from the instance manager state. But the ray state is still lagging
# behind.
#
# 3. There is a ray node that's unmanaged by the instance manager.
#
im_instance: Optional[Instance] = None
# | cloud_instance_id | ray_node | im_instance |
# |-------------------|----------|-------------|
# | None | None | None | Not possible.
# | None | None | not None | OK. An instance hasn't had ray running on it yet. # noqa E501
# | None | Not None | None | OK. Possible if the ray node is not started by autoscaler. # noqa E501
# | None | Not None | not None | Not possible - no way to link im instance with ray node. # noqa E501
# | not None | None | None | Not possible since cloud instance id is either part of im state or ray node. # noqa E501
# | not None | None | not None | OK. e.g. An instance that's not running ray yet. # noqa E501
# | not None | Not None | None | OK. See scenario 1, 2, 3 above.
# | not None | Not None | not None | OK. An instance that's running ray.
def validate(self) -> Tuple[bool, str]:
"""Validate the autoscaler instance state.
Returns:
A tuple of (valid, error_msg) where:
- valid is whether the state is valid
- error_msg is the error message for the validation results.
"""
state_combinations = {
# (cloud_instance_id is None, ray_node is None, im_instance is None): (valid, error_msg) # noqa E501
(True, True, True): (False, "Not possible"),
(True, True, False): (True, ""),
(True, False, True): (
True,
"There's a ray node w/o cloud instance id, must be started not "
"by autoscaler",
),
(True, False, False): (
False,
"Not possible - no way to link im instance with ray node",
),
(False, True, True): (
False,
"Not possible since cloud instance id is either part of "
"im state or ray node",
),
(False, True, False): (True, ""),
(False, False, True): (True, ""),
(False, False, False): (True, ""),
}
valid, error_msg = state_combinations[
(
self.cloud_instance_id is None,
self.ray_node is None,
self.im_instance is None,
)
]
if not valid:
return valid, error_msg
if self.im_instance is not None and self.ray_node is None:
# We don't see a ray node, but tracking an im instance.
if self.cloud_instance_id is None:
if InstanceUtil.is_cloud_instance_allocated(self.im_instance.status):
return (
False,
"instance should be in a status where cloud instance "
"is not allocated.",
)
else:
if not InstanceUtil.is_cloud_instance_allocated(
self.im_instance.status
):
return (
False,
"instance should be in a status where cloud instance is "
"allocated.",
)
if self.ray_node is not None:
if self.cloud_instance_id != self.ray_node.instance_id:
return False, "cloud instance id doesn't match."
if self.im_instance is not None and self.cloud_instance_id is not None:
if self.cloud_instance_id != self.im_instance.cloud_instance_id:
return False, "cloud instance id doesn't match."
return True, ""
def is_ray_running(self) -> bool:
"""Whether the ray node is running."""
return self.ray_node is not None and self.ray_node.status in [
NodeStatus.RUNNING,
NodeStatus.IDLE,
]
def is_ray_stop(self) -> bool:
"""Whether the ray node is stopped."""
return self.ray_node is None or self.ray_node.status in [
NodeStatus.DEAD,
]
| AutoscalerInstance |
python | pytorch__pytorch | torch/fx/experimental/migrate_gradual_types/constraint.py | {
"start": 8242,
"end": 9279
} | class ____(Constraint):
def __init__(self, tensor_size, index, res, input_var):
"""
Constraint for getting item given a tensor size
:param tensor_size: actual number
:param index: actual number representing the index
:param res: dimension variable to carry the item we get
:param input_var: a tensor variable from which we will get item
"""
assert isinstance(res, DVar)
self.res = res
self.tensor_size = tensor_size
self.index = index
self.input_var = input_var
def __repr__(self):
return f" {self.res} = GetItem({self.input_var}, tensor_size: {self.tensor_size}, {self.index})"
def __eq__(self, other):
if isinstance(other, GetItem):
return (
self.res == other.res
and self.tensor_size == other.tensor_size
and self.index == other.index
and self.input_var == other.input_var
)
else:
return False
| GetItem |
python | bokeh__bokeh | tests/unit/bokeh/test_transform.py | {
"start": 5801,
"end": 6686
} | class ____:
def test_basic(self) -> None:
t = bt.factor_mark("foo", ["hex", "square"], ["foo", "bar"], start=1, end=2)
assert isinstance(t, Field)
assert t.field == "foo"
assert isinstance(t.transform, CategoricalMarkerMapper)
assert t.transform.markers == ["hex", "square"]
assert t.transform.factors == ["foo", "bar"]
assert t.transform.start == 1
assert t.transform.end == 2
def test_defaults(self) -> None:
t = bt.factor_mark("foo", ["hex", "square"], ["foo", "bar"])
assert isinstance(t, Field)
assert t.field == "foo"
assert isinstance(t.transform, CategoricalMarkerMapper)
assert t.transform.markers == ["hex", "square"]
assert t.transform.factors == ["foo", "bar"]
assert t.transform.start == 0
assert t.transform.end is None
| Test_factor_mark |
python | jackfrued__Python-100-Days | Day31-35/code/example18.py | {
"start": 535,
"end": 1006
} | class ____(metaclass=SingletonMeta):
"""总统(单例类)"""
def __init__(self, name, country):
self.name = name
self.country = country
def __str__(self):
return f'{self.country}: {self.name}'
def main():
"""主函数"""
p1 = President('特朗普', '美国')
p2 = President('奥巴马', '美国')
p3 = President.__call__('克林顿', '美国')
print(p1 == p2)
print(p1 == p3)
print(p1, p2, p3, sep='\n')
if __name__ == '__main__':
main()
| President |
python | django__django | django/db/models/fields/generated.py | {
"start": 231,
"end": 8013
} | class ____(Field):
generated = True
db_returning = True
_query = None
output_field = None
def __init__(self, *, expression, output_field, db_persist, **kwargs):
if kwargs.setdefault("editable", False):
raise ValueError("GeneratedField cannot be editable.")
if not kwargs.setdefault("blank", True):
raise ValueError("GeneratedField must be blank.")
if kwargs.get("default", NOT_PROVIDED) is not NOT_PROVIDED:
raise ValueError("GeneratedField cannot have a default.")
if kwargs.get("db_default", NOT_PROVIDED) is not NOT_PROVIDED:
raise ValueError("GeneratedField cannot have a database default.")
if db_persist not in (True, False):
raise ValueError("GeneratedField.db_persist must be True or False.")
self.expression = expression
self.output_field = output_field
self.db_persist = db_persist
super().__init__(**kwargs)
@cached_property
def cached_col(self):
from django.db.models.expressions import Col
return Col(self.model._meta.db_table, self, self.output_field)
def get_col(self, alias, output_field=None):
if alias != self.model._meta.db_table and output_field in (None, self):
output_field = self.output_field
return super().get_col(alias, output_field)
def contribute_to_class(self, *args, **kwargs):
super().contribute_to_class(*args, **kwargs)
self._query = Query(model=self.model, alias_cols=False)
# Register lookups from the output_field class.
for lookup_name, lookup in self.output_field.get_class_lookups().items():
self.register_lookup(lookup, lookup_name=lookup_name)
def generated_sql(self, connection):
compiler = connection.ops.compiler("SQLCompiler")(
self._query, connection=connection, using=None
)
resolved_expression = self.expression.resolve_expression(
self._query, allow_joins=False
)
sql, params = compiler.compile(resolved_expression)
if (
getattr(self.expression, "conditional", False)
and not connection.features.supports_boolean_expr_in_select_clause
):
sql = f"CASE WHEN {sql} THEN 1 ELSE 0 END"
return sql, params
@cached_property
def referenced_fields(self):
resolved_expression = self.expression.resolve_expression(
self._query, allow_joins=False
)
referenced_fields = []
for col in self._query._gen_cols([resolved_expression]):
referenced_fields.append(col.target)
return frozenset(referenced_fields)
def check(self, **kwargs):
databases = kwargs.get("databases") or []
errors = [
*super().check(**kwargs),
*self._check_supported(databases),
*self._check_persistence(databases),
]
output_field_clone = self.output_field.clone()
output_field_clone.model = self.model
output_field_checks = output_field_clone.check(databases=databases)
if output_field_checks:
separator = "\n "
error_messages = separator.join(
f"{output_check.msg} ({output_check.id})"
for output_check in output_field_checks
if isinstance(output_check, checks.Error)
)
if error_messages:
errors.append(
checks.Error(
"GeneratedField.output_field has errors:"
f"{separator}{error_messages}",
obj=self,
id="fields.E223",
)
)
warning_messages = separator.join(
f"{output_check.msg} ({output_check.id})"
for output_check in output_field_checks
if isinstance(output_check, checks.Warning)
)
if warning_messages:
errors.append(
checks.Warning(
"GeneratedField.output_field has warnings:"
f"{separator}{warning_messages}",
obj=self,
id="fields.W224",
)
)
return errors
def _check_supported(self, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, self.model):
continue
connection = connections[db]
if (
self.model._meta.required_db_vendor
and self.model._meta.required_db_vendor != connection.vendor
):
continue
if not (
connection.features.supports_virtual_generated_columns
or "supports_stored_generated_columns"
in self.model._meta.required_db_features
) and not (
connection.features.supports_stored_generated_columns
or "supports_virtual_generated_columns"
in self.model._meta.required_db_features
):
errors.append(
checks.Error(
f"{connection.display_name} does not support GeneratedFields.",
obj=self,
id="fields.E220",
)
)
return errors
def _check_persistence(self, databases):
errors = []
for db in databases:
if not router.allow_migrate_model(db, self.model):
continue
connection = connections[db]
if (
self.model._meta.required_db_vendor
and self.model._meta.required_db_vendor != connection.vendor
):
continue
if not self.db_persist and not (
connection.features.supports_virtual_generated_columns
or "supports_virtual_generated_columns"
in self.model._meta.required_db_features
):
errors.append(
checks.Error(
f"{connection.display_name} does not support non-persisted "
"GeneratedFields.",
obj=self,
id="fields.E221",
hint="Set db_persist=True on the field.",
)
)
if self.db_persist and not (
connection.features.supports_stored_generated_columns
or "supports_stored_generated_columns"
in self.model._meta.required_db_features
):
errors.append(
checks.Error(
f"{connection.display_name} does not support persisted "
"GeneratedFields.",
obj=self,
id="fields.E222",
hint="Set db_persist=False on the field.",
)
)
return errors
def deconstruct(self):
name, path, args, kwargs = super().deconstruct()
del kwargs["blank"]
del kwargs["editable"]
kwargs["db_persist"] = self.db_persist
kwargs["expression"] = self.expression
kwargs["output_field"] = self.output_field
return name, path, args, kwargs
def get_internal_type(self):
return self.output_field.get_internal_type()
def db_parameters(self, connection):
return self.output_field.db_parameters(connection)
def db_type_parameters(self, connection):
return self.output_field.db_type_parameters(connection)
| GeneratedField |
python | PrefectHQ__prefect | tests/test_task_engine.py | {
"start": 58967,
"end": 59263
} | class ____:
async def test_sync_task_in_async_task(self):
@task
def sync_task():
return 42
@task
async def async_task():
return sync_task()
result = await run_task_async(async_task)
assert result == 42
| TestSyncAsyncTasks |
python | python-jsonschema__jsonschema | jsonschema/cli.py | {
"start": 2121,
"end": 3222
} | class ____:
_ERROR_MSG = dedent(
"""\
===[{type}]===({path})===
{body}
-----------------------------
""",
)
_SUCCESS_MSG = "===[SUCCESS]===({path})===\n"
def filenotfound_error(self, path, exc_info):
return self._ERROR_MSG.format(
path=path,
type="FileNotFoundError",
body=f"{path!r} does not exist.",
)
def parsing_error(self, path, exc_info):
exc_type, exc_value, exc_traceback = exc_info
exc_lines = "".join(
traceback.format_exception(exc_type, exc_value, exc_traceback),
)
return self._ERROR_MSG.format(
path=path,
type=exc_type.__name__,
body=exc_lines,
)
def validation_error(self, instance_path, error):
return self._ERROR_MSG.format(
path=instance_path,
type=error.__class__.__name__,
body=error,
)
def validation_success(self, instance_path):
return self._SUCCESS_MSG.format(path=instance_path)
@define
| _PrettyFormatter |
python | apache__thrift | lib/py/src/protocol/TJSONProtocol.py | {
"start": 3752,
"end": 11907
} | class ____(TProtocolBase):
def __init__(self, trans):
TProtocolBase.__init__(self, trans)
self.resetWriteContext()
self.resetReadContext()
# We don't have length limit implementation for JSON protocols
@property
def string_length_limit(senf):
return None
@property
def container_length_limit(senf):
return None
def resetWriteContext(self):
self.context = JSONBaseContext(self)
self.contextStack = [self.context]
def resetReadContext(self):
self.resetWriteContext()
self.reader = LookaheadReader(self)
def pushContext(self, ctx):
self.contextStack.append(ctx)
self.context = ctx
def popContext(self):
self.contextStack.pop()
if self.contextStack:
self.context = self.contextStack[-1]
else:
self.context = JSONBaseContext(self)
def writeJSONString(self, string):
self.context.write()
json_str = ['"']
for s in string:
escaped = ESCAPE_CHAR_VALS.get(s, s)
json_str.append(escaped)
json_str.append('"')
self.trans.write(bytes(''.join(json_str), 'utf-8'))
def writeJSONNumber(self, number, formatter='{0}'):
self.context.write()
jsNumber = str(formatter.format(number)).encode('ascii')
if self.context.escapeNum():
self.trans.write(QUOTE)
self.trans.write(jsNumber)
self.trans.write(QUOTE)
else:
self.trans.write(jsNumber)
def writeJSONBase64(self, binary):
self.context.write()
self.trans.write(QUOTE)
self.trans.write(base64.b64encode(binary))
self.trans.write(QUOTE)
def writeJSONObjectStart(self):
self.context.write()
self.trans.write(LBRACE)
self.pushContext(JSONPairContext(self))
def writeJSONObjectEnd(self):
self.popContext()
self.trans.write(RBRACE)
def writeJSONArrayStart(self):
self.context.write()
self.trans.write(LBRACKET)
self.pushContext(JSONListContext(self))
def writeJSONArrayEnd(self):
self.popContext()
self.trans.write(RBRACKET)
def readJSONSyntaxChar(self, character):
current = self.reader.read()
if character != current:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Unexpected character: %s" % current)
def _isHighSurrogate(self, codeunit):
return codeunit >= 0xd800 and codeunit <= 0xdbff
def _isLowSurrogate(self, codeunit):
return codeunit >= 0xdc00 and codeunit <= 0xdfff
def _toChar(self, high, low=None):
if not low:
return chr(high)
else:
codepoint = (1 << 16) + ((high & 0x3ff) << 10)
codepoint += low & 0x3ff
return chr(codepoint)
def readJSONString(self, skipContext):
highSurrogate = None
string = []
if skipContext is False:
self.context.read()
self.readJSONSyntaxChar(QUOTE)
while True:
character = self.reader.read()
if character == QUOTE:
break
if ord(character) == ESCSEQ0:
character = self.reader.read()
if ord(character) == ESCSEQ1:
character = self.trans.read(4).decode('ascii')
codeunit = int(character, 16)
if self._isHighSurrogate(codeunit):
if highSurrogate:
raise TProtocolException(
TProtocolException.INVALID_DATA,
"Expected low surrogate char")
highSurrogate = codeunit
continue
elif self._isLowSurrogate(codeunit):
if not highSurrogate:
raise TProtocolException(
TProtocolException.INVALID_DATA,
"Expected high surrogate char")
character = self._toChar(highSurrogate, codeunit)
highSurrogate = None
else:
character = self._toChar(codeunit)
else:
if character not in ESCAPE_CHARS:
raise TProtocolException(
TProtocolException.INVALID_DATA,
"Expected control char")
character = ESCAPE_CHARS[character]
elif character in ESCAPE_CHAR_VALS:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Unescaped control char")
else:
utf8_bytes = bytearray([ord(character)])
while ord(self.reader.peek()) >= 0x80:
utf8_bytes.append(ord(self.reader.read()))
character = utf8_bytes.decode('utf-8')
string.append(character)
if highSurrogate:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Expected low surrogate char")
return ''.join(string)
def isJSONNumeric(self, character):
return (True if NUMERIC_CHAR.find(character) != - 1 else False)
def readJSONQuotes(self):
if (self.context.escapeNum()):
self.readJSONSyntaxChar(QUOTE)
def readJSONNumericChars(self):
numeric = []
while True:
character = self.reader.peek()
if self.isJSONNumeric(character) is False:
break
numeric.append(self.reader.read())
return b''.join(numeric).decode('ascii')
def readJSONInteger(self):
self.context.read()
self.readJSONQuotes()
numeric = self.readJSONNumericChars()
self.readJSONQuotes()
try:
return int(numeric)
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encounted in numeric data")
def readJSONDouble(self):
self.context.read()
if self.reader.peek() == QUOTE:
string = self.readJSONString(True)
try:
double = float(string)
if (self.context.escapeNum is False and
not math.isinf(double) and
not math.isnan(double)):
raise TProtocolException(
TProtocolException.INVALID_DATA,
"Numeric data unexpectedly quoted")
return double
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encounted in numeric data")
else:
if self.context.escapeNum() is True:
self.readJSONSyntaxChar(QUOTE)
try:
return float(self.readJSONNumericChars())
except ValueError:
raise TProtocolException(TProtocolException.INVALID_DATA,
"Bad data encounted in numeric data")
def readJSONBase64(self):
string = self.readJSONString(False)
size = len(string)
m = size % 4
# Force padding since b64encode method does not allow it
if m != 0:
for i in range(4 - m):
string += '='
return base64.b64decode(string)
def readJSONObjectStart(self):
self.context.read()
self.readJSONSyntaxChar(LBRACE)
self.pushContext(JSONPairContext(self))
def readJSONObjectEnd(self):
self.readJSONSyntaxChar(RBRACE)
self.popContext()
def readJSONArrayStart(self):
self.context.read()
self.readJSONSyntaxChar(LBRACKET)
self.pushContext(JSONListContext(self))
def readJSONArrayEnd(self):
self.readJSONSyntaxChar(RBRACKET)
self.popContext()
| TJSONProtocolBase |
python | getsentry__sentry | src/sentry/seer/autofix/utils.py | {
"start": 1126,
"end": 1186
} | class ____(TypedDict):
id: int
title: str
| AutofixIssue |
python | numpy__numpy | numpy/distutils/system_info.py | {
"start": 76997,
"end": 77184
} | class ____(system_info):
section = 'cblas'
dir_env_var = 'CBLAS'
# No default as it's used only in blas_info
_lib_names = []
notfounderror = BlasNotFoundError
| cblas_info |
python | coleifer__peewee | tests/regressions.py | {
"start": 39457,
"end": 40081
} | class ____(ModelTestCase):
requires = [CPK, CPKFK]
def test_composite_pk_with_fk(self):
c1 = CPK.create(name='c1')
c2 = CPK.create(name='c2')
CPKFK.create(key='k1', cpk=c1)
CPKFK.create(key='k2', cpk=c1)
CPKFK.create(key='k3', cpk=c2)
query = (CPKFK
.select(CPKFK.key, CPK)
.join(CPK)
.order_by(CPKFK.key, CPK.name))
with self.assertQueryCount(1):
self.assertEqual([(r.key, r.cpk.name) for r in query],
[('k1', 'c1'), ('k2', 'c1'), ('k3', 'c2')])
| TestCompositePKwithFK |
python | mlflow__mlflow | mlflow/models/signature.py | {
"start": 11670,
"end": 26247
} | class ____:
def __init__(self, input_=None, output=None):
self.input = input_
self.output = output
def __repr__(self):
return f"<input: {self.input}, output: {self.output}>"
def _extract_type_hints(f, input_arg_index):
"""
Extract type hints from a function.
Args:
f: Function to extract type hints from.
input_arg_index: Index of the function argument that corresponds to the model input.
Returns:
A `_TypeHints` object containing the input and output type hints.
"""
if not hasattr(f, "__annotations__") and hasattr(f, "__call__"):
return _extract_type_hints(f.__call__, input_arg_index)
if f.__annotations__ == {}:
return _TypeHints()
arg_names = list(filter(lambda x: x != "self", _get_arg_names(f)))
if len(arg_names) - 1 < input_arg_index:
raise MlflowException.invalid_parameter_value(
f"The specified input argument index ({input_arg_index}) is out of range for the "
"function signature: {}".format(input_arg_index, arg_names)
)
arg_name = arg_names[input_arg_index]
try:
hints = get_type_hints(f)
except (
TypeError,
NameError, # To handle this issue: https://github.com/python/typing/issues/797
):
# ---
# from __future__ import annotations # postpones evaluation of 'list[str]'
#
# def f(x: list[str]) -> list[str]:
# ^^^^^^^^^ Evaluating this expression ('list[str]') results in a TypeError in
# Python < 3.9 because the built-in list type is not subscriptable.
# return x
# ---
# Best effort to infer type hints from strings
hints = {}
for arg in [arg_name, "return"]:
if hint_str := f.__annotations__.get(arg, None):
if hint := _infer_hint_from_str(hint_str):
hints[arg] = hint
else:
_logger.info("Unsupported type hint: %s, skipping schema inference", hint_str)
except Exception as e:
_logger.warning("Failed to extract type hints from function %s: %s", f.__name__, repr(e))
return _TypeHints()
return _TypeHints(hints.get(arg_name), hints.get("return"))
def _is_context_in_predict_function_signature(*, func=None, parameters=None):
if parameters is None:
if func is None:
raise ValueError("Either `func` or `parameters` must be provided.")
parameters = inspect.signature(func).parameters
return (
# predict(self, context, model_input, ...)
"context" in parameters
# predict(self, ctx, model_input, ...) ctx can be any parameter name
or len([param for param in parameters if param not in ("self", "params")]) == 2
)
@filter_user_warnings_once
def _infer_signature_from_type_hints(
python_model, context, type_hints: _TypeHints, input_example=None
) -> ModelSignature | None:
"""
Infer the signature from type hints.
"""
if type_hints.input is None:
return None
params = None
params_key = "params"
if _contains_params(input_example):
input_example, params = input_example
try:
input_schema = _infer_schema_from_list_type_hint(type_hints.input)
except InvalidTypeHintException:
raise MlflowException.invalid_parameter_value(
"The `predict` function has unsupported type hints for the model input "
"arguments. Update it to one of supported type hints, or remove type hints "
"to bypass this check. Error: {e}"
)
except Exception as e:
warnings.warn(f"Failed to infer signature from type hint: {e.message}", stacklevel=3)
return None
func = python_model if callable(python_model) else python_model.predict
# only warn if the pyfunc decorator is not used and schema can
# be inferred from the input type hint
_pyfunc_decorator_used = getattr(func, "_is_pyfunc", False)
if not _pyfunc_decorator_used:
# stacklevel is 3 because we have a decorator
warnings.warn(
"Decorate your function with `@mlflow.pyfunc.utils.pyfunc` to enable auto "
"data validation against model input type hints.",
stacklevel=3,
)
default_output_schema = Schema([ColSpec(type=AnyType())])
is_output_type_hint_valid = False
output_schema = None
if type_hints.output:
try:
# output type hint doesn't need to be a list
# but if it's a list, we infer the schema from the list type hint
# to be consistent with input schema inference
output_schema = (
_infer_schema_from_list_type_hint(type_hints.output)
if _is_list_type_hint(type_hints.output)
else _infer_schema_from_type_hint(type_hints.output)
)
is_output_type_hint_valid = True
except Exception as e:
_logger.info(
f"Failed to infer output type hint, setting output schema to AnyType. {e}",
stacklevel=2,
)
output_schema = default_output_schema
else:
output_schema = default_output_schema
params_schema = _infer_param_schema(params) if params else None
if input_example is not None:
# only validate input example here if pyfunc decorator is not used
# because when the decorator is used, the input is validated in the predict function
if not _pyfunc_decorator_used and (
msg := _get_data_validation_result(
data=input_example, type_hint=type_hints.input
).error_message
):
_logger.warning(
"Input example is not compatible with the type hint of the `predict` function. "
f"Error: {msg}"
)
else:
kwargs = (
{params_key: params}
if params and params_key in inspect.signature(func).parameters
else {}
)
# This is for PythonModel's predict function
if _is_context_in_predict_function_signature(func=func):
inputs = [None, input_example]
else:
inputs = [input_example]
_logger.info("Running the predict function to generate output based on input example")
try:
if hasattr(python_model, "load_context"):
python_model.load_context(context)
output_example = func(*inputs, **kwargs)
except Exception:
_logger.warning(
"Failed to run the predict function on input example. To see the full "
"traceback, set logging level to DEBUG.",
exc_info=_logger.isEnabledFor(logging.DEBUG),
)
else:
if is_output_type_hint_valid and (
msg := _get_data_validation_result(
data=output_example, type_hint=type_hints.output
).error_message
):
_logger.warning(
f"Failed to validate output `{output_example}` against type hint "
f"`{type_hints.output}`, setting output schema to AnyType. "
f"Error: {msg}"
)
output_schema = default_output_schema
if not any([input_schema, output_schema, params_schema]):
return None
signature = ModelSignature(inputs=input_schema, outputs=output_schema, params=params_schema)
signature._is_signature_from_type_hint = True
return signature
def _infer_signature_from_input_example(
    input_example: _Example | None, wrapped_model
) -> ModelSignature | None:
    """
    Infer the signature from an example input and a PyFunc wrapped model. Catches all exceptions.

    Args:
        input_example: Saved _Example object that contains input example instance.
        wrapped_model: A PyFunc wrapped model which has a `predict` method.

    Returns:
        A `ModelSignature` object containing the inferred schema of both the model's inputs
        based on the `input_example` and the model's outputs based on the prediction from the
        `wrapped_model`.
    """
    from mlflow.pyfunc import _validate_prediction_input

    if input_example is None:
        return None

    def _output_schema_for(prediction):
        # Infer the output schema from the prediction. When direct inference fails, fall
        # back to a langchain chat-response schema if applicable, otherwise to AnyType.
        try:
            return _infer_schema(prediction)
        except Exception:
            schema = None
            # try assign output schema if failing to infer it from prediction for langchain models
            try:
                from mlflow.langchain.model import _LangChainModelWrapper
                from mlflow.langchain.utils.chat import _ChatResponse
            except ImportError:
                pass
            else:
                if isinstance(wrapped_model, _LangChainModelWrapper) and isinstance(
                    prediction, _ChatResponse
                ):
                    schema = prediction.get_schema()
            if schema is None:
                _logger.warning(
                    "Failed to infer model output schema from prediction result, setting "
                    "output schema to AnyType. For full traceback, set logging level to debug.",
                    exc_info=_logger.isEnabledFor(logging.DEBUG),
                )
                schema = Schema([ColSpec(type=AnyType())])
            return schema

    try:
        # Work on a copy so that predict() cannot mutate the stored input example.
        data = deepcopy(input_example.inference_data)
        example_params = input_example.inference_params
        input_schema = _infer_schema(data)
        params_schema = _infer_param_schema(example_params) if example_params else None
        # Mirror the validation performed by pyfunc predict so the inferred signature is
        # known to be correctly applicable to the model.
        data, example_params = _validate_prediction_input(
            data, example_params, input_schema, params_schema
        )
        prediction = wrapped_model.predict(data, params=example_params)
        # For column-based inputs, 1D numpy arrays likely signify row-based predictions. Thus, we
        # convert them to a Pandas series for inferring as a single ColSpec Schema.
        row_based_output = (
            not input_schema.is_tensor_spec()
            and isinstance(prediction, np.ndarray)
            and prediction.ndim == 1
        )
        if row_based_output:
            prediction = pd.Series(prediction)
        return ModelSignature(input_schema, _output_schema_for(prediction), params_schema)
    except Exception as e:
        if _MLFLOW_TESTING.get():
            raise
        _logger.warning(
            _LOG_MODEL_INFER_SIGNATURE_WARNING_TEMPLATE,
            repr(e),
            exc_info=_logger.isEnabledFor(logging.DEBUG),
        )
def set_signature(
    model_uri: str,
    signature: ModelSignature,
):
    """
    Sets the model signature for specified model artifacts.

    The process involves downloading the MLmodel file in the model artifacts (if it's non-local),
    updating its model signature, and then overwriting the existing MLmodel file. Should the
    artifact repository associated with the model artifacts disallow overwriting, this function will
    fail.

    Furthermore, as model registry artifacts are read-only, model artifacts located in the
    model registry and represented by ``models:/`` URI schemes are not compatible with this API.
    To set a signature on a model version, first load the source model artifacts. Following this,
    generate a new model version using the loaded model artifacts and a corresponding signature.
    For more information about setting signatures on model versions, see
    `this doc section <https://mlflow.org/docs/latest/ml/model/signatures/#adding-signatures-to-registered-model-versions>`_.

    Args:
        model_uri: The location, in URI format, of the MLflow model. For example:

            - ``/Users/me/path/to/local/model``
            - ``relative/path/to/local/model``
            - ``s3://my_bucket/path/to/model``
            - ``runs:/<mlflow_run_id>/run-relative/path/to/model``
            - ``mlflow-artifacts:/path/to/model``
            - ``models:/<model_id>``

            For more information about supported URI schemes, see
            `Referencing Artifacts <https://www.mlflow.org/docs/latest/concepts.html#
            artifact-locations>`_.

            Please note that model URIs with the ``models:/<name>/<version>`` scheme are not
            supported.

        signature: ModelSignature to set on the model.

    Raises:
        MlflowException: If ``signature`` is not a ``ModelSignature``, if ``model_uri`` uses
            the unsupported ``models:/<name>/<version>`` scheme, or if the MLmodel file cannot
            be downloaded from ``model_uri``.

    .. code-block:: python
        :caption: Example

        import mlflow
        from mlflow.models import set_signature, infer_signature

        # load model from run artifacts
        run_id = "96771d893a5e46159d9f3b49bf9013e2"
        artifact_path = "models"
        model_uri = f"runs:/{run_id}/{artifact_path}"
        model = mlflow.pyfunc.load_model(model_uri)

        # determine model signature
        test_df = ...
        predictions = model.predict(test_df)
        signature = infer_signature(test_df, predictions)

        # set the signature for the logged model
        set_signature(model_uri, signature)
    """
    # Validate explicitly rather than with `assert`: assertions are stripped under
    # `python -O`, which would silently skip this check, and a typed MlflowException
    # is consistent with the other validation errors raised below.
    if not isinstance(signature, ModelSignature):
        raise MlflowException(
            "The signature argument must be a ModelSignature object",
            INVALID_PARAMETER_VALUE,
        )
    # Resolve indirect URI schemes down to the underlying artifact location so the
    # MLmodel file can be downloaded and re-uploaded in place.
    resolved_uri = model_uri
    if RunsArtifactRepository.is_runs_uri(model_uri):
        resolved_uri = RunsArtifactRepository.get_underlying_uri(model_uri)
    elif ModelsArtifactRepository._is_logged_model_uri(model_uri):
        resolved_uri = ModelsArtifactRepository.get_underlying_uri(model_uri)
    elif ModelsArtifactRepository.is_models_uri(model_uri):
        # Model registry artifacts are read-only; a signature cannot be set in place.
        raise MlflowException(
            f"Failed to set signature on {model_uri!r}. "
            "Model URIs with the `models:/<name>/<version>` scheme are not supported.",
            INVALID_PARAMETER_VALUE,
        )

    try:
        ml_model_file = _download_artifact_from_uri(
            artifact_uri=append_to_uri_path(resolved_uri, MLMODEL_FILE_NAME)
        )
    except Exception as ex:
        raise MlflowException(
            f'Failed to download an "{MLMODEL_FILE_NAME}" model file from "{model_uri}"',
            RESOURCE_DOES_NOT_EXIST,
        ) from ex
    # Rewrite the downloaded MLmodel metadata with the new signature and push it back.
    model_meta = Model.load(ml_model_file)
    model_meta.signature = signature
    model_meta.save(ml_model_file)
    _upload_artifact_to_uri(ml_model_file, resolved_uri)
| _TypeHints |
python | django-compressor__django-compressor | compressor/storage.py | {
"start": 4275,
"end": 4408
} | class ____(LazyObject):
def _setup(self):
self._wrapped = get_storage()
default_storage = DefaultStorage()
| DefaultStorage |
python | wandb__wandb | tests/system_tests/test_functional/dspy/dspy_callback_no_program.py | {
"start": 95,
"end": 1137
} | class ____(dspy.Module):
def __init__(self) -> None:
super().__init__()
self.predict = dspy.Predict("question: str -> answer: str")
def main() -> None:
from wandb.integration.dspy import WandbDSPyCallback
with wandb.init(project="dspy-system-test-noprogram") as run:
cb = WandbDSPyCallback(log_results=True, run=run)
class FakeEvaluate:
def __init__(self) -> None:
self.devset = []
self.num_threads = 1
self.auto = "light"
# Start without a program
cb.on_evaluate_start(call_id="c1", instance=FakeEvaluate(), inputs={})
# Still emit a valid result and ensure program_signature is logged with minimal columns
ex1 = dspy.Example(question="What is 7+1?", answer="8")
pred1 = dspy.Prediction(answer="8")
out = EvaluationResult(score=0.8, results=[(ex1, pred1, True)])
cb.on_evaluate_end(call_id="c1", outputs=out, exception=None)
if __name__ == "__main__":
main()
| MinimalProgram |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.