language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | kamyu104__LeetCode-Solutions | Python/most-profit-assigning-work.py | {
"start": 121,
"end": 699
} | class ____(object):
def maxProfitAssignment(self, difficulty, profit, worker):
"""
:type difficulty: List[int]
:type profit: List[int]
:type worker: List[int]
:rtype: int
"""
jobs = zip(difficulty, profit)
jobs.sort()
worker.sort()
result, i, max_profit = 0, 0, 0
for ability in worker:
while i < len(jobs) and jobs[i][0] <= ability:
max_profit = max(max_profit, jobs[i][1])
i += 1
result += max_profit
return result
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 986596,
"end": 987119
} | class ____(sgqlc.types.Type):
"""Autogenerated return type of StartRepositoryMigration"""
__schema__ = github_schema
__field_names__ = ("client_mutation_id", "repository_migration")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
repository_migration = sgqlc.types.Field("RepositoryMigration", graphql_name="repositoryMigration")
"""The new repository migration."""
| StartRepositoryMigrationPayload |
python | tensorflow__tensorflow | tensorflow/python/ops/image_ops_test.py | {
"start": 55589,
"end": 60475
} | class ____(test_util.TensorFlowTestCase):
def _testContrast(self, x_np, y_np, contrast_factor):
with self.cached_session():
x = constant_op.constant(x_np, shape=x_np.shape)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
self.assertAllClose(y_tf, y_np, 1e-6)
def testDoubleContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 62, 169, 255, 28, 0, 255, 135, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testDoubleContrastFloat(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.float64).reshape(x_shape) / 255.
y_data = [
-45.25, -90.75, -92.5, 62.75, 169.25, 333.5, 28.75, -84.75, 349.5,
134.75, 409.25, -116.5
]
y_np = np.array(y_data, dtype=np.float64).reshape(x_shape) / 255.
self._testContrast(x_np, y_np, contrast_factor=2.0)
def testHalfContrastUint8(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [22, 52, 65, 49, 118, 172, 41, 54, 176, 67, 178, 59]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=0.5)
def testBatchDoubleContrast(self):
x_shape = [2, 1, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
y_data = [0, 0, 0, 81, 200, 255, 10, 0, 255, 116, 255, 0]
y_np = np.array(y_data, dtype=np.uint8).reshape(x_shape)
self._testContrast(x_np, y_np, contrast_factor=2.0)
def _adjustContrastNp(self, x_np, contrast_factor):
mean = np.mean(x_np, (1, 2), keepdims=True)
y_np = mean + contrast_factor * (x_np - mean)
return y_np
def _adjustContrastTf(self, x_np, contrast_factor):
with self.cached_session():
x = constant_op.constant(x_np)
y = image_ops.adjust_contrast(x, contrast_factor)
y_tf = self.evaluate(y)
return y_tf
def testRandomContrast(self):
x_shapes = [
[1, 2, 2, 3],
[2, 1, 2, 3],
[1, 2, 2, 3],
[2, 5, 5, 3],
[2, 1, 1, 3],
]
for x_shape in x_shapes:
x_np = np.random.rand(*x_shape) * 255.
contrast_factor = np.random.rand() * 2.0 + 0.1
y_np = self._adjustContrastNp(x_np, contrast_factor)
y_tf = self._adjustContrastTf(x_np, contrast_factor)
self.assertAllClose(y_tf, y_np, rtol=1e-5, atol=1e-5)
def testContrastFactorShape(self):
x_shape = [1, 2, 2, 3]
x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape)
with self.assertRaisesRegex((ValueError, errors.InvalidArgumentError),
"contrast_factor must be scalar|"
"Shape must be rank 0 but is rank 1"):
image_ops.adjust_contrast(x_np, [2.0])
@test_util.run_in_graph_and_eager_modes
def testDeterminismUnimplementedExceptionThrowing(self):
"""Test d9m-unimplemented exception-throwing when op-determinism is enabled.
This test depends upon other tests, tests which do not enable
op-determinism, to ensure that determinism-unimplemented exceptions are not
erroneously thrown when op-determinism is not enabled.
"""
if test_util.is_xla_enabled():
self.skipTest('XLA implementation does not raise exception')
with self.session(), test_util.deterministic_ops():
input_shape = (1, 2, 2, 1)
on_gpu = len(tf_config.list_physical_devices("GPU"))
# AdjustContrast seems to now be inaccessible via the Python API.
# AdjustContrastv2 only supports float16 and float32 on GPU, and other
# types are converted to and from float32 at the Python level before
# AdjustContrastv2 is called.
dtypes_to_test = [
dtypes.uint8, dtypes.int8, dtypes.int16, dtypes.int32, dtypes.float32,
dtypes.float64
]
if on_gpu:
dtypes_to_test.append(dtypes.float16)
ctx_mgr = self.assertRaisesRegex(
errors.UnimplementedError,
"A deterministic GPU implementation of AdjustContrastv2 is not" +
" currently available.")
else:
ctx_mgr = contextlib.suppress()
for dtype in dtypes_to_test:
input_images = array_ops.zeros(input_shape, dtype=dtype)
contrast_factor = 1.
with ctx_mgr:
output_images = image_ops.adjust_contrast(input_images,
contrast_factor)
self.evaluate(output_images)
| AdjustContrastTest |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_dms.py | {
"start": 10316,
"end": 22693
} | class ____:
def setup_method(self):
self.dms = DmsHook()
def test_init(self):
assert self.dms.aws_conn_id == "aws_default"
@mock.patch.object(DmsHook, "get_conn")
def test_describe_replication_tasks_with_no_tasks_found(self, mock_conn):
mock_conn.return_value.describe_replication_tasks.return_value = {}
marker, tasks = self.dms.describe_replication_tasks()
mock_conn.return_value.describe_replication_tasks.assert_called_once()
assert marker is None
assert len(tasks) == 0
@mock.patch.object(DmsHook, "get_conn")
def test_describe_replication_tasks(self, mock_conn):
mock_conn.return_value.describe_replication_tasks.return_value = MOCK_DESCRIBE_RESPONSE
describe_tasks_kwargs = {
"Filters": [{"Name": "replication-task-id", "Values": [MOCK_DATA["replication_task_id"]]}]
}
marker, tasks = self.dms.describe_replication_tasks(**describe_tasks_kwargs)
mock_conn.return_value.describe_replication_tasks.assert_called_with(**describe_tasks_kwargs)
assert marker is None
assert len(tasks) == 1
assert tasks[0]["ReplicationTaskArn"] == MOCK_TASK_ARN
@mock.patch.object(DmsHook, "get_conn")
def test_describe_teplication_tasks_with_marker(self, mock_conn):
mock_conn.return_value.describe_replication_tasks.return_value = MOCK_DESCRIBE_RESPONSE_WITH_MARKER
describe_tasks_kwargs = {
"Filters": [{"Name": "replication-task-id", "Values": [MOCK_DATA["replication_task_id"]]}]
}
marker, tasks = self.dms.describe_replication_tasks(**describe_tasks_kwargs)
mock_conn.return_value.describe_replication_tasks.assert_called_with(**describe_tasks_kwargs)
assert marker == MOCK_DESCRIBE_RESPONSE_WITH_MARKER["Marker"]
assert len(tasks) == 1
assert tasks[0]["ReplicationTaskArn"] == MOCK_TASK_ARN
@mock.patch.object(DmsHook, "get_conn")
def test_find_replication_tasks_by_arn(self, mock_conn):
mock_conn.return_value.describe_replication_tasks.return_value = MOCK_DESCRIBE_RESPONSE
tasks = self.dms.find_replication_tasks_by_arn(MOCK_TASK_ARN)
expected_call_params = {
"Filters": [{"Name": "replication-task-arn", "Values": [MOCK_TASK_ARN]}],
"WithoutSettings": False,
}
mock_conn.return_value.describe_replication_tasks.assert_called_with(**expected_call_params)
assert len(tasks) == 1
assert tasks[0]["ReplicationTaskArn"] == MOCK_TASK_ARN
@mock.patch.object(DmsHook, "get_conn")
def test_get_task_status(self, mock_conn):
mock_conn.return_value.describe_replication_tasks.return_value = MOCK_DESCRIBE_RESPONSE
status = self.dms.get_task_status(MOCK_TASK_ARN)
expected_call_params = {
"Filters": [{"Name": "replication-task-arn", "Values": [MOCK_TASK_ARN]}],
"WithoutSettings": True,
}
mock_conn.return_value.describe_replication_tasks.assert_called_with(**expected_call_params)
assert status == MOCK_TASK_RESPONSE_DATA["Status"]
@mock.patch.object(DmsHook, "get_conn")
def test_get_task_status_with_no_task_found(self, mock_conn):
mock_conn.return_value.describe_replication_tasks.return_value = {}
status = self.dms.get_task_status(MOCK_TASK_ARN)
mock_conn.return_value.describe_replication_tasks.assert_called_once()
assert status is None
@mock.patch.object(DmsHook, "get_conn")
def test_create_replication_task(self, mock_conn):
mock_conn.return_value.create_replication_task.return_value = MOCK_CREATE_RESPONSE
result = self.dms.create_replication_task(**MOCK_DATA)
expected_call_params = {
"ReplicationTaskIdentifier": MOCK_DATA["replication_task_id"],
"SourceEndpointArn": MOCK_DATA["source_endpoint_arn"],
"TargetEndpointArn": MOCK_DATA["target_endpoint_arn"],
"ReplicationInstanceArn": MOCK_DATA["replication_instance_arn"],
"MigrationType": MOCK_DATA["migration_type"],
"TableMappings": json.dumps(MOCK_DATA["table_mappings"]),
}
mock_conn.return_value.create_replication_task.assert_called_with(**expected_call_params)
assert result == MOCK_CREATE_RESPONSE["ReplicationTask"]["ReplicationTaskArn"]
@mock.patch.object(DmsHook, "get_conn")
def test_start_replication_task(self, mock_conn):
mock_conn.return_value.start_replication_task.return_value = MOCK_START_RESPONSE
start_type = "start-replication"
self.dms.start_replication_task(
replication_task_arn=MOCK_TASK_ARN,
start_replication_task_type=start_type,
)
expected_call_params = {
"ReplicationTaskArn": MOCK_TASK_ARN,
"StartReplicationTaskType": start_type,
}
mock_conn.return_value.start_replication_task.assert_called_with(**expected_call_params)
@mock.patch.object(DmsHook, "get_conn")
def test_stop_replication_task(self, mock_conn):
mock_conn.return_value.stop_replication_task.return_value = MOCK_STOP_RESPONSE
self.dms.stop_replication_task(replication_task_arn=MOCK_TASK_ARN)
expected_call_params = {"ReplicationTaskArn": MOCK_TASK_ARN}
mock_conn.return_value.stop_replication_task.assert_called_with(**expected_call_params)
@mock.patch.object(DmsHook, "get_conn")
def test_delete_replication_task(self, mock_conn):
mock_conn.return_value.delete_replication_task.return_value = MOCK_DELETE_RESPONSE
self.dms.delete_replication_task(replication_task_arn=MOCK_TASK_ARN)
expected_call_params = {"ReplicationTaskArn": MOCK_TASK_ARN}
mock_conn.return_value.delete_replication_task.assert_called_with(**expected_call_params)
@mock.patch.object(DmsHook, "get_conn")
def test_wait_for_task_status_with_unknown_target_status(self, mock_conn):
with pytest.raises(TypeError, match="Status must be an instance of DmsTaskWaiterStatus"):
self.dms.wait_for_task_status(MOCK_TASK_ARN, "unknown_status")
@mock.patch.object(DmsHook, "get_conn")
def test_wait_for_task_status(self, mock_conn):
self.dms.wait_for_task_status(replication_task_arn=MOCK_TASK_ARN, status=DmsTaskWaiterStatus.DELETED)
expected_waiter_call_params = {
"Filters": [{"Name": "replication-task-arn", "Values": [MOCK_TASK_ARN]}],
"WithoutSettings": True,
}
mock_conn.return_value.get_waiter.assert_called_with("replication_task_deleted")
mock_conn.return_value.get_waiter.return_value.wait.assert_called_with(**expected_waiter_call_params)
@mock.patch.object(DmsHook, "conn")
def test_describe_config_no_filter(self, mock_conn):
mock_conn.describe_replication_configs.return_value = MOCK_CONFIG_RESPONSE
resp = self.dms.describe_replication_configs()
assert len(resp) == 2
@mock.patch.object(DmsHook, "conn")
def test_describe_config_filter(self, mock_conn):
filter = [{"Name": "replication-type", "Values": ["cdc"]}]
self.dms.describe_replication_configs(filters=filter)
mock_conn.describe_replication_configs.assert_called_with(Filters=filter)
@mock.patch.object(DmsHook, "conn")
def test_create_repl_config_kwargs(self, mock_conn):
self.dms.create_replication_config(
replication_config_id=MOCK_REPLICATION_CONFIG["ReplicationConfigIdentifier"],
source_endpoint_arn=MOCK_REPLICATION_CONFIG["SourceEndpointArn"],
target_endpoint_arn=MOCK_REPLICATION_CONFIG["TargetEndpointArn"],
compute_config=MOCK_REPLICATION_CONFIG["ComputeConfig"],
replication_type=MOCK_REPLICATION_CONFIG["ReplicationType"],
table_mappings=MOCK_REPLICATION_CONFIG["TableMappings"],
additional_config_kwargs={
"ReplicationSettings": MOCK_REPLICATION_CONFIG["ReplicationSettings"],
"SupplementalSettings": MOCK_REPLICATION_CONFIG["SupplementalSettings"],
},
)
mock_conn.create_replication_config.assert_called_with(
ReplicationConfigIdentifier=MOCK_REPLICATION_CONFIG["ReplicationConfigIdentifier"],
SourceEndpointArn=MOCK_REPLICATION_CONFIG["SourceEndpointArn"],
TargetEndpointArn=MOCK_REPLICATION_CONFIG["TargetEndpointArn"],
ComputeConfig=MOCK_REPLICATION_CONFIG["ComputeConfig"],
ReplicationType=MOCK_REPLICATION_CONFIG["ReplicationType"],
TableMappings=MOCK_REPLICATION_CONFIG["TableMappings"],
ReplicationSettings=MOCK_REPLICATION_CONFIG["ReplicationSettings"],
SupplementalSettings=MOCK_REPLICATION_CONFIG["SupplementalSettings"],
)
self.dms.create_replication_config(
replication_config_id=MOCK_REPLICATION_CONFIG["ReplicationConfigIdentifier"],
source_endpoint_arn=MOCK_REPLICATION_CONFIG["SourceEndpointArn"],
target_endpoint_arn=MOCK_REPLICATION_CONFIG["TargetEndpointArn"],
compute_config=MOCK_REPLICATION_CONFIG["ComputeConfig"],
replication_type=MOCK_REPLICATION_CONFIG["ReplicationType"],
table_mappings=MOCK_REPLICATION_CONFIG["TableMappings"],
)
mock_conn.create_replication_config.assert_called_with(
ReplicationConfigIdentifier=MOCK_REPLICATION_CONFIG["ReplicationConfigIdentifier"],
SourceEndpointArn=MOCK_REPLICATION_CONFIG["SourceEndpointArn"],
TargetEndpointArn=MOCK_REPLICATION_CONFIG["TargetEndpointArn"],
ComputeConfig=MOCK_REPLICATION_CONFIG["ComputeConfig"],
ReplicationType=MOCK_REPLICATION_CONFIG["ReplicationType"],
TableMappings=MOCK_REPLICATION_CONFIG["TableMappings"],
)
@mock.patch.object(DmsHook, "conn")
def test_create_repl_config(self, mock_conn):
mock_conn.create_replication_config.return_value = MOCK_REPLICATION_CONFIG_RESP
resp = self.dms.create_replication_config(
replication_config_id=MOCK_REPLICATION_CONFIG["ReplicationConfigIdentifier"],
source_endpoint_arn=MOCK_REPLICATION_CONFIG["SourceEndpointArn"],
target_endpoint_arn=MOCK_REPLICATION_CONFIG["TargetEndpointArn"],
compute_config=MOCK_REPLICATION_CONFIG["ComputeConfig"],
replication_type=MOCK_REPLICATION_CONFIG["ReplicationType"],
table_mappings=MOCK_REPLICATION_CONFIG["TableMappings"],
)
assert resp == MOCK_REPLICATION_CONFIG_RESP["ReplicationConfig"]["ReplicationConfigArn"]
@mock.patch.object(DmsHook, "conn")
def test_describe_replications(self, mock_conn):
mock_conn.describe_replication_tasks.return_value = MOCK_DESCRIBE_REPLICATIONS_RESP
resp = self.dms.describe_replication_tasks()
assert len(resp) == 2
@mock.patch.object(DmsHook, "conn")
def test_describe_replications_filter(self, mock_conn):
filter = [
{
"Name": "replication-task-id",
"Values": MOCK_DESCRIBE_REPLICATIONS_RESP["Replications"][0]["ReplicationConfigArn"],
}
]
self.dms.describe_replication_tasks(filters=filter)
mock_conn.describe_replication_tasks.assert_called_with(filters=filter)
@mock.patch.object(DmsHook, "conn")
def test_start_replication_args(self, mock_conn):
self.dms.start_replication(
replication_config_arn=MOCK_TASK_ARN,
start_replication_type="cdc",
)
mock_conn.start_replication.assert_called_with(
ReplicationConfigArn=MOCK_TASK_ARN,
StartReplicationType="cdc",
)
@mock.patch.object(DmsHook, "conn")
def test_start_replication_kwargs(self, mock_conn):
self.dms.start_replication(
replication_config_arn=MOCK_TASK_ARN,
start_replication_type="cdc",
cdc_start_time="2022-01-01T00:00:00Z",
cdc_start_pos=None,
cdc_stop_pos=None,
)
mock_conn.start_replication.assert_called_with(
ReplicationConfigArn=MOCK_TASK_ARN,
StartReplicationType="cdc",
CdcStartTime=parser.parse("2022-01-01T00:00:00Z"),
)
| TestDmsHook |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/assertsql.py | {
"start": 818,
"end": 861
} | class ____(AssertRule):
pass
| SQLMatchRule |
python | PyCQA__pyflakes | pyflakes/test/test_code_segment.py | {
"start": 222,
"end": 4496
} | class ____(TestCase):
"""
Tests for segments of a module
"""
def test_function_segment(self):
self.flakes('''
def foo():
def bar():
pass
''', is_segment=True)
self.flakes('''
def foo():
def bar():
x = 0
''', m.UnusedVariable, is_segment=True)
def test_class_segment(self):
self.flakes('''
class Foo:
class Bar:
pass
''', is_segment=True)
self.flakes('''
class Foo:
def bar():
x = 0
''', m.UnusedVariable, is_segment=True)
def test_scope_class(self):
checker = self.flakes('''
class Foo:
x = 0
def bar(a, b=1, *d, **e):
pass
''', is_segment=True)
scopes = checker.deadScopes
module_scopes = [
scope for scope in scopes if scope.__class__ is ModuleScope]
class_scopes = [
scope for scope in scopes if scope.__class__ is ClassScope]
function_scopes = [
scope for scope in scopes if scope.__class__ is FunctionScope]
# Ensure module scope is not present because we are analysing
# the inner contents of Foo
self.assertEqual(len(module_scopes), 0)
self.assertEqual(len(class_scopes), 1)
self.assertEqual(len(function_scopes), 1)
class_scope = class_scopes[0]
function_scope = function_scopes[0]
self.assertIsInstance(class_scope, ClassScope)
self.assertIsInstance(function_scope, FunctionScope)
self.assertIn('x', class_scope)
self.assertIn('bar', class_scope)
self.assertIn('a', function_scope)
self.assertIn('b', function_scope)
self.assertIn('d', function_scope)
self.assertIn('e', function_scope)
self.assertIsInstance(class_scope['bar'], FunctionDefinition)
self.assertIsInstance(class_scope['x'], Assignment)
self.assertIsInstance(function_scope['a'], Argument)
self.assertIsInstance(function_scope['b'], Argument)
self.assertIsInstance(function_scope['d'], Argument)
self.assertIsInstance(function_scope['e'], Argument)
def test_scope_function(self):
checker = self.flakes('''
def foo(a, b=1, *d, **e):
def bar(f, g=1, *h, **i):
pass
''', is_segment=True)
scopes = checker.deadScopes
module_scopes = [
scope for scope in scopes if scope.__class__ is ModuleScope]
function_scopes = [
scope for scope in scopes if scope.__class__ is FunctionScope]
# Ensure module scope is not present because we are analysing
# the inner contents of foo
self.assertEqual(len(module_scopes), 0)
self.assertEqual(len(function_scopes), 2)
function_scope_foo = function_scopes[1]
function_scope_bar = function_scopes[0]
self.assertIsInstance(function_scope_foo, FunctionScope)
self.assertIsInstance(function_scope_bar, FunctionScope)
self.assertIn('a', function_scope_foo)
self.assertIn('b', function_scope_foo)
self.assertIn('d', function_scope_foo)
self.assertIn('e', function_scope_foo)
self.assertIn('bar', function_scope_foo)
self.assertIn('f', function_scope_bar)
self.assertIn('g', function_scope_bar)
self.assertIn('h', function_scope_bar)
self.assertIn('i', function_scope_bar)
self.assertIsInstance(function_scope_foo['bar'], FunctionDefinition)
self.assertIsInstance(function_scope_foo['a'], Argument)
self.assertIsInstance(function_scope_foo['b'], Argument)
self.assertIsInstance(function_scope_foo['d'], Argument)
self.assertIsInstance(function_scope_foo['e'], Argument)
self.assertIsInstance(function_scope_bar['f'], Argument)
self.assertIsInstance(function_scope_bar['g'], Argument)
self.assertIsInstance(function_scope_bar['h'], Argument)
self.assertIsInstance(function_scope_bar['i'], Argument)
def test_scope_async_function(self):
self.flakes('async def foo(): pass', is_segment=True)
| TestCodeSegments |
python | oauthlib__oauthlib | tests/openid/connect/core/grant_types/test_authorization_code.py | {
"start": 588,
"end": 854
} | class ____(AuthorizationCodeGrantTest):
"""Test that OpenID don't interfere with normal OAuth 2 flows."""
def setUp(self):
super().setUp()
self.auth = AuthorizationCodeGrant(request_validator=self.mock_validator)
| OpenIDAuthCodeInterferenceTest |
python | psf__requests | src/requests/exceptions.py | {
"start": 2158,
"end": 2375
} | class ____(RequestException):
"""The request timed out.
Catching this error will catch both
:exc:`~requests.exceptions.ConnectTimeout` and
:exc:`~requests.exceptions.ReadTimeout` errors.
"""
| Timeout |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassKwOnly1.py | {
"start": 124,
"end": 320
} | class ____:
a: str
_: KW_ONLY
b: int = 0
DC1("hi")
DC1(a="hi")
DC1(a="hi", b=1)
DC1("hi", b=1)
# This should generate an error because "b" is keyword-only.
DC1("hi", 1)
@dataclass
| DC1 |
python | huggingface__transformers | src/transformers/models/eomt/image_processing_eomt.py | {
"start": 7676,
"end": 40441
} | class ____(BaseImageProcessor):
r"""
Constructs a EoMT image processor. The image processor can be used to prepare image(s) and optional targets
for the model.
This image processor inherits from [`BaseImageProcessor`] which contains most of the main methods. Users should
refer to this superclass for more information regarding those methods.
Args:
do_resize (`bool`, *optional*, defaults to `True`):
Whether to resize the input to a certain `size`.
size (`int`, *optional*, defaults to 640):
Resize the input to the given size. Only has an effect if `do_resize` is set to `True`. If size is a
sequence like `(width, height)`, output size will be matched to this. If size is an int, smaller edge of
the image will be matched to this number. i.e, if `height > width`, then image will be rescaled to `(size *
height / width, size)`.
resample (`int`, *optional*, defaults to `Resampling.BILINEAR`):
An optional resampling filter. This can be one of `PIL.Image.Resampling.NEAREST`,
`PIL.Image.Resampling.BOX`, `PIL.Image.Resampling.BILINEAR`, `PIL.Image.Resampling.HAMMING`,
`PIL.Image.Resampling.BICUBIC` or `PIL.Image.Resampling.LANCZOS`. Only has an effect if `do_resize` is set
to `True`.
do_rescale (`bool`, *optional*, defaults to `True`):
Whether to rescale the input to a certain `scale`.
rescale_factor (`float`, *optional*, defaults to `1/ 255`):
Rescale the input by the given factor. Only has an effect if `do_rescale` is set to `True`.
do_normalize (`bool`, *optional*, defaults to `True`):
Whether or not to normalize the input with mean and standard deviation.
do_split_image (`bool`, *optional*, defaults to `False`):
Whether to split the input images into overlapping patches for semantic segmentation. If set to `True`, the
input images will be split into patches of size `size["shortest_edge"]` with an overlap between patches.
Otherwise, the input images will be padded to the target size.
do_pad (`bool`, *optional*, defaults to `False`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
image_mean (`int`, *optional*, defaults to `[0.485, 0.456, 0.406]`):
The sequence of means for each channel, to be used when normalizing images. Defaults to the ImageNet mean.
image_std (`int`, *optional*, defaults to `[0.229, 0.224, 0.225]`):
The sequence of standard deviations for each channel, to be used when normalizing images. Defaults to the
ImageNet std.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
num_labels (`int`, *optional*):
The number of labels in the segmentation map.
"""
model_input_names = ["pixel_values"]
def __init__(
self,
do_resize: bool = True,
size: Optional[dict[str, int]] = None,
resample: PILImageResampling = PILImageResampling.BILINEAR,
do_rescale: bool = True,
rescale_factor: float = 1 / 255,
do_normalize: bool = True,
do_split_image: bool = False,
do_pad: bool = False,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
ignore_index: Optional[int] = None,
num_labels: Optional[int] = None,
**kwargs,
):
super().__init__(**kwargs)
size = size if size is not None else {"shortest_edge": 640, "longest_edge": 640}
size = get_size_dict(size, default_to_square=False)
self.do_resize = do_resize
self.size = size
self.resample = resample
self.do_rescale = do_rescale
self.rescale_factor = rescale_factor
self.do_normalize = do_normalize
self.do_split_image = do_split_image
self.do_pad = do_pad
self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
self.ignore_index = ignore_index
self.num_labels = num_labels
def resize(
self,
image: np.ndarray,
size: dict,
resample: PILImageResampling = PILImageResampling.BILINEAR,
data_format=None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
**kwargs,
) -> np.ndarray:
"""
Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
resized to keep the input aspect ratio.
Args:
image (`np.ndarray`):
Image to resize.
resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
Resampling filter to use when resiizing the image.
data_format (`str` or `ChannelDimension`, *optional*):
The channel dimension format of the image. If not provided, it will be the same as the input image.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
"""
image_size = get_image_size(image)
output_size = get_size_with_aspect_ratio(image_size, size["shortest_edge"], size["longest_edge"])
image = resize(
image=image,
size=output_size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
return_numpy=True,
**kwargs,
)
return image
def _split_image(self, image: ImageInput, size: dict, image_index: int) -> tuple[list, list]:
"""Slices an image into overlapping patches for semantic segmentation."""
patches, patch_offsets = [], []
image_size = get_image_size(image)
patch_size = size["shortest_edge"]
longer_side = max(image_size)
num_patches = math.ceil(longer_side / patch_size)
total_overlap = num_patches * patch_size - longer_side
overlap_per_patch = total_overlap / (num_patches - 1) if num_patches > 1 else 0
for i in range(num_patches):
start = int(i * (patch_size - overlap_per_patch))
end = start + patch_size
if image_size[0] > image_size[1]:
patch = image[:, start:end, :]
else:
patch = image[:, :, start:end]
patches.append(patch)
patch_offsets.append([image_index, start, end])
return patches, patch_offsets
def _pad(self, image: ImageInput, size: dict) -> np.ndarray:
"""Pads the image to the target size using zero padding."""
height, width = get_image_size(image)
target_height, target_width = get_target_size(size)
pad_h = max(0, target_height - height)
pad_w = max(0, target_width - width)
padding = ((0, pad_h), (0, pad_w))
# Channel axis is last; default padding format is compatible
padded_image = pad(image=image, padding=padding, mode=PaddingMode.CONSTANT, constant_values=0.0)
return padded_image
def _preprocess_images(
self,
images: ImageInput,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_split_image: Optional[bool] = None,
do_pad: Optional[bool] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
data_format: Optional[Union[str, ChannelDimension]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a batch of images."""
images = [to_numpy_array(image) for image in images]
if do_resize:
images = [
self.resize(
image,
size=size,
resample=resample,
data_format=data_format,
input_data_format=input_data_format,
)
for image in images
]
processed_images, patch_offsets = [], []
if do_split_image:
for idx, img in enumerate(images):
patches, offsets = self._split_image(img, size, idx)
processed_images.extend(patches)
patch_offsets.extend(offsets)
images = processed_images
if do_pad:
images = [self._pad(img, size) for img in images]
if do_rescale:
images = [self.rescale(img, scale=rescale_factor, input_data_format=input_data_format) for img in images]
if do_normalize:
images = [
self.normalize(
image,
mean=image_mean,
std=image_std,
input_data_format=input_data_format,
)
for image in images
]
return images, patch_offsets
def _preprocess_mask(
self,
segmentation_map: ImageInput,
do_resize: Optional[bool] = False,
do_pad: Optional[bool] = False,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
data_format: Union[str, ChannelDimension] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> np.ndarray:
"""Preprocesses a single mask."""
# Add channel dimension if missing - needed for certain transformations
if segmentation_map.ndim == 2:
added_channel_dim = True
segmentation_map = segmentation_map[None, ...]
input_data_format = ChannelDimension.FIRST
else:
added_channel_dim = False
if input_data_format is None:
input_data_format = infer_channel_dimension_format(segmentation_map)
if do_resize:
segmentation_map = self.resize(
segmentation_map,
size=size,
resample=resample,
data_format=data_format,
)
if do_pad:
segmentation_map = self._pad(segmentation_map, size)
# Remove extra channel dimension if added for processing
if added_channel_dim:
segmentation_map = segmentation_map.squeeze(0)
return torch.from_numpy(segmentation_map)
@filter_out_non_signature_kwargs()
def preprocess(
self,
images: ImageInput,
segmentation_maps: Optional[Union[list[dict[int, int]], dict[int, int]]] = None,
instance_id_to_semantic_id: Optional[dict[int, int]] = None,
do_split_image: Optional[bool] = None,
do_resize: Optional[bool] = None,
size: Optional[dict[str, int]] = None,
resample: Optional[PILImageResampling] = None,
do_rescale: Optional[bool] = None,
rescale_factor: Optional[float] = None,
do_normalize: Optional[bool] = None,
do_pad: Optional[bool] = None,
image_mean: Optional[Union[float, list[float]]] = None,
image_std: Optional[Union[float, list[float]]] = None,
ignore_index: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> BatchFeature:
"""
Preprocesses images or a batch of images.
Args:
images (`ImageInput`):
Image or batch of images to preprocess.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
A mapping between object instance ids and class ids.
do_split_image (`bool`, *optional*, defaults to `self.do_split_image`):
Whether to split the input images into overlapping patches for semantic segmentation.
do_resize (`bool`, *optional*, defaults to `self.do_resize`):
Whether to resize the input images.
size (`dict[str, int]`, *optional*, defaults to `self.size`):
Target size as a dictionary with `"shortest_edge"` and `"longest_edge"` keys.
resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
Resampling filter to use when resizing.
do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
Whether to rescale the input images by `rescale_factor`.
rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
Factor to scale image pixel values.
do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
Whether to normalize the input images.
do_pad (`bool`, *optional*, defaults to `False`):
Whether to pad the image. If `True`, will pad the patch dimension of the images in the batch to the largest
number of patches in the batch. Padding will be applied to the bottom and right with zeros.
image_mean (`float` or `list[float]`, *optional*, defaults to `self.image_mean`):
Mean for normalization. Single value or list for each channel.
image_std (`float` or `list[float]`, *optional*, defaults to `self.image_std`):
Standard deviation for normalization. Single value or list for each channel.
ignore_index (`int`, *optional*):
Label to be assigned to background pixels in segmentation maps. If provided, segmentation map pixels
denoted with 0 (background) will be replaced with `ignore_index`.
return_tensors (`str` or `TensorType`, *optional*):
The type of tensors to return. Can be `"pt"` or `"np"`.
data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
Channel format of the output image. Either `"channels_first"` or `"channels_last"`.
input_data_format (`ChannelDimension` or `str`, *optional*):
Channel format of the input image.
"""
do_split_image = do_split_image if do_split_image is not None else self.do_split_image
do_resize = do_resize if do_resize is not None else self.do_resize
size = size if size is not None else self.size
size = get_size_dict(size, default_to_square=False)
resample = resample if resample is not None else self.resample
do_rescale = do_rescale if do_rescale is not None else self.do_rescale
rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
do_normalize = do_normalize if do_normalize is not None else self.do_normalize
do_pad = do_pad if do_pad is not None else self.do_pad
image_mean = image_mean if image_mean is not None else self.image_mean
image_std = image_std if image_std is not None else self.image_std
ignore_index = ignore_index if ignore_index is not None else self.ignore_index
images = self.fetch_images(images)
images = make_flat_list_of_images(images)
if not valid_images(images):
raise ValueError("Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, or torch.Tensor")
validate_preprocess_arguments(
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
do_resize=do_resize,
size=size,
resample=resample,
)
pixel_values_list, patch_offsets = self._preprocess_images(
images=images,
do_resize=do_resize,
size=size,
resample=resample,
do_split_image=do_split_image,
do_pad=do_pad,
do_rescale=do_rescale,
rescale_factor=rescale_factor,
do_normalize=do_normalize,
image_mean=image_mean,
image_std=image_std,
data_format=data_format,
input_data_format=input_data_format,
)
if segmentation_maps is not None:
segmentation_maps = make_flat_list_of_images(segmentation_maps, expected_ndims=2)
segmentation_maps = [to_numpy_array(mask) for mask in segmentation_maps]
segmentation_maps = [
self._preprocess_mask(
segmentation_map,
do_resize=do_resize,
do_pad=do_pad,
size=size,
resample=PILImageResampling.NEAREST,
data_format=data_format,
input_data_format=input_data_format,
)
for segmentation_map in segmentation_maps
]
encoded_inputs = self.encode_inputs(
pixel_values_list,
segmentation_maps,
instance_id_to_semantic_id,
ignore_index,
return_tensors,
input_data_format=data_format,
)
if do_split_image and patch_offsets:
encoded_inputs["patch_offsets"] = [torch.tensor(offsets) for offsets in patch_offsets]
return encoded_inputs
def encode_inputs(
self,
pixel_values_list: list[ImageInput],
segmentation_maps: Optional[ImageInput] = None,
instance_id_to_semantic_id: Optional[Union[list[dict[int, int]], dict[int, int]]] = None,
ignore_index: Optional[int] = None,
return_tensors: Optional[Union[str, TensorType]] = None,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
"""
Pad images up to the largest image in a batch and create a corresponding `pixel_mask`.
EoMT addresses semantic segmentation with a mask classification paradigm, thus input segmentation maps
will be converted to lists of binary masks and their respective labels. Let's see an example, assuming
`segmentation_maps = [[2,6,7,9]]`, the output will contain `mask_labels =
[[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]]` (four binary masks) and `class_labels = [2,6,7,9]`, the labels for
each mask.
Args:
pixel_values_list (`list[ImageInput]`):
list of images (pixel values) to be padded. Each image should be a tensor of shape `(channels, height,
width)`.
segmentation_maps (`ImageInput`, *optional*):
The corresponding semantic segmentation maps with the pixel-wise annotations.
(`bool`, *optional*, defaults to `True`):
Whether or not to pad images up to the largest image in a batch and create a pixel mask.
If left to the default, will return a pixel mask that is:
- 1 for pixels that are real (i.e. **not masked**),
- 0 for pixels that are padding (i.e. **masked**).
instance_id_to_semantic_id (`list[dict[int, int]]` or `dict[int, int]`, *optional*):
A mapping between object instance ids and class ids. If passed, `segmentation_maps` is treated as an
instance segmentation map where each pixel represents an instance id. Can be provided as a single
dictionary with a global/dataset-level mapping or as a list of dictionaries (one per image), to map
instance ids in each image separately.
return_tensors (`str` or [`~file_utils.TensorType`], *optional*):
If set, will return tensors instead of NumPy arrays. If set to `'pt'`, return PyTorch `torch.Tensor`
objects.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format of the input image. If not provided, it will be inferred.
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **pixel_values** -- Pixel values to be fed to a model.
- **mask_labels** -- Optional list of mask labels of shape `(labels, height, width)` to be fed to a model
(when `annotations` are provided).
- **class_labels** -- Optional list of class labels of shape `(labels)` to be fed to a model (when
`annotations` are provided). They identify the labels of `mask_labels`, e.g. the label of
`mask_labels[i][j]` if `class_labels[i][j]`.
"""
ignore_index = self.ignore_index if ignore_index is None else ignore_index
pixel_values_list = [to_numpy_array(pixel_values) for pixel_values in pixel_values_list]
if input_data_format is None:
input_data_format = infer_channel_dimension_format(pixel_values_list[0])
encoded_inputs = BatchFeature({"pixel_values": pixel_values_list}, tensor_type=return_tensors)
if segmentation_maps is not None:
mask_labels = []
class_labels = []
# Convert to list of binary masks and labels
for idx, segmentation_map in enumerate(segmentation_maps):
segmentation_map = to_numpy_array(segmentation_map)
if isinstance(instance_id_to_semantic_id, list):
instance_id = instance_id_to_semantic_id[idx]
else:
instance_id = instance_id_to_semantic_id
# Use instance2class_id mapping per image
masks, classes = convert_segmentation_map_to_binary_masks(
segmentation_map,
instance_id,
ignore_index=ignore_index,
)
mask_labels.append(torch.from_numpy(masks))
class_labels.append(torch.from_numpy(classes))
# we cannot batch them since they don't share a common class size
encoded_inputs["mask_labels"] = mask_labels
encoded_inputs["class_labels"] = class_labels
return encoded_inputs
def merge_image_patches(
self,
segmentation_logits: torch.Tensor,
patch_offsets: list[tuple[int, int, int]],
target_sizes: list[tuple[int, int]],
size: dict[str, int],
) -> list[torch.Tensor]:
"""
Reconstructs full-size semantic segmentation logits from patch predictions.
Args:
segmentation_logits (`torch.Tensor`):
A tensor of shape `(num_patches, num_classes, patch_height, patch_width)` representing predicted logits
for each image patch.
patch_offsets (`list[tuple[int, int, int]]`):
A list of tuples where each tuple contains:
- `image_index` (int): Index of the original image this patch belongs to.
- `start` (int): Start pixel index of the patch along the long dimension (height or width).
- `end` (int): End pixel index of the patch along the long dimension.
target_sizes (`list[tuple[int, int]]`):
list of original (height, width) dimensions for each image before preprocessing.
size (`dict[str, int]`):
A size dict which was used to resize.
"""
num_classes = segmentation_logits.shape[1]
aggregated_logits = []
patch_counts = []
for image_size in target_sizes:
height, width = get_size_with_aspect_ratio(image_size, size["shortest_edge"], size["longest_edge"])
aggregated_logits.append(torch.zeros((num_classes, height, width), device=segmentation_logits.device))
patch_counts.append(torch.zeros((num_classes, height, width), device=segmentation_logits.device))
# Stitch patches back into full-sized logit maps
for patch_idx, (image_idx, patch_start, patch_end) in enumerate(patch_offsets):
if target_sizes[image_idx][0] > target_sizes[image_idx][1]:
aggregated_logits[image_idx][:, patch_start:patch_end, :] += segmentation_logits[patch_idx]
patch_counts[image_idx][:, patch_start:patch_end, :] += 1
else:
aggregated_logits[image_idx][:, :, patch_start:patch_end] += segmentation_logits[patch_idx]
patch_counts[image_idx][:, :, patch_start:patch_end] += 1
# Normalize and resize logits to original image size
reconstructed_logits = []
for idx, (logit_sum, count) in enumerate(zip(aggregated_logits, patch_counts)):
averaged_logits = logit_sum / count.clamp(min=1)
resized_logits = F.interpolate(
averaged_logits[None, ...],
size=target_sizes[idx],
mode="bilinear",
align_corners=False,
)[0]
reconstructed_logits.append(resized_logits)
return reconstructed_logits
def unpad_image(
self,
segmentation_logits: torch.Tensor,
target_sizes: list[tuple[int, int]],
size: dict[str, int],
) -> list[torch.Tensor]:
"""Restores panoptic segmentation logits to their original image resolutions."""
resized_logits = []
for idx, original_size in enumerate(target_sizes):
target_height, target_width = get_size_with_aspect_ratio(
original_size, size["shortest_edge"], size["longest_edge"]
)
cropped_logits = segmentation_logits[idx][:, :target_height, :target_width]
upsampled_logits = F.interpolate(
cropped_logits[None, ...], size=original_size, mode="bilinear", align_corners=False
)[0]
resized_logits.append(upsampled_logits)
return resized_logits
def post_process_semantic_segmentation(
self,
outputs,
target_sizes: list[tuple[int, int]],
size: Optional[dict[str, int]] = None,
) -> np.ndarray:
"""Post-processes model outputs into final semantic segmentation prediction."""
size = size if size is not None else self.size
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
patch_offsets = outputs.patch_offsets
output_size = get_target_size(size)
masks_queries_logits = F.interpolate(
masks_queries_logits,
size=output_size,
mode="bilinear",
)
# Remove the null class `[..., :-1]`
masks_classes = class_queries_logits.softmax(dim=-1)[..., :-1]
masks_probs = masks_queries_logits.sigmoid() # [batch_size, num_queries, height, width]
segmentation_logits = torch.einsum("bqc, bqhw -> bchw", masks_classes, masks_probs)
output_logits = self.merge_image_patches(segmentation_logits, patch_offsets, target_sizes, size)
preds = [logit.argmax(dim=0) for logit in output_logits]
return preds
def post_process_panoptic_segmentation(
self,
outputs,
target_sizes: list[tuple[int, int]],
threshold: float = 0.8,
mask_threshold: float = 0.5,
overlap_mask_area_threshold: float = 0.8,
stuff_classes: Optional[list[int]] = None,
size: Optional[dict[str, int]] = None,
):
"""Post-processes model outputs into final panoptic segmentation prediction."""
size = size if size is not None else self.size
masks_queries_logits = outputs.masks_queries_logits # [batch_size, num_queries, height, width]
class_queries_logits = outputs.class_queries_logits # [batch_size, num_queries, num_classes+1]
batch_size = class_queries_logits.shape[0]
num_labels = class_queries_logits.shape[-1] - 1
output_size = get_target_size(size)
masks_queries_logits = F.interpolate(
masks_queries_logits,
size=output_size,
mode="bilinear",
)
mask_probs_batch = self.unpad_image(masks_queries_logits, target_sizes, size)
pred_scores_batch, pred_labels_batch = class_queries_logits.softmax(dim=-1).max(-1)
results: list = []
for i in range(batch_size):
mask_probs, pred_scores, pred_labels = remove_low_and_no_objects(
mask_probs_batch[i], pred_scores_batch[i], pred_labels_batch[i], threshold, num_labels
)
# No mask found
if mask_probs.shape[0] <= 0:
height, width = target_sizes[i] if target_sizes is not None else mask_probs.shape[1:]
segmentation = torch.zeros((height, width)) - 1
results.append({"segmentation": segmentation, "segments_info": []})
continue
segmentation, segments = compute_segments(
mask_probs=mask_probs,
pred_scores=pred_scores,
pred_labels=pred_labels,
stuff_classes=stuff_classes,
mask_threshold=mask_threshold,
overlap_mask_area_threshold=overlap_mask_area_threshold,
target_size=target_sizes[i] if target_sizes is not None else None,
)
results.append({"segmentation": segmentation, "segments_info": segments})
return results
@filter_out_non_signature_kwargs()
def post_process_instance_segmentation(
self,
outputs,
target_sizes: list[tuple[int, int]],
threshold: float = 0.5,
size: Optional[dict[str, int]] = None,
):
"""Post-processes model outputs into Instance Segmentation Predictions."""
size = size if size is not None else self.size
class_queries_logits = outputs.class_queries_logits
masks_queries_logits = outputs.masks_queries_logits
output_size = get_target_size(size)
masks_queries_logits = F.interpolate(
masks_queries_logits,
size=output_size,
mode="bilinear",
)
mask_probs_batch = self.unpad_image(masks_queries_logits, target_sizes, size)
device = masks_queries_logits.device
batch_size = class_queries_logits.shape[0]
num_queries = class_queries_logits.shape[-2]
results = []
for i in range(batch_size):
mask_pred = mask_probs_batch[i]
mask_class = class_queries_logits[i]
# Remove the null class `[..., :-1]`
scores, pred_classes = mask_class.softmax(dim=-1)[..., :-1].max(-1)
pred_masks = (mask_pred > 0).float()
# Calculate average mask prob
mask_scores = (mask_pred.sigmoid().flatten(1) * pred_masks.flatten(1)).sum(1) / (
pred_masks.flatten(1).sum(1) + 1e-6
)
pred_scores = scores * mask_scores
segmentation = torch.zeros(target_sizes[i], device=device) - 1
instance_maps, segments = [], []
current_segment_id = 0
for j in range(num_queries):
score = pred_scores[j].item()
if not torch.all(pred_masks[j] == 0) and score >= threshold:
segmentation[pred_masks[j] == 1] = current_segment_id
segments.append(
{
"id": current_segment_id,
"label_id": pred_classes[j].item(),
"score": round(score, 6),
}
)
current_segment_id += 1
instance_maps.append(pred_masks[j])
results.append({"segmentation": segmentation, "segments_info": segments})
return results
__all__ = ["EomtImageProcessor"]
| EomtImageProcessor |
python | altair-viz__altair | altair/datasets/_typing.py | {
"start": 2097,
"end": 6614
} | class ____(TypedDict, total=False):
"""
Full schema for ``metadata.parquet``.
Parameters
----------
dataset_name
Name of the dataset from the resource name field.
suffix
File extension/`Path.suffix`_.
file_name
Equivalent to `Path.name`_.
bytes
File size in *bytes*.
is_image
Only accessible via url.
is_tabular
Can be read as tabular data.
is_geo
`GeoJSON`_ format.
is_topo
`TopoJSON`_ format.
is_spatial
Any geospatial format. Only natively supported by ``polars``.
is_json
Not supported natively by ``pyarrow``.
has_schema
Data types available for improved ``pandas`` parsing.
sha
Unique hash for the dataset.
.. note::
E.g. if the dataset did *not* change between ``v1.0.0``-``v2.0.0``;
then this value would remain stable.
url
Remote url used to access dataset.
.. _Path.stem:
https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.stem
.. _Path.name:
https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.name
.. _Path.suffix:
https://docs.python.org/3/library/pathlib.html#pathlib.PurePath.suffix
.. _GeoJSON:
https://en.wikipedia.org/wiki/GeoJSON
.. _TopoJSON:
https://en.wikipedia.org/wiki/GeoJSON#TopoJSON
Examples
--------
``Metadata`` keywords form constraints to filter a table like the below sample:
```
shape: (73, 13)
┌────────────────┬────────┬────────────────┬───┬───────────────┬───────────────┐
│ dataset_name ┆ suffix ┆ file_name ┆ … ┆ sha ┆ url │
│ --- ┆ --- ┆ --- ┆ ┆ --- ┆ --- │
│ str ┆ str ┆ str ┆ ┆ str ┆ str │
╞════════════════╪════════╪════════════════╪═══╪═══════════════╪═══════════════╡
│ airports ┆ .csv ┆ airports.csv ┆ … ┆ 608ba6d51fa70 ┆ https://cdn.j │
│ ┆ ┆ ┆ ┆ 584c3fa1d31e… ┆ sdelivr.net/… │
│ annual_precip ┆ .json ┆ annual-precip. ┆ … ┆ 719e73406cfc0 ┆ https://cdn.j │
│ ┆ ┆ json ┆ ┆ 8f16dda65151… ┆ sdelivr.net/… │
│ anscombe ┆ .json ┆ anscombe.json ┆ … ┆ 11ae97090b626 ┆ https://cdn.j │
│ ┆ ┆ ┆ ┆ 3bdf0c866115… ┆ sdelivr.net/… │
│ barley ┆ .json ┆ barley.json ┆ … ┆ 8dc50de2509b6 ┆ https://cdn.j │
│ ┆ ┆ ┆ ┆ e197ce95c24c… ┆ sdelivr.net/… │
│ birdstrikes ┆ .csv ┆ birdstrikes.cs ┆ … ┆ 1b8b190c9bc02 ┆ https://cdn.j │
│ ┆ ┆ v ┆ ┆ ef7bcbfe5a8a… ┆ sdelivr.net/… │
│ … ┆ … ┆ … ┆ … ┆ … ┆ … │
│ weekly_weather ┆ .json ┆ weekly-weather ┆ … ┆ bd42a3e2403e7 ┆ https://cdn.j │
│ ┆ ┆ .json ┆ ┆ ccd6baaa89f9… ┆ sdelivr.net/… │
│ wheat ┆ .json ┆ wheat.json ┆ … ┆ cde46b43fc82f ┆ https://cdn.j │
│ ┆ ┆ ┆ ┆ 4c3c2a37ddcf… ┆ sdelivr.net/… │
│ windvectors ┆ .csv ┆ windvectors.cs ┆ … ┆ ed686b0ba613a ┆ https://cdn.j │
│ ┆ ┆ v ┆ ┆ bd59d09fcd94… ┆ sdelivr.net/… │
│ world_110m ┆ .json ┆ world-110m.jso ┆ … ┆ a1ce852de6f27 ┆ https://cdn.j │
│ ┆ ┆ n ┆ ┆ 13c94c0c2840… ┆ sdelivr.net/… │
│ zipcodes ┆ .csv ┆ zipcodes.csv ┆ … ┆ d3df33e12be0d ┆ https://cdn.j │
│ ┆ ┆ ┆ ┆ 0544c95f1bd4… ┆ sdelivr.net/… │
└────────────────┴────────┴────────────────┴───┴───────────────┴───────────────┘
```
"""
dataset_name: Dataset | LiteralString
suffix: Extension
file_name: str
bytes: int
is_image: bool
is_tabular: bool
is_geo: bool
is_topo: bool
is_spatial: bool
is_json: bool
has_schema: bool
sha: str
url: str
FlFieldStr: TypeAlias = Literal[
"integer",
"number",
"boolean",
"string",
"object",
"array",
"date",
"datetime",
"time",
"duration",
]
"""
String representation of `frictionless`_ `Field Types`_.
.. _frictionless:
https://github.com/frictionlessdata/frictionless-py
.. _Field Types:
https://datapackage.org/standard/table-schema/#field-types
"""
| Metadata |
python | nryoung__algorithms | tests/test_searching.py | {
"start": 1229,
"end": 1590
} | class ____(unittest.TestCase):
"""
Tests BMH search on string "ABCDE FG ABCDEABCDEF"
"""
def test_bmhsearch(self):
self.string = "ABCDE FG ABCDEABCDEF"
rv1 = bmh_search.search(self.string, "ABCDEA")
rv2 = bmh_search.search(self.string, "ABCDER")
self.assertIs(rv1[0], 9)
self.assertFalse(rv2)
| TestBMHSearch |
python | ray-project__ray | python/ray/autoscaler/v2/tests/test_node_provider.py | {
"start": 3917,
"end": 4630
} | class ____(CloudInstanceProviderTesterBase):
def __init__(self, **kwargs):
self.config_reader = FileConfigReader(
get_test_config_path("test_ray_complex.yaml"), skip_content_hash=True
)
self.config = self.config_reader.get_cached_autoscaling_config()
self.base_provider = MockProvider()
provider = NodeProviderAdapter(
self.base_provider,
self.config_reader,
)
super().__init__(provider, self.config)
def _add_creation_error(self, e: Exception):
self.base_provider.creation_error = e
def _add_termination_errors(self, e: Exception):
self.base_provider.termination_errors = e
| MockProviderTester |
python | django__django | tests/contenttypes_tests/test_models.py | {
"start": 430,
"end": 12509
} | class ____(TestCase):
def setUp(self):
ContentType.objects.clear_cache()
self.addCleanup(ContentType.objects.clear_cache)
def test_lookup_cache(self):
"""
The content type cache (see ContentTypeManager) works correctly.
Lookups for a particular content type -- by model, ID, or natural key
-- should hit the database only on the first lookup.
"""
# At this point, a lookup for a ContentType should hit the DB
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# A second hit, though, won't hit the DB, nor will a lookup by ID
# or natural key
with self.assertNumQueries(0):
ct = ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_id(ct.id)
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key("contenttypes", "contenttype")
# Once we clear the cache, another lookup will again hit the DB
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
# The same should happen with a lookup by natural key
ContentType.objects.clear_cache()
with self.assertNumQueries(1):
ContentType.objects.get_by_natural_key("contenttypes", "contenttype")
# And a second hit shouldn't hit the DB
with self.assertNumQueries(0):
ContentType.objects.get_by_natural_key("contenttypes", "contenttype")
def test_get_for_models_creation(self):
ContentType.objects.all().delete()
with self.assertNumQueries(4):
cts = ContentType.objects.get_for_models(
ContentType, FooWithUrl, ProxyModel, ConcreteModel
)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
ProxyModel: ContentType.objects.get_for_model(ProxyModel),
ConcreteModel: ContentType.objects.get_for_model(ConcreteModel),
},
)
def test_get_for_models_empty_cache(self):
# Empty cache.
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(
ContentType, FooWithUrl, ProxyModel, ConcreteModel
)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
ProxyModel: ContentType.objects.get_for_model(ProxyModel),
ConcreteModel: ContentType.objects.get_for_model(ConcreteModel),
},
)
def test_get_for_models_partial_cache(self):
# Partial cache
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(1):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
},
)
def test_get_for_models_migrations(self):
state = ProjectState.from_apps(apps.get_app_config("contenttypes"))
ContentType = state.apps.get_model("contenttypes", "ContentType")
cts = ContentType.objects.get_for_models(ContentType)
self.assertEqual(
cts, {ContentType: ContentType.objects.get_for_model(ContentType)}
)
@isolate_apps("contenttypes_tests")
def test_get_for_models_migrations_create_model(self):
state = ProjectState.from_apps(apps.get_app_config("contenttypes"))
class Foo(models.Model):
class Meta:
app_label = "contenttypes_tests"
state.add_model(ModelState.from_model(Foo))
ContentType = state.apps.get_model("contenttypes", "ContentType")
cts = ContentType.objects.get_for_models(FooWithUrl, Foo)
self.assertEqual(
cts,
{
Foo: ContentType.objects.get_for_model(Foo),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
},
)
def test_get_for_models_full_cache(self):
# Full cache
ContentType.objects.get_for_model(ContentType)
ContentType.objects.get_for_model(FooWithUrl)
with self.assertNumQueries(0):
cts = ContentType.objects.get_for_models(ContentType, FooWithUrl)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
FooWithUrl: ContentType.objects.get_for_model(FooWithUrl),
},
)
@isolate_apps("contenttypes_tests")
def test_get_for_model_create_contenttype(self):
"""
ContentTypeManager.get_for_model() creates the corresponding content
type if it doesn't exist in the database.
"""
class ModelCreatedOnTheFly(models.Model):
name = models.CharField()
ct = ContentType.objects.get_for_model(ModelCreatedOnTheFly)
self.assertEqual(ct.app_label, "contenttypes_tests")
self.assertEqual(ct.model, "modelcreatedonthefly")
self.assertEqual(str(ct), "modelcreatedonthefly")
def test_get_for_concrete_model(self):
"""
Make sure the `for_concrete_model` kwarg correctly works
with concrete, proxy and deferred models
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
self.assertEqual(
concrete_model_ct, ContentType.objects.get_for_model(ProxyModel)
)
self.assertEqual(
concrete_model_ct,
ContentType.objects.get_for_model(ConcreteModel, for_concrete_model=False),
)
proxy_model_ct = ContentType.objects.get_for_model(
ProxyModel, for_concrete_model=False
)
self.assertNotEqual(concrete_model_ct, proxy_model_ct)
# Make sure deferred model are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only("pk").get().__class__
DeferredProxyModel = ProxyModel.objects.only("pk").get().__class__
self.assertEqual(
concrete_model_ct, ContentType.objects.get_for_model(DeferredConcreteModel)
)
self.assertEqual(
concrete_model_ct,
ContentType.objects.get_for_model(
DeferredConcreteModel, for_concrete_model=False
),
)
self.assertEqual(
concrete_model_ct, ContentType.objects.get_for_model(DeferredProxyModel)
)
self.assertEqual(
proxy_model_ct,
ContentType.objects.get_for_model(
DeferredProxyModel, for_concrete_model=False
),
)
def test_get_for_concrete_models(self):
"""
Make sure the `for_concrete_models` kwarg correctly works
with concrete, proxy and deferred models.
"""
concrete_model_ct = ContentType.objects.get_for_model(ConcreteModel)
cts = ContentType.objects.get_for_models(ConcreteModel, ProxyModel)
self.assertEqual(
cts,
{
ConcreteModel: concrete_model_ct,
ProxyModel: concrete_model_ct,
},
)
proxy_model_ct = ContentType.objects.get_for_model(
ProxyModel, for_concrete_model=False
)
cts = ContentType.objects.get_for_models(
ConcreteModel, ProxyModel, for_concrete_models=False
)
self.assertEqual(
cts,
{
ConcreteModel: concrete_model_ct,
ProxyModel: proxy_model_ct,
},
)
# Make sure deferred model are correctly handled
ConcreteModel.objects.create(name="Concrete")
DeferredConcreteModel = ConcreteModel.objects.only("pk").get().__class__
DeferredProxyModel = ProxyModel.objects.only("pk").get().__class__
cts = ContentType.objects.get_for_models(
DeferredConcreteModel, DeferredProxyModel
)
self.assertEqual(
cts,
{
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: concrete_model_ct,
},
)
cts = ContentType.objects.get_for_models(
DeferredConcreteModel, DeferredProxyModel, for_concrete_models=False
)
self.assertEqual(
cts,
{
DeferredConcreteModel: concrete_model_ct,
DeferredProxyModel: proxy_model_ct,
},
)
def test_cache_not_shared_between_managers(self):
with self.assertNumQueries(1):
ContentType.objects.get_for_model(ContentType)
with self.assertNumQueries(0):
ContentType.objects.get_for_model(ContentType)
other_manager = ContentTypeManager()
other_manager.model = ContentType
with self.assertNumQueries(1):
other_manager.get_for_model(ContentType)
with self.assertNumQueries(0):
other_manager.get_for_model(ContentType)
def test_missing_model(self):
"""
Displaying content types in admin (or anywhere) doesn't break on
leftover content type records in the DB for which no model is defined
anymore.
"""
ct = ContentType.objects.create(
app_label="contenttypes",
model="OldModel",
)
self.assertEqual(str(ct), "OldModel")
self.assertIsNone(ct.model_class())
# Stale ContentTypes can be fetched like any other object.
ct_fetched = ContentType.objects.get_for_id(ct.pk)
self.assertIsNone(ct_fetched.model_class())
def test_missing_model_with_existing_model_name(self):
"""
Displaying content types in admin (or anywhere) doesn't break on
leftover content type records in the DB for which no model is defined
anymore, even if a model with the same name exists in another app.
"""
# Create a stale ContentType that matches the name of an existing
# model.
ContentType.objects.create(app_label="contenttypes", model="author")
ContentType.objects.clear_cache()
# get_for_models() should work as expected for existing models.
cts = ContentType.objects.get_for_models(ContentType, Author)
self.assertEqual(
cts,
{
ContentType: ContentType.objects.get_for_model(ContentType),
Author: ContentType.objects.get_for_model(Author),
},
)
def test_str(self):
ct = ContentType.objects.get(app_label="contenttypes_tests", model="site")
self.assertEqual(str(ct), "Contenttypes_Tests | site")
def test_str_auth(self):
ct = ContentType.objects.get(app_label="auth", model="group")
self.assertEqual(str(ct), "Authentication and Authorization | group")
def test_name(self):
ct = ContentType.objects.get(app_label="contenttypes_tests", model="site")
self.assertEqual(ct.name, "site")
def test_app_labeled_name(self):
ct = ContentType.objects.get(app_label="contenttypes_tests", model="site")
self.assertEqual(ct.app_labeled_name, "Contenttypes_Tests | site")
def test_name_unknown_model(self):
ct = ContentType(app_label="contenttypes_tests", model="unknown")
self.assertEqual(ct.name, "unknown")
def test_app_labeled_name_unknown_model(self):
ct = ContentType(app_label="contenttypes_tests", model="unknown")
self.assertEqual(ct.app_labeled_name, "unknown")
| ContentTypesTests |
python | ansible__ansible | test/lib/ansible_test/_internal/docker_util.py | {
"start": 28581,
"end": 28855
} | class ____(DockerError):
"""The container identified by `identifier` was not found."""
def __init__(self, identifier: str) -> None:
super().__init__('The container "%s" was not found.' % identifier)
self.identifier = identifier
| ContainerNotFoundError |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-solr/llama_index/vector_stores/solr/client/responses.py | {
"start": 2786,
"end": 5128
} | class ____(BaseModel):
"""
Solr search response.
See `Solr documentation
<https://solr.apache.org/guide/solr/latest/query-guide/response-writers.html#json-response-writer>`_
for details.
"""
response: SolrSelectResponseBody
"""The response contents for the input query, containing documents when applicable."""
response_header: SolrResponseHeader = Field(default_factory=SolrResponseHeader)
"""The header information for the response."""
debug: Optional[dict[str, Any]] = None
"""Debugging information for the response.
This will not be present unless indicated in the request.
"""
model_config: ClassVar[ConfigDict] = ConfigDict(
alias_generator=alias_generators.to_camel, # generate camelCase aliases
extra="allow", # allow extra fields, for forward-compatability
populate_by_name=True, # allow both name and alias forms when building
)
@classmethod
def from_pysolr_results(cls, results: pysolr.Results) -> Self:
"""
Build a response from a :py:class:`pysolr.Results`.
This uses the underlying raw response contained in the ``pysolr`` results.
"""
raw_response: dict[str, Any] = results.raw_response.get("response", {})
return cls(
response=SolrSelectResponseBody(
docs=results.docs,
num_found=results.hits,
num_found_exact=raw_response.get("numFoundExact", True),
start=raw_response.get("start", 0),
),
response_header=results.raw_response.get("responseHeader", {}),
debug=results.debug,
)
@classmethod
def from_aiosolr_response(cls, results: aiosolr.Response) -> Self:
"""Build a response from a :py:class:`aiosolr.Response`."""
raw_response: dict[str, Any] = results.data.get("response", {})
return cls(
response=SolrSelectResponseBody(
docs=results.docs,
num_found=raw_response.get("numFound", 0),
num_found_exact=raw_response.get("numFoundExact", True),
start=raw_response.get("start", 0),
),
response_header=SolrResponseHeader(status=results.status),
debug=results.data.get("debug", {}),
)
| SolrSelectResponse |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/argparsing/parsers.py | {
"start": 6230,
"end": 6406
} | class ____:
"""State of the composite argument parser's generated documentation."""
sections: dict[str, str] = dataclasses.field(default_factory=dict)
| DocumentationState |
python | ray-project__ray | rllib/evaluation/tests/test_episode_v2.py | {
"start": 380,
"end": 720
} | class ____(Policy):
@override(Policy)
def compute_actions(
self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs
):
return obs_batch.argmax(axis=1), [], {}
| EchoPolicy |
python | mlflow__mlflow | tests/pyfunc/test_pyfunc_schema_enforcement.py | {
"start": 3909,
"end": 121690
} | class ____(mlflow.pyfunc.PythonModel):
def predict(self, context, model_input, params=None):
assert isinstance(params, dict)
assert all(isinstance(x, int) for x in params["int_array"])
assert all(isinstance(x, float) for x in params["double_array"])
assert all(isinstance(x, float) for x in params["float_array"])
assert all(isinstance(x, int) for x in params["long_array"])
assert all(isinstance(x, datetime.datetime) for x in params["datetime_array"])
return params
def test_schema_enforcement_single_column_2d_array():
X = np.array([[1], [2], [3]])
y = np.array([1, 2, 3])
model = sklearn.linear_model.LinearRegression()
model.fit(X, y)
signature = infer_signature(X, y)
assert signature.inputs.inputs[0].shape == (-1, 1)
assert signature.outputs.inputs[0].shape == (-1,)
with mlflow.start_run():
model_info = mlflow.sklearn.log_model(model, name="model", signature=signature)
loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
pdf = pd.DataFrame(X)
np.testing.assert_almost_equal(loaded_model.predict(pdf), model.predict(pdf))
def test_column_schema_enforcement():
m = Model()
input_schema = Schema(
[
ColSpec("integer", "a"),
ColSpec("long", "b"),
ColSpec("float", "c"),
ColSpec("double", "d"),
ColSpec("boolean", "e"),
ColSpec("string", "g"),
ColSpec("binary", "f"),
ColSpec("datetime", "h"),
]
)
m.signature = ModelSignature(inputs=input_schema)
pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())
pdf = pd.DataFrame(
data=[[1, 2, 3, 4, True, "x", bytes([1]), "2021-01-01 00:00:00.1234567"]],
columns=["b", "d", "a", "c", "e", "g", "f", "h"],
dtype=object,
)
pdf["a"] = pdf["a"].astype(np.int32)
pdf["b"] = pdf["b"].astype(np.int64)
pdf["c"] = pdf["c"].astype(np.float32)
pdf["d"] = pdf["d"].astype(np.float64)
pdf["h"] = pdf["h"].astype(np.dtype("datetime64[ns]"))
# test that missing column raises
match_missing_inputs = "Model is missing inputs"
with pytest.raises(MlflowException, match=match_missing_inputs):
res = pyfunc_model.predict(pdf[["b", "d", "a", "e", "g", "f", "h"]])
# test that extra column is ignored
pdf["x"] = 1
# test that columns are reordered, extra column is ignored
res = pyfunc_model.predict(pdf)
assert all((res == pdf[input_schema.input_names()]).all())
expected_types = dict(zip(input_schema.input_names(), input_schema.pandas_types()))
# MLflow datetime type in input_schema does not encode precision, so add it for assertions
expected_types["h"] = np.dtype("datetime64[ns]")
# object cannot be converted to pandas Strings at the moment
expected_types["f"] = object
expected_types["g"] = object
actual_types = res.dtypes.to_dict()
assert expected_types == actual_types
# Test conversions
# 1. long -> integer raises
pdf["a"] = pdf["a"].astype(np.int64)
match_incompatible_inputs = "Incompatible input types"
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
pdf["a"] = pdf["a"].astype(np.int32)
# 2. integer -> long works
pdf["b"] = pdf["b"].astype(np.int32)
res = pyfunc_model.predict(pdf)
assert all((res == pdf[input_schema.input_names()]).all())
assert res.dtypes.to_dict() == expected_types
pdf["b"] = pdf["b"].astype(np.int64)
# 3. unsigned int -> long works
pdf["b"] = pdf["b"].astype(np.uint32)
res = pyfunc_model.predict(pdf)
assert all((res == pdf[input_schema.input_names()]).all())
assert res.dtypes.to_dict() == expected_types
pdf["b"] = pdf["b"].astype(np.int64)
# 4. unsigned int -> int raises
pdf["a"] = pdf["a"].astype(np.uint32)
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
pdf["a"] = pdf["a"].astype(np.int32)
# 5. double -> float raises
pdf["c"] = pdf["c"].astype(np.float64)
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
pdf["c"] = pdf["c"].astype(np.float32)
# 6. float -> double works, double -> float does not
pdf["d"] = pdf["d"].astype(np.float32)
res = pyfunc_model.predict(pdf)
assert res.dtypes.to_dict() == expected_types
pdf["d"] = pdf["d"].astype(np.float64)
pdf["c"] = pdf["c"].astype(np.float64)
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
pdf["c"] = pdf["c"].astype(np.float32)
# 7. int -> float raises
pdf["c"] = pdf["c"].astype(np.int32)
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
pdf["c"] = pdf["c"].astype(np.float32)
# 8. int -> double works
pdf["d"] = pdf["d"].astype(np.int32)
pyfunc_model.predict(pdf)
assert all((res == pdf[input_schema.input_names()]).all())
assert res.dtypes.to_dict() == expected_types
# 9. long -> double raises
pdf["d"] = pdf["d"].astype(np.int64)
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
pdf["d"] = pdf["d"].astype(np.float64)
# 10. any float -> any int raises
pdf["a"] = pdf["a"].astype(np.float32)
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
# 10. any float -> any int raises
pdf["a"] = pdf["a"].astype(np.float64)
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
pdf["a"] = pdf["a"].astype(np.int32)
pdf["b"] = pdf["b"].astype(np.float64)
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
pdf["b"] = pdf["b"].astype(np.int64)
pdf["b"] = pdf["b"].astype(np.float64)
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(pdf)
pdf["b"] = pdf["b"].astype(np.int64)
# 11. objects work
pdf["b"] = pdf["b"].astype(object)
pdf["d"] = pdf["d"].astype(object)
pdf["e"] = pdf["e"].astype(object)
pdf["f"] = pdf["f"].astype(object)
pdf["g"] = pdf["g"].astype(object)
res = pyfunc_model.predict(pdf)
assert res.dtypes.to_dict() == expected_types
# 12. datetime64[D] (date only) -> datetime64[x] works
pdf["h"] = pdf["h"].values.astype("datetime64[D]")
res = pyfunc_model.predict(pdf)
assert res.dtypes.to_dict() == expected_types
pdf["h"] = pdf["h"].astype("datetime64[s]")
# 13. np.ndarrays can be converted to dataframe but have no columns
with pytest.raises(MlflowException, match=match_missing_inputs):
pyfunc_model.predict(pdf.values)
# 14. dictionaries of str -> list/nparray work,
# including extraneous multi-dimensional arrays and lists
arr = np.array([1, 2, 3])
d = {
"a": arr.astype("int32"),
"b": arr.astype("int64"),
"c": arr.astype("float32"),
"d": arr.astype("float64"),
"e": [True, False, True],
"g": ["a", "b", "c"],
"f": [bytes(0), bytes(1), bytes(1)],
"h": np.array(["2020-01-01", "2020-02-02", "2020-03-03"], dtype=np.datetime64),
# Extraneous multi-dimensional numpy array should be silently dropped
"i": np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
# Extraneous multi-dimensional list should be silently dropped
"j": [[1, 2, 3], [4, 5, 6], [7, 8, 9]],
}
res = pyfunc_model.predict(d)
assert res.dtypes.to_dict() == expected_types
# 15. dictionaries of str -> list[list] fail
d = {
"a": [arr.astype("int32")],
"b": [arr.astype("int64")],
"c": [arr.astype("float32")],
"d": [arr.astype("float64")],
"e": [[True, False, True]],
"g": np.array([["a", "b", "c"]]),
"f": [[bytes(0), bytes(1), bytes(1)]],
"h": [np.array(["2020-01-01", "2020-02-02", "2020-03-03"], dtype=np.datetime64)],
}
with pytest.raises(MlflowException, match=match_incompatible_inputs):
pyfunc_model.predict(d)
# 16. conversion to dataframe fails
d = {
"a": [1],
"b": [1, 2],
"c": [1, 2, 3],
}
with pytest.raises(
MlflowException,
match="This model contains a column-based signature, which suggests a DataFrame input.",
):
pyfunc_model.predict(d)
# 17. conversion from Decimal to float is allowed since numpy currently has no support for the
# data type.
pdf["d"] = [decimal.Decimal(1.0)]
res = pyfunc_model.predict(pdf)
assert res.dtypes.to_dict() == expected_types
def _compare_exact_tensor_dict_input(d1, d2):
"""Return whether two dicts of np arrays are exactly equal"""
if d1.keys() != d2.keys():
return False
return all(np.array_equal(d1[key], d2[key]) for key in d1)
def test_tensor_multi_named_schema_enforcement():
m = Model()
input_schema = Schema(
[
TensorSpec(np.dtype(np.uint64), (-1, 5), "a"),
TensorSpec(np.dtype(np.short), (-1, 2), "b"),
TensorSpec(np.dtype(np.float32), (2, -1, 2), "c"),
]
)
m.signature = ModelSignature(inputs=input_schema)
pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())
inp = {
"a": np.array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1]], dtype=np.uint64),
"b": np.array([[0, 0], [1, 1], [2, 2]], dtype=np.short),
"c": np.array([[[0, 0], [1, 1]], [[2, 2], [3, 3]]], dtype=np.float32),
}
# test that missing column raises
inp1 = inp.copy()
with pytest.raises(MlflowException, match="Model is missing inputs"):
pyfunc_model.predict(inp1.pop("b"))
# test that extra column is ignored
inp2 = inp.copy()
inp2["x"] = 1
# test that extra column is removed
res = pyfunc_model.predict(inp2)
assert res == {k: v for k, v in inp.items() if k in {"a", "b", "c"}}
expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))
actual_types = {k: v.dtype for k, v in res.items()}
assert expected_types == actual_types
# test that variable axes are supported
inp3 = {
"a": np.array([[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2]], dtype=np.uint64),
"b": np.array([[0, 0], [1, 1]], dtype=np.short),
"c": np.array([[[0, 0]], [[2, 2]]], dtype=np.float32),
}
res = pyfunc_model.predict(inp3)
assert _compare_exact_tensor_dict_input(res, inp3)
expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))
actual_types = {k: v.dtype for k, v in res.items()}
assert expected_types == actual_types
# test that type casting is not supported
inp4 = inp.copy()
inp4["a"] = inp4["a"].astype(np.int32)
with pytest.raises(
MlflowException, match="dtype of input int32 does not match expected dtype uint64"
):
pyfunc_model.predict(inp4)
# test wrong shape
inp5 = {
"a": np.array([[0, 0, 0, 0]], dtype=np.uint),
"b": np.array([[0, 0], [1, 1]], dtype=np.short),
"c": np.array([[[0, 0]]], dtype=np.float32),
}
with pytest.raises(
MlflowException,
match=re.escape("Shape of input (1, 4) does not match expected shape (-1, 5)"),
):
pyfunc_model.predict(inp5)
# test non-dictionary input
inp6 = [
np.array([[0, 0, 0, 0, 0]], dtype=np.uint64),
np.array([[0, 0], [1, 1]], dtype=np.short),
np.array([[[0, 0]]], dtype=np.float32),
]
with pytest.raises(
MlflowException, match=re.escape("Model is missing inputs ['a', 'b', 'c'].")
):
pyfunc_model.predict(inp6)
# test empty ndarray does not work
inp7 = inp.copy()
inp7["a"] = np.array([])
with pytest.raises(
MlflowException, match=re.escape("Shape of input (0,) does not match expected shape")
):
pyfunc_model.predict(inp7)
# test dictionary of str -> list does not work
inp8 = {k: list(v) for k, v in inp.items()}
match = (
r"This model contains a tensor-based model signature with input names.+"
r"suggests a dictionary input mapping input name to a numpy array, but a dict"
r" with value type <class 'list'> was found"
)
with pytest.raises(MlflowException, match=match):
pyfunc_model.predict(inp8)
# test dataframe input fails at shape enforcement
pdf = pd.DataFrame(data=[[1, 2, 3]], columns=["a", "b", "c"])
pdf["a"] = pdf["a"].astype(np.uint64)
pdf["b"] = pdf["b"].astype(np.short)
pdf["c"] = pdf["c"].astype(np.float32)
with pytest.raises(
MlflowException,
match=re.escape(
"The input pandas dataframe column 'a' contains scalar values, which requires the "
"shape to be (-1,) or (-1, 1), but got tensor spec shape of (-1, 5)"
),
):
pyfunc_model.predict(pdf)
def test_schema_enforcement_single_named_tensor_schema():
m = Model()
input_schema = Schema([TensorSpec(np.dtype(np.uint64), (-1, 2, 3), "a")])
m.signature = ModelSignature(inputs=input_schema)
pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())
input_array = np.array(range(12), dtype=np.uint64).reshape((2, 2, 3))
inp = {
"a": input_array,
}
# sanity test that dictionary with correct input works
res = pyfunc_model.predict(inp)
assert res == inp
expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))
actual_types = {k: v.dtype for k, v in res.items()}
assert expected_types == actual_types
# test single np.ndarray input works and is converted to dictionary
res = pyfunc_model.predict(inp["a"])
assert res == inp
expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))
actual_types = {k: v.dtype for k, v in res.items()}
assert expected_types == actual_types
# test list does not work
with pytest.raises(MlflowException, match="Model is missing inputs"):
pyfunc_model.predict(input_array.tolist())
def test_schema_enforcement_single_unnamed_tensor_schema():
m = Model()
input_schema = Schema([TensorSpec(np.dtype(np.uint64), (-1, 3))])
m.signature = ModelSignature(inputs=input_schema)
pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())
input_array = np.array(range(6), dtype=np.uint64).reshape((2, 3))
# test single np.ndarray input works and is converted to dictionary
res = pyfunc_model.predict(input_array)
np.testing.assert_array_equal(res, input_array)
expected_types = input_schema.input_types()[0]
assert expected_types == res.dtype
input_df = pd.DataFrame(input_array, columns=["c1", "c2", "c3"])
res = pyfunc_model.predict(input_df)
np.testing.assert_array_equal(res, input_array)
assert expected_types == res.dtype
input_df = input_df.drop("c3", axis=1)
with pytest.raises(
expected_exception=MlflowException,
match=re.escape(
"This model contains a model signature with an unnamed input. Since the "
"input data is a pandas DataFrame containing multiple columns, "
"the input shape must be of the structure "
"(-1, number_of_dataframe_columns). "
"Instead, the input DataFrame passed had 2 columns and "
"an input shape of (-1, 3) with all values within the "
"DataFrame of scalar type. Please adjust the passed in DataFrame to "
"match the expected structure",
),
):
pyfunc_model.predict(input_df)
def test_schema_enforcement_named_tensor_schema_1d():
m = Model()
input_schema = Schema(
[TensorSpec(np.dtype(np.uint64), (-1,), "a"), TensorSpec(np.dtype(np.float32), (-1,), "b")]
)
m.signature = ModelSignature(inputs=input_schema)
pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())
pdf = pd.DataFrame(data=[[0, 0], [1, 1]], columns=["a", "b"])
pdf["a"] = pdf["a"].astype(np.uint64)
pdf["b"] = pdf["a"].astype(np.float32)
d_inp = {
"a": np.array(pdf["a"], dtype=np.uint64),
"b": np.array(pdf["b"], dtype=np.float32),
}
# test dataframe input works for 1d tensor specs and input is converted to dict
res = pyfunc_model.predict(pdf)
assert _compare_exact_tensor_dict_input(res, d_inp)
expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))
actual_types = {k: v.dtype for k, v in res.items()}
assert expected_types == actual_types
wrong_m = Model()
wrong_m.signature = ModelSignature(
inputs=Schema(
[
TensorSpec(np.dtype(np.uint64), (-1, 2), "a"),
TensorSpec(np.dtype(np.float32), (-1,), "b"),
]
)
)
wrong_pyfunc_model = PyFuncModel(model_meta=wrong_m, model_impl=TestModel())
with pytest.raises(
expected_exception=MlflowException,
match=re.escape(
"The input pandas dataframe column 'a' contains scalar "
"values, which requires the shape to be (-1,) or (-1, 1), but got tensor spec "
"shape of (-1, 2)."
),
):
wrong_pyfunc_model.predict(pdf)
wrong_m.signature.inputs = Schema(
[
TensorSpec(np.dtype(np.uint64), (2, -1), "a"),
TensorSpec(np.dtype(np.float32), (-1,), "b"),
]
)
with pytest.raises(
expected_exception=MlflowException,
match=re.escape(
"For pandas dataframe input, the first dimension of shape must be a variable "
"dimension and other dimensions must be fixed, but in model signature the shape "
"of input a is (2, -1)."
),
):
wrong_pyfunc_model.predict(pdf)
# test that dictionary works too
res = pyfunc_model.predict(d_inp)
assert res == d_inp
expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))
actual_types = {k: v.dtype for k, v in res.items()}
assert expected_types == actual_types
def test_schema_enforcement_named_tensor_schema_multidimensional():
m = Model()
input_schema = Schema(
[
TensorSpec(np.dtype(np.uint64), (-1, 2, 3), "a"),
TensorSpec(np.dtype(np.float32), (-1, 3, 4), "b"),
]
)
m.signature = ModelSignature(inputs=input_schema)
pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())
data_a = np.array(range(12), dtype=np.uint64)
data_b = np.array(range(24), dtype=np.float32) + 10.0
pdf = pd.DataFrame(
{"a": data_a.reshape(-1, 2 * 3).tolist(), "b": data_b.reshape(-1, 3 * 4).tolist()}
)
d_inp = {
"a": data_a.reshape((-1, 2, 3)),
"b": data_b.reshape((-1, 3, 4)),
}
# test dataframe input works for 1d tensor specs and input is converted to dict
res = pyfunc_model.predict(pdf)
assert _compare_exact_tensor_dict_input(res, d_inp)
# test dataframe input works for 1d tensor specs and input is converted to dict
pdf_contains_numpy_array = pd.DataFrame(
{"a": list(data_a.reshape(-1, 2 * 3)), "b": list(data_b.reshape(-1, 3 * 4))}
)
res = pyfunc_model.predict(pdf_contains_numpy_array)
assert _compare_exact_tensor_dict_input(res, d_inp)
expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))
actual_types = {k: v.dtype for k, v in res.items()}
assert expected_types == actual_types
with pytest.raises(
expected_exception=MlflowException,
match=re.escape(
"The value in the Input DataFrame column 'a' could not be converted to the expected "
"shape of: '(-1, 2, 3)'. Ensure that each of the input list elements are of uniform "
"length and that the data can be coerced to the tensor type 'uint64'"
),
):
pyfunc_model.predict(
pdf.assign(a=np.array(range(16), dtype=np.uint64).reshape(-1, 8).tolist())
)
# test that dictionary works too
res = pyfunc_model.predict(d_inp)
assert res == d_inp
expected_types = dict(zip(input_schema.input_names(), input_schema.input_types()))
actual_types = {k: v.dtype for k, v in res.items()}
assert expected_types == actual_types
def test_missing_value_hint_is_displayed_when_it_should():
m = Model()
input_schema = Schema([ColSpec("integer", "a")])
m.signature = ModelSignature(inputs=input_schema)
pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())
pdf = pd.DataFrame(data=[[1], [None]], columns=["a"])
match = "Incompatible input types"
with pytest.raises(MlflowException, match=match) as ex:
pyfunc_model.predict(pdf)
hint = "Hint: the type mismatch is likely caused by missing values."
assert hint in str(ex.value.message)
pdf = pd.DataFrame(data=[[1.5], [None]], columns=["a"])
with pytest.raises(MlflowException, match=match) as ex:
pyfunc_model.predict(pdf)
assert hint not in str(ex.value.message)
pdf = pd.DataFrame(data=[[1], [2]], columns=["a"], dtype=np.float64)
with pytest.raises(MlflowException, match=match) as ex:
pyfunc_model.predict(pdf)
assert hint not in str(ex.value.message)
def test_column_schema_enforcement_no_col_names():
m = Model()
input_schema = Schema([ColSpec("double"), ColSpec("double"), ColSpec("double")])
m.signature = ModelSignature(inputs=input_schema)
pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())
test_data = [[1.0, 2.0, 3.0]]
# Can call with just a list
pd.testing.assert_frame_equal(pyfunc_model.predict(test_data), pd.DataFrame(test_data))
# Or can call with a DataFrame without column names
pd.testing.assert_frame_equal(
pyfunc_model.predict(pd.DataFrame(test_data)), pd.DataFrame(test_data)
)
# # Or can call with a np.ndarray
pd.testing.assert_frame_equal(
pyfunc_model.predict(pd.DataFrame(test_data).values), pd.DataFrame(test_data)
)
# Or with column names!
pdf = pd.DataFrame(data=test_data, columns=["a", "b", "c"])
pd.testing.assert_frame_equal(pyfunc_model.predict(pdf), pdf)
# Must provide the right number of arguments
with pytest.raises(MlflowException, match="the provided value only has 2 inputs."):
pyfunc_model.predict([[1.0, 2.0]])
# Must provide the right types
with pytest.raises(MlflowException, match="Can not safely convert int64 to float64"):
pyfunc_model.predict([[1, 2, 3]])
# Can only provide data type that can be converted to dataframe...
with pytest.raises(MlflowException, match="Expected input to be DataFrame. Found: set"):
pyfunc_model.predict({1, 2, 3})
# 9. dictionaries of str -> list/nparray work
d = {"a": [1.0], "b": [2.0], "c": [3.0]}
pd.testing.assert_frame_equal(pyfunc_model.predict(d), pd.DataFrame(d))
def test_tensor_schema_enforcement_no_col_names():
m = Model()
input_schema = Schema([TensorSpec(np.dtype(np.float32), (-1, 3))])
m.signature = ModelSignature(inputs=input_schema)
pyfunc_model = PyFuncModel(model_meta=m, model_impl=TestModel())
test_data = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]], dtype=np.float32)
# Can call with numpy array of correct shape
np.testing.assert_array_equal(pyfunc_model.predict(test_data), test_data)
# Or can call with a dataframe
np.testing.assert_array_equal(pyfunc_model.predict(pd.DataFrame(test_data)), test_data)
# Can not call with a list
with pytest.raises(
MlflowException,
match="This model contains a tensor-based model signature with no input names",
):
pyfunc_model.predict([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
# Can not call with a dict
with pytest.raises(
MlflowException,
match="This model contains a tensor-based model signature with no input names",
):
pyfunc_model.predict({"blah": test_data})
# Can not call with a np.ndarray of a wrong shape
with pytest.raises(
MlflowException,
match=re.escape("Shape of input (2, 2) does not match expected shape (-1, 3)"),
):
pyfunc_model.predict(np.array([[1.0, 2.0], [4.0, 5.0]]))
# Can not call with a np.ndarray of a wrong type
with pytest.raises(
MlflowException, match="dtype of input uint32 does not match expected dtype float32"
):
pyfunc_model.predict(test_data.astype(np.uint32))
# Can call with a np.ndarray with more elements along variable axis
test_data2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]], dtype=np.float32)
np.testing.assert_array_equal(pyfunc_model.predict(test_data2), test_data2)
# Can not call with an empty ndarray
with pytest.raises(
MlflowException, match=re.escape("Shape of input () does not match expected shape (-1, 3)")
):
pyfunc_model.predict(np.ndarray([]))
@pytest.mark.parametrize("orient", ["records"])
def test_schema_enforcement_for_inputs_style_orientation_of_dataframe(orient):
# Test Dict[str, List[Any]]
test_signature = {
"inputs": '[{"name": "a", "type": "long"}, {"name": "b", "type": "string"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = {"a": [4, 5, 6], "b": ["a", "b", "c"]}
pd_data = pd.DataFrame(data)
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
# Test Dict[str, str]
test_signature = {
"inputs": '[{"name": "a", "type": "string"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = {"a": "Hi there!"}
pd_data = pd.DataFrame([data])
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
# Test List[Dict[str, Union[str, List[str]]]]
test_signature = {
"inputs": '[{"name": "query", "type": "string"}, {"name": "inputs", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = [{"query": ["test_query1", "test_query2"], "inputs": "test input"}]
pd_data = pd.DataFrame(data)
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
# Test List[str]
test_signature = {
"inputs": '[{"type": "string"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = ["a", "b", "c"]
pd_data = pd.DataFrame(data)
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
# Test Dict[str, np.ndarray]
test_signature = {
"inputs": '[{"name": "a", "type": "long"}, {"name": "b", "type": "string"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = {"a": np.array([1, 2, 3]), "b": np.array(["a", "b", "c"])}
pd_data = pd.DataFrame(data)
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
# Test Dict[str, <scalar>] (support added in MLflow 2.3.0)
test_signature = {
"inputs": '[{"name": "a", "type": "long"}, {"name": "b", "type": "string"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = {"a": 12, "b": "a"}
pd_data = pd.DataFrame([data])
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
# Test Dict[str, np.ndarray] where array.size == 1
test_signature = {
"inputs": '[{"name": "a", "type": "long"}, {"name": "b", "type": "string"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = {"a": np.array([12]), "b": np.array(["a"])}
pd_data = pd.DataFrame(data)
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
# Test Dict[str, np.ndarray] where primitives are supplied
test_signature = {
"inputs": '[{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
# simulates the structure that model serving will convert the data to when using
# a Dict[str, str] with a scalar singular value string
data = {"a": np.array("a"), "b": np.array("b")}
pd_data = pd.DataFrame([data])
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
# Assert that the Dict[str, np.ndarray] casing with primitive does not work on anything
# but a single string.
test_signature = {
"inputs": '[{"name": "a", "type": "long"}, {"name": "b", "type": "long"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = {"a": np.array(1), "b": np.array(2)}
pd_data = pd.DataFrame([data])
# Schema enforcement explicitly only provides support for strings that meet primitives in
# np.arrays criteria. All other data types should fail.
with pytest.raises(MlflowException, match="This model contains a column-based"):
_enforce_schema(data, signature.inputs)
with pytest.raises(MlflowException, match="Incompatible input types for column a. Can not"):
_enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
# Test bytes
test_signature = {
"inputs": '[{"name": "audio", "type": "binary"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = {"audio": b"Hi I am a bytes string"}
pd_data = pd.DataFrame([data])
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
# Test base64 encoded
test_signature = {
"inputs": '[{"name": "audio", "type": "binary"}]',
"outputs": '[{"name": "response", "type": "string"}]',
}
signature = ModelSignature.from_dict(test_signature)
data = {"audio": base64.b64encode(b"Hi I am a bytes string").decode("ascii")}
pd_data = pd.DataFrame([data])
check = _enforce_schema(data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
pd_check = _enforce_schema(pd_data.to_dict(orient=orient), signature.inputs)
pd.testing.assert_frame_equal(pd_check, pd_data)
def test_schema_enforcement_for_optional_columns():
input_schema = Schema(
[
ColSpec("double", "a"),
ColSpec("double", "b"),
ColSpec("string", "c", required=False),
ColSpec("long", "d", required=False),
]
)
signature = ModelSignature(inputs=input_schema)
test_data_with_all_cols = {"a": [1.0], "b": [1.0], "c": ["something"], "d": [2]}
test_data_with_only_required_cols = {"a": [1.0], "b": [1.0]}
test_data_with_one_optional_col = {"a": [1.0], "b": [1.0], "d": [2]}
for data in [
test_data_with_all_cols,
test_data_with_only_required_cols,
test_data_with_one_optional_col,
]:
pd_data = pd.DataFrame(data)
check = _enforce_schema(pd_data, signature.inputs)
pd.testing.assert_frame_equal(check, pd_data)
# Ensure wrong data type for optional column throws
test_bad_data = {"a": [1.0], "b": [1.0], "d": ["not the right type"]}
pd_data = pd.DataFrame(test_bad_data)
with pytest.raises(MlflowException, match="Incompatible input types for column d."):
_enforce_schema(pd_data, signature.inputs)
# Ensure it still validates for required columns
test_missing_required = {"b": [2.0], "c": ["something"]}
pd_data = pd.DataFrame(test_missing_required)
with pytest.raises(MlflowException, match="Model is missing inputs"):
_enforce_schema(pd_data, signature.inputs)
def test_schema_enforcement_for_list_inputs_back_compatibility_check():
    """Back-compat: dict payloads whose values are scalars, lists, or ndarrays
    must still pass `_enforce_schema` against legacy column-based signatures
    (plain ``"type"`` ColSpecs / tensor specs, without the newer ``array``
    item types). Each case compares the enforced result to the pandas
    DataFrame the payload implies.
    """
    # Test Dict[str, scalar or List[str]]
    test_signature = {
        "inputs": '[{"name": "prompt", "type": "string"}, {"name": "stop", "type": "string"}]',
        "outputs": '[{"type": "string"}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"prompt": "this is the prompt", "stop": ["a", "b"]}
    pd_data = pd.DataFrame([data])
    check = _enforce_schema(data, signature.inputs)
    pd.testing.assert_frame_equal(check, pd_data)
    # Test Dict[str, List[str]]
    test_signature = {
        "inputs": '[{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]',
        "outputs": '[{"name": "response", "type": "string"}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"a": ["Hi there!"], "b": ["Hello there", "Bye!"]}
    pd_data = pd.DataFrame([data])
    check = _enforce_schema(data, signature.inputs)
    pd.testing.assert_frame_equal(check, pd_data)
    # Test Dict[str, List[binary]] with bytes
    test_signature = {
        "inputs": '[{"name": "audio", "type": "binary"}]',
        "outputs": '[{"name": "response", "type": "string"}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"audio": [b"Hi I am a bytes string"]}
    pd_data = pd.DataFrame([data])
    pd_check = _enforce_schema(pd_data, signature.inputs)
    pd.testing.assert_frame_equal(pd_check, pd_data)
    # Test Dict[str, List[binary]] with base64 encoded
    test_signature = {
        "inputs": '[{"name": "audio", "type": "binary"}]',
        "outputs": '[{"name": "response", "type": "string"}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"audio": [base64.b64encode(b"Hi I am a bytes string").decode("ascii")]}
    pd_data = pd.DataFrame([data])
    pd_check = _enforce_schema(pd_data, signature.inputs)
    pd.testing.assert_frame_equal(pd_check, pd_data)
    # Test Dict[str, List[Any]]
    test_signature = {
        "inputs": '[{"name": "a", "type": "long"}, {"name": "b", "type": "string"}]',
        "outputs": '[{"name": "response", "type": "string"}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"a": [4, 5, 6], "b": ["a", "b", "c"]}
    pd_data = pd.DataFrame(data)
    pd_check = _enforce_schema(data, signature.inputs)
    pd.testing.assert_frame_equal(pd_check, pd_data)
    # Test Dict[str, np.ndarray]
    test_signature = {
        "inputs": '[{"name": "a", "type": "long"}, {"name": "b", "type": "string"}]',
        "outputs": '[{"name": "response", "type": "string"}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"a": np.array([1, 2, 3]), "b": np.array(["a", "b", "c"])}
    pd_data = pd.DataFrame(data)
    pd_check = _enforce_schema(pd_data.to_dict(orient="list"), signature.inputs)
    pd.testing.assert_frame_equal(pd_check, pd_data)
    # Test Dict[str, np.ndarray] where array.size == 1
    test_signature = {
        "inputs": '[{"name": "a", "type": "long"}, {"name": "b", "type": "string"}]',
        "outputs": '[{"name": "response", "type": "string"}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"a": np.array([12]), "b": np.array(["a"])}
    pd_data = pd.DataFrame(data)
    pd_check = _enforce_schema(pd_data.to_dict(orient="list"), signature.inputs)
    pd.testing.assert_frame_equal(pd_check, pd_data)
    # Test Dict[str, np.ndarray] where primitives are supplied
    test_signature = {
        "inputs": '[{"name": "a", "type": "string"}, {"name": "b", "type": "string"}]',
        "outputs": '[{"name": "response", "type": "string"}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    # simulates the structure that model serving will convert the data to when using
    # a Dict[str, str] with a scalar singular value string
    data = {"a": np.array("a"), "b": np.array("b")}
    pd_data = pd.DataFrame([data])
    pd_check = _enforce_schema(pd_data.to_dict(orient="list"), signature.inputs)
    pd.testing.assert_frame_equal(pd_check, pd_data)
def test_schema_enforcement_for_list_inputs():
    """Same payload shapes as the back-compat test above, but against the
    newer signature format (``array``/``items`` specs with ``required``
    flags). Also checks that `infer_signature` on each payload reproduces
    the hand-written signature. Tensor-spec cases assert that ndarray
    payloads pass through `_enforce_schema` unchanged.
    """
    # Test Dict[str, scalar or List[str]]
    test_signature = {
        "inputs": '[{"type": "string", "name": "prompt", "required": true}, '
        '{"type": "array", "items": {"type": "string"}, '
        '"name": "stop", "required": true}]',
        "outputs": '[{"type": "string", "required": true}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"prompt": "this is the prompt", "stop": ["a", "b"]}
    output = "this is the output"
    assert signature == infer_signature(data, output)
    pd_data = pd.DataFrame([data])
    check = _enforce_schema(data, signature.inputs)
    pd.testing.assert_frame_equal(check, pd_data)
    # Test Dict[str, List[str]]
    test_signature = {
        "inputs": '[{"type": "array", "items": {"type": "string"}, '
        '"name": "a", "required": true}, '
        '{"type": "array", "items": {"type": "string"}, '
        '"name": "b", "required": true}]',
        "outputs": '[{"type": "string", "required": true}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"a": ["Hi there!"], "b": ["Hello there", "Bye!"]}
    assert signature == infer_signature(data, output)
    pd_data = pd.DataFrame([data])
    check = _enforce_schema(data, signature.inputs)
    pd.testing.assert_frame_equal(check, pd_data)
    # Test Dict[str, List[binary]] with bytes
    test_signature = {
        "inputs": '[{"type": "array", "items": {"type": "binary"}, '
        '"name": "audio", "required": true}]',
        "outputs": '[{"type": "string", "required": true}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"audio": [b"Hi I am a bytes string"]}
    assert signature == infer_signature(data, output)
    pd_data = pd.DataFrame([data])
    check = _enforce_schema(data, signature.inputs)
    pd.testing.assert_frame_equal(check, pd_data)
    # Test Dict[str, List[binary]] with base64 encoded
    test_signature = {
        "inputs": '[{"type": "array", "items": {"type": "binary"}, '
        '"name": "audio", "required": true}]',
        "outputs": '[{"type": "string", "required": true}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"audio": [base64.b64encode(b"Hi I am a bytes string")]}
    assert signature == infer_signature(data, output)
    pd_data = pd.DataFrame([data])
    check = _enforce_schema(data, signature.inputs)
    pd.testing.assert_frame_equal(check, pd_data)
    # Test Dict[str, List[Any]]
    test_signature = {
        "inputs": '[{"type": "array", "items": {"type": "long"}, '
        '"name": "a", "required": true}, '
        '{"type": "array", "items": {"type": "string"}, '
        '"name": "b", "required": true}]',
        "outputs": '[{"type": "string", "required": true}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"a": [4, 5, 6], "b": ["a", "b", "c"]}
    assert signature == infer_signature(data, output)
    pd_data = pd.DataFrame([data])
    check = _enforce_schema(data, signature.inputs)
    pd.testing.assert_frame_equal(check, pd_data)
    # Test Dict[str, np.ndarray]
    test_signature = {
        "inputs": '[{"name": "a", "type": "tensor", "tensor-spec": '
        '{"dtype": "int64", "shape": [-1]}}, '
        '{"name": "b", "type": "tensor", "tensor-spec": '
        '{"dtype": "str", "shape": [-1]}}]',
        "outputs": '[{"type": "string", "required": true}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"a": np.array([1, 2, 3]), "b": np.array(["a", "b", "c"])}
    pd_check = _enforce_schema(data, signature.inputs)
    assert pd_check == data
    # Test Dict[str, np.ndarray] where array.size == 1
    test_signature = {
        "inputs": '[{"name": "a", "type": "tensor", "tensor-spec": '
        '{"dtype": "int64", "shape": [-1]}}, '
        '{"name": "b", "type": "tensor", "tensor-spec": '
        '{"dtype": "str", "shape": [-1]}}]',
        "outputs": '[{"type": "string", "required": true}]',
    }
    signature = ModelSignature.from_dict(test_signature)
    data = {"a": np.array([12]), "b": np.array(["a"])}
    pd_check = _enforce_schema(data, signature.inputs)
    assert pd_check == data
def test_enforce_schema_warns_with_extra_fields():
    """Inputs not declared in the signature are ignored, with a warning log."""
    single_col_schema = Schema([ColSpec("string", "a")])
    expected_message = (
        "Found extra inputs in the model input that are not defined in the model "
        "signature: `['b']`. These inputs will be ignored."
    )
    with mock.patch("mlflow.models.utils._logger.warning") as warn:
        _enforce_schema({"a": "hi", "b": "bye"}, single_col_schema)
    warn.assert_called_once_with(expected_message)
def test_enforce_params_schema_with_success():
    """Happy-path coverage for `_enforce_params_schema`: exact-type params
    pass through unchanged; numeric params are widened to the schema type
    (int -> long/float/double, long -> float/double, float -> double);
    anything is attempted as datetime; missing params receive schema
    defaults; unknown params are dropped with a warning; non-string keys
    are coerced to strings. Array variants mirror each scalar case.
    """
    # Correct parameters & schema
    test_parameters = {
        "str_param": "str_a",
        "int_param": np.int32(1),
        "bool_param": True,
        "double_param": 1.0,
        "float_param": np.float32(0.1),
        "long_param": 100,
        "datetime_param": np.datetime64("2023-06-26 00:00:00"),
        "str_list": ["a", "b", "c"],
        "bool_list": [True, False],
        "object": {"a": 1, "b": ["x", "y"], "c": {"d": 2}},
    }
    test_schema = ParamSchema(
        [
            ParamSpec("str_param", DataType.string, "str_a", None),
            ParamSpec("int_param", DataType.integer, np.int32(1), None),
            ParamSpec("bool_param", DataType.boolean, True, None),
            ParamSpec("double_param", DataType.double, 1.0, None),
            ParamSpec("float_param", DataType.float, np.float32(0.1), None),
            ParamSpec("long_param", DataType.long, 100, None),
            ParamSpec(
                "datetime_param", DataType.datetime, np.datetime64("2023-06-26 00:00:00"), None
            ),
            ParamSpec("str_list", DataType.string, ["a", "b", "c"], (-1,)),
            ParamSpec("bool_list", DataType.boolean, [True, False], (-1,)),
            ParamSpec(
                "object",
                Object(
                    [
                        Property("a", DataType.long),
                        Property("b", Array(DataType.string)),
                        Property("c", Object([Property("d", DataType.long)])),
                    ]
                ),
                {"a": 1, "b": ["x", "y"], "c": {"d": 2}},
                None,
            ),
        ]
    )
    assert _enforce_params_schema(test_parameters, test_schema) == test_parameters
    # Correct parameters & schema with array
    params = {
        "double_array": np.array([1.0, 2.0]),
        "float_array": np.array([np.float32(1.0), np.float32(2.0)]),
        "long_array": np.array([1, 2]),
        "datetime_array": np.array(
            [np.datetime64("2023-06-26 00:00:00"), np.datetime64("2023-06-26 00:00:00")]
        ),
    }
    schema = ParamSchema(
        [
            ParamSpec("double_array", DataType.double, np.array([1.0, 2.0]), (-1,)),
            ParamSpec(
                "float_array", DataType.float, np.array([np.float32(1.0), np.float32(2.0)]), (-1,)
            ),
            ParamSpec("long_array", DataType.long, np.array([1, 2]), (-1,)),
            ParamSpec(
                "datetime_array",
                DataType.datetime,
                np.array(
                    [np.datetime64("2023-06-26 00:00:00"), np.datetime64("2023-06-26 00:00:00")]
                ),
                (-1,),
            ),
        ]
    )
    for param, value in params.items():
        assert (_enforce_params_schema(params, schema)[param] == value).all()
    # Converting parameters value type to corresponding schema type
    # 1. int -> long, float, double
    assert _enforce_params_schema({"double_param": np.int32(1)}, test_schema)["double_param"] == 1.0
    assert _enforce_params_schema({"float_param": np.int32(1)}, test_schema)["float_param"] == 1.0
    assert _enforce_params_schema({"long_param": np.int32(1)}, test_schema)["long_param"] == 1
    # With array
    for param in ["double_array", "float_array", "long_array"]:
        assert (
            _enforce_params_schema({param: [np.int32(1), np.int32(2)]}, schema)[param]
            == params[param]
        ).all()
        assert (
            _enforce_params_schema({param: np.array([np.int32(1), np.int32(2)])}, schema)[param]
            == params[param]
        ).all()
    # 2. long -> float, double
    assert _enforce_params_schema({"double_param": 1}, test_schema)["double_param"] == 1.0
    assert _enforce_params_schema({"float_param": 1}, test_schema)["float_param"] == 1.0
    # With array
    for param in ["double_array", "float_array"]:
        assert (_enforce_params_schema({param: [1, 2]}, schema)[param] == params[param]).all()
        assert (
            _enforce_params_schema({param: np.array([1, 2])}, schema)[param] == params[param]
        ).all()
    # 3. float -> double
    assert (
        _enforce_params_schema({"double_param": np.float32(1)}, test_schema)["double_param"] == 1.0
    )
    # float32(0.1) is not exactly representable, hence the tolerance here.
    assert np.isclose(
        _enforce_params_schema({"double_param": np.float32(0.1)}, test_schema)["double_param"],
        0.1,
        atol=1e-6,
    )
    # With array
    assert (
        _enforce_params_schema({"double_array": [np.float32(1), np.float32(2)]}, schema)[
            "double_array"
        ]
        == params["double_array"]
    ).all()
    assert (
        _enforce_params_schema({"double_array": np.array([np.float32(1), np.float32(2)])}, schema)[
            "double_array"
        ]
        == params["double_array"]
    ).all()
    # 4. any -> datetime (try conversion)
    assert _enforce_params_schema({"datetime_param": "2023-07-01 00:00:00"}, test_schema)[
        "datetime_param"
    ] == np.datetime64("2023-07-01 00:00:00")
    # With array
    assert (
        _enforce_params_schema(
            {"datetime_array": ["2023-06-26 00:00:00", "2023-06-26 00:00:00"]}, schema
        )["datetime_array"]
        == params["datetime_array"]
    ).all()
    assert (
        _enforce_params_schema(
            {"datetime_array": np.array(["2023-06-26 00:00:00", "2023-06-26 00:00:00"])}, schema
        )["datetime_array"]
        == params["datetime_array"]
    ).all()
    # Add default values if the parameter is not provided
    test_parameters = {"a": "str_a"}
    test_schema = ParamSchema(
        [ParamSpec("a", DataType.string, ""), ParamSpec("b", DataType.long, 1)]
    )
    updated_parameters = {"b": 1}
    updated_parameters.update(test_parameters)
    assert _enforce_params_schema(test_parameters, test_schema) == updated_parameters
    # Ignore values not specified in ParamSchema and log warning
    test_parameters = {"a": "str_a", "invalid_param": "value"}
    test_schema = ParamSchema([ParamSpec("a", DataType.string, "")])
    with mock.patch("mlflow.models.utils._logger.warning") as mock_warning:
        assert _enforce_params_schema(test_parameters, test_schema) == {"a": "str_a"}
        mock_warning.assert_called_once_with(
            "Unrecognized params ['invalid_param'] are ignored for inference. "
            "Supported params are: {'a'}. "
            "To enable them, please add corresponding schema in ModelSignature."
        )
    # Converting parameters keys to string if it is not
    test_parameters = {1: 1.0}
    test_schema = ParamSchema([ParamSpec("1", DataType.double, 1.0)])
    assert _enforce_params_schema(test_parameters, test_schema) == {"1": 1.0}
def test_enforce_params_schema_add_default_values():
    """Defaults from the params schema fill in any params omitted at predict time."""

    class EchoParamsModel(mlflow.pyfunc.PythonModel):
        # Echoes back the params the scoring layer resolved, in schema order.
        def predict(self, context, model_input, params):
            return list(params.values())

    default_params = {"str_param": "string", "int_array": [1, 2, 3]}
    sig = infer_signature(["input"], params=default_params)
    with mlflow.start_run():
        info = mlflow.pyfunc.log_model(
            name="my_model", python_model=EchoParamsModel(), signature=sig
        )
    model = mlflow.pyfunc.load_model(info.model_uri)

    # No params supplied: every default is used.
    assert model.predict(["input"]) == ["string", [1, 2, 3]]

    # Partial params: the omitted one falls back to its default.
    partial = model.predict(["input"], params={"str_param": "new_string"})
    assert partial == ["new_string", [1, 2, 3]]

    # All params supplied: defaults are fully overridden.
    full_override = {"str_param": "new_string", "int_array": [4, 5, 6]}
    assert model.predict(["input"], params=full_override) == ["new_string", [4, 5, 6]]

    # Unknown params are dropped with a warning; defaults still apply.
    with mock.patch("mlflow.models.utils._logger.warning") as warn:
        result = model.predict(["input"], params={"new_param": "new_string"})
    warn.assert_called_once()
    assert "Unrecognized params ['new_param'] are ignored for inference" in warn.call_args[0][0]
    assert result == ["string", [1, 2, 3]]
def test_enforce_params_schema_errors():
    """Failure modes of `_enforce_params_schema`: unconvertible datetime
    values, invalid narrowing/string conversions, non-dict params objects,
    shape mismatches (scalar where a 1D array is declared), and arrays
    with ndim != 1. Each case pins the raised MlflowException message.
    """
    # Raise error when failing to convert value to DataType.datetime
    test_schema = ParamSchema(
        [ParamSpec("datetime_param", DataType.datetime, np.datetime64("2023-06-06"))]
    )
    with pytest.raises(
        MlflowException,
        match=r"Failed to convert value `1.0` from type `<class 'float'>` to `DataType.datetime`",
    ):
        _enforce_params_schema({"datetime_param": 1.0}, test_schema)
    # With array
    test_schema = ParamSchema(
        [
            ParamSpec(
                "datetime_array",
                DataType.datetime,
                np.array([np.datetime64("2023-06-06"), np.datetime64("2023-06-06")]),
                (-1,),
            )
        ]
    )
    with pytest.raises(
        MlflowException,
        match=r"Failed to convert value `1.0` from type `<class 'float'>` to `DataType.datetime`",
    ):
        _enforce_params_schema({"datetime_array": [1.0, 2.0]}, test_schema)
    # Raise error when failing to convert value to DataType.float
    test_schema = ParamSchema([ParamSpec("float_param", DataType.float, np.float32(1))])
    with pytest.raises(
        MlflowException, match=r"Failed to validate type and shape for 'float_param'"
    ):
        _enforce_params_schema({"float_param": "a"}, test_schema)
    # With array: one float64 element poisons an otherwise float32 list
    test_schema = ParamSchema(
        [ParamSpec("float_array", DataType.float, np.array([np.float32(1), np.float32(2)]), (-1,))]
    )
    with pytest.raises(
        MlflowException, match=r"Failed to validate type and shape for 'float_array'"
    ):
        _enforce_params_schema(
            {"float_array": [np.float32(1), np.float32(2), np.float64(3)]}, test_schema
        )
    # Raise error for any other conversions
    error_msg = r"Failed to validate type and shape for 'int_param'"
    test_schema = ParamSchema([ParamSpec("int_param", DataType.long, np.int32(1))])
    with pytest.raises(MlflowException, match=error_msg):
        _enforce_params_schema({"int_param": np.float32(1)}, test_schema)
    with pytest.raises(MlflowException, match=error_msg):
        _enforce_params_schema({"int_param": "1"}, test_schema)
    with pytest.raises(MlflowException, match=error_msg):
        _enforce_params_schema({"int_param": np.datetime64("2023-06-06")}, test_schema)
    error_msg = r"Failed to validate type and shape for 'str_param'"
    test_schema = ParamSchema([ParamSpec("str_param", DataType.string, "1")])
    with pytest.raises(MlflowException, match=error_msg):
        _enforce_params_schema({"str_param": np.float32(1)}, test_schema)
    with pytest.raises(MlflowException, match=error_msg):
        _enforce_params_schema({"str_param": b"string"}, test_schema)
    with pytest.raises(MlflowException, match=error_msg):
        _enforce_params_schema({"str_param": np.datetime64("2023-06-06")}, test_schema)
    # Raise error if parameters is not dictionary
    with pytest.raises(MlflowException, match=r"Parameters must be a dictionary. Got type 'int'."):
        _enforce_params_schema(100, test_schema)
    # Raise error if invalid parameters are passed
    test_parameters = {"a": True, "b": (1, 2), "c": b"test"}
    test_schema = ParamSchema(
        [
            ParamSpec("a", DataType.boolean, False),
            ParamSpec("b", DataType.string, [], (-1,)),
            ParamSpec("c", DataType.string, ""),
        ]
    )
    with pytest.raises(
        MlflowException,
        match=re.escape(
            "Value must be a 1D array with shape (-1,) for param 'b': string "
            "(default: []) (shape: (-1,)), received tuple"
        ),
    ):
        _enforce_params_schema(test_parameters, test_schema)
    # Raise error for non-1D array
    with pytest.raises(MlflowException, match=r"received list with ndim 2"):
        _enforce_params_schema(
            {"a": [[1, 2], [3, 4]]}, ParamSchema([ParamSpec("a", DataType.long, [], (-1,))])
        )
def test_enforce_params_schema_warns_with_model_without_params():
    """Passing params to a model whose signature has no params schema only
    logs a warning listing the ignored keys."""

    class NoParamsSchemaModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return list(params.values()) if isinstance(params, dict) else None

    # Signature inferred without params, so no params schema is recorded.
    sig = infer_signature(["input"])
    with mlflow.start_run():
        info = mlflow.pyfunc.log_model(
            name="model1", python_model=NoParamsSchemaModel(), signature=sig
        )
    model = mlflow.pyfunc.load_model(info.model_uri)

    supplied = {"str_param": "string", "int_array": [1, 2, 3], "123": 123}
    with mock.patch("mlflow.models.utils._logger.warning") as warn:
        model.predict(["input"], params=supplied)
    expected_message = (
        "`params` can only be specified at inference time if the model signature defines a params "
        "schema. This model does not define a params schema. Ignoring provided params: "
        "['str_param', 'int_array', '123']"
    )
    warn.assert_called_with(expected_message)
def test_enforce_params_schema_errors_with_model_with_params():
    """With a params schema present: non-dict params raise, and non-string
    keys are coerced to strings with a warning."""

    class ParamsSchemaModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return list(params.values()) if isinstance(params, dict) else None

    schema_params = {"str_param": "string", "int_array": [1, 2, 3], "123": 123}
    sig = infer_signature(["input"], params=schema_params)
    with mlflow.start_run():
        info = mlflow.pyfunc.log_model(
            name="test_model", python_model=ParamsSchemaModel(), signature=sig
        )
    model = mlflow.pyfunc.load_model(info.model_uri)

    # params must be a dict, never a list.
    with pytest.raises(MlflowException, match=r"Parameters must be a dictionary. Got type 'list'"):
        model.predict(["input"], params=[1, 2, 3])

    # Integer keys are accepted but stringified, with a warning.
    with mock.patch("mlflow.models.utils._logger.warning") as warn:
        model.predict(["input"], params={123: 456})
    warn.assert_called_with(
        "Keys in parameters should be of type `str`, but received non-string keys."
        "Converting all keys to string..."
    )
def test_param_spec_with_success():
    """ParamSpec accepts defaults matching the declared type and widens
    compatible defaults (int -> long/float/double, long -> float/double,
    float -> double, parseable values -> datetime)."""
    # Defaults whose type already matches the declared DataType.
    exact_cases = [
        (DataType.long, 1, 1),
        (DataType.string, "1", "1"),
        (DataType.double, 1.0, 1.0),
        (DataType.float, np.float32(1), 1),
        (DataType.integer, np.int32(1), 1),
    ]
    for dtype, default, expected in exact_cases:
        assert ParamSpec("a", dtype, default).default == expected
    assert ParamSpec("a", DataType.boolean, True).default is True
    # Date-only vs full-timestamp datetime64 defaults.
    assert ParamSpec("a", DataType.datetime, np.datetime64("2023-06-06")).default == datetime.date(
        2023, 6, 6
    )
    assert ParamSpec(
        "a", DataType.datetime, np.datetime64("2023-06-06 00:00:00")
    ).default == datetime.datetime(2023, 6, 6, 0, 0, 0)
    # Widening conversions applied to inconsistent defaults:
    # int -> long/float/double, long -> float/double, float -> double.
    widening_cases = [
        (DataType.long, np.int32(1), 1),
        (DataType.float, np.int32(1), 1.0),
        (DataType.double, np.int32(1), 1.0),
        (DataType.float, 1, 1.0),
        (DataType.double, 1, 1.0),
        (DataType.double, np.float32(1), 1.0),
    ]
    for dtype, default, expected in widening_cases:
        assert ParamSpec("a", dtype, default).default == expected
    # Strings are attempted as datetime.
    assert ParamSpec("a", DataType.datetime, "2023-07-01 00:00:00").default == np.datetime64(
        "2023-07-01 00:00:00"
    )
def test_param_spec_errors():
    """Invalid ParamSpec constructions: unconvertible defaults, the
    unsupported binary type, list/array defaults without shape, scalar
    defaults with shape, disallowed shapes, and non-scalar/non-1D defaults.
    Each case pins the raised MlflowException message.
    """
    # Raise error if default value can not be converted to specified type
    with pytest.raises(MlflowException, match=r"Failed to validate type and shape for 'a'"):
        ParamSpec("a", DataType.integer, "1.0")
    with pytest.raises(MlflowException, match=r"Failed to validate type and shape for 'a'"):
        ParamSpec("a", DataType.integer, [1.0, 2.0], (-1,))
    with pytest.raises(MlflowException, match=r"Failed to validate type and shape for 'a'"):
        ParamSpec("a", DataType.string, True)
    with pytest.raises(MlflowException, match=r"Failed to validate type and shape for 'a'"):
        ParamSpec("a", DataType.string, [1.0, 2.0], (-1,))
    with pytest.raises(MlflowException, match=r"Binary type is not supported for parameters"):
        ParamSpec("a", DataType.binary, 1.0)
    with pytest.raises(MlflowException, match=r"Failed to convert value"):
        ParamSpec("a", DataType.datetime, 1.0)
    with pytest.raises(MlflowException, match=r"Failed to convert value"):
        ParamSpec("a", DataType.datetime, [1.0, 2.0], (-1,))
    # "20230606" lacks separators, so datetime64 parsing rejects it
    with pytest.raises(MlflowException, match=r"Failed to convert value to `DataType.datetime`"):
        ParamSpec("a", DataType.datetime, np.datetime64("20230606"))
    # Raise error if shape is not specified for list value
    with pytest.raises(
        MlflowException,
        match=re.escape("Value must be a scalar for type `DataType.long`"),
    ):
        ParamSpec("a", DataType.long, [1, 2, 3], shape=None)
    with pytest.raises(
        MlflowException,
        match=re.escape("Value must be a scalar for type `DataType.integer`"),
    ):
        ParamSpec("a", DataType.integer, np.array([1, 2, 3]), shape=None)
    # Raise error if shape is specified for scalar value
    with pytest.raises(
        MlflowException,
        match=re.escape(
            "Value must be a 1D array with shape (-1,) for param 'a': boolean (default: True) "
            "(shape: (-1,)), received bool"
        ),
    ):
        ParamSpec("a", DataType.boolean, True, shape=(-1,))
    # Raise error if shape specified is not allowed
    with pytest.raises(
        MlflowException,
        match=r"Shape must be None for scalar or dictionary value, "
        r"or \(-1,\) for 1D array value",
    ):
        ParamSpec("a", DataType.boolean, [True, False], (2,))
    # Raise error if default value is not scalar or 1D array
    with pytest.raises(
        MlflowException,
        match=re.escape(
            "Value must be a 1D array with shape (-1,) for param 'a': boolean (default: {'a': 1}) "
            "(shape: (-1,)), received dict"
        ),
    ):
        ParamSpec("a", DataType.boolean, {"a": 1}, (-1,))
def test_enforce_schema_in_python_model_predict(sample_params_basic, param_schema_basic):
    """End-to-end params enforcement through `pyfunc` predict on a logged
    model: exact-type params round-trip unchanged, and numeric params are
    widened to the schema type (int -> long/float/double, long ->
    float/double, float -> double, string -> datetime).

    Args:
        sample_params_basic: fixture supplying the params dict used to
            infer the signature (includes a "double_array" entry).
        param_schema_basic: fixture with the ParamSchema expected from
            that inference.
    """
    test_params = sample_params_basic
    test_schema = param_schema_basic
    signature = infer_signature(["input1"], params=test_params)
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=PythonModelWithBasicParams(),
            signature=signature,
        )
    assert signature.params == test_schema
    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    loaded_predict = loaded_model.predict(["a", "b"], params=test_params)
    for param, value in test_params.items():
        if param == "double_array":
            # ndarray comparison yields an elementwise array; reduce it.
            assert (loaded_predict[param] == value).all()
        else:
            assert loaded_predict[param] == value
    # Automatically convert type if it's not consistent with schema
    # 1. int -> long, float, double
    params_int = {
        "double_param": np.int32(1),
        "float_param": np.int32(1),
        "long_param": np.int32(1),
    }
    expected_params_int = {
        "double_param": 1.0,
        "float_param": np.float32(1),
        "long_param": 1,
    }
    loaded_predict = loaded_model.predict(["a", "b"], params=params_int)
    for param in params_int:
        assert loaded_predict[param] == expected_params_int[param]
    # 2. long -> float, double
    params_long = {
        "double_param": 1,
        "float_param": 1,
    }
    expected_params_long = {
        "double_param": 1.0,
        "float_param": np.float32(1),
    }
    loaded_predict = loaded_model.predict(["a", "b"], params=params_long)
    for param in params_long:
        assert loaded_predict[param] == expected_params_long[param]
    # 3. float -> double
    assert (
        loaded_model.predict(
            ["a", "b"],
            params={
                "double_param": np.float32(1),
            },
        )["double_param"]
        == 1.0
    )
    # 4. any -> datetime (try conversion)
    assert loaded_model.predict(
        ["a", "b"],
        params={
            "datetime_param": "2023-06-26 00:00:00",
        },
    )["datetime_param"] == np.datetime64("2023-06-26 00:00:00")
def test_schema_enforcement_all_feature_types_pandas():
    """A frame covering every supported column type — including nullable
    optional (required=False) columns — passes enforcement unchanged."""
    frame = pd.DataFrame(
        {
            "long": [1, 2, 3],
            "bool": [True, False, False],
            "string": ["a", "b", "c"],
            "datetime": [pd.Timestamp("2020-07-14 00:00:00")] * 3,
            "bool_nullable": [True, None, False],
            "string_nullable": ["a", "b", None],
            "double_nullable": [1.0, 2.0, None],
        }
    )
    declared = Schema(
        [
            ColSpec(DataType.long, "long"),
            ColSpec(DataType.boolean, "bool"),
            ColSpec(DataType.string, "string"),
            ColSpec(DataType.datetime, "datetime"),
            ColSpec(DataType.boolean, "bool_nullable", required=False),
            ColSpec(DataType.string, "string_nullable", required=False),
            ColSpec(DataType.double, "double_nullable", required=False),
        ]
    )
    # dtype check is relaxed: nullable columns become object/float dtypes.
    pd.testing.assert_frame_equal(
        _enforce_schema(frame, declared), frame, check_dtype=False
    )
def test_enforce_schema_in_python_model_serving(sample_params_basic):
    """Params enforcement through the scoring server: JSON-serializable
    params round-trip (datetimes come back ISO-formatted), invalid param
    values produce an HTTP 400 with the validation message, and
    non-JSON-serializable values (np.int32, bytes) fail at payload
    serialization time.

    Args:
        sample_params_basic: fixture supplying the params dict used to
            infer the logged model's signature.
    """
    signature = infer_signature(["input1"], params=sample_params_basic)
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=PythonModelWithBasicParams(),
            signature=signature,
        )
    # params in payload should be json serializable
    test_params = {
        "str_param": "str_a",
        "int_param": 1,
        "bool_param": True,
        "double_param": 1.0,
        "float_param": 0.1,
        "long_param": 100,
        "datetime_param": datetime.datetime(2023, 6, 6, 0, 0, 0),
        "str_list": ["a", "b", "c"],
        "bool_list": [True, False],
        "double_array": np.array([1.0, 2.0]),
    }
    response = pyfunc_serve_and_score_model(
        model_info.model_uri,
        data=dump_input_data(["a", "b"], params=test_params),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200
    prediction = json.loads(response.content.decode("utf-8"))["predictions"]
    for param, value in test_params.items():
        if param == "double_array":
            assert (prediction[param] == value).all()
        elif param == "datetime_param":
            # JSON transport serializes datetimes to ISO-8601 strings.
            assert prediction[param] == value.isoformat()
        else:
            assert prediction[param] == value
    # Test invalid params for model serving
    with pytest.raises(TypeError, match=r"Object of type int32 is not JSON serializable"):
        dump_input_data(["a", "b"], params={"int_param": np.int32(1)})
    response = pyfunc_serve_and_score_model(
        model_info.model_uri,
        data=dump_input_data(["a", "b"], params={"double_param": "invalid"}),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 400
    assert (
        "Failed to validate type and shape for 'double_param'"
        in json.loads(response.content.decode("utf-8"))["message"]
    )
    # Can not pass bytes to request
    with pytest.raises(TypeError, match=r"Object of type bytes is not JSON serializable"):
        pyfunc_serve_and_score_model(
            model_info.model_uri,
            data=dump_input_data(["a", "b"], params={"str_param": b"bytes"}),
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
            extra_args=["--env-manager", "local"],
        )
def test_python_model_serving_compatible(tmp_path):
    """
    Backward-compatibility check: a PythonModel artifact written by
    mlflow 2.4.0 must still load, predict, and serve on the current
    version. The model directory is reconstructed by hand below
    (MLmodel / python_env.yaml / requirements.txt plus a freshly
    pickled python_model.pkl) instead of being logged, so the MLmodel
    metadata stays pinned to 2.4.0.

    # Code for logging the model in mlflow 2.4.0
    import mlflow
    from mlflow.models import infer_signature
    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input):
            return model_input
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            python_model = MyModel(),
            artifact_path = "test_model",
            signature = infer_signature(["input"]),
            registered_model_name="model")
    """
    tmp_path.joinpath("MLmodel").write_text(
        """
    artifact_path: test_model
    flavors:
      python_function:
        cloudpickle_version: 2.2.1
        env:
          conda: conda.yaml
          virtualenv: python_env.yaml
        loader_module: mlflow.pyfunc.model
        python_model: python_model.pkl
        python_version: 3.8.16
    mlflow_version: 2.4.0
    model_uuid: 3cbde93be0114644a6ec900c64cab39d
    run_id: 3f87fdff03524c19908c3a47fb99f9cd
    signature:
      inputs: '[{"type": "string"}]'
      outputs: null
    utc_time_created: '2023-07-13 01:29:55.467561'
    """
    )
    tmp_path.joinpath("python_env.yaml").write_text(
        """
    python: 3.8.16
    build_dependencies:
    - pip==23.1.2
    - setuptools==56.0.0
    - wheel==0.40.0
    dependencies:
    - -r requirements.txt
    """
    )
    tmp_path.joinpath("requirements.txt").write_text(
        """
    mlflow==2.4.0
    cloudpickle==2.2.1
    """
    )

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input):
            return model_input

    # Pickle a fresh identity model into the 2.4.0-shaped directory.
    python_model = MyModel()
    with open(tmp_path / "python_model.pkl", "wb") as out:
        cloudpickle.dump(python_model, out)
    # This test is only meaningful when running a newer mlflow.
    assert Version(mlflow.__version__) > Version("2.4.0")
    model_uri = str(tmp_path)
    pyfunc_loaded = mlflow.pyfunc.load_model(model_uri)
    assert pyfunc_loaded.metadata.signature == ModelSignature(Schema([ColSpec("string")]))
    # predict is compatible
    local_predict = pyfunc_loaded.predict(["input"])
    assert local_predict.values[0].tolist() == ["input"]
    # model serving is compatible
    response = pyfunc_serve_and_score_model(
        model_uri,
        data=dump_input_data(["a", "b"]),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200
    prediction = json.loads(response.content.decode("utf-8"))["predictions"]
    assert prediction == [{"0": "a"}, {"0": "b"}]
def test_function_python_model_serving_compatible(tmp_path):
    """
    Backward-compatibility check for function-based (callable) pyfunc
    models logged by mlflow 2.4.0: the artifact directory is rebuilt by
    hand with 2.4.0 metadata and a freshly pickled _FunctionPythonModel,
    then loaded, predicted, and served on the current version.

    # Code for logging the model in mlflow 2.4.0
    import mlflow
    from mlflow.models import infer_signature
    def my_model(model_input):
        return model_input
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            python_model = my_model,
            artifact_path = "test_model",
            signature = infer_signature(["input"]),
            registered_model_name="model",
            input_example=["input"])
    """
    tmp_path.joinpath("MLmodel").write_text(
        """
    artifact_path: test_model
    flavors:
      python_function:
        cloudpickle_version: 2.2.1
        env:
          conda: conda.yaml
          virtualenv: python_env.yaml
        loader_module: mlflow.pyfunc.model
        python_model: python_model.pkl
        python_version: 3.8.16
    mlflow_version: 2.4.0
    model_uuid: f19b9a51a34a453282e53ca41d384964
    run_id: 9fd7b6e125a547fdbb4505f15e8259ed
    saved_input_example_info:
      artifact_path: input_example.json
      pandas_orient: split
      type: dataframe
    signature:
      inputs: '[{"type": "string"}]'
      outputs: null
    utc_time_created: '2023-07-14 10:18:44.353510'
    """
    )
    tmp_path.joinpath("python_env.yaml").write_text(
        """
    python: 3.8.16
    build_dependencies:
    - pip==23.1.2
    - setuptools==56.0.0
    - wheel==0.40.0
    dependencies:
    - -r requirements.txt
    """
    )
    tmp_path.joinpath("requirements.txt").write_text(
        """
    mlflow==2.4.0
    cloudpickle==2.2.1
    pandas==2.0.3
    """
    )
    tmp_path.joinpath("input_example.json").write_text(
        """
    {"data": [["input"]]}
    """
    )

    def my_model(model_input):
        return model_input

    # 2.4.0 wrapped plain callables in _FunctionPythonModel before pickling.
    from mlflow.pyfunc.model import _FunctionPythonModel

    python_model = _FunctionPythonModel(my_model, signature=infer_signature(["input"]))
    with open(tmp_path / "python_model.pkl", "wb") as out:
        cloudpickle.dump(python_model, out)
    # This test is only meaningful when running a newer mlflow.
    assert Version(mlflow.__version__) > Version("2.4.0")
    model_uri = str(tmp_path)
    pyfunc_loaded = mlflow.pyfunc.load_model(model_uri)
    assert pyfunc_loaded.metadata.signature == ModelSignature(Schema([ColSpec("string")]))
    # predict is compatible
    local_predict = pyfunc_loaded.predict(["input"])
    assert local_predict.values[0].tolist() == ["input"]
    # model serving is compatible
    response = pyfunc_serve_and_score_model(
        model_uri,
        data=dump_input_data(["a", "b"]),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200
    prediction = json.loads(response.content.decode("utf-8"))["predictions"]
    assert prediction == [{"0": "a"}, {"0": "b"}]
def test_enforce_schema_with_arrays_in_python_model_predict(sample_params_with_arrays):
    """Schema enforcement of array-valued params in local `predict`.

    Verifies that (a) params matching the schema pass through unchanged,
    (b) safe numeric widenings (int->long/float/double, long->float/double,
    float->double) and string->datetime conversions are applied automatically,
    and (c) lossy/invalid conversions raise MlflowException.
    """
    params = sample_params_with_arrays
    signature = infer_signature(["input1"], params=params)
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=PythonModelWithArrayParams(),
            signature=signature,
        )
    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    # Baseline: params already matching the schema are passed through as-is.
    loaded_predict = loaded_model.predict(["a", "b"], params=params)
    for param, value in params.items():
        assert (loaded_predict[param] == value).all()
    # Automatically convert type if it's not consistent with schema
    # 1. int -> long, float, double
    for param in ["double_array", "float_array", "long_array"]:
        loaded_predict = loaded_model.predict(
            ["a", "b"], params={param: np.array([np.int32(1), np.int32(2)])}
        )
        assert (loaded_predict[param] == params[param]).all()
    # 2. long -> float, double
    for param in ["double_array", "float_array"]:
        loaded_predict = loaded_model.predict(["a", "b"], params={param: np.array([1, 2])})
        assert (loaded_predict[param] == params[param]).all()
    # 3. float -> double
    loaded_predict = loaded_model.predict(
        ["a", "b"], params={"double_array": np.array([np.float32(1), np.float32(2)])}
    )
    assert (loaded_predict["double_array"] == params["double_array"]).all()
    # 4. any -> datetime (try conversion)
    loaded_predict = loaded_model.predict(
        ["a", "b"],
        params={"datetime_array": np.array(["2023-06-26 00:00:00", "2023-06-26 00:00:00"])},
    )
    assert (loaded_predict["datetime_array"] == params["datetime_array"]).all()
    # Raise error if failing to convert the type
    with pytest.raises(
        MlflowException,
        match=r"Failed to convert value `1.0` from type `<class 'float'>` to `DataType.datetime`",
    ):
        loaded_model.predict(["a", "b"], params={"datetime_array": [1.0, 2.0]})
    # Narrowing conversions (float -> int, bool -> float, str -> double) are
    # rejected rather than silently coerced.
    with pytest.raises(MlflowException, match=r"Failed to validate type and shape for 'int_array'"):
        loaded_model.predict(["a", "b"], params={"int_array": np.array([1.0, 2.0])})
    with pytest.raises(
        MlflowException, match=r"Failed to validate type and shape for 'float_array'"
    ):
        loaded_model.predict(["a", "b"], params={"float_array": [True, False]})
    with pytest.raises(
        MlflowException, match=r"Failed to validate type and shape for 'double_array'"
    ):
        loaded_model.predict(["a", "b"], params={"double_array": [1.0, "2.0"]})
def test_enforce_schema_with_arrays_in_python_model_serving(sample_params_with_arrays):
    """Same array-param enforcement as the predict test, but over the REST
    scoring server: valid params round-trip through JSON, invalid ones yield
    HTTP 400 with the enforcement error in the response message.
    """
    params = sample_params_with_arrays
    signature = infer_signature(["input1"], params=params)
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=PythonModelWithArrayParams(),
            signature=signature,
        )
    # Start one local scoring endpoint and reuse it for all invocations.
    with pyfunc_scoring_endpoint(
        model_info.model_uri, extra_args=["--env-manager", "local"]
    ) as endpoint:
        response = endpoint.invoke(
            data=dump_input_data(["a", "b"], params=params),
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        )
        assert response.status_code == 200
        prediction = json.loads(response.content.decode("utf-8"))["predictions"]
        for param, value in params.items():
            if param == "datetime_array":
                # Datetimes are serialized to ISO strings in the JSON response.
                assert prediction[param] == list(map(np.datetime_as_string, value))
            else:
                assert (prediction[param] == value).all()
        # Test invalid params for model serving
        response = endpoint.invoke(
            data=dump_input_data(["a", "b"], params={"datetime_array": [1.0, 2.0]}),
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        )
        assert response.status_code == 400
        assert (
            "Failed to convert value `1.0` from type `<class 'float'>` to `DataType.datetime`"
            in json.loads(response.content.decode("utf-8"))["message"]
        )
        response = endpoint.invoke(
            data=dump_input_data(["a", "b"], params={"int_array": np.array([1.0, 2.0])}),
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        )
        assert response.status_code == 400
        assert (
            "Failed to validate type and shape for 'int_array'"
            in json.loads(response.content.decode("utf-8"))["message"]
        )
        response = endpoint.invoke(
            data=dump_input_data(["a", "b"], params={"float_array": [True, False]}),
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        )
        assert response.status_code == 400
        assert (
            "Failed to validate type and shape for 'float_array'"
            in json.loads(response.content.decode("utf-8"))["message"]
        )
        response = endpoint.invoke(
            data=dump_input_data(["a", "b"], params={"double_array": [1.0, "2.0"]}),
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        )
        assert response.status_code == 400
        assert (
            "Failed to validate type and shape for 'double_array'"
            in json.loads(response.content.decode("utf-8"))["message"]
        )
@pytest.mark.parametrize(
    ("example", "input_schema", "output_schema"),
    [
        (
            ["input1", "input2", "input3"],
            Schema([ColSpec(DataType.string)]),
            Schema([ColSpec(DataType.string, 0)]),
        ),
        (
            [{"a": "a", "b": "b"}, {"a": "b"}],
            Schema([ColSpec(DataType.string, "a"), ColSpec(DataType.string, "b", required=False)]),
            Schema([ColSpec(DataType.string, "a"), ColSpec(DataType.string, "b", required=False)]),
        ),
        (
            {"a": ["a", "b", "c"], "b": "b"},
            Schema([ColSpec(Array(DataType.string), "a"), ColSpec(DataType.string, "b")]),
            Schema([ColSpec(Array(DataType.string), "a"), ColSpec(DataType.string, "b")]),
        ),
        (
            pd.DataFrame({"a": ["a", "b", "c"], "b": "b"}),
            Schema([ColSpec(DataType.string, "a"), ColSpec(DataType.string, "b")]),
            Schema([ColSpec(DataType.string, "a"), ColSpec(DataType.string, "b")]),
        ),
    ],
)
def test_pyfunc_model_input_example_with_params(
    sample_params_basic, param_schema_basic, tmp_path, example, input_schema, output_schema
):
    """End-to-end check of an `(example, params)` tuple input example.

    Logging with a (data, params) tuple must: infer input/output/params
    schemas from it, keep `predict` working, persist a reloadable example
    artifact, and produce a serving payload that scores successfully both
    from the saved example and from a manually-built payload.
    """

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            # Tuple form: (input example, params example).
            input_example=(example, sample_params_basic),
        )

    # Test _infer_signature_from_input_example
    assert model_info.signature.inputs == input_schema
    assert model_info.signature.outputs == output_schema
    assert model_info.signature.params == param_schema_basic

    # Test predict
    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    prediction = loaded_model.predict(example)
    # Schema enforcement converts the raw example to a DataFrame before predict.
    expected_df = pd.DataFrame([example] if isinstance(example, dict) else example)
    pd.testing.assert_frame_equal(prediction, expected_df)

    # Test saved example
    local_path = _download_artifact_from_uri(model_info.model_uri, output_path=tmp_path)
    mlflow_model = Model.load(os.path.join(local_path, "MLmodel"))
    loaded_example = mlflow_model.load_input_example(local_path)
    if isinstance(example, list) and all(np.isscalar(x) for x in example):
        np.testing.assert_equal(loaded_example, example)
    else:
        if isinstance(example, pd.DataFrame):
            pd.testing.assert_frame_equal(loaded_example, example)
        else:
            assert loaded_example == example

    # Score twice: once with the payload persisted alongside the model, once
    # with an equivalent payload constructed by hand. Both must agree.
    for test_example in ["saved_example", "manual_example"]:
        if test_example == "saved_example":
            payload = mlflow_model.get_serving_input(local_path)
        else:
            if isinstance(example, pd.DataFrame):
                payload = json.dumps({"dataframe_split": example.to_dict(orient="split")})
            else:
                payload = json.dumps({"inputs": example})
        response = pyfunc_serve_and_score_model(
            model_info.model_uri,
            data=payload,
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
            extra_args=["--env-manager", "local"],
        )
        assert response.status_code == 200, response.content
        result = json.loads(response.content.decode("utf-8"))["predictions"]
        result = pd.DataFrame(result).values.tolist()[0]
        np.testing.assert_equal(result, expected_df.values.tolist()[0])
def test_invalid_input_example_warn_when_model_logging():
    """Logging an input example the model cannot actually serve only warns."""

    class ListAssertingModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            # Schema enforcement converts a List[str] example into a pandas
            # DataFrame before predict, so this assertion fails at validation
            # time — making the example invalid for serving.
            assert isinstance(model_input, list)
            return "string"

    with mock.patch("mlflow.models.model._logger.warning") as mock_warning:
        with mlflow.start_run():
            mlflow.pyfunc.log_model(
                name="test_model",
                python_model=ListAssertingModel(),
                input_example=["some string"],
            )
        emitted = [call[0][0] for call in mock_warning.call_args_list]
        assert any("Failed to validate serving input example" in msg for msg in emitted)
def assert_equal(a, b):
    """Recursively assert that *a* and *b* are equal.

    Dispatches on the type of the arguments: DataFrames use pandas'
    frame comparison, arrays (on either side) use numpy's deep equality,
    dicts are compared key-by-key recursively, everything else with `==`.
    """
    if isinstance(a, pd.DataFrame):
        pd.testing.assert_frame_equal(a, b)
        return
    if isinstance(a, np.ndarray) or isinstance(b, np.ndarray):
        np.testing.assert_equal(a, b)
        return
    if isinstance(a, dict):
        assert a.keys() == b.keys()
        for key, value in a.items():
            assert_equal(value, b[key])
        return
    assert a == b
@pytest.mark.parametrize(
    ("example", "signature", "expected_input", "expected_output"),
    [
        (
            pd.DataFrame({"a": ["input1", "input2", "input3"]}),
            ModelSignature(
                Schema([ColSpec(DataType.string, "a")]), Schema([ColSpec(DataType.string)])
            ),
            pd.DataFrame({"a": ["input1", "input2", "input3"]}),
            "string output",
        ),
        (
            np.array([1, 2, 3]),
            ModelSignature(
                Schema([TensorSpec(np.dtype("int64"), (-1,))]),
                Schema([TensorSpec(np.dtype("float64"), (-1,))]),
            ),
            np.array([1, 2, 3]),
            np.array([1.0, 2.0, 3.0]),
        ),
        (
            np.array([1, 2, 3, np.nan]),
            ModelSignature(
                Schema([TensorSpec(np.dtype("float64"), (-1,))]),
                Schema([TensorSpec(np.dtype("float64"), (-1,))]),
            ),
            np.array([1, 2, 3, np.nan]),
            np.array([1.0, 2.0, 3.0, np.nan]),
        ),
        (
            {"a": np.array([1, 2, 3])},
            ModelSignature(
                Schema([TensorSpec(np.dtype("int64"), (-1,), "a")]),
                Schema([TensorSpec(np.dtype("float64"), (-1,), "b")]),
            ),
            {"a": np.array([1, 2, 3])},
            {"b": np.array([1.0, 2.0, 3.0])},
        ),
        (
            ["input1", "input2", "input3"],
            ModelSignature(Schema([ColSpec(DataType.string)]), Schema([ColSpec(DataType.string)])),
            # This is due to _enforce_schema
            pd.DataFrame(["input1", "input2", "input3"]),
            ["input1", "input2", "input3"],
        ),
        (
            [{"a": ["sentence1", "sentence2"], "b": ["answer1", "answer2"]}],
            ModelSignature(
                Schema(
                    [ColSpec(Array(DataType.string), "a"), ColSpec(Array(DataType.string), "b")]
                ),
                Schema([ColSpec(DataType.string, "output")]),
            ),
            pd.DataFrame([{"a": ["sentence1", "sentence2"], "b": ["answer1", "answer2"]}]),
            {"output": "some prediction"},
        ),
        (
            {"messages": [{"role": "user", "content": "some question"}]},
            ModelSignature(
                Schema(
                    [
                        ColSpec(
                            Array(
                                Object(
                                    [
                                        Property("role", DataType.string),
                                        Property("content", DataType.string),
                                    ]
                                )
                            ),
                            "messages",
                        )
                    ]
                ),
                Schema([ColSpec(DataType.string, "output")]),
            ),
            # we assume the field is array so we need another list wrapper
            pd.DataFrame([{"messages": [{"role": "user", "content": "some question"}]}]),
            {"output": "some prediction"},
        ),
    ],
)
def test_input_example_validation_during_logging(
    tmp_path, example, signature, expected_input, expected_output
):
    """Signature inference + serving-input validation at log time.

    For each (example, expected signature) pair: the model asserts it receives
    `expected_input` (the example after schema enforcement) and returns
    `expected_output`; the test then verifies the inferred signature, scores
    the persisted serving payload over REST, and cross-checks
    `convert_input_example_to_serving_input` / `validate_serving_input`.
    """
    from mlflow.models import validate_serving_input

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            # Fails the run (and hence the log) if enforcement produced
            # anything other than the parametrized expectation.
            assert_equal(model_input, expected_input)
            return expected_output

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            input_example=example,
        )
        assert model_info.signature == signature
        mlflow_model = Model.load(model_info.model_uri)
        local_path = _download_artifact_from_uri(model_info.model_uri, output_path=tmp_path)
        serving_input_example = mlflow_model.get_serving_input(local_path)
        response = pyfunc_serve_and_score_model(
            model_info.model_uri,
            data=serving_input_example,
            content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
            extra_args=["--env-manager", "local"],
        )
        assert response.status_code == 200, response.content
        # Unified LLM inputs are returned at the top level; everything else is
        # wrapped under the "predictions" key.
        if is_unified_llm_input(example):
            result = json.loads(response.content.decode("utf-8"))
        else:
            result = json.loads(response.content.decode("utf-8"))["predictions"]
        assert_equal(result, expected_output)

        # make sure validate_serving_input has the same output
        assert convert_input_example_to_serving_input(example) == serving_input_example
        result = validate_serving_input(model_info.model_uri, serving_input_example)
        assert_equal(result, expected_output)
def test_pyfunc_schema_inference_not_generate_trace():
    """Schema inference at log time must not emit traces; user predicts must.

    Logging with an input example triggers an internal predict call to infer
    the signature; that call is trace-suppressed. A subsequent user-initiated
    predict on the loaded model still produces exactly one trace.
    """

    class TracedModel(mlflow.pyfunc.PythonModel):
        @mlflow.trace()
        def predict(self, context, model_input):
            return model_input

    with mlflow.start_run():
        logged = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=TracedModel(),
            input_example=["input"],
        )

    # The inference-time predict left no trace behind.
    assert len(get_traces()) == 0

    # A normal predict call emits exactly one trace.
    mlflow.pyfunc.load_model(logged.model_uri).predict("input")
    assert len(get_traces()) == 1
@pytest.mark.parametrize(
    ("data", "schema"),
    [
        ({"a": np.array([1, 2, 3])}, Schema([ColSpec(DataType.long, name="a")])),
        ({"query": "sentence"}, Schema([ColSpec(DataType.string, name="query")])),
        (
            {"query": ["sentence_1", "sentence_2"]},
            Schema([ColSpec(DataType.string, name="query")]),
        ),
        (
            {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
            Schema(
                [
                    ColSpec(DataType.string, name="query"),
                    ColSpec(DataType.string, name="table"),
                ]
            ),
        ),
        (
            [{"query": "sentence"}, {"query": "sentence"}],
            Schema([ColSpec(DataType.string, name="query")]),
        ),
        (
            [
                {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
                {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
            ],
            Schema(
                [
                    ColSpec(DataType.string, name="query"),
                    ColSpec(DataType.string, name="table"),
                ]
            ),
        ),
    ],
)
def test_pyfunc_model_schema_enforcement_with_dicts_and_lists(data, schema):
    """Dict and list-of-dict inputs are coerced to the same DataFrame by
    schema enforcement, and passing that DataFrame directly is equivalent.
    """

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    signature = ModelSignature(schema)
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            signature=signature,
        )

    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    prediction = loaded_model.predict(data)
    # A dict of scalars / lists-of-strings represents a single row; anything
    # else maps column-wise (or row-wise for a list of dicts).
    if isinstance(data, dict) and all(
        isinstance(x, str) or (isinstance(x, list) and all(isinstance(y, str) for y in x))
        for x in data.values()
    ):
        df = pd.DataFrame([data])
    else:
        df = pd.DataFrame(data)
    pd.testing.assert_frame_equal(prediction, df)

    # Test pandas DataFrame input
    prediction = loaded_model.predict(df)
    pd.testing.assert_frame_equal(prediction, df)
@pytest.mark.parametrize(
    ("data", "schema"),
    [
        ({"query": "sentence"}, Schema([ColSpec(DataType.string, name="query")])),
        (
            {"query": ["sentence_1", "sentence_2"]},
            Schema([ColSpec(DataType.string, name="query")]),
        ),
        (
            {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
            Schema(
                [
                    ColSpec(DataType.string, name="query"),
                    ColSpec(DataType.string, name="table"),
                ]
            ),
        ),
    ],
)
# `instances` is an invalid key for schema with MLflow < 2.9.0
@pytest.mark.parametrize("format_key", ["inputs", "dataframe_split", "dataframe_records"])
def test_pyfunc_model_serving_with_dicts(data, schema, format_key):
    """Dict inputs score identically over REST regardless of payload format
    (`inputs`, `dataframe_split`, `dataframe_records`).
    """

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    signature = ModelSignature(schema)
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            signature=signature,
        )

    # All-string dicts denote a single row; otherwise columns map element-wise.
    df = (
        pd.DataFrame([data])
        if all(isinstance(x, str) for x in data.values())
        else pd.DataFrame(data)
    )
    if format_key == "inputs":
        payload = {format_key: data}
    elif format_key in ("dataframe_split", "dataframe_records"):
        # format_key[10:] strips the "dataframe_" prefix -> pandas orient.
        payload = {format_key: df.to_dict(orient=format_key[10:])}
    response = pyfunc_serve_and_score_model(
        model_info.model_uri,
        data=json.dumps(payload),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200, response.content
    result = json.loads(response.content.decode("utf-8"))["predictions"]
    # This is not consistent with batch inference df
    pd.testing.assert_frame_equal(pd.DataFrame(result), df)
@pytest.mark.parametrize(
    ("data", "schema"),
    [
        (
            [{"query": "sentence"}, {"query": "sentence"}],
            Schema([ColSpec(DataType.string, name="query")]),
        ),
        (
            [
                {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
                {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
            ],
            Schema(
                [
                    ColSpec(DataType.string, name="query"),
                    ColSpec(DataType.string, name="table"),
                ]
            ),
        ),
    ],
)
# `inputs` is an invalid key for schema with MLflow < 2.9.0
@pytest.mark.parametrize("format_key", ["instances", "dataframe_split", "dataframe_records"])
def test_pyfunc_model_serving_with_lists_of_dicts(data, schema, format_key):
    """List-of-dict (multi-row) inputs score identically over REST for the
    `instances`, `dataframe_split` and `dataframe_records` payload formats.
    """

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    signature = ModelSignature(schema)
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            signature=signature,
        )

    df = pd.DataFrame(data)
    if format_key == "instances":
        payload = {format_key: data}
    elif format_key in ("dataframe_split", "dataframe_records"):
        # format_key[10:] strips the "dataframe_" prefix -> pandas orient.
        payload = {format_key: df.to_dict(orient=format_key[10:])}
    response = pyfunc_serve_and_score_model(
        model_info.model_uri,
        data=json.dumps(payload),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200, response.content
    result = json.loads(response.content.decode("utf-8"))["predictions"]
    pd.testing.assert_frame_equal(pd.DataFrame(result), df)
@pytest.mark.parametrize(
    ("data", "schema"),
    [
        ({"query": "sentence"}, Schema([ColSpec(DataType.string, name="query")])),
        (
            {"query": ["sentence_1", "sentence_2"]},
            Schema([ColSpec(Array(DataType.string), name="query")]),
        ),
        (
            {"query": {"a": "a", "b": 1}},
            Schema(
                [
                    ColSpec(
                        Object([Property("a", DataType.string), Property("b", DataType.long)]),
                        "query",
                    )
                ]
            ),
        ),
        (
            {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
            Schema(
                [
                    ColSpec(Array(DataType.string), name="query"),
                    ColSpec(DataType.string, name="table"),
                ]
            ),
        ),
        (
            {"query": [{"name": "value", "age": 10}, {"name": "value"}], "table": ["some_table"]},
            Schema(
                [
                    ColSpec(
                        Array(
                            Object(
                                [
                                    Property("name", DataType.string),
                                    Property("age", DataType.long, required=False),
                                ]
                            )
                        ),
                        name="query",
                    ),
                    ColSpec(Array(DataType.string), name="table"),
                ]
            ),
        ),
        (
            [{"query": "sentence"}, {"query": "sentence"}],
            Schema([ColSpec(DataType.string, name="query")]),
        ),
        (
            [
                {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
                {"query": ["sentence_1", "sentence_2"]},
            ],
            Schema(
                [
                    ColSpec(Array(DataType.string), name="query"),
                    ColSpec(DataType.string, name="table", required=False),
                ]
            ),
        ),
    ],
)
def test_pyfunc_model_schema_enforcement_with_objects_and_arrays(data, schema):
    """Object/Array signatures: inference from raw data and from its DataFrame
    form must agree, and enforcement must coerce both input forms identically.
    """

    class MyModel(mlflow.pyfunc.PythonModel):
        def load_context(self, context):
            self.pipeline = "pipeline"

        def predict(self, context, model_input, params=None):
            # load_context must have run before predict.
            assert self.pipeline == "pipeline"
            return model_input

    signature = infer_signature(data)
    assert signature.inputs == schema
    # Inferring from the equivalent DataFrame yields the same schema.
    pdf = pd.DataFrame(data if isinstance(data, list) else [data])
    assert infer_signature(pdf).inputs == schema

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            signature=signature,
        )

    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    prediction = loaded_model.predict(data)
    df = pd.DataFrame(data) if isinstance(data, list) else pd.DataFrame([data])
    pd.testing.assert_frame_equal(prediction, df)

    # Test pandas DataFrame input
    prediction = loaded_model.predict(df)
    pd.testing.assert_frame_equal(prediction, df)
@pytest.mark.parametrize(
    "data",
    [
        {"query": "sentence"},
        {"query": ["sentence_1", "sentence_2"]},
        {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
        {"query": [{"name": "value"}, {"name": "value"}], "table": ["some_table"]},
        [{"query": "sentence"}, {"query": "sentence"}],
        [
            {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
            {"query": ["sentence_1", "sentence_2"]},
        ],
        [
            {"query": [{"name": "value"}, {"name": "value"}], "table": ["some_table"]},
            {"query": [{"name": "value", "age": 10}, {"name": "value"}], "table": ["some_table"]},
        ],
    ],
)
@pytest.mark.parametrize("format_key", ["inputs", "dataframe_split", "dataframe_records"])
def test_pyfunc_model_scoring_with_objects_and_arrays(data, format_key):
    """REST scoring of Object/Array-typed data across payload formats; the
    signature is inferred from the data itself.
    """

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            signature=infer_signature(data),
        )

    df = pd.DataFrame(data) if isinstance(data, list) else pd.DataFrame([data])
    if format_key == "inputs":
        payload = {format_key: data}
    elif format_key == "dataframe_split":
        payload = {format_key: df.to_dict(orient="split")}
    elif format_key == "dataframe_records":
        payload = {format_key: df.to_dict(orient="records")}
    response = pyfunc_serve_and_score_model(
        model_info.model_uri,
        data=json.dumps(payload),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200, response.content
    result = json.loads(response.content.decode("utf-8"))["predictions"]
    expected_result = df.to_dict(orient="records")
    np.testing.assert_equal(result, expected_result)
@pytest.mark.parametrize(
    "data",
    [
        {"query": "sentence"},
        {"query": ["sentence_1", "sentence_2"]},
        {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
        {"query": [{"name": "value"}, {"name": "value"}], "table": ["some_table"]},
        [{"query": "sentence"}, {"query": "sentence"}],
    ],
)
def test_pyfunc_model_scoring_with_objects_and_arrays_instances(data):
    """REST scoring of Object/Array-typed data via the `instances` payload key."""

    class EchoModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    with mlflow.start_run():
        logged = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=EchoModel(),
            signature=infer_signature(data),
        )

    # A single dict is one row; a list of dicts is one row per dict.
    expected_df = pd.DataFrame(data) if isinstance(data, list) else pd.DataFrame([data])
    response = pyfunc_serve_and_score_model(
        logged.model_uri,
        data=json.dumps({"instances": data}),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200, response.content
    predictions = json.loads(response.content.decode("utf-8"))["predictions"]
    np.testing.assert_equal(predictions, expected_df.to_dict(orient="records"))
@pytest.mark.parametrize(
    "data",
    [
        [{"query": {"a": "b"}, "name": "A"}, {"query": {"a": "c"}, "name": "B"}],
        [
            {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
            {"query": ["sentence_1", "sentence_2"]},
        ],
        [
            {"query": [{"name": "value"}, {"name": "value"}], "table": ["some_table"]},
            {"query": [{"name": "value", "age": 10}, {"name": "value"}], "table": ["some_table"]},
        ],
    ],
)
def test_pyfunc_model_scoring_with_objects_and_arrays_instances_errors(data):
    """Object/Array row sets that the `instances` format cannot represent are
    rejected by the scoring server with HTTP 400 and an enforcement error."""

    class EchoModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    with mlflow.start_run():
        logged = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=EchoModel(),
            signature=infer_signature(data),
        )

    response = pyfunc_serve_and_score_model(
        logged.model_uri,
        data=json.dumps({"instances": data}),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 400, response.content
    error_message = json.loads(response.content.decode("utf-8"))["message"]
    assert "Failed to enforce schema" in error_message
@pytest.mark.parametrize(
    ("data", "schema"),
    [
        (
            [{"query": "question1"}, {"query": "question2"}],
            Schema([ColSpec(DataType.string, "query")]),
        ),
        (
            [{"query": ["sentence_1", "sentence_2"]}, {"query": ["sentence_1", "sentence_2"]}],
            Schema([ColSpec(DataType.string, "query")]),
        ),
        (
            [
                {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
                {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
            ],
            Schema([ColSpec(DataType.string, "query"), ColSpec(DataType.string, "table")]),
        ),
    ],
)
def test_pyfunc_model_scoring_instances_backwards_compatibility(data, schema):
    """`instances` payloads scored against legacy string-typed schemas keep
    returning the input unchanged (pre-Object/Array behavior preserved)."""

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            signature=ModelSignature(schema),
        )

    response = pyfunc_serve_and_score_model(
        model_info.model_uri,
        data=json.dumps({"instances": data}),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200, response.content
    result = json.loads(response.content.decode("utf-8"))["predictions"]
    # The echo model returns the rows exactly as submitted.
    np.testing.assert_equal(result, data)
# NOTE(review): the fixture key "netsed_list" is a typo for "nested_list"; it
# is used consistently within this test only, so it is preserved as-is.
@pytest.mark.parametrize(
    ("data", "schema"),
    [
        (
            {
                "netsed_list": [
                    [["a", "b"], ["c", "d"]],
                    [["e", "f"], ["g"]],
                ]
            },
            Schema([ColSpec(Array(Array(DataType.string)), name="netsed_list")]),
        ),
        (
            {
                "numpy_2d_array": [
                    np.array([[np.int32(1), np.int32(2)], [np.int32(3), np.int32(4)]])
                ]
            },
            Schema([ColSpec(Array(Array(DataType.integer)), name="numpy_2d_array")]),
        ),
        (
            {"list_of_np_array": [[np.array(["a", "b"])], [np.array(["c", "d"])]]},
            Schema([ColSpec(Array(Array(DataType.string)), name="list_of_np_array")]),
        ),
    ],
)
def test_pyfunc_model_schema_enforcement_nested_array(data, schema):
    """Nested arrays (lists of lists, 2-D numpy arrays, lists of 1-D arrays)
    infer Array(Array(...)) schemas and pass through enforcement intact."""

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    df = pd.DataFrame.from_records(data)
    signature = infer_signature(df)
    assert signature.inputs == schema

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            signature=signature,
        )

    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    prediction = loaded_model.predict(df)
    pd.testing.assert_frame_equal(prediction, df)
@pytest.mark.parametrize(
    ("data", "schema"),
    [
        (
            {
                "simple_map": [
                    {"a": 3, "b": 4},
                    {},
                    {"c": 5},
                ]
            },
            Schema([ColSpec(Map(value_type=DataType.long), name="simple_map")]),
        ),
        (
            {
                "simple_map": [
                    {"a": 3, "b": 4},
                    {},
                    {"c": 5},
                ]
            },
            Schema([ColSpec(Map(value_type=DataType.long))]),  # Unnamed column
        ),
        (
            {
                "nested_map": [
                    {"a": {"a1": 3, "a2": 4}, "b": {"b1": 5}},
                    {},
                    {"c": {}},
                ]
            },
            Schema([ColSpec(Map(value_type=Map(value_type=DataType.long)), name="nested_map")]),
        ),
        (
            {
                "array_in_map": [
                    {"a": [1, 2, 3], "b": [4, 5]},
                    {},
                    {"c": []},
                ]
            },
            Schema([ColSpec(Map(value_type=Array(dtype=DataType.long)), name="array_in_map")]),
        ),
        (
            {
                "object_in_map": [
                    {"a": {"key1": "a1", "key2": 1}, "b": {"key1": "b1"}},
                    {},
                    {"c": {"key1": "c1"}},
                ]
            },
            Schema(
                [
                    ColSpec(
                        Map(
                            value_type=Object(
                                [
                                    Property("key1", DataType.string),
                                    Property("key2", DataType.long, required=False),
                                ]
                            )
                        ),
                        name="object_in_map",
                    )
                ]
            ),
        ),
        (
            {
                "map_in_array": [
                    [{"a": 3, "b": 4}, {"c": 5}],
                    [],
                    [{"d": 6}],
                ]
            },
            Schema([ColSpec(Array(dtype=Map(value_type=DataType.long)), name="map_in_array")]),
        ),
        (
            {
                "map_in_object": [
                    {"key1": {"a": 3, "b": 4}, "key2": {"c": 5}},
                    {"key1": {"d": 6}},
                ]
            },
            Schema(
                [
                    ColSpec(
                        Object(
                            [
                                Property("key1", Map(value_type=DataType.long)),
                                Property("key2", Map(value_type=DataType.long), required=False),
                            ]
                        ),
                        name="map_in_object",
                    )
                ]
            ),
        ),
    ],
)
@pytest.mark.parametrize("format_key", ["dataframe_split", "dataframe_records"])
def test_pyfunc_model_schema_enforcement_map_type(data, schema, format_key):
    """Map-typed columns (including nested maps, arrays-in-maps, objects-in-
    maps and maps-in-objects) survive schema enforcement both in local predict
    and through the REST scoring server."""

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    df = pd.DataFrame.from_records(data)
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            signature=ModelSignature(inputs=schema, outputs=schema),
        )

    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    prediction = loaded_model.predict(df)
    pd.testing.assert_frame_equal(prediction, df)

    if format_key == "dataframe_split":
        payload = {format_key: df.to_dict(orient="split")}
    elif format_key == "dataframe_records":
        payload = {format_key: df.to_dict(orient="records")}

    # json.dumps cannot serialize numpy integers produced by the DataFrame
    # round-trip, so coerce them back to plain ints here.
    class CustomJsonEncoder(json.JSONEncoder):
        def default(self, o):
            import numpy as np

            if isinstance(o, np.int64):
                return int(o)
            return super().default(o)

    response = pyfunc_serve_and_score_model(
        model_info.model_uri,
        data=json.dumps(payload, cls=CustomJsonEncoder),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200, response.content
    result = json.loads(response.content.decode("utf-8"))["predictions"]
    expected_result = df.to_dict(orient="records")
    np.testing.assert_equal(result, expected_result)
@pytest.mark.parametrize(
    ("data", "schema"),
    [
        (
            [
                {
                    "object_column": {"query": ["sentence_1", "sentence_2"], "table": "some_table"},
                    "string_column": "some_string",
                    "array_column": [{"name": "value"}, {"name": "value"}],
                },
                {
                    "object_column": {"query": ["sentence_1", "sentence_2"]},
                    "string_column": "some_string",
                    "array_column": [{"name": "value"}],
                },
            ],
            Schema(
                [
                    ColSpec(
                        Object(
                            [
                                Property("query", Array(DataType.string)),
                                Property("table", DataType.string, required=False),
                            ]
                        ),
                        "object_column",
                    ),
                    ColSpec(DataType.string, "string_column"),
                    ColSpec(
                        Array(Object([Property("name", DataType.string)])),
                        "array_column",
                    ),
                ]
            ),
        ),
    ],
)
@pytest.mark.parametrize("format_key", ["inputs", "dataframe_split", "dataframe_records"])
def test_pyfunc_model_schema_enforcement_complex(data, schema, format_key):
    """Mixed Object + string + Array(Object) columns: inference matches the
    expected schema, enforcement preserves the frame, and all three REST
    payload formats score identically."""

    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input

    df = pd.DataFrame.from_records(data)
    signature = infer_signature(df)
    assert signature.inputs == schema

    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            signature=signature,
        )

    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    prediction = loaded_model.predict(df)
    pd.testing.assert_frame_equal(prediction, df)

    if format_key == "inputs":
        payload = {format_key: data}
    elif format_key == "dataframe_split":
        payload = {format_key: df.to_dict(orient="split")}
    elif format_key == "dataframe_records":
        payload = {format_key: df.to_dict(orient="records")}
    response = pyfunc_serve_and_score_model(
        model_info.model_uri,
        data=json.dumps(payload),
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200, response.content
    result = json.loads(response.content.decode("utf-8"))["predictions"]
    expected_result = df.to_dict(orient="records")
    np.testing.assert_equal(result, expected_result)
def test_zero_or_one_longs_convert_to_floats():
    """Integer-looking values (0, 1) in a double column are coerced to floats,
    and a missing optional value becomes NaN."""
    raw = pd.DataFrame([{"temperature": 0}, {"temperature": 0.9}, {"temperature": 1}, {}])
    double_schema = Schema([ColSpec(DataType.double, name="temperature", required=False)])
    enforced = _enforce_schema(raw, double_schema)
    expected = pd.Series([0.0, 0.9, 1.0, np.nan], dtype=np.float64)
    pd.testing.assert_series_equal(enforced["temperature"], expected, check_names=False)
# Each case: (input_example used to infer the signature, the schema expected
# from inference, payload_example exercised at predict/serve time).
@pytest.mark.parametrize(
    ("input_example", "expected_schema", "payload_example"),
    [
        ({"a": None}, Schema([ColSpec(type=AnyType(), name="a", required=False)]), {"a": "string"}),
        (
            {"a": [None, []]},
            Schema([ColSpec(Array(AnyType()), name="a", required=False)]),
            {"a": ["abc", "123"]},
        ),
        (
            {"a": [None]},
            Schema([ColSpec(type=Array(AnyType()), name="a", required=False)]),
            {"a": ["abc"]},
        ),
        (
            {"a": [None, "string"]},
            Schema([ColSpec(type=Array(DataType.string), name="a", required=False)]),
            {"a": ["abc"]},
        ),
        (
            {"a": {"x": None}},
            Schema([ColSpec(type=Object([Property("x", AnyType(), required=False)]), name="a")]),
            {"a": {"x": 234}},
        ),
        (
            [
                {
                    "messages": [
                        {
                            "content": "You are a helpful assistant.",
                            "additional_kwargs": {},
                            "response_metadata": {},
                            "type": "system",
                            "name": None,
                            "id": None,
                        },
                        {
                            "content": "What would you like to ask?",
                            "additional_kwargs": {},
                            "response_metadata": {},
                            "type": "ai",
                            "name": None,
                            "id": None,
                            "example": False,
                            "tool_calls": [],
                            "invalid_tool_calls": [],
                            "usage_metadata": None,
                        },
                        {
                            "content": "Who owns MLflow?",
                            "additional_kwargs": {},
                            "response_metadata": {},
                            "type": "human",
                            "name": None,
                            "id": None,
                            "example": False,
                        },
                    ],
                    "text": "Hello?",
                }
            ],
            Schema(
                [
                    ColSpec(
                        Array(
                            Object(
                                properties=[
                                    Property("content", DataType.string),
                                    Property("additional_kwargs", AnyType(), required=False),
                                    Property("response_metadata", AnyType(), required=False),
                                    Property("type", DataType.string),
                                    Property("name", AnyType(), required=False),
                                    Property("id", AnyType(), required=False),
                                    Property("example", DataType.boolean, required=False),
                                    Property("tool_calls", AnyType(), required=False),
                                    Property("invalid_tool_calls", AnyType(), required=False),
                                    Property("usage_metadata", AnyType(), required=False),
                                ]
                            )
                        ),
                        name="messages",
                    ),
                    ColSpec(DataType.string, name="text"),
                ]
            ),
            [
                {
                    "messages": [
                        {
                            "content": "You are a helpful assistant.",
                            "additional_kwargs": {"x": "x"},
                            "response_metadata": {"y": "y"},
                            "type": "system",
                            "name": "test",
                            "id": 1234567,
                            "tool_calls": [{"tool1": "abc"}],
                            "invalid_tool_calls": ["tool2", "tool3"],
                        },
                    ],
                    "text": "Hello?",
                }
            ],
        ),
    ],
)
def test_schema_enforcement_for_anytype(input_example, expected_schema, payload_example):
    """Columns inferred as AnyType (from None-bearing examples) accept arbitrary values.

    Logs an identity pyfunc model with a signature inferred from
    *input_example*, then round-trips *payload_example* through both
    in-process predict and the scoring server.
    """
    class MyModel(mlflow.pyfunc.PythonModel):
        def predict(self, context, model_input, params=None):
            return model_input
    with mlflow.start_run():
        model_info = mlflow.pyfunc.log_model(
            name="test_model",
            python_model=MyModel(),
            input_example=input_example,
        )
    # Signature inference is the behavior under test.
    assert model_info.signature.inputs == expected_schema
    loaded_model = mlflow.pyfunc.load_model(model_info.model_uri)
    prediction = loaded_model.predict(payload_example)
    df = (
        pd.DataFrame(payload_example)
        if isinstance(payload_example, list)
        else pd.DataFrame([payload_example])
    )
    pd.testing.assert_frame_equal(prediction, df)
    data = convert_input_example_to_serving_input(payload_example)
    response = pyfunc_serve_and_score_model(
        model_info.model_uri,
        data=data,
        content_type=pyfunc_scoring_server.CONTENT_TYPE_JSON,
        extra_args=["--env-manager", "local"],
    )
    assert response.status_code == 200, response.content
    result = json.loads(response.content.decode("utf-8"))["predictions"]
    expected_result = df.to_dict(orient="records")
    np.testing.assert_equal(result, expected_result)
| PythonModelWithArrayParams |
python | pytorch__pytorch | .github/scripts/trymerge.py | {
"start": 47707,
"end": 91249
} | class ____:
    # NOTE(review): the line above is fused with dataset-record metadata ("} | ");
    # the class itself is the MergeRule record, instantiated from the YAML at
    # MERGE_RULE_PATH (see read_merge_rules, which does MergeRule(**x)).
    name: str  # human-readable rule name
    patterns: list[str]  # file-path patterns the rule applies to
    approved_by: list[str]  # users or "org/team" handles allowed to approve
    mandatory_checks_name: Optional[list[str]]  # checks that must pass; None means no extra checks
    ignore_flaky_failures: bool = True  # tolerate known flaky/broken-trunk failures
def gen_new_issue_link(
    org: str, project: str, labels: list[str], template: str = "bug-report.yml"
) -> str:
    """Build a GitHub "new issue" URL pre-filled with *labels* and *template*."""
    quoted_labels = urllib.parse.quote(",".join(labels))
    quoted_template = urllib.parse.quote(template)
    base = f"https://github.com/{org}/{project}/issues/new"
    return f"{base}?labels={quoted_labels}&template={quoted_template}"
def read_merge_rules(
    repo: Optional[GitRepo], org: str, project: str
) -> list[MergeRule]:
    """Returns the list of all merge rules for the repo or project.

    When *repo* is None the rules file is fetched via the GitHub contents
    API; otherwise it is read from the local checkout.  A missing local
    rules file yields an empty list rather than an error.

    NB: this function is used in Meta-internal workflows, see the comment
    at the top of this file for details.
    """
    repo_relative_rules_path = MERGE_RULE_PATH
    if repo is None:
        # No local checkout available: go through the GitHub API.
        json_data = gh_fetch_url(
            f"https://api.github.com/repos/{org}/{project}/contents/{repo_relative_rules_path}",
            headers={"Accept": "application/vnd.github.v3+json"},
            reader=json.load,
        )
        # The contents API returns the file body base64-encoded.
        content = base64.b64decode(json_data["content"])
        return [MergeRule(**x) for x in yaml.safe_load(content)]
    else:
        rules_path = Path(repo.repo_dir) / repo_relative_rules_path
        if not rules_path.exists():
            print(f"{rules_path} does not exist, returning empty rules")
            return []
        with open(rules_path) as fp:
            rc = yaml.safe_load(fp)
        return [MergeRule(**x) for x in rc]
def find_matching_merge_rule(
    pr: GitHubPR,
    repo: Optional[GitRepo] = None,
    skip_mandatory_checks: bool = False,
    skip_internal_checks: bool = False,
    ignore_current_checks: Optional[list[str]] = None,
) -> tuple[
    MergeRule,
    list[tuple[str, Optional[str], Optional[int]]],
    list[tuple[str, Optional[str], Optional[int]]],
    dict[str, list[Any]],
]:
    """
    Returns merge rule matching to this pr together with the list of associated pending
    and failing jobs OR raises an exception.

    Args:
        pr: the pull request being merged.
        repo: local checkout used to read the merge rules; None falls back to the API.
        skip_mandatory_checks: force-merge mode; mandatory checks other than CLA are ignored.
        skip_internal_checks: do not block on Meta-internal changes.
        ignore_current_checks: names of checks whose current failures should be ignored.

    Returns:
        (matched rule, pending checks, failed checks, ignorable-check
        categorization); the last three are computed with no required-check
        filter so they can be recorded in the s3 merge record.

    Raises:
        MandatoryChecksMissingError: the closest rule only lacks pending checks.
        MergeRuleFailedError: no rule matched; carries the closest rule.

    NB: this function is used in Meta-internal workflows, see the comment at the top of
    this file for details.
    """
    changed_files = pr.get_changed_files()
    approved_by = set(pr.get_approved_by())
    issue_link = gen_new_issue_link(
        org=pr.org,
        project=pr.project,
        labels=["module: ci"],
    )
    reject_reason = f"No rule found to match PR. Please [report]{issue_link} this issue to DevX team."
    rules = read_merge_rules(repo, pr.org, pr.project)
    if not rules:
        reject_reason = f"Rejecting the merge as no rules are defined for the repository in {MERGE_RULE_PATH}"
        raise RuntimeError(reject_reason)
    checks = pr.get_checkrun_conclusions()
    checks = get_classifications(
        pr.pr_num,
        pr.project,
        checks,
        ignore_current_checks=ignore_current_checks,
    )
    # This keeps the list of all approvers that could stamp the change
    all_rule_approvers = {}
    # PRs can fail multiple merge rules, but it only needs to pass one rule to be approved.
    # If it fails all rules, we need to find the rule that it came closest to passing and report
    # that to the dev.
    #
    # reject_reason_score ranks rules by relevancy. The higher the score, the more relevant the
    # rule & rejection reason, and we only care about the most relevant rule/reason
    #
    # reject_reason_score intrepretation:
    # Score 0 to 10K - how many files rule matched
    # Score 10K - matched all files, but no overlapping approvers
    # Score 20K - matched all files and approvers, but mandatory checks are pending
    # Score 30k - Matched all files and approvers, but mandatory checks failed
    reject_reason_score = 0
    for rule in rules:
        rule_name = rule.name
        patterns_re = patterns_to_regex(rule.patterns)
        non_matching_files = []
        # Does this rule apply to all the files?
        for fname in changed_files:
            if not patterns_re.match(fname):
                non_matching_files.append(fname)
        if len(non_matching_files) > 0:
            num_matching_files = len(changed_files) - len(non_matching_files)
            if num_matching_files > reject_reason_score:
                reject_reason_score = num_matching_files
                reject_reason = "\n".join(
                    (
                        f"Not all files match rule `{rule_name}`.",
                        f"{num_matching_files} files matched, but there are still non-matching files:",
                        f"{','.join(non_matching_files[:5])}{', ...' if len(non_matching_files) > 5 else ''}",
                    )
                )
            continue
        # If rule needs approvers but PR has not been reviewed, skip it
        if len(rule.approved_by) > 0 and len(approved_by) == 0:
            if reject_reason_score < 10000:
                reject_reason_score = 10000
                reject_reason = f"PR #{pr.pr_num} has not been reviewed yet"
            continue
        # Does the PR have the required approvals for this rule?
        rule_approvers = set()
        for approver in rule.approved_by:
            if "/" in approver:
                # "org/team" entries expand to the team's member list.
                org, name = approver.split("/")
                rule_approvers.update(gh_get_team_members(org, name))
            else:
                rule_approvers.add(approver)
        approvers_intersection = approved_by.intersection(rule_approvers)
        # If rule requires approvers but they aren't the ones that reviewed PR
        if len(approvers_intersection) == 0 and len(rule_approvers) > 0:
            # Less than or equal is intentionally used here to gather all potential
            # approvers
            if reject_reason_score <= 10000:
                reject_reason_score = 10000
                all_rule_approvers[rule.name] = rule.approved_by
                # Prepare the reject reason
                all_rule_approvers_msg = [
                    f"- {name} ({', '.join(approved_by[:5])}{', ...' if len(approved_by) > 5 else ''})"
                    for name, approved_by in all_rule_approvers.items()
                ]
                reject_reason = "Approvers from one of the following sets are needed:\n"
                reject_reason += "\n".join(all_rule_approvers_msg)
            continue
        # Does the PR pass the checks required by this rule?
        mandatory_checks = (
            rule.mandatory_checks_name if rule.mandatory_checks_name is not None else []
        )
        # CLA checks can never be skipped, even in force-merge mode.
        required_checks = list(
            filter(
                lambda x: ("EasyCLA" in x)
                or ("Facebook CLA Check" in x)
                or not skip_mandatory_checks,
                mandatory_checks,
            )
        )
        pending_checks, failed_checks, _ = categorize_checks(
            checks,
            required_checks,
            ok_failed_checks_threshold=IGNORABLE_FAILED_CHECKS_THESHOLD
            if rule.ignore_flaky_failures
            else 0,
        )
        # categorize_checks assumes all tests are required if required_checks is empty.
        # this is a workaround as we want to keep that behavior for categorize_checks
        # generally.
        if not required_checks:
            pending_checks = []
            failed_checks = []
        hud_link = f"https://hud.pytorch.org/{pr.org}/{pr.project}/commit/{pr.last_commit_sha()}"
        if len(failed_checks) > 0:
            if reject_reason_score < 30000:
                reject_reason_score = 30000
                reject_reason = "\n".join(
                    (
                        f"{len(failed_checks)} mandatory check(s) failed. The first few are:",
                        *checks_to_markdown_bullets(failed_checks),
                        "",
                        f"Dig deeper by [viewing the failures on hud]({hud_link})",
                    )
                )
            continue
        elif len(pending_checks) > 0:
            if reject_reason_score < 20000:
                reject_reason_score = 20000
                reject_reason = "\n".join(
                    (
                        f"{len(pending_checks)} mandatory check(s) are pending/not yet run. The first few are:",
                        *checks_to_markdown_bullets(pending_checks),
                        "",
                        f"Dig deeper by [viewing the pending checks on hud]({hud_link})",
                    )
                )
            continue
        if not skip_internal_checks and pr.has_internal_changes():
            raise RuntimeError(
                "This PR has internal changes and must be landed via Phabricator! Please try reimporting/rexporting the PR!"
            )
        # Categorize all checks when skip_mandatory_checks (force merge) is set. Do it here
        # where the list of checks is readily available. These records will be saved into
        # s3 merge records
        (
            pending_mandatory_checks,
            failed_mandatory_checks,
            ignorable_checks,
        ) = categorize_checks(
            checks,
            [],
            ok_failed_checks_threshold=IGNORABLE_FAILED_CHECKS_THESHOLD,
        )
        return (
            rule,
            pending_mandatory_checks,
            failed_mandatory_checks,
            ignorable_checks,
        )
    if reject_reason_score == 20000:
        raise MandatoryChecksMissingError(reject_reason, rule)
    raise MergeRuleFailedError(reject_reason, rule)
def checks_to_str(checks: list[tuple[str, Optional[str]]]) -> str:
    """Render (name, url) check pairs as a comma-separated list, linking when a url exists."""
    parts = []
    for check in checks:
        label, link = check[0], check[1]
        parts.append(f"[{label}]({link})" if link is not None else label)
    return ", ".join(parts)
def checks_to_markdown_bullets(
    checks: list[tuple[str, Optional[str], Optional[int]]],
) -> list[str]:
    """Format up to five (name, url, job_id) checks as markdown bullet lines."""
    bullets = []
    for check in checks[:5]:
        label, link = check[0], check[1]
        bullets.append(f"- [{label}]({link})" if link is not None else f"- {label}")
    return bullets
def post_starting_merge_comment(
    repo: GitRepo,
    pr: GitHubPR,
    explainer: TryMergeExplainer,
    dry_run: bool,
    ignore_current_checks_info: Optional[
        list[tuple[str, Optional[str], Optional[int]]]
    ] = None,
) -> None:
    """Post the initial merge starting message on the PR. Also post a short
    message on all PRs in the stack.

    *ignore_current_checks_info* lists checks being ignored via --ignore-current,
    so the explainer can mention them in the message.
    """
    gh_post_pr_comment(
        pr.org,
        pr.project,
        pr.pr_num,
        explainer.get_merge_message(ignore_current_checks_info),
        dry_run=dry_run,
    )
    # For ghstack PRs, let every other PR in the stack know its merge is driven
    # from this one.
    if pr.is_ghstack_pr():
        for additional_prs, _ in get_ghstack_prs(repo, pr):
            if additional_prs.pr_num != pr.pr_num:
                gh_post_pr_comment(
                    additional_prs.org,
                    additional_prs.project,
                    additional_prs.pr_num,
                    f"Starting merge as part of PR stack under #{pr.pr_num}",
                    dry_run=dry_run,
                )
def manually_close_merged_pr(
    pr: GitHubPR,
    additional_merged_prs: list[GitHubPR],
    merge_commit_sha: str,
    dry_run: bool,
) -> None:
    """Close PRs that GitHub failed to auto-close after their merge commit landed.

    Comments on and closes *pr* plus every PR in *additional_merged_prs* that
    is still open, pointing at *merge_commit_sha*.
    """
    def _comment_and_close(pr: GitHubPR, comment: str) -> None:
        pr = GitHubPR(pr.org, pr.project, pr.pr_num)  # Refresh the PR
        if not pr.is_closed():
            gh_post_pr_comment(pr.org, pr.project, pr.pr_num, comment, dry_run)
            gh_close_pr(pr.org, pr.project, pr.pr_num, dry_run)
    message = (
        f"This PR (#{pr.pr_num}) was merged in {merge_commit_sha} but it is still open, likely due to a Github bug, "
        "so mergebot is closing it manually. If you think this is a mistake, please feel free to reopen and contact Dev Infra."
    )
    _comment_and_close(pr, message)
    for additional_pr in additional_merged_prs:
        message = (
            f"This PR (#{additional_pr.pr_num}) was merged as part of PR #{pr.pr_num} in the stack under {merge_commit_sha} "
            "but it is still open, likely due to a Github bug, so mergebot is closing it manually. "
            "If you think this is a mistake, please feel free to reopen and contact Dev Infra."
        )
        _comment_and_close(additional_pr, message)
    print(f"PR {pr.pr_num} and all additional PRs in the stack have been closed.")
@retries_decorator()
def save_merge_record(
    comment_id: int,
    pr_num: int,
    owner: str,
    project: str,
    author: str,
    pending_checks: list[tuple[str, Optional[str], Optional[int]]],
    failed_checks: list[tuple[str, Optional[str], Optional[int]]],
    ignore_current_checks: list[tuple[str, Optional[str], Optional[int]]],
    broken_trunk_checks: list[tuple[str, Optional[str], Optional[int]]],
    flaky_checks: list[tuple[str, Optional[str], Optional[int]]],
    unstable_checks: list[tuple[str, Optional[str], Optional[int]]],
    last_commit_sha: str,
    merge_base_sha: str,
    merge_commit_sha: str = "",
    is_failed: bool = False,
    skip_mandatory_checks: bool = False,
    ignore_current: bool = False,
    error: str = "",
) -> None:
    """
    This saves the merge records as a json, which can later be uploaded to s3

    The record is written to ``merge_record.json`` at the repository root; a
    separate workflow step presumably performs the actual upload (TODO confirm).
    """
    # Prepare the record to be written into s3
    data = [
        {
            "comment_id": comment_id,
            "pr_num": pr_num,
            "owner": owner,
            "project": project,
            "author": author,
            "pending_checks": pending_checks,
            "failed_checks": failed_checks,
            "ignore_current_checks": ignore_current_checks,
            "broken_trunk_checks": broken_trunk_checks,
            "flaky_checks": flaky_checks,
            "unstable_checks": unstable_checks,
            "last_commit_sha": last_commit_sha,
            "merge_base_sha": merge_base_sha,
            "merge_commit_sha": merge_commit_sha,
            "is_failed": is_failed,
            "skip_mandatory_checks": skip_mandatory_checks,
            "ignore_current": ignore_current,
            "error": error,
            # This is a unique identifier for the record for deduping purposes
            # in Rockset. Any unique string would work. This will not be used
            # after we migrate off Rockset
            "_id": f"{project}-{pr_num}-{comment_id}-{os.environ.get('GITHUB_RUN_ID')}",
        }
    ]
    # This script lives in .github/scripts/, so the repo root is three levels up.
    repo_root = Path(__file__).resolve().parent.parent.parent
    with open(repo_root / "merge_record.json", "w") as f:
        json.dump(data, f)
@retries_decorator()
def get_drci_classifications(pr_num: int, project: str = "pytorch") -> Any:
    """
    Query HUD API to find similar failures to decide if they are flaky

    Returns the classification dict for *pr_num*, or an empty dict when the
    API returns nothing for this PR.
    """
    # NB: This doesn't work internally atm because this requires making an
    # external API call to HUD
    failures = gh_fetch_url(
        f"https://hud.pytorch.org/api/drci/drci?prNumber={pr_num}",
        data=f"repo={project}",
        headers={
            "Authorization": os.getenv("DRCI_BOT_KEY", ""),
            "Accept": "application/vnd.github.v3+json",
        },
        method="POST",
        reader=json.load,
    )
    # The endpoint keys its response by PR number (as a string).
    return failures.get(str(pr_num), {}) if failures else {}
# Matches the trailing shard/runner suffix of a job name,
# e.g. ", 1, 3, linux.2xlarge)" in "test (default, 1, 3, linux.2xlarge)".
REMOVE_JOB_NAME_SUFFIX_REGEX = re.compile(r", [0-9]+, [0-9]+, .+\)$")


def remove_job_name_suffix(name: str, replacement: str = ")") -> str:
    """Strip the shard/runner suffix from a job *name*, substituting *replacement*."""
    return REMOVE_JOB_NAME_SUFFIX_REGEX.sub(replacement, name)
def is_broken_trunk(
    check: JobCheckState,
    drci_classifications: Any,
) -> bool:
    """Return True when Dr.CI lists this check among its broken-trunk failures."""
    if not check or not drci_classifications:
        return False
    # A match on either the job name or a truthy job id counts.
    for entry in drci_classifications.get("BROKEN_TRUNK", []):
        if check.name == entry["name"] or (check.job_id and check.job_id == entry["id"]):
            return True
    return False
def is_unstable(
    check: JobCheckState,
    drci_classifications: Any,
) -> bool:
    """Return True when the job is marked unstable, by name keyword or by Dr.CI."""
    if not check or not drci_classifications:
        return False
    # The "unstable" keyword in the job name is the original way a job is
    # marked unstable on HUD, Dr.CI, and trymerge.
    if "unstable" in check.name:
        return True
    # Otherwise consult Dr.CI's UNSTABLE classification list.
    for entry in drci_classifications.get("UNSTABLE", []):
        if check.name == entry["name"] or (check.job_id and check.job_id == entry["id"]):
            return True
    return False
def is_flaky(
    check: JobCheckState,
    drci_classifications: Any,
) -> bool:
    """Return True when Dr.CI lists this check among its flaky failures."""
    if not check or not drci_classifications:
        return False
    # A match on either the job name or a truthy job id counts.
    for entry in drci_classifications.get("FLAKY", []):
        if check.name == entry["name"] or (check.job_id and check.job_id == entry["id"]):
            return True
    return False
def is_invalid_cancel(
    name: str,
    conclusion: Optional[str],
    drci_classifications: Any,
) -> bool:
    """Return True for CANCELLED jobs that Dr.CI does not list as real failures.

    After https://github.com/pytorch/test-infra/pull/4579, invalid cancelled
    signals have been removed from HUD and Dr.CI. The same needs to be done
    here for consistency
    """
    if not name or not drci_classifications or not conclusion:
        return False
    if conclusion.upper() != "CANCELLED":
        return False
    # Cancelled but absent from Dr.CI's FAILED list -> invalid signal, ignorable.
    failed = drci_classifications.get("FAILED", [])
    return all(name != failure["name"] for failure in failed)
def get_classifications(
    pr_num: int,
    project: str,
    checks: dict[str, JobCheckState],
    ignore_current_checks: Optional[list[str]],
) -> dict[str, JobCheckState]:
    """Annotate non-passing checks with a Dr.CI classification.

    Returns a copy of *checks* where each failing entry may gain one of
    UNSTABLE, BROKEN_TRUNK, FLAKY, INVALID_CANCEL, or IGNORE_CURRENT_CHECK
    (in that precedence order); SUCCESS/NEUTRAL checks are left untouched.
    """
    # Get the failure classification from Dr.CI, which is the source of truth
    # going forward. It's preferable to try calling Dr.CI API directly first
    # to get the latest results as well as update Dr.CI PR comment
    drci_classifications = get_drci_classifications(pr_num=pr_num, project=project)
    def get_readable_drci_results(drci_classifications: Any) -> str:
        # Best-effort pretty-printer for logging; falls back to raw JSON.
        try:
            s = f"From Dr.CI API ({pr_num}):\n"
            for classification, jobs in drci_classifications.items():
                s += f"  {classification}: \n"
                for job in jobs:
                    s += f"    {job['id']} {job['name']}\n"
            return s
        except Exception:
            return f"From Dr.CI API: {json.dumps(drci_classifications)}"
    print(get_readable_drci_results(drci_classifications))
    # NB: if the latest results from Dr.CI is not available, i.e. when calling from
    # SandCastle, we fallback to any results we can find on Dr.CI check run summary
    if (
        not drci_classifications
        and DRCI_CHECKRUN_NAME in checks
        and checks[DRCI_CHECKRUN_NAME]
        and checks[DRCI_CHECKRUN_NAME].summary
    ):
        drci_summary = checks[DRCI_CHECKRUN_NAME].summary
        try:
            print(f"From Dr.CI checkrun summary: {drci_summary}")
            drci_classifications = json.loads(str(drci_summary))
        except json.JSONDecodeError:
            warn("Invalid Dr.CI checkrun summary")
            drci_classifications = {}
    checks_with_classifications = checks.copy()
    for name, check in checks.items():
        if check.status == "SUCCESS" or check.status == "NEUTRAL":
            continue
        if is_unstable(check, drci_classifications):
            checks_with_classifications[name] = JobCheckState(
                check.name,
                check.url,
                check.status,
                "UNSTABLE",
                check.job_id,
                check.title,
                check.summary,
            )
            continue
        # NB: It's important to note that when it comes to ghstack and broken trunk classification,
        # Dr.CI uses the base of the whole stack
        if is_broken_trunk(check, drci_classifications):
            checks_with_classifications[name] = JobCheckState(
                check.name,
                check.url,
                check.status,
                "BROKEN_TRUNK",
                check.job_id,
                check.title,
                check.summary,
            )
            continue
        elif is_flaky(check, drci_classifications):
            checks_with_classifications[name] = JobCheckState(
                check.name,
                check.url,
                check.status,
                "FLAKY",
                check.job_id,
                check.title,
                check.summary,
            )
            continue
        elif is_invalid_cancel(name, check.status, drci_classifications):
            # NB: Create a new category here for invalid cancelled signals because
            # there are usually many of them when they happen. So, they shouldn't
            # be counted toward ignorable failures threshold
            checks_with_classifications[name] = JobCheckState(
                check.name,
                check.url,
                check.status,
                "INVALID_CANCEL",
                check.job_id,
                check.title,
                check.summary,
            )
            continue
        if ignore_current_checks is not None and name in ignore_current_checks:
            checks_with_classifications[name] = JobCheckState(
                check.name,
                check.url,
                check.status,
                "IGNORE_CURRENT_CHECK",
                check.job_id,
                check.title,
                check.summary,
            )
    return checks_with_classifications
def filter_checks_with_lambda(
    checks: JobNameToStateDict, status_filter: Callable[[Optional[str]], bool]
) -> list[JobCheckState]:
    """Return the checks whose status satisfies *status_filter*."""
    selected = []
    for check in checks.values():
        if status_filter(check.status):
            selected.append(check)
    return selected
def get_pr_commit_sha(repo: GitRepo, pr: GitHubPR) -> str:
    """Return the sha that landed *pr*, preferring GitHub's merge-commit record."""
    merge_sha = pr.get_merge_commit()
    if merge_sha is not None:
        return merge_sha
    # Fall back to searching local history for commits resolving this PR.
    resolving = repo.commits_resolving_gh_pr(pr.pr_num)
    if not resolving:
        raise PostCommentError("Can't find any commits resolving PR")
    return resolving[0]
def validate_revert(
    repo: GitRepo, pr: GitHubPR, *, comment_id: Optional[int] = None
) -> tuple[str, str]:
    """Validate that the revert command is legitimate.

    Checks that the triggering comment was not edited and that its author's
    association allows reverting, then resolves the commit to revert.

    Returns:
        (author_login, commit_sha) of the revert requester and the PR's commit.

    Raises:
        PostCommentError: when the revert must be refused; the message is
        meant to be posted back on the PR.
    """
    comment = (
        pr.get_last_comment()
        if comment_id is None
        else pr.get_comment_by_id(comment_id)
    )
    # An edited comment could have been changed after permissions were judged.
    if comment.editor_login is not None:
        raise PostCommentError(
            "Halting the revert as the revert comment has been edited."
        )
    author_association = comment.author_association
    author_login = comment.author_login
    allowed_reverters = ["COLLABORATOR", "MEMBER", "OWNER"]
    # For some reason, one can not be a member of private repo, only CONTRIBUTOR
    if pr.is_base_repo_private():
        allowed_reverters.append("CONTRIBUTOR")
    # Special case the pytorch-auto-revert app, which does not have an
    # association but should be able to issue revert commands
    if comment.author_url == "https://github.com/apps/pytorch-auto-revert":
        allowed_reverters.append("NONE")
    if author_association not in allowed_reverters:
        raise PostCommentError(
            f"Will not revert as @{author_login} is not one of "
            f"[{', '.join(allowed_reverters)}], but instead is {author_association}."
        )
    commit_sha = get_pr_commit_sha(repo, pr)
    return (author_login, commit_sha)
def get_ghstack_dependent_prs(
    repo: GitRepo, pr: GitHubPR, only_closed: bool = True
) -> list[tuple[str, GitHubPR]]:
    """
    Get the PRs in the stack that are above this PR (inclusive).
    Throws error if stack have branched or original branches are gone

    Returns (commit_sha, PR) pairs; when *only_closed* is False, still-open
    PRs are included with an empty sha.
    """
    assert pr.is_ghstack_pr()
    orig_ref = f"{repo.remote}/{pr.get_ghstack_orig_ref()}"
    rev_list = repo.revlist(f"{pr.default_branch()}..{orig_ref}")
    if len(rev_list) == 0:
        raise RuntimeError(
            f"PR {pr.pr_num} does not have any revisions associated with it"
        )
    # Number of commits below this PR in the stack (to be dropped later).
    skip_len = len(rev_list) - 1
    for branch in repo.branches_containing_ref(orig_ref):
        candidate = repo.revlist(f"{pr.default_branch()}..{branch}")
        # Pick longest candidate
        if len(candidate) > len(rev_list):
            candidate, rev_list = rev_list, candidate
        # Validate that candidate always ends rev-list
        if rev_list[-len(candidate) :] != candidate:
            raise RuntimeError(
                f"Branch {branch} revlist {', '.join(candidate)} is not a subset of {', '.join(rev_list)}"
            )
    # Remove commits original PR depends on
    if skip_len > 0:
        rev_list = rev_list[:-skip_len]
    rc: list[tuple[str, GitHubPR]] = []
    for pr_, sha in _revlist_to_prs(repo, pr, rev_list):
        if not pr_.is_closed():
            if not only_closed:
                rc.append(("", pr_))
            continue
        commit_sha = get_pr_commit_sha(repo, pr_)
        rc.append((commit_sha, pr_))
    return rc
def do_revert_prs(
    repo: GitRepo,
    original_pr: GitHubPR,
    shas_and_prs: list[tuple[str, GitHubPR]],
    *,
    author_login: str,
    extra_msg: str = "",
    skip_internal_checks: bool = False,
    dry_run: bool = False,
) -> None:
    """Revert the given (commit_sha, PR) pairs and annotate the affected PRs.

    First creates one revert commit per entry and pushes them in a single
    branch update, then comments on, labels, and reopens each PR.
    """
    # Prepare and push revert commits
    for commit_sha, pr in shas_and_prs:
        revert_msg = f"\nReverted {pr.get_pr_url()} on behalf of {prefix_with_github_url(author_login)}"
        revert_msg += extra_msg
        repo.checkout(pr.default_branch())
        repo.revert(commit_sha)
        msg = repo.commit_message("HEAD")
        # Strip the "Pull Request resolved" trailer so GitHub does not re-link it.
        msg = re.sub(RE_PULL_REQUEST_RESOLVED, "", msg)
        msg += revert_msg
        repo.amend_commit_message(msg)
    repo.push(shas_and_prs[0][1].default_branch(), dry_run)
    # Comment/reopen PRs
    for commit_sha, pr in shas_and_prs:
        revert_message = ""
        if pr.pr_num == original_pr.pr_num:
            revert_message += (
                f"@{pr.get_pr_creator_login()} your PR has been successfully reverted."
            )
        else:
            revert_message += (
                f"@{pr.get_pr_creator_login()} your PR has been reverted as part of the stack under "
                f"#{original_pr.pr_num}.\n"
            )
        if (
            pr.has_internal_changes()
            and not pr.has_no_connected_diff()
            and not skip_internal_checks
        ):
            revert_message += "\n:warning: This PR might contain internal changes"
            revert_message += "\ncc: @pytorch/pytorch-dev-infra"
        gh_post_pr_comment(
            pr.org, pr.project, pr.pr_num, revert_message, dry_run=dry_run
        )
        pr.add_numbered_label("reverted", dry_run)
        pr.add_label("ci-no-td", dry_run)
        if not dry_run:
            # NOTE(review): revert_msg here still holds the value from the LAST
            # iteration of the first loop above — confirm this is intended
            # rather than a per-PR message.
            gh_post_commit_comment(pr.org, pr.project, commit_sha, revert_msg)
            gh_update_pr_state(pr.org, pr.project, pr.pr_num)
def try_revert(
    repo: GitRepo,
    pr: GitHubPR,
    *,
    dry_run: bool = False,
    comment_id: Optional[int] = None,
    reason: Optional[str] = None,
) -> None:
    """Validate and execute a revert request for *pr*.

    On a permission/validation failure the error is posted back on the PR
    instead of raised.  For ghstack PRs the whole dependent stack is
    reverted when it can be resolved; otherwise only *pr* is reverted.
    """
    try:
        author_login, commit_sha = validate_revert(repo, pr, comment_id=comment_id)
    except PostCommentError as e:
        # Surface validation failures to the PR author rather than crashing.
        gh_post_pr_comment(pr.org, pr.project, pr.pr_num, str(e), dry_run=dry_run)
        return
    extra_msg = f" due to {reason}" if reason is not None else ""
    extra_msg += (
        f" ([comment]({pr.get_comment_by_id(comment_id).url}))\n"
        if comment_id is not None
        else "\n"
    )
    shas_and_prs = [(commit_sha, pr)]
    if pr.is_ghstack_pr():
        try:
            shas_and_prs = get_ghstack_dependent_prs(repo, pr)
            prs_to_revert = " ".join([t[1].get_pr_url() for t in shas_and_prs])
            # Fixed log message: the original read "About to stack of PRs".
            print(f"About to revert the stack of PRs: {prs_to_revert}")
        except Exception as e:
            print(
                f"Failed to fetch dependent PRs: {str(e)}, fall over to single revert"
            )
    do_revert_prs(
        repo,
        pr,
        shas_and_prs,
        author_login=author_login,
        extra_msg=extra_msg,
        dry_run=dry_run,
        skip_internal_checks=can_skip_internal_checks(pr, comment_id),
    )
def prefix_with_github_url(suffix_str: str) -> str:
    """Prepend the GitHub host to *suffix_str* (e.g. an "org/name" handle)."""
    return "https://github.com/" + suffix_str
def check_for_sev(org: str, project: str, skip_mandatory_checks: bool) -> None:
    """Abort merging when an open merge-blocking SEV issue exists.

    Searches ``org/project`` for open issues carrying both the "ci: sev" and
    "merge blocking" labels and raises RuntimeError pointing at the first
    one found.  A no-op in force-merge mode (*skip_mandatory_checks*).
    """
    if skip_mandatory_checks:
        return
    response = cast(
        dict[str, Any],
        gh_fetch_json_list(
            "https://api.github.com/search/issues",  # @lint-ignore
            # Having two label: queries is an AND operation
            params={
                "q": f'repo:{org}/{project} is:open is:issue label:"ci: sev" label:"merge blocking"'
            },
        ),
    )
    if response["total_count"] != 0:
        raise RuntimeError(
            "Not merging any PRs at the moment because there is a "
            + "merge blocking https://github.com/pytorch/pytorch/labels/ci:%20sev issue open at: \n"
            + f"{response['items'][0]['html_url']}"
        )
    # (removed a redundant trailing bare `return` — it was a no-op)
def has_label(labels: list[str], pattern: Pattern[str] = CIFLOW_LABEL) -> bool:
    """Return True when any entry of *labels* matches *pattern* (ciflow by default)."""
    return any(pattern.match(label) for label in labels)
def categorize_checks(
    check_runs: JobNameToStateDict,
    required_checks: list[str],
    ok_failed_checks_threshold: Optional[int] = None,
) -> tuple[
    list[tuple[str, Optional[str], Optional[int]]],
    list[tuple[str, Optional[str], Optional[int]]],
    dict[str, list[Any]],
]:
    """
    Categories all jobs into the list of pending and failing jobs. All known flaky
    failures and broken trunk are ignored by defaults when ok_failed_checks_threshold
    is not set (unlimited)

    Returns (pending, failed, ignorable-failure categorization); each check is
    reported as a (name, url, job_id) triple.
    """
    pending_checks: list[tuple[str, Optional[str], Optional[int]]] = []
    failed_checks: list[tuple[str, Optional[str], Optional[int]]] = []
    # failed_checks_categorization is used to keep track of all ignorable failures when saving the merge record on s3
    failed_checks_categorization: dict[str, list[Any]] = defaultdict(list)
    # If required_checks is not set or empty, consider all names are relevant
    relevant_checknames = [
        name
        for name in check_runs
        if not required_checks or any(x in name for x in required_checks)
    ]
    # A required check with no matching run at all counts as pending.
    for checkname in required_checks:
        if all(checkname not in x for x in check_runs):
            pending_checks.append((checkname, None, None))
    for checkname in relevant_checknames:
        status = check_runs[checkname].status
        url = check_runs[checkname].url
        classification = check_runs[checkname].classification
        job_id = check_runs[checkname].job_id
        if status is None and classification != "UNSTABLE":
            # NB: No need to wait if the job classification is unstable as it would be
            # ignored anyway. This is useful to not need to wait for scarce resources
            # like ROCm, which is also frequently in unstable mode
            pending_checks.append((checkname, url, job_id))
        elif classification == "INVALID_CANCEL":
            continue
        elif not is_passing_status(check_runs[checkname].status):
            # Ignorable classifications are tallied separately instead of failing.
            target = (
                failed_checks_categorization[classification]
                if classification
                in ("IGNORE_CURRENT_CHECK", "BROKEN_TRUNK", "FLAKY", "UNSTABLE")
                else failed_checks
            )
            target.append((checkname, url, job_id))
    flaky_or_broken_trunk = (
        failed_checks_categorization["BROKEN_TRUNK"]
        + failed_checks_categorization["FLAKY"]
    )
    if flaky_or_broken_trunk:
        warn(
            f"The following {len(flaky_or_broken_trunk)} checks failed but were likely due flakiness or broken trunk: "
            + ", ".join([x[0] for x in flaky_or_broken_trunk])
            + (
                f" but this is greater than the threshold of {ok_failed_checks_threshold} so merge will fail"
                if ok_failed_checks_threshold is not None
                and len(flaky_or_broken_trunk) > ok_failed_checks_threshold
                else ""
            )
        )
    # Past the threshold, the otherwise-ignorable failures count as real failures.
    if (
        ok_failed_checks_threshold is not None
        and len(flaky_or_broken_trunk) > ok_failed_checks_threshold
    ):
        failed_checks = failed_checks + flaky_or_broken_trunk
    # The list of failed_checks_categorization is returned so that it can be saved into the s3 merge record
    return (pending_checks, failed_checks, failed_checks_categorization)
def merge(
    pr: GitHubPR,
    repo: GitRepo,
    comment_id: int,
    dry_run: bool = False,
    skip_mandatory_checks: bool = False,
    timeout_minutes: int = 400,
    stale_pr_days: int = 3,
    ignore_current: bool = False,
) -> None:
    """Merge *pr* once a merge rule is satisfied, polling every 5 minutes.

    In force-merge mode (*skip_mandatory_checks*) the merge happens
    immediately; otherwise this loops until all relevant checks pass or
    *timeout_minutes* elapses, raising RuntimeError on timeout.

    Fix over the original: the timeout message now has a separator between
    its two sentences (previously "...team.The last exception was...").
    """
    initial_commit_sha = pr.last_commit_sha()
    pr_link = f"https://github.com/{pr.org}/{pr.project}/pull/{pr.pr_num}"
    print(f"Attempting merge of {initial_commit_sha} ({pr_link})")
    if MERGE_IN_PROGRESS_LABEL not in pr.get_labels():
        gh_add_labels(pr.org, pr.project, pr.pr_num, [MERGE_IN_PROGRESS_LABEL], dry_run)
    explainer = TryMergeExplainer(
        skip_mandatory_checks,
        pr.get_labels(),
        pr.pr_num,
        pr.org,
        pr.project,
        ignore_current,
    )
    # probably a bad name, but this is a list of current checks that should be
    # ignored and is toggled by the --ignore-current flag
    ignore_current_checks_info = []
    if pr.is_ghstack_pr():
        get_ghstack_prs(repo, pr)  # raises error if out of sync
    check_for_sev(pr.org, pr.project, skip_mandatory_checks)
    if skip_mandatory_checks:
        # Force merge: comment and merge right away.
        post_starting_merge_comment(repo, pr, explainer, dry_run)
        return pr.merge_into(
            repo,
            dry_run=dry_run,
            skip_mandatory_checks=skip_mandatory_checks,
            comment_id=comment_id,
        )
    # Check for approvals
    find_matching_merge_rule(pr, repo, skip_mandatory_checks=True)
    if not has_required_labels(pr):
        raise RuntimeError(LABEL_ERR_MSG.lstrip(" #"))
    if ignore_current:
        # Snapshot the currently-failing checks so they stay ignored below.
        checks = pr.get_checkrun_conclusions()
        _, failing, _ = categorize_checks(
            checks,
            list(checks.keys()),
            ok_failed_checks_threshold=IGNORABLE_FAILED_CHECKS_THESHOLD,
        )
        ignore_current_checks_info = failing
    post_starting_merge_comment(
        repo,
        pr,
        explainer,
        dry_run,
        ignore_current_checks_info=ignore_current_checks_info,
    )
    start_time = time.time()
    last_exception = ""
    elapsed_time = 0.0
    ignore_current_checks = [
        x[0] for x in ignore_current_checks_info
    ]  # convert to List[str] for convenience
    while elapsed_time < timeout_minutes * 60:
        check_for_sev(pr.org, pr.project, skip_mandatory_checks)
        current_time = time.time()
        elapsed_time = current_time - start_time
        print(
            f"Attempting merge of https://github.com/{pr.org}/{pr.project}/pull/{pr.pr_num} ({elapsed_time / 60} minutes elapsed)"
        )
        # Re-fetch the PR and refuse to proceed if new commits were pushed.
        pr = GitHubPR(pr.org, pr.project, pr.pr_num)
        if initial_commit_sha != pr.last_commit_sha():
            raise RuntimeError(
                "New commits were pushed while merging. Please rerun the merge command."
            )
        try:
            required_checks = []
            failed_rule_message = None
            ignore_flaky_failures = True
            try:
                find_matching_merge_rule(
                    pr, repo, ignore_current_checks=ignore_current_checks
                )
            except MandatoryChecksMissingError as ex:
                # Rule only lacks pending checks: remember what it requires
                # and keep polling until those checks resolve.
                if ex.rule is not None:
                    ignore_flaky_failures = ex.rule.ignore_flaky_failures
                    if ex.rule.mandatory_checks_name is not None:
                        required_checks = ex.rule.mandatory_checks_name
                failed_rule_message = ex
            checks = pr.get_checkrun_conclusions()
            checks = get_classifications(
                pr.pr_num,
                pr.project,
                checks,
                ignore_current_checks=ignore_current_checks,
            )
            pending, failing, _ = categorize_checks(
                checks,
                required_checks + [x for x in checks if x not in required_checks],
                ok_failed_checks_threshold=IGNORABLE_FAILED_CHECKS_THESHOLD
                if ignore_flaky_failures
                else 0,
            )
            # HACK until GitHub will be better about surfacing those
            startup_failures = filter_checks_with_lambda(
                checks, lambda status: status == "STARTUP_FAILURE"
            )
            if len(startup_failures) > 0:
                raise RuntimeError(
                    f"{len(startup_failures)} STARTUP failures reported, please check workflows syntax! "
                    + ", ".join(f"[{x.name}]({x.url})" for x in startup_failures[:5])
                )
            # END of HACK
            if len(failing) > 0:
                raise RuntimeError(
                    f"{len(failing)} jobs have failed, first few of them are: "
                    + ", ".join(f"[{x[0]}]({x[1]})" for x in failing[:5])
                )
            if len(pending) > 0:
                if failed_rule_message is not None:
                    raise failed_rule_message
                else:
                    raise MandatoryChecksMissingError(
                        f"Still waiting for {len(pending)} jobs to finish, "
                        + f"first few of them are: {', '.join(x[0] for x in pending[:5])}"
                    )
            return pr.merge_into(
                repo,
                dry_run=dry_run,
                skip_mandatory_checks=skip_mandatory_checks,
                comment_id=comment_id,
                ignore_current_checks=ignore_current_checks,
            )
        except MandatoryChecksMissingError as ex:
            last_exception = str(ex)
            print(
                f"Merge of https://github.com/{pr.org}/{pr.project}/pull/{pr.pr_num} failed due to: {ex}. Retrying in 5 min",
                flush=True,
            )
            time.sleep(5 * 60)
    # Finally report timeout back
    msg = f"Merged timed out after {timeout_minutes} minutes. Please contact the pytorch_dev_infra team. "
    msg += f"The last exception was: {last_exception}"
    gh_add_labels(pr.org, pr.project, pr.pr_num, ["land-failed"], dry_run)
    raise RuntimeError(msg)
def main() -> None:
args = parse_args()
repo = GitRepo(get_git_repo_dir(), get_git_remote_name())
org, project = repo.gh_owner_and_name()
pr = GitHubPR(org, project, args.pr_num)
def handle_exception(e: Exception, title: str = "Merge failed") -> None:
exception = f"**Reason**: {e}"
failing_rule = None
if isinstance(e, MergeRuleFailedError):
failing_rule = e.rule.name if e.rule else None
internal_debugging = ""
run_url = os.getenv("GH_RUN_URL")
if run_url is not None:
# Hide this behind a collapsed bullet since it's not helpful to most devs
internal_debugging = "\n".join(
line
for line in (
"<details><summary>Details for Dev Infra team</summary>",
f'Raised by <a href="{run_url}">workflow job</a>\n',
f"Failing merge rule: {failing_rule}" if failing_rule else "",
"</details>",
)
if line
) # ignore empty lines during the join
msg = "\n".join((f"## {title}", f"{exception}", "", f"{internal_debugging}"))
gh_post_pr_comment(org, project, args.pr_num, msg, dry_run=args.dry_run)
import traceback
traceback.print_exc()
if args.revert:
try:
gh_post_pr_comment(
org,
project,
args.pr_num,
get_revert_message(org, project, pr.pr_num),
args.dry_run,
)
try_revert(
repo,
pr,
dry_run=args.dry_run,
comment_id=args.comment_id,
reason=args.reason,
)
except Exception as e:
handle_exception(e, f"Reverting PR {args.pr_num} failed")
return
if pr.is_closed():
gh_post_pr_comment(
org,
project,
args.pr_num,
f"Can't merge closed PR #{args.pr_num}",
dry_run=args.dry_run,
)
return
if pr.is_cross_repo() and pr.is_ghstack_pr():
gh_post_pr_comment(
org,
project,
args.pr_num,
"Cross-repo ghstack merges are not supported",
dry_run=args.dry_run,
)
return
if not pr.is_ghstack_pr() and pr.base_ref() != pr.default_branch():
gh_post_pr_comment(
org,
project,
args.pr_num,
f"PR targets {pr.base_ref()} rather than {pr.default_branch()}, refusing merge request",
dry_run=args.dry_run,
)
return
if args.check_mergeability:
if pr.is_ghstack_pr():
get_ghstack_prs(repo, pr) # raises error if out of sync
pr.merge_changes_locally(
repo,
skip_mandatory_checks=True,
skip_all_rule_checks=True,
)
return
if not args.force and pr.has_invalid_submodule_updates():
message = (
f"This PR updates submodules {', '.join(pr.get_changed_submodules())}\n"
)
message += '\nIf those updates are intentional, please add "submodule" keyword to PR title/description.'
gh_post_pr_comment(org, project, args.pr_num, message, dry_run=args.dry_run)
return
try:
# Ensure comment id is set, else fail
if not args.comment_id:
raise ValueError(
"Comment ID is required for merging PRs, please provide it using --comment-id"
)
merge(
pr,
repo,
comment_id=args.comment_id,
dry_run=args.dry_run,
skip_mandatory_checks=args.force,
ignore_current=args.ignore_current,
)
except Exception as e:
handle_exception(e)
if args.comment_id and args.pr_num:
# Finally, upload the record to s3, we don't have access to the
# list of pending and failed checks here, but they are not really
# needed at the moment
save_merge_record(
comment_id=args.comment_id,
pr_num=args.pr_num,
owner=org,
project=project,
author=pr.get_author(),
pending_checks=[],
failed_checks=[],
ignore_current_checks=[],
broken_trunk_checks=[],
flaky_checks=[],
unstable_checks=[],
last_commit_sha=pr.last_commit_sha(default=""),
merge_base_sha=pr.get_merge_base(),
is_failed=True,
skip_mandatory_checks=args.force,
ignore_current=args.ignore_current,
error=str(e),
)
else:
print("Missing comment ID or PR number, couldn't upload to s3")
finally:
if not args.check_mergeability:
gh_remove_label(
org, project, args.pr_num, MERGE_IN_PROGRESS_LABEL, args.dry_run
)
if __name__ == "__main__":
main()
| MergeRule |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/notifications/test_sns.py | {
"start": 1189,
"end": 4640
} | class ____:
def test_class_and_notifier_are_same(self):
assert send_sns_notification is SnsNotifier
@pytest.mark.parametrize(
"aws_conn_id",
[
pytest.param("aws_test_conn_id", id="custom-conn"),
pytest.param(None, id="none-conn"),
pytest.param(NOTSET, id="default-value"),
],
)
@pytest.mark.parametrize(
"region_name",
[
pytest.param("eu-west-2", id="custom-region"),
pytest.param(None, id="no-region"),
pytest.param(NOTSET, id="default-value"),
],
)
def test_parameters_propagate_to_hook(self, aws_conn_id, region_name):
"""Test notifier attributes propagate to SnsHook."""
notifier_kwargs = {}
if aws_conn_id is not NOTSET:
notifier_kwargs["aws_conn_id"] = aws_conn_id
if region_name is not NOTSET:
notifier_kwargs["region_name"] = region_name
notifier = SnsNotifier(**notifier_kwargs, **PUBLISH_KWARGS)
with mock.patch("airflow.providers.amazon.aws.notifications.sns.SnsHook") as mock_hook:
hook = notifier.hook
assert hook is notifier.hook, "Hook property not cached"
mock_hook.assert_called_once_with(
aws_conn_id=(aws_conn_id if aws_conn_id is not NOTSET else "aws_default"),
region_name=(region_name if region_name is not NOTSET else None),
)
# Basic check for notifier
notifier.notify({})
mock_hook.return_value.publish_to_target.assert_called_once_with(**PUBLISH_KWARGS)
@pytest.mark.asyncio
async def test_async_notify(self):
notifier = SnsNotifier(**PUBLISH_KWARGS)
with mock.patch("airflow.providers.amazon.aws.notifications.sns.SnsHook") as mock_hook:
mock_hook.return_value.apublish_to_target = mock.AsyncMock()
await notifier.async_notify({})
mock_hook.return_value.apublish_to_target.assert_called_once_with(**PUBLISH_KWARGS)
def test_sns_notifier_templated(self, create_dag_without_db):
notifier = SnsNotifier(
aws_conn_id="{{ dag.dag_id }}",
target_arn="arn:aws:sns:{{ var_region }}:{{ var_account }}:{{ var_topic }}",
message="I, {{ var_username }}",
subject="{{ var_subject }}",
message_attributes={"foo": "{{ dag.dag_id }}"},
region_name="{{ var_region }}",
)
with mock.patch("airflow.providers.amazon.aws.notifications.sns.SnsHook") as m:
notifier(
{
"dag": create_dag_without_db("test_sns_notifier_templated"),
"var_username": "Robot",
"var_region": "us-west-1",
"var_account": "000000000000",
"var_topic": "AwesomeTopic",
"var_subject": "spam-egg",
}
)
# Hook initialisation
m.assert_called_once_with(aws_conn_id="test_sns_notifier_templated", region_name="us-west-1")
# Publish message
m.return_value.publish_to_target.assert_called_once_with(
target_arn="arn:aws:sns:us-west-1:000000000000:AwesomeTopic",
message="I, Robot",
subject="spam-egg",
message_attributes={"foo": "test_sns_notifier_templated"},
)
| TestSnsNotifier |
python | django__django | tests/select_related_regress/models.py | {
"start": 2499,
"end": 2527
} | class ____(Fowl):
pass
| Hen |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_protect06.py | {
"start": 315,
"end": 2055
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("protect06.xlsx")
def test_create_file(self):
"""Test the a simple XlsxWriter file with worksheet protection."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
unlocked = workbook.add_format({"locked": 0, "hidden": 0})
hidden = workbook.add_format({"locked": 0, "hidden": 1})
worksheet.protect()
worksheet.unprotect_range("A1", None, "password")
worksheet.unprotect_range("C1:C3")
worksheet.unprotect_range("G4:I6", "MyRange")
worksheet.unprotect_range("K7", None, "foobar")
worksheet.write("A1", 1)
worksheet.write("A2", 2, unlocked)
worksheet.write("A3", 3, hidden)
workbook.close()
self.assertExcelEqual()
def test_create_file_keyword(self):
"""Test the a simple XlsxWriter file with worksheet protection."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
unlocked = workbook.add_format({"locked": 0, "hidden": 0})
hidden = workbook.add_format({"locked": 0, "hidden": 1})
worksheet.protect()
worksheet.unprotect_range("A1", password="password")
worksheet.unprotect_range(cell_range="C1:C3")
worksheet.unprotect_range("G4:I6", range_name="MyRange")
worksheet.unprotect_range("K7", None, "foobar")
worksheet.write("A1", 1)
worksheet.write("A2", 2, unlocked)
worksheet.write("A3", 3, hidden)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | crytic__slither | slither/vyper_parsing/ast/types.py | {
"start": 423,
"end": 501
} | class ____(ASTNode):
name: str
body: List[AnnAssign]
@dataclass
| EventDef |
python | pytorch__pytorch | torch/_dynamo/variables/nn_module.py | {
"start": 39047,
"end": 54739
} | class ____(UserDefinedObjectVariable):
_nonvar_fields = {
"value_type",
"is_state_mutated",
"nn_module_stack_source",
*UserDefinedObjectVariable._nonvar_fields,
}
"""
The above class will specialize on the id() of a module and place
parameters on the torch.fx.GraphModule. Giving one graph per
module instance. This version treats nn.Modules() like other user
defined objects and will pass parameters into the FX graph as inputs.
Giving one graph per module class.
"""
def __init__(self, value, **kwargs) -> None:
if type(value) is torch.jit._script.RecursiveScriptModule:
unimplemented(
gb_type="UnspecializedNNModuleVariable wrapped around ScriptModules unsupported",
context=str(value),
explanation="ScriptModules aren't supported in UnspecializedNNModuleVariable"
" because their .forward function isn't a static member of their type.",
hints=[
*graph_break_hints.DIFFICULT,
],
)
if "value_type" in kwargs:
lazy_value_to_become = getattr(kwargs["value_type"], "cls_to_become", None)
if type(value) is lazy_value_to_become:
# We may have cloned a variabletracker for a LazyModule earlier (e.g. tracking side-effects)
# and then later we called and mutated the LazyModule into a MaterializedModule.
# We do not do the mutation upon first seeing a LazyModule since we preserve eager semantics to only
# mutate upon first call, but this requires we update multiple copies of the VariableTracker post-mutation.
kwargs["value_type"] = type(value)
super().__init__(value=value, **kwargs)
self.is_state_mutated = False
# nn_module_stack_source is used to ensure BC for nn_module_stack.
# Downstream users prefer mod.linear instead of mod._modules['linear']
# as the module stack. When Dynamo inlines the __getattr__ method, we
# cannot use self.source for nn_module_stack because it will be similar
# to mod._modules['linear']. In these cases, we set the
# nn_module_stack_source appropriately to resemble mod.linear.
self.nn_module_stack_source = self.source
def _wrap_source(self, attr_source):
# the vt is already wrapped with UnspecializedNNModuleSource
return attr_source
def get_nn_module_stack_source(self):
return self.nn_module_stack_source or self.source
def set_nn_module_stack_source(self, source):
self.nn_module_stack_source = source
@staticmethod
@functools.cache
def _nn_module_method_ids():
# Allow __setattr__ to fall through to base class handler
supported = {
torch.nn.Module.__setattr__,
torch.nn.Module.__init__,
torch.nn.Module.__delattr__,
}
return {
id(x.__code__)
for x in torch.nn.Module.__dict__.values()
if hasattr(x, "__code__") and x not in supported
}
def unpack_var_sequence(self, tx):
try:
fn = inspect.getattr_static(self.value_type, "__iter__")
except AttributeError as e:
raise NotImplementedError from e
if fn in (
torch.nn.ModuleList.__iter__,
torch.nn.ParameterList.__iter__,
torch.nn.Sequential.__iter__,
):
# The program can mutate the nn module object but the saved `value`
# will not reflect the mutations. So, trace through the `__iter__`
# function to reflect any tracked mutations.
return tx.inline_user_function_return(
VariableTracker.build(tx, fn),
[
self,
],
{},
).unpack_var_sequence(tx)
return super().unpack_var_sequence(tx)
def call_function(
self,
tx: "InstructionTranslator",
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
mod = self.value
# see comment on lazy module handling in NNModuleVariable.call_function for context
if is_lazy_module(mod):
if mod.cls_to_become is not None:
self.value_type = mod.cls_to_become
initialize_lazy_module(tx, mod, args, kwargs)
if not isinstance(mod, torch.fx.GraphModule):
name = "__call__"
fn = getattr(self.value_type, name)
else:
name = "_call_impl"
fn = getattr(self.value_type, name)
# Check if we can short circuit nn.Module._call_impl to the forward
# method. NB - This is done to reduce the compile time of Dynamo.
if (
istype(mod.__call__, types.MethodType)
and istype(mod._call_impl, types.MethodType)
and mod.__call__.__func__ is unpatched_nn_module_call
and mod._call_impl.__func__ is unpatched_nn_module_call_impl
and "forward" not in mod.__dict__
):
forward_method = inspect.getattr_static(mod, "forward")
if isinstance(forward_method, types.FunctionType):
globals_vt = tx.nn_modules_globals_vt
if not (
self.var_getattr(tx, "_backward_hooks").realize().len()
or self.var_getattr(tx, "_backward_pre_hooks").realize().len()
or self.var_getattr(tx, "_forward_hooks").realize().len()
or self.var_getattr(tx, "_forward_pre_hooks").realize().len()
or globals_vt.var_getattr(tx, "_global_backward_pre_hooks").len()
or globals_vt.var_getattr(tx, "_global_backward_hooks").len()
or globals_vt.var_getattr(tx, "_global_forward_hooks").len()
or globals_vt.var_getattr(tx, "_global_forward_pre_hooks").len()
):
name = "forward"
fn = self.value_type.forward
if self.source:
source = self.get_source_by_walking_mro(name)
else:
source = None
guard_to_detect_forward_monkeypatching(self.source, mod)
ctx = (
record_nn_module_stack(
str(id(mod)), self.get_nn_module_stack_source(), tx, mod
)
if self.source
else nullcontext()
)
with ctx:
if not isinstance(fn, (types.FunctionType, torch.jit.ScriptFunction)):
fn_vt = VariableTracker.build(tx, fn, source=source)
return fn_vt.call_function(tx, [self] + list(args), kwargs)
else:
# Ideally we would have just used VariableTracker.build(tx, fn,
# source=source) but that introduces guard on the
# `forward.__code__` object. Given that we already guard on the
# forward not present in generic dict, we dont need this guard.
return variables.UserFunctionVariable(fn, source=source).call_function(
tx, [self] + list(args), kwargs
)
def call_method(
self,
tx,
name,
args: "list[VariableTracker]",
kwargs: "dict[str, VariableTracker]",
) -> "VariableTracker":
if name in ["_call_impl", "_wrapped_call_impl"]:
fn = getattr(self.value_type, name)
if self.source:
source = self.get_source_by_walking_mro(name)
else:
source = None
fn_vt = VariableTracker.build(tx, fn, source=source)
return fn_vt.call_function(tx, [self] + list(args), kwargs)
if name not in getattr(self.value, "__dict__", {}):
try:
method = inspect.getattr_static(type(self.value), name)
except AttributeError:
method = None
if isinstance(method, staticmethod):
source = AttrSource(self.get_source_by_walking_mro(name), "__func__")
fn_vt = VariableTracker.build(tx, method.__func__, source=source)
return fn_vt.call_function(tx, args, kwargs)
if (
hasattr(method, "__code__")
and id(method.__code__) in self._nn_module_method_ids()
):
unimplemented(
gb_type="UnspecializedNNModuleVariable missing method",
context=f"call_method: {self} {name} {args} {kwargs}",
explanation=f"Dynamo does not support tracing method {name} of nn.Module {self.value}",
hints=[
"Dynamo does not really define unspecialized nn.Module very well.",
*graph_break_hints.DIFFICULT,
],
)
# "_parameters" in self.value.__dict__ checks that module is initialized
if name == "__setattr__" and "_parameters" in self.value.__dict__:
# Record if mutations happens on parameters/buffers/modules. The
# mutations on these are not tracked by base class
# UserDefinedObject vt. This will be used later to graph break
# on seeing a parameters() and family calls.
# TODO(anijain2305) - This might not be needed if we let Dynamo
# inline both getattr and setattr. In that case, it should see
# the lowest level dicts - _parameters and family and
# automatically track mutations on those. Investigate if that
# can be done.
attr_name = args[0].as_python_constant()
value = args[1]
# This is reverse engineered by looking at nn module __setattr__
# logic.
if (
isinstance(value, variables.TensorVariable)
and value.python_type() is torch.nn.Parameter
) or attr_name in self.value.__dict__["_parameters"]:
# Handle parameters
self.is_state_mutated = True
elif attr_name in self.value.__dict__["_buffers"]:
# Handle buffers
self.is_state_mutated = True
elif (
isinstance(
value,
(
variables.NNModuleVariable,
variables.UnspecializedNNModuleVariable,
),
)
or attr_name in self.value.__dict__["_modules"]
):
# Handle submodules
self.is_state_mutated = True
if (
method is torch.nn.Module.__setattr__
and isinstance(args[1], variables.DeletedVariable)
) or method is torch.nn.Module.__delattr__:
# Trace through __delattr__ to track mutations on the module
# members like `_modules``.
fn_vt = VariableTracker.build(tx, torch.nn.Module.__delattr__)
return fn_vt.call_function(tx, [self, args[0]], kwargs)
return super().call_method(tx, name, args, kwargs)
def getattr_helper(self, tx: "InstructionTranslator", field, name_vt):
dict_vt = self.var_getattr(tx, field)
if isinstance(dict_vt, variables.ConstDictVariable):
return dict_vt.maybe_getitem_const(name_vt)
return None
def var_getattr(self, tx: "InstructionTranslator", name):
# Allow skipping of empty hook dict guards on inbuilt nn modules
if name in (
"_backward_hooks",
"_backward_pre_hooks",
"_forward_hooks",
"_forward_pre_hooks",
):
# For empty hooks, make an EMPTY_NN_MODULE_HOOKS_DICT. This allows us to control the installation of empty
# hooks guard via skip_nnmodule_hook_guards
if not tx.output.side_effects.has_pending_mutation_of_attr(self, name):
hooks_dict = getattr(self.value, name)
if isinstance(hooks_dict, dict) and len(hooks_dict) == 0:
if self.source:
hooks_source = AttrSource(self.source, name)
install_guard(
hooks_source.make_guard(
GuardBuilder.EMPTY_NN_MODULE_HOOKS_DICT
)
)
return variables.ConstDictVariable({})
# For non-empty hook dicts, one way is to just fallback to VariableTracker.build() and create a ConstDictVariable.
# However, ConstDictVariable guards on keys. This can cause recompiles when the same hook is installed for
# different nn module instances, because the key keeps changing (look more into RemovableHandle to understand why
# key changes - also related https://github.com/pytorch/pytorch/issues/125836). Here, we carefully craft a
# NNModuleHooksDictVariable (a subclass of ConstDictVariable) to avoid any guard on the keys.
if (
self.source
and name
in (
"_forward_pre_hooks",
"_forward_hooks",
)
and not tx.output.side_effects.has_pending_mutation_of_attr(self, name)
):
hooks_dict = getattr(self.value, name)
hooks_dict_source = AttrSource(self.source, name)
install_guard(hooks_dict_source.make_guard(GuardBuilder.SEQUENCE_LENGTH))
tx.output.guard_on_key_order.add(hooks_dict_source)
def build_key_value(i, k, v):
# Make key sourceless to avoid any guard on it
key = variables.ConstantVariable.create(k)
# Instead of using dict[key] to access the value, use a dict[dict.keys()[index]] to access the
# value. This removes the reliance on the actual key value.
source_key = ConstDictKeySource(hooks_dict_source, i)
source_value = DictGetItemSource(hooks_dict_source, source_key)
value = LazyVariableTracker.create(v, source_value)
return key, value
result = dict(
build_key_value(i, k, v) for i, (k, v) in enumerate(hooks_dict.items())
)
return variables.NNModuleHooksDictVariable(
result, type(hooks_dict), source=hooks_dict_source
)
return super().var_getattr(tx, name)
def manually_trace_nn_module_getattr(self, tx: "InstructionTranslator", name):
"""
Dynamo tracing of nn.Module __getattr__ can be expensive if the model
has deep submodule hierarchy. Since the __getattr__ is stable, we can
directly look into the underlying datastructures. This saves a lot of
compilation time.
"""
name_vt = variables.ConstantVariable(name)
out = self.getattr_helper(tx, "_parameters", name_vt)
if out is None:
out = self.getattr_helper(tx, "_modules", name_vt)
if out is None:
out = self.getattr_helper(tx, "_buffers", name_vt)
if out is None:
raise_observed_exception(
AttributeError,
tx,
msg=f"'{type(self.value).__name__}' object has no attribute '{name}'",
)
return out
| UnspecializedNNModuleVariable |
python | huggingface__transformers | src/transformers/models/vjepa2/video_processing_vjepa2.py | {
"start": 875,
"end": 1754
} | class ____(BaseVideoProcessor):
resample = PILImageResampling.BILINEAR
image_mean = IMAGENET_DEFAULT_MEAN
image_std = IMAGENET_DEFAULT_STD
size = {"shortest_edge": int(256 * 256 / 224)}
crop_size = 256
do_resize = True
do_rescale = True
do_center_crop = True
do_normalize = True
def __init__(self, **kwargs: Unpack[VideosKwargs]):
crop_size = kwargs.get("crop_size", 256)
if not isinstance(crop_size, int):
if not isinstance(crop_size, dict) or "height" not in crop_size:
raise ValueError("crop_size must be an integer or a dictionary with a 'height' key")
crop_size = crop_size["height"]
resize_size = int(crop_size * 256 / 224)
kwargs["size"] = {"shortest_edge": resize_size}
super().__init__(**kwargs)
__all__ = ["VJEPA2VideoProcessor"]
| VJEPA2VideoProcessor |
python | apache__airflow | providers/google/src/airflow/providers/google/marketing_platform/operators/campaign_manager.py | {
"start": 9629,
"end": 12530
} | class ____(BaseOperator):
"""
Creates a report.
.. seealso::
Check official API docs:
`https://developers.google.com/doubleclick-advertisers/rest/v4/reports/insert`
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:GoogleCampaignManagerInsertReportOperator`
:param profile_id: The DFA user profile ID.
:param report: Report to be created.
:param api_version: The version of the api that will be requested, for example 'v4'.
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"profile_id",
"report",
"api_version",
"gcp_conn_id",
"impersonation_chain",
)
template_ext: Sequence[str] = (".json",)
def __init__(
self,
*,
profile_id: str,
report: dict[str, Any],
api_version: str = "v4",
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.profile_id = profile_id
self.report = report
self.api_version = api_version
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def prepare_template(self) -> None:
# If .json is passed then we have to read the file
if isinstance(self.report, str) and self.report.endswith(".json"):
with open(self.report) as file:
self.report = json.load(file)
def execute(self, context: Context):
hook = GoogleCampaignManagerHook(
gcp_conn_id=self.gcp_conn_id,
api_version=self.api_version,
impersonation_chain=self.impersonation_chain,
)
self.log.info("Inserting Campaign Manager report.")
response = hook.insert_report(profile_id=self.profile_id, report=self.report)
report_id = response.get("id")
context["task_instance"].xcom_push(key="report_id", value=report_id)
self.log.info("Report successfully inserted. Report id: %s", report_id)
return response
| GoogleCampaignManagerInsertReportOperator |
python | altair-viz__altair | altair/vegalite/v6/schema/channels.py | {
"start": 250825,
"end": 258981
} | class ____(
DatumChannelMixin, core.FieldOrDatumDefWithConditionDatumDefnumber
):
"""
FillOpacityDatum schema wrapper.
Parameters
----------
bandPosition : float
Relative position on a band of a stacked, binned, time unit, or band scale. For
example, the marks will be positioned at the beginning of the band if set to ``0``,
and at the middle of the band if set to ``0.5``.
condition : dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`, Sequence[dict, :class:`ConditionalValueDefnumberExprRef`, :class:`ConditionalParameterValueDefnumberExprRef`, :class:`ConditionalPredicateValueDefnumberExprRef`]
One or more value definition(s) with `a parameter or a test predicate
<https://vega.github.io/vega-lite/docs/condition.html>`__.
**Note:** A field definition's ``condition`` property can only contain `conditional
value definitions <https://vega.github.io/vega-lite/docs/condition.html#value>`__
since Vega-Lite only allows at most one encoded field per encoding channel.
datum : str, bool, dict, float, :class:`ExprRef`, :class:`DateTime`, :class:`RepeatRef`, :class:`PrimitiveValue`, None
A constant value in data domain.
title : str, :class:`Text`, Sequence[str], None
A title for the field. If ``null``, the title will be removed.
**Default value:** derived from the field's name and transformation function
(``aggregate``, ``bin`` and ``timeUnit``). If the field has an aggregate function,
the function is displayed as part of the title (e.g., ``"Sum of Profit"``). If the
field is binned or has a time unit applied, the applied function is shown in
parentheses (e.g., ``"Profit (binned)"``, ``"Transaction Date (year-month)"``).
Otherwise, the title is simply the field name.
**Notes**:
1) You can customize the default field title format by providing the `fieldTitle
<https://vega.github.io/vega-lite/docs/config.html#top-level-config>`__ property in
the `config <https://vega.github.io/vega-lite/docs/config.html>`__ or `fieldTitle
function via the compile function's options
<https://vega.github.io/vega-lite/usage/compile.html#field-title>`__.
2) If both field definition's ``title`` and axis, header, or legend ``title`` are
defined, axis/header/legend title will be used.
type : :class:`Type`, Literal['quantitative', 'ordinal', 'temporal', 'nominal', 'geojson']
The type of measurement (``"quantitative"``, ``"temporal"``, ``"ordinal"``, or
``"nominal"``) for the encoded field or constant value (``datum``). It can also be a
``"geojson"`` type for encoding `'geoshape'
<https://vega.github.io/vega-lite/docs/geoshape.html>`__.
Vega-Lite automatically infers data types in many cases as discussed below. However,
type is required for a field if: (1) the field is not nominal and the field encoding
has no specified ``aggregate`` (except ``argmin`` and ``argmax``), ``bin``, scale
type, custom ``sort`` order, nor ``timeUnit`` or (2) if you wish to use an ordinal
scale for a field with ``bin`` or ``timeUnit``.
**Default value:**
1) For a data ``field``, ``"nominal"`` is the default data type unless the field
encoding has ``aggregate``, ``channel``, ``bin``, scale type, ``sort``, or
``timeUnit`` that satisfies the following criteria:
* ``"quantitative"`` is the default type if (1) the encoded field contains ``bin``
or ``aggregate`` except ``"argmin"`` and ``"argmax"``, (2) the encoding channel is
``latitude`` or ``longitude`` channel or (3) if the specified scale type is `a
quantitative scale <https://vega.github.io/vega-lite/docs/scale.html#type>`__.
* ``"temporal"`` is the default type if (1) the encoded field contains ``timeUnit``
or (2) the specified scale type is a time or utc scale
* ``"ordinal"`` is the default type if (1) the encoded field contains a `custom sort
order
<https://vega.github.io/vega-lite/docs/sort.html#specifying-custom-sort-order>`__,
(2) the specified scale type is an ordinal/point/band scale, or (3) the encoding
channel is ``order``.
2) For a constant value in data domain (``datum``):
* ``"quantitative"`` if the datum is a number
* ``"nominal"`` if the datum is a string
* ``"temporal"`` if the datum is `a date time object
<https://vega.github.io/vega-lite/docs/datetime.html>`__
**Note:**
* Data ``type`` describes the semantics of the data rather than the primitive data
types (number, string, etc.). The same primitive data type can have different
types of measurement. For example, numeric data can represent quantitative,
ordinal, or nominal data.
* Data values for a temporal field can be either a date-time string (e.g.,
``"2015-03-07 12:32:17"``, ``"17:01"``, ``"2015-03-16"``. ``"2015"``) or a
timestamp number (e.g., ``1552199579097``).
* When using with `bin <https://vega.github.io/vega-lite/docs/bin.html>`__, the
``type`` property can be either ``"quantitative"`` (for using a linear bin scale)
or `"ordinal" (for using an ordinal bin scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `timeUnit
<https://vega.github.io/vega-lite/docs/timeunit.html>`__, the ``type`` property
can be either ``"temporal"`` (default, for using a temporal scale) or `"ordinal"
(for using an ordinal scale)
<https://vega.github.io/vega-lite/docs/type.html#cast-bin>`__.
* When using with `aggregate
<https://vega.github.io/vega-lite/docs/aggregate.html>`__, the ``type`` property
refers to the post-aggregation data type. For example, we can calculate count
``distinct`` of a categorical field ``"cat"`` using ``{"aggregate": "distinct",
"field": "cat"}``. The ``"type"`` of the aggregate output is ``"quantitative"``.
* Secondary channels (e.g., ``x2``, ``y2``, ``xError``, ``yError``) do not have
``type`` as they must have exactly the same type as their primary channels (e.g.,
``x``, ``y``).
**See also:** `type <https://vega.github.io/vega-lite/docs/type.html>`__
documentation.
"""
_class_is_valid_at_instantiation = False
_encoding_name = "fillOpacity"
@overload
def bandPosition(self, _: float, /) -> FillOpacityDatum: ...
@overload
def condition(
self,
*,
test: Optional[str | SchemaBase | Map] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> FillOpacityDatum: ...
@overload
def condition(
self,
*,
empty: Optional[bool] = Undefined,
param: Optional[str | SchemaBase] = Undefined,
value: Optional[float | Parameter | SchemaBase | Map] = Undefined,
) -> FillOpacityDatum: ...
@overload
def condition(
self, _: list[core.ConditionalValueDefnumberExprRef], /
) -> FillOpacityDatum: ...
@overload
def title(self, _: str | Sequence[str] | None, /) -> FillOpacityDatum: ...
@overload
def type(self, _: Type_T, /) -> FillOpacityDatum: ...
def __init__(
self,
datum,
bandPosition: Optional[float] = Undefined,
condition: Optional[SchemaBase | Sequence[SchemaBase | Map] | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | None] = Undefined,
type: Optional[SchemaBase | Type_T] = Undefined,
**kwds,
):
super().__init__(
datum=datum,
bandPosition=bandPosition,
condition=condition,
title=title,
type=type,
**kwds,
)
@with_property_setters
| FillOpacityDatum |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/dependency_install/package.py | {
"start": 217,
"end": 610
} | class ____(Package):
"""Dependency which has a working install method"""
homepage = "http://www.example.com"
url = "http://www.example.com/a-1.0.tar.gz"
version("1.0", md5="0123456789abcdef0123456789abcdef")
version("2.0", md5="abcdef0123456789abcdef0123456789")
def install(self, spec, prefix):
touch(join_path(prefix, "an_installation_file"))
| DependencyInstall |
python | redis__redis-py | tests/test_asyncio/test_command_policies.py | {
"start": 2972,
"end": 6658
} | class ____:
async def test_resolves_correctly_policies(self, r: RedisCluster, monkeypatch):
# original nodes selection method
determine_nodes = r._determine_nodes
determined_nodes = []
primary_nodes = r.get_primaries()
calls = iter(list(range(len(primary_nodes))))
async def wrapper(*args, request_policy: RequestPolicy, **kwargs):
nonlocal determined_nodes
determined_nodes = await determine_nodes(
*args, request_policy=request_policy, **kwargs
)
return determined_nodes
# Mock random.choice to always return a pre-defined sequence of nodes
monkeypatch.setattr(random, "choice", lambda seq: seq[next(calls)])
with patch.object(r, "_determine_nodes", side_effect=wrapper, autospec=True):
# Routed to a random primary node
await r.ft().create_index(
[
NumericField("random_num"),
TextField("title"),
TextField("body"),
TextField("parent"),
]
)
assert determined_nodes[0] == primary_nodes[0]
# Routed to another random primary node
info = await r.ft().info()
if is_resp2_connection(r):
assert info["index_name"] == "idx"
else:
assert info[b"index_name"] == b"idx"
assert determined_nodes[0] == primary_nodes[1]
expected_node = await r.get_nodes_from_slot("FT.SUGLEN", *["foo"])
await r.ft().suglen("foo")
assert determined_nodes[0] == expected_node[0]
# Indexing a document
await r.hset(
"search",
mapping={
"title": "RediSearch",
"body": "Redisearch impements a search engine on top of redis",
"parent": "redis",
"random_num": 10,
},
)
await r.hset(
"ai",
mapping={
"title": "RedisAI",
"body": "RedisAI executes Deep Learning/Machine Learning models and managing their data.", # noqa
"parent": "redis",
"random_num": 3,
},
)
await r.hset(
"json",
mapping={
"title": "RedisJson",
"body": "RedisJSON implements ECMA-404 The JSON Data Interchange Standard as a native data type.", # noqa
"parent": "redis",
"random_num": 8,
},
)
req = AggregateRequest("redis").group_by("@parent").cursor(1)
res = await r.ft().aggregate(req)
if is_resp2_connection(r):
cursor = res.cursor
else:
cursor = Cursor(res[1])
# Ensure that aggregate node was cached.
assert determined_nodes[0] == r._aggregate_nodes[0]
await r.ft().aggregate(cursor)
# Verify that FT.CURSOR dispatched to the same node.
assert determined_nodes[0] == r._aggregate_nodes[0]
# Error propagates to a user
with pytest.raises(ResponseError, match="Cursor not found, id:"):
await r.ft().aggregate(cursor)
assert determined_nodes[0] == primary_nodes[2]
# Core commands also randomly distributed across masters
await r.randomkey()
assert determined_nodes[0] == primary_nodes[0]
| TestClusterWithPolicies |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/widgets/menus.py | {
"start": 937,
"end": 12785
} | class ____:
"""
:param floats: List of extra Float objects to display.
:param menu_items: List of `MenuItem` objects.
"""
def __init__(
self,
body: AnyContainer,
menu_items: list[MenuItem],
floats: list[Float] | None = None,
key_bindings: KeyBindingsBase | None = None,
) -> None:
self.body = body
self.menu_items = menu_items
self.selected_menu = [0]
# Key bindings.
kb = KeyBindings()
@Condition
def in_main_menu() -> bool:
return len(self.selected_menu) == 1
@Condition
def in_sub_menu() -> bool:
return len(self.selected_menu) > 1
# Navigation through the main menu.
@kb.add("left", filter=in_main_menu)
def _left(event: E) -> None:
self.selected_menu[0] = max(0, self.selected_menu[0] - 1)
@kb.add("right", filter=in_main_menu)
def _right(event: E) -> None:
self.selected_menu[0] = min(
len(self.menu_items) - 1, self.selected_menu[0] + 1
)
@kb.add("down", filter=in_main_menu)
def _down(event: E) -> None:
self.selected_menu.append(0)
@kb.add("c-c", filter=in_main_menu)
@kb.add("c-g", filter=in_main_menu)
def _cancel(event: E) -> None:
"Leave menu."
event.app.layout.focus_last()
# Sub menu navigation.
@kb.add("left", filter=in_sub_menu)
@kb.add("c-g", filter=in_sub_menu)
@kb.add("c-c", filter=in_sub_menu)
def _back(event: E) -> None:
"Go back to parent menu."
if len(self.selected_menu) > 1:
self.selected_menu.pop()
@kb.add("right", filter=in_sub_menu)
def _submenu(event: E) -> None:
"go into sub menu."
if self._get_menu(len(self.selected_menu) - 1).children:
self.selected_menu.append(0)
# If This item does not have a sub menu. Go up in the parent menu.
elif (
len(self.selected_menu) == 2
and self.selected_menu[0] < len(self.menu_items) - 1
):
self.selected_menu = [
min(len(self.menu_items) - 1, self.selected_menu[0] + 1)
]
if self.menu_items[self.selected_menu[0]].children:
self.selected_menu.append(0)
@kb.add("up", filter=in_sub_menu)
def _up_in_submenu(event: E) -> None:
"Select previous (enabled) menu item or return to main menu."
# Look for previous enabled items in this sub menu.
menu = self._get_menu(len(self.selected_menu) - 2)
index = self.selected_menu[-1]
previous_indexes = [
i
for i, item in enumerate(menu.children)
if i < index and not item.disabled
]
if previous_indexes:
self.selected_menu[-1] = previous_indexes[-1]
elif len(self.selected_menu) == 2:
# Return to main menu.
self.selected_menu.pop()
@kb.add("down", filter=in_sub_menu)
def _down_in_submenu(event: E) -> None:
"Select next (enabled) menu item."
menu = self._get_menu(len(self.selected_menu) - 2)
index = self.selected_menu[-1]
next_indexes = [
i
for i, item in enumerate(menu.children)
if i > index and not item.disabled
]
if next_indexes:
self.selected_menu[-1] = next_indexes[0]
@kb.add("enter")
def _click(event: E) -> None:
"Click the selected menu item."
item = self._get_menu(len(self.selected_menu) - 1)
if item.handler:
event.app.layout.focus_last()
item.handler()
# Controls.
self.control = FormattedTextControl(
self._get_menu_fragments, key_bindings=kb, focusable=True, show_cursor=False
)
self.window = Window(height=1, content=self.control, style="class:menu-bar")
submenu = self._submenu(0)
submenu2 = self._submenu(1)
submenu3 = self._submenu(2)
@Condition
def has_focus() -> bool:
return get_app().layout.current_window == self.window
self.container = FloatContainer(
content=HSplit(
[
# The titlebar.
self.window,
# The 'body', like defined above.
body,
]
),
floats=[
Float(
xcursor=True,
ycursor=True,
content=ConditionalContainer(
content=Shadow(body=submenu), filter=has_focus
),
),
Float(
attach_to_window=submenu,
xcursor=True,
ycursor=True,
allow_cover_cursor=True,
content=ConditionalContainer(
content=Shadow(body=submenu2),
filter=has_focus
& Condition(lambda: len(self.selected_menu) >= 1),
),
),
Float(
attach_to_window=submenu2,
xcursor=True,
ycursor=True,
allow_cover_cursor=True,
content=ConditionalContainer(
content=Shadow(body=submenu3),
filter=has_focus
& Condition(lambda: len(self.selected_menu) >= 2),
),
),
# --
]
+ (floats or []),
key_bindings=key_bindings,
)
def _get_menu(self, level: int) -> MenuItem:
menu = self.menu_items[self.selected_menu[0]]
for i, index in enumerate(self.selected_menu[1:]):
if i < level:
try:
menu = menu.children[index]
except IndexError:
return MenuItem("debug")
return menu
def _get_menu_fragments(self) -> StyleAndTextTuples:
focused = get_app().layout.has_focus(self.window)
# This is called during the rendering. When we discover that this
# widget doesn't have the focus anymore. Reset menu state.
if not focused:
self.selected_menu = [0]
# Generate text fragments for the main menu.
def one_item(i: int, item: MenuItem) -> Iterable[OneStyleAndTextTuple]:
def mouse_handler(mouse_event: MouseEvent) -> None:
hover = mouse_event.event_type == MouseEventType.MOUSE_MOVE
if (
mouse_event.event_type == MouseEventType.MOUSE_DOWN
or hover
and focused
):
# Toggle focus.
app = get_app()
if not hover:
if app.layout.has_focus(self.window):
if self.selected_menu == [i]:
app.layout.focus_last()
else:
app.layout.focus(self.window)
self.selected_menu = [i]
yield ("class:menu-bar", " ", mouse_handler)
if i == self.selected_menu[0] and focused:
yield ("[SetMenuPosition]", "", mouse_handler)
style = "class:menu-bar.selected-item"
else:
style = "class:menu-bar"
yield style, item.text, mouse_handler
result: StyleAndTextTuples = []
for i, item in enumerate(self.menu_items):
result.extend(one_item(i, item))
return result
def _submenu(self, level: int = 0) -> Window:
def get_text_fragments() -> StyleAndTextTuples:
result: StyleAndTextTuples = []
if level < len(self.selected_menu):
menu = self._get_menu(level)
if menu.children:
result.append(("class:menu", Border.TOP_LEFT))
result.append(("class:menu", Border.HORIZONTAL * (menu.width + 4)))
result.append(("class:menu", Border.TOP_RIGHT))
result.append(("", "\n"))
try:
selected_item = self.selected_menu[level + 1]
except IndexError:
selected_item = -1
def one_item(
i: int, item: MenuItem
) -> Iterable[OneStyleAndTextTuple]:
def mouse_handler(mouse_event: MouseEvent) -> None:
if item.disabled:
# The arrow keys can't interact with menu items that are disabled.
# The mouse shouldn't be able to either.
return
hover = mouse_event.event_type == MouseEventType.MOUSE_MOVE
if (
mouse_event.event_type == MouseEventType.MOUSE_UP
or hover
):
app = get_app()
if not hover and item.handler:
app.layout.focus_last()
item.handler()
else:
self.selected_menu = self.selected_menu[
: level + 1
] + [i]
if i == selected_item:
yield ("[SetCursorPosition]", "")
style = "class:menu-bar.selected-item"
else:
style = ""
yield ("class:menu", Border.VERTICAL)
if item.text == "-":
yield (
style + "class:menu-border",
f"{Border.HORIZONTAL * (menu.width + 3)}",
mouse_handler,
)
else:
yield (
style,
f" {item.text}".ljust(menu.width + 3),
mouse_handler,
)
if item.children:
yield (style, ">", mouse_handler)
else:
yield (style, " ", mouse_handler)
if i == selected_item:
yield ("[SetMenuPosition]", "")
yield ("class:menu", Border.VERTICAL)
yield ("", "\n")
for i, item in enumerate(menu.children):
result.extend(one_item(i, item))
result.append(("class:menu", Border.BOTTOM_LEFT))
result.append(("class:menu", Border.HORIZONTAL * (menu.width + 4)))
result.append(("class:menu", Border.BOTTOM_RIGHT))
return result
return Window(FormattedTextControl(get_text_fragments), style="class:menu")
@property
def floats(self) -> list[Float] | None:
return self.container.floats
def __pt_container__(self) -> Container:
return self.container
| MenuContainer |
python | tensorflow__tensorflow | tensorflow/python/tpu/feature_column_v2.py | {
"start": 38467,
"end": 44496
} | class ____(_TPUEmbeddingColumnV2):
"""TPUEmbeddingColumn which allows serving on TensorCore."""
def __new__(cls, *args, **kwargs):
# For __new__, just capture the inference dense shape and call parent.
if 'tensor_core_shape' in kwargs:
cls._tensor_core_shape = kwargs['tensor_core_shape']
del kwargs['tensor_core_shape']
if 'embedding_lookup_device' in kwargs:
cls._embedding_lookup_device = kwargs['embedding_lookup_device']
del kwargs['embedding_lookup_device']
return _TPUEmbeddingColumnV2.__new__(cls, *args, **kwargs) # pytype: disable=wrong-keyword-args # always-use-return-annotations
def __init__(self, *args, **kwargs):
# For __init__, just capture the inference dense shape and call parent.
if 'tensor_core_shape' in kwargs:
self._tensor_core_shape = kwargs['tensor_core_shape']
del kwargs['tensor_core_shape']
if 'embedding_lookup_device' in kwargs:
self._embedding_lookup_device = kwargs['embedding_lookup_device']
del kwargs['embedding_lookup_device']
_TPUEmbeddingColumnV2.__init__(self, *args, **kwargs)
def __deepcopy__(self, memo):
return _TPUDeviceSpecificEmbeddingColumnV2(
*(copy.deepcopy(a, memo) for a in self.__getnewargs__()),
tensor_core_shape=self._tensor_core_shape,
embedding_lookup_device=self._embedding_lookup_device)
def create_state(self, state_manager):
_check_invalid_cases(self._embedding_lookup_device)
# CPU case.
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
is_cpu = is_cpu or _is_running_on_cpu()
if is_cpu:
return fc_lib.EmbeddingColumn.create_state(self, state_manager)
# TPU_EMBEDDING_CORE case.
elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self).create_state(state_manager)
# TPU_EMBEDDING_CORE case.
return fc_lib.EmbeddingColumn.create_state(self, state_manager)
def get_dense_tensor(self, transformation_cache, state_manager):
"""Private method that follows get_dense_tensor."""
_check_invalid_cases(self._embedding_lookup_device)
# CPU Case.
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
is_cpu = is_cpu or _is_running_on_cpu()
if is_cpu:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self).get_dense_tensor(transformation_cache, state_manager)
# TPU_EMBEDDING_CORE case.
elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self).get_dense_tensor(transformation_cache, state_manager)
# TPU_EMBEDDING_CORE cases.
if tpu.under_tpu_inference_context():
# For inference, use outside compile to densify and pad the input tensors.
sparse_tensor = transformation_cache.get(self.categorical_column.name,
state_manager)
def host_computation():
return pad_sparse_embedding_lookup_indices(sparse_tensor,
self._tensor_core_shape[1])
values, mask = tpu_replication.outside_compilation(host_computation)
else:
# For training, the inputs should already have been densified and padded.
values = transformation_cache.get(self.categorical_column.name,
state_manager)
mask = transformation_cache.get(
self.categorical_column.name + _TENSOR_CORE_MASK_KEY_SUFFIX,
state_manager)
embedding_weights = state_manager.get_variable(
self, name='embedding_weights')
return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
self.get_combiner())
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
_check_invalid_cases(self._embedding_lookup_device)
# CPU Case.
is_cpu = self._embedding_lookup_device == EmbeddingDevice.CPU
is_cpu = is_cpu or _is_running_on_cpu()
if is_cpu:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self)._get_dense_tensor(inputs, weight_collections,
trainable)
# TPU_EMBEDDING_CORE case.
elif self._embedding_lookup_device == EmbeddingDevice.TPU_EMBEDDING_CORE:
return super(_TPUDeviceSpecificEmbeddingColumnV2,
self)._get_dense_tensor(inputs, weight_collections,
trainable)
# TPU_EMBEDDING_CORE cases.
if tpu.under_tpu_inference_context():
# For inference, use outside compile to densify and pad the input tensors.
sparse_tensor = inputs.get(self.get_feature_key_name())
def host_computation():
return pad_sparse_embedding_lookup_indices(sparse_tensor,
self._tensor_core_shape[1])
values, mask = tpu_replication.outside_compilation(host_computation)
else:
# For training, the inputs should already have been densified and padded.
values = inputs.get(self.get_feature_key_name())
mask = inputs.get(self.get_feature_key_name() +
_TENSOR_CORE_MASK_KEY_SUFFIX)
embedding_shape = (self.categorical_column._num_buckets, self.dimension) # pylint: disable=protected-access
if (weight_collections and
ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections):
weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
embedding_weights = variable_scope.get_variable(
name='embedding_weights',
shape=embedding_shape,
dtype=dtypes.float32,
initializer=self.initializer,
trainable=self.trainable and trainable,
collections=weight_collections)
return sparse_embedding_aggregate_slice(embedding_weights, (values, mask),
self.get_combiner())
| _TPUDeviceSpecificEmbeddingColumnV2 |
python | django__django | tests/modeladmin/test_checks.py | {
"start": 9983,
"end": 12492
} | class ____(CheckTestCase):
def test_not_iterable(self):
class TestModelAdmin(ModelAdmin):
filter_vertical = 10
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'filter_vertical' must be a list or tuple.",
"admin.E017",
)
def test_missing_field(self):
class TestModelAdmin(ModelAdmin):
filter_vertical = ("non_existent_field",)
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'filter_vertical[0]' refers to 'non_existent_field', "
"which is not a field of 'modeladmin.ValidationTestModel'.",
"admin.E019",
)
def test_invalid_field_type(self):
class TestModelAdmin(ModelAdmin):
filter_vertical = ("name",)
self.assertIsInvalid(
TestModelAdmin,
ValidationTestModel,
"The value of 'filter_vertical[0]' must be a many-to-many field.",
"admin.E020",
)
@isolate_apps("modeladmin")
def test_invalid_reverse_m2m_field_with_related_name(self):
class Contact(Model):
pass
class Customer(Model):
contacts = ManyToManyField("Contact", related_name="customers")
class TestModelAdmin(ModelAdmin):
filter_vertical = ["customers"]
self.assertIsInvalid(
TestModelAdmin,
Contact,
"The value of 'filter_vertical[0]' must be a many-to-many field.",
"admin.E020",
)
@isolate_apps("modeladmin")
def test_invalid_m2m_field_with_through(self):
class Artist(Model):
bands = ManyToManyField("Band", through="BandArtist")
class BandArtist(Model):
artist = ForeignKey("Artist", on_delete=CASCADE)
band = ForeignKey("Band", on_delete=CASCADE)
class TestModelAdmin(ModelAdmin):
filter_vertical = ["bands"]
self.assertIsInvalid(
TestModelAdmin,
Artist,
"The value of 'filter_vertical[0]' cannot include the ManyToManyField "
"'bands', because that field manually specifies a relationship model.",
"admin.E013",
)
def test_valid_case(self):
class TestModelAdmin(ModelAdmin):
filter_vertical = ("users",)
self.assertIsValid(TestModelAdmin, ValidationTestModel)
| FilterVerticalCheckTests |
python | ray-project__ray | python/ray/data/datasource/file_meta_provider.py | {
"start": 1616,
"end": 4151
} | class ____(FileMetadataProvider):
"""Abstract callable that provides metadata for
:class:`~ray.data.datasource.file_based_datasource.FileBasedDatasource`
implementations that reuse the base :meth:`~ray.data.Datasource.prepare_read`
method.
Also supports file and file size discovery in input directory paths.
Current subclasses:
- :class:`DefaultFileMetadataProvider`
"""
def _get_block_metadata(
self,
paths: List[str],
*,
rows_per_file: Optional[int],
file_sizes: List[Optional[int]],
) -> BlockMetadata:
"""Resolves and returns block metadata for files of a single dataset block.
Args:
paths: The file paths for a single dataset block. These
paths will always be a subset of those previously returned from
:meth:`.expand_paths`.
rows_per_file: The fixed number of rows per input file, or None.
file_sizes: Optional file size per input file previously returned
from :meth:`.expand_paths`, where `file_sizes[i]` holds the size of
the file at `paths[i]`.
Returns:
BlockMetadata aggregated across the given file paths.
"""
raise NotImplementedError
def expand_paths(
self,
paths: List[str],
filesystem: Optional["RetryingPyFileSystem"],
partitioning: Optional[Partitioning] = None,
ignore_missing_paths: bool = False,
) -> Iterator[Tuple[str, int]]:
"""Expands all paths into concrete file paths by walking directories.
Also returns a sidecar of file sizes.
The input paths must be normalized for compatibility with the input
filesystem prior to invocation.
Args:
paths: A list of file and/or directory paths compatible with the
given filesystem.
filesystem: The filesystem implementation that should be used for
expanding all paths and reading their files.
ignore_missing_paths: If True, ignores any file paths in ``paths`` that
are not found. Defaults to False.
Returns:
An iterator of `(file_path, file_size)` pairs. None may be returned for the
file size if it is either unknown or will be fetched later by
`_get_block_metadata()`, but the length of
both lists must be equal.
"""
raise NotImplementedError
@DeveloperAPI
| BaseFileMetadataProvider |
python | PyCQA__pylint | doc/data/messages/u/unnecessary-pass/bad.py | {
"start": 0,
"end": 141
} | class ____(Exception):
"""This exception is raised when a user has provided incorrect data."""
pass # [unnecessary-pass]
| DataEntryError |
python | apache__airflow | task-sdk/src/airflow/sdk/bases/sensor.py | {
"start": 2119,
"end": 15216
} | class ____(BaseOperator):
"""
Sensor operators are derived from this class and inherit these attributes.
Sensor operators keep executing at a time interval and succeed when
a criteria is met and fail if and when they time out.
:param soft_fail: Set to true to mark the task as SKIPPED on failure.
Mutually exclusive with never_fail.
:param poke_interval: Time that the job should wait in between each try.
Can be ``timedelta`` or ``float`` seconds.
:param timeout: Time elapsed before the task times out and fails.
Can be ``timedelta`` or ``float`` seconds.
This should not be confused with ``execution_timeout`` of the
``BaseOperator`` class. ``timeout`` measures the time elapsed between the
first poke and the current time (taking into account any
reschedule delay between each poke), while ``execution_timeout``
checks the **running** time of the task (leaving out any reschedule
delay). In case that the ``mode`` is ``poke`` (see below), both of
them are equivalent (as the sensor is never rescheduled), which is not
the case in ``reschedule`` mode.
:param mode: How the sensor operates.
Options are: ``{ poke | reschedule }``, default is ``poke``.
When set to ``poke`` the sensor is taking up a worker slot for its
whole execution time and sleeps between pokes. Use this mode if the
expected runtime of the sensor is short or if a short poke interval
is required. Note that the sensor will hold onto a worker slot and
a pool slot for the duration of the sensor's runtime in this mode.
When set to ``reschedule`` the sensor task frees the worker slot when
the criteria is not yet met and it's rescheduled at a later time. Use
this mode if the time before the criteria is met is expected to be
quite long. The poke interval should be more than one minute to
prevent too much load on the scheduler.
:param exponential_backoff: allow progressive longer waits between
pokes by using exponential backoff algorithm
:param max_wait: maximum wait interval between pokes, can be ``timedelta`` or ``float`` seconds
:param silent_fail: If true, and poke method raises an exception different from
AirflowSensorTimeout, AirflowTaskTimeout, AirflowSkipException
and AirflowFailException, the sensor will log the error and continue
its execution. Otherwise, the sensor task fails, and it can be retried
based on the provided `retries` parameter.
:param never_fail: If true, and poke method raises an exception, sensor will be skipped.
Mutually exclusive with soft_fail.
"""
ui_color: str = "#e6f1f2"
valid_modes: Iterable[str] = ["poke", "reschedule"]
_is_sensor: bool = True
def __init__(
self,
*,
poke_interval: timedelta | float = 60,
timeout: timedelta | float = conf.getfloat("sensors", "default_timeout"),
soft_fail: bool = False,
mode: str = "poke",
exponential_backoff: bool = False,
max_wait: timedelta | float | None = None,
silent_fail: bool = False,
never_fail: bool = False,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.poke_interval = self._coerce_poke_interval(poke_interval).total_seconds()
self.soft_fail = soft_fail
self.timeout: int | float = self._coerce_timeout(timeout).total_seconds()
self.mode = mode
self.exponential_backoff = exponential_backoff
self.max_wait = self._coerce_max_wait(max_wait)
if soft_fail is True and never_fail is True:
raise ValueError("soft_fail and never_fail are mutually exclusive, you can not provide both.")
self.silent_fail = silent_fail
self.never_fail = never_fail
self._validate_input_values()
@staticmethod
def _coerce_poke_interval(poke_interval: float | timedelta) -> timedelta:
if isinstance(poke_interval, timedelta):
return poke_interval
if isinstance(poke_interval, (int, float)) and poke_interval >= 0:
return timedelta(seconds=poke_interval)
raise ValueError("Operator arg `poke_interval` must be timedelta object or a non-negative number")
@staticmethod
def _coerce_timeout(timeout: float | timedelta) -> timedelta:
if isinstance(timeout, timedelta):
return timeout
if isinstance(timeout, (int, float)) and timeout >= 0:
return timedelta(seconds=timeout)
raise ValueError("Operator arg `timeout` must be timedelta object or a non-negative number")
@staticmethod
def _coerce_max_wait(max_wait: float | timedelta | None) -> timedelta | None:
if max_wait is None or isinstance(max_wait, timedelta):
return max_wait
if isinstance(max_wait, (int, float)) and max_wait >= 0:
return timedelta(seconds=max_wait)
raise ValueError("Operator arg `max_wait` must be timedelta object or a non-negative number")
def _validate_input_values(self) -> None:
if not isinstance(self.poke_interval, (int, float)) or self.poke_interval < 0:
raise ValueError("The poke_interval must be a non-negative number")
if not isinstance(self.timeout, (int, float)) or self.timeout < 0:
raise ValueError("The timeout must be a non-negative number")
if self.mode not in self.valid_modes:
raise ValueError(
f"The mode must be one of {self.valid_modes},'{self.dag.dag_id if self.has_dag() else ''} "
f".{self.task_id}'; received '{self.mode}'."
)
def poke(self, context: Context) -> bool | PokeReturnValue:
"""Override when deriving this class."""
raise AirflowException("Override me.")
def execute(self, context: Context) -> Any:
started_at: datetime.datetime | float
if self.reschedule:
ti = context["ti"]
first_reschedule_date = ti.get_first_reschedule_date(context)
started_at = start_date = first_reschedule_date or timezone.utcnow()
def run_duration() -> float:
# If we are in reschedule mode, then we have to compute diff
# based on the time in a DB, so can't use time.monotonic
return (timezone.utcnow() - start_date).total_seconds()
else:
started_at = start_monotonic = time.monotonic()
def run_duration() -> float:
return time.monotonic() - start_monotonic
poke_count = 1
xcom_value = None
while True:
try:
poke_return = self.poke(context)
except (
AirflowSensorTimeout,
AirflowTaskTimeout,
AirflowFailException,
) as e:
if self.soft_fail:
raise AirflowSkipException("Skipping due to soft_fail is set to True.") from e
if self.never_fail:
raise AirflowSkipException("Skipping due to never_fail is set to True.") from e
raise e
except AirflowSkipException as e:
raise e
except Exception as e:
if self.silent_fail:
self.log.error("Sensor poke failed: \n %s", traceback.format_exc())
poke_return = False
elif self.never_fail:
raise AirflowSkipException("Skipping due to never_fail is set to True.") from e
else:
raise e
if poke_return:
if isinstance(poke_return, PokeReturnValue):
xcom_value = poke_return.xcom_value
break
if run_duration() > self.timeout:
# If sensor is in soft fail mode but times out raise AirflowSkipException.
message = (
f"Sensor has timed out; run duration of {run_duration()} seconds exceeds "
f"the specified timeout of {self.timeout}."
)
if self.soft_fail:
raise AirflowSkipException(message)
raise AirflowSensorTimeout(message)
if self.reschedule:
next_poke_interval = self._get_next_poke_interval(started_at, run_duration, poke_count)
reschedule_date = timezone.utcnow() + timedelta(seconds=next_poke_interval)
raise AirflowRescheduleException(reschedule_date)
time.sleep(self._get_next_poke_interval(started_at, run_duration, poke_count))
poke_count += 1
self.log.info("Success criteria met. Exiting.")
return xcom_value
def resume_execution(self, next_method: str, next_kwargs: dict[str, Any] | None, context: Context):
try:
return super().resume_execution(next_method, next_kwargs, context)
except TaskDeferralTimeout as e:
raise AirflowSensorTimeout(*e.args) from e
except (AirflowException, TaskDeferralError) as e:
if self.soft_fail:
raise AirflowSkipException(str(e)) from e
raise
def _get_next_poke_interval(
self,
started_at: datetime.datetime | float,
run_duration: Callable[[], float],
poke_count: int,
) -> float:
"""Use similar logic which is used for exponential backoff retry delay for operators."""
if not self.exponential_backoff:
return self.poke_interval
if self.reschedule:
# Calculate elapsed time since the sensor started
elapsed_time = run_duration()
# Initialize variables for the simulation
cumulative_time: float = 0.0
estimated_poke_count: int = 0
while cumulative_time <= elapsed_time:
estimated_poke_count += 1
# Calculate min_backoff for the current try number
min_backoff = max(int(self.poke_interval * (2 ** (estimated_poke_count - 2))), 1)
# Calculate the jitter
run_hash = int(
hashlib.sha1(
f"{self.dag_id}#{self.task_id}#{started_at}#{estimated_poke_count}".encode(),
usedforsecurity=False,
).hexdigest(),
16,
)
modded_hash = min_backoff + run_hash % min_backoff
# Calculate the jitter, which is used to prevent multiple sensors simultaneously poking
interval_with_jitter = min(modded_hash, timedelta.max.total_seconds() - 1)
# Add the interval to the cumulative time
cumulative_time += interval_with_jitter
# Now we have an estimated_poke_count based on the elapsed time
poke_count = estimated_poke_count or poke_count
# The value of min_backoff should always be greater than or equal to 1.
min_backoff = max(int(self.poke_interval * (2 ** (poke_count - 2))), 1)
run_hash = int(
hashlib.sha1(
f"{self.dag_id}#{self.task_id}#{started_at}#{poke_count}".encode(), usedforsecurity=False
).hexdigest(),
16,
)
modded_hash = min_backoff + run_hash % min_backoff
delay_backoff_in_seconds = min(modded_hash, timedelta.max.total_seconds() - 1)
new_interval = min(self.timeout - int(run_duration()), delay_backoff_in_seconds)
if self.max_wait:
new_interval = min(self.max_wait.total_seconds(), new_interval)
self.log.info("new %s interval is %s", self.mode, new_interval)
return new_interval
@property
def reschedule(self):
"""Define mode rescheduled sensors."""
return self.mode == "reschedule"
@classmethod
def get_serialized_fields(cls):
return super().get_serialized_fields() | {"reschedule", "_is_sensor"}
def poke_mode_only(cls):
"""
Decorate a subclass of BaseSensorOperator with poke.
Indicate that instances of this class are only safe to use poke mode.
Will decorate all methods in the class to assert they did not change
the mode from 'poke'.
:param cls: BaseSensor class to enforce methods only use 'poke' mode.
"""
def decorate(cls_type):
def mode_getter(_):
return "poke"
def mode_setter(_, value):
if value != "poke":
raise ValueError(f"Cannot set mode to '{value}'. Only 'poke' is acceptable")
if not issubclass(cls_type, BaseSensorOperator):
raise ValueError(
f"poke_mode_only decorator should only be "
f"applied to subclasses of BaseSensorOperator,"
f" got:{cls_type}."
)
cls_type.mode = property(mode_getter, mode_setter)
return cls_type
return decorate(cls)
| BaseSensorOperator |
python | django__django | django/contrib/postgres/fields/array.py | {
"start": 10508,
"end": 10599
} | class ____(ArrayRHSMixin, lookups.Overlap):
pass
@ArrayField.register_lookup
| ArrayOverlap |
python | google__jax | jax/_src/test_multiprocess.py | {
"start": 3701,
"end": 13964
} | class ____:
"""Add a signal handler that sets a flag if SIGINT or SIGTERM are caught."""
# From https://stackoverflow.com/a/31464349
kill_now = False
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
def exit_gracefully(self, sig_num, unused_stack_frame):
print(f"Caught signal: {signal.Signals(sig_num).name} ({sig_num})")
self.kill_now = True
def _main(argv, shard_main):
# TODO(emilyaf): Enable multiprocess tests on Windows.
if sys.platform == "win32":
print("Multiprocess tests are not supported on Windows.")
return
num_processes = NUM_PROCESSES.value
if MULTIPROCESS_TEST_WORKER_ID.value >= 0:
local_device_ids = _DEVICE_IDS.value
if local_device_ids is not None:
local_device_ids = map(int, local_device_ids)
distributed.initialize(
_MULTIPROCESS_TEST_CONTROLLER_ADDRESS.value,
num_processes=num_processes,
process_id=MULTIPROCESS_TEST_WORKER_ID.value,
local_device_ids=local_device_ids,
heartbeat_timeout_seconds=_HEARTBEAT_TIMEOUT.value,
shutdown_timeout_seconds=_SHUTDOWN_TIMEOUT.value,
initialization_timeout=_INITIALIZATION_TIMEOUT.value,
)
if shard_main is not None:
return shard_main()
return absltest.main(testLoader=jtu.JaxTestLoader())
if not argv[0].endswith(".py"): # Skip the interpreter path if present.
argv = argv[1:]
if num_processes is None:
raise ValueError("num_processes must be set")
gpus_per_process = _GPUS_PER_PROCESS.value
tpu_chips_per_process = _TPU_CHIPS_PER_PROCESS.value
num_tpu_chips = num_processes * tpu_chips_per_process
if num_tpu_chips == 0:
pass
elif num_tpu_chips == 1:
assert tpu_chips_per_process == 1
tpu_host_bounds = "1,1,1"
tpu_chips_per_host_bounds = "1,1,1"
elif num_tpu_chips == 4:
if tpu_chips_per_process == 1:
tpu_host_bounds = "2,2,1"
tpu_chips_per_host_bounds = "1,1,1"
elif tpu_chips_per_process == 2:
tpu_host_bounds = "2,1,1"
tpu_chips_per_host_bounds = "1,2,1"
elif tpu_chips_per_process == 4:
tpu_host_bounds = "1,1,1"
tpu_chips_per_host_bounds = "2,2,1"
else:
raise ValueError(
"Invalid number of TPU chips per worker {}".format(
tpu_chips_per_process
)
)
elif num_tpu_chips == 8:
if tpu_chips_per_process == 1:
tpu_host_bounds = "4,2,1"
tpu_chips_per_host_bounds = "1,1,1"
elif tpu_chips_per_process == 4:
# Note: this branch assumes we are using 2x4 v6e LitePod, and will not
# work with 4x2 v5e LitePod.
tpu_host_bounds = "1,2,1"
tpu_chips_per_host_bounds = "2,2,1"
elif tpu_chips_per_process == 8:
tpu_host_bounds = "1,1,1"
tpu_chips_per_host_bounds = "2,4,1"
else:
# TODO(phawkins): implement other cases.
raise ValueError(
"Invalid number of TPU chips per worker {}".format(
tpu_chips_per_process
)
)
else:
raise ValueError(f"Invalid number of TPU chips {num_tpu_chips}")
if portpicker is None:
slicebuilder_ports = [10000 + i for i in range(num_processes)]
else:
slicebuilder_ports = [
portpicker.pick_unused_port() for _ in range(num_processes)
]
slicebuilder_addresses = ",".join(
f"localhost:{port}" for port in slicebuilder_ports
)
megascale_coordinator_port = None
if gpus_per_process > 0:
# Get the number of GPUs visible to this process without initializing the runtime
if cuda_versions is not None:
local_device_count = cuda_versions.cuda_device_count()
if num_processes * gpus_per_process > local_device_count:
print(
f"Cannot run {num_processes} processes with {gpus_per_process} GPU(s) "
f"each on a system with only {local_device_count} local GPU(s), "
f"starting {local_device_count // gpus_per_process} instead - test "
"cases will likely be skipped!"
)
num_processes = local_device_count // gpus_per_process
if portpicker is None:
jax_port = 9876
else:
# TODO(emilyaf): Use a port server if there are flaky port collisions due
# to pick_unused_port() racing among tests.
jax_port = portpicker.pick_unused_port()
subprocesses = []
output_filenames = []
output_files = []
for i in range(num_processes):
device_ids = None
env = os.environ.copy()
args = [
"/proc/self/exe",
*argv,
f"--num_processes={num_processes}",
f"--multiprocess_test_worker_id={i}",
f"--multiprocess_test_controller_address=localhost:{jax_port}",
f"--heartbeat_timeout={_HEARTBEAT_TIMEOUT.value}",
f"--shutdown_timeout={_SHUTDOWN_TIMEOUT.value}",
f"--barrier_timeout={_BARRIER_TIMEOUT.value}",
f"--initialization_timeout={_INITIALIZATION_TIMEOUT.value}",
"--logtostderr",
]
if num_tpu_chips > 0:
device_ids = range(
i * tpu_chips_per_process, (i + 1) * tpu_chips_per_process)
env["CLOUD_TPU_TASK_ID"] = str(i)
env["TPU_CHIPS_PER_PROCESS_BOUNDS"] = tpu_chips_per_host_bounds
env["TPU_PROCESS_BOUNDS"] = tpu_host_bounds
env["TPU_PROCESS_ADDRESSES"] = slicebuilder_addresses
env["TPU_PROCESS_PORT"] = str(slicebuilder_ports[i])
env["TPU_VISIBLE_CHIPS"] = ",".join(map(str, device_ids))
env["ALLOW_MULTIPLE_LIBTPU_LOAD"] = "1"
if gpus_per_process > 0:
device_ids = range(i * gpus_per_process, (i + 1) * gpus_per_process)
args.append(f"--jax_cuda_visible_devices={','.join(map(str, device_ids))}")
if device_ids is not None:
args.append(f"--device_ids={','.join(map(str, device_ids))}")
cpu_collectives_impl = CPU_COLLECTIVES_IMPLEMENTATION.value
if cpu_collectives_impl:
args.append(
f"--jax_cpu_collectives_implementation={cpu_collectives_impl}"
)
if _ENABLE_MEGASCALE.value or cpu_collectives_impl == "megascale":
if portpicker is None:
megascale_port = 9877
else:
megascale_port = portpicker.pick_unused_port()
if megascale_coordinator_port is None:
megascale_coordinator_port = megascale_port
args += [
f"--megascale_coordinator_address=localhost:{megascale_coordinator_port}",
f"--megascale_port={megascale_port}",
]
args += EXTRA_TEST_ARGS.value
undeclared_outputs = os.environ.get("TEST_UNDECLARED_OUTPUTS_DIR", "/tmp")
stdout_name = f"{undeclared_outputs}/jax_{i}_stdout.log"
stderr_name = f"{undeclared_outputs}/jax_{i}_stderr.log"
if _DUMP_HLO.value:
hlo_dump_path = f"{undeclared_outputs}/jax_{i}_hlo_dump/"
os.makedirs(hlo_dump_path, exist_ok=True)
env["XLA_FLAGS"] = f"--xla_dump_to={hlo_dump_path}"
stdout = open(stdout_name, "wb")
stderr = open(stderr_name, "wb")
print(f"Launching process {i}:")
print(f" stdout: {stdout_name}")
print(f" stderr: {stderr_name}")
proc = subprocess.Popen(args, env=env, stdout=stdout, stderr=stderr)
subprocesses.append(proc)
output_filenames.append((stdout_name, stderr_name))
output_files.append((stdout, stderr))
print(" All launched, running ".center(80, "="), flush=True)
# Wait for all the children to finish or for a SIGTERM from bazel. If we get
# SIGTERM, we still want to collect their logs, so kill them and continue.
killer = GracefulKiller()
running_procs = dict(enumerate(subprocesses))
while not killer.kill_now and running_procs:
time.sleep(0.1)
for i, proc in list(running_procs.items()):
if proc.poll() is not None:
print(f"Process {i} finished.", flush=True)
running_procs.pop(i)
if killer.kill_now and running_procs:
print("Caught termination, terminating remaining children.", flush=True)
# Send a SIGTERM to each child process, to let it know it should terminate.
for i, proc in running_procs.items():
proc.terminate()
print(f"Process {i} terminated.", flush=True)
# We give the child process(es) a few seconds for their own cleanup, and
# keep the rest (up to 15s) for copying the children logs into our own.
time.sleep(5)
# Send a SIGKILL (a "hard" kill) to each child process. This is CRITICAL:
# without it, this process may end up waiting a long time on the proc.wait()
# below, and never get to saving the children logs, making test timeouts
# very hard to debug.
for i, proc in running_procs.items():
proc.kill()
print(f"Process {i} killed.")
print("Killed all child processes.", flush=True)
retvals = []
stdouts = []
stderrs = []
for proc, fds, (stdout, stderr) in zip(
subprocesses, output_files, output_filenames
):
retvals.append(proc.wait())
for fd in fds:
fd.close()
stdouts.append(pathlib.Path(stdout).read_text(errors="replace"))
stderrs.append(pathlib.Path(stderr).read_text(errors="replace"))
print(" All finished ".center(80, "="), flush=True)
print(" Summary ".center(80, "="))
for i, (retval, stdout, stderr) in enumerate(zip(retvals, stdouts, stderrs)):
m = re.search(r"Ran \d+ tests? in [\d.]+s\n\n.*", stderr, re.MULTILINE)
result = m.group().replace("\n\n", "; ") if m else "Test crashed?"
print(
f"Process {i}, ret: {retval}, len(stdout): {len(stdout)}, "
f"len(stderr): {len(stderr)}; {result}"
)
print(" Detailed logs ".center(80, "="))
for i, (retval, stdout, stderr) in enumerate(zip(retvals, stdouts, stderrs)):
print(f" Process {i}: return code: {retval} ".center(80, "="))
if stdout:
print(f" Process {i} stdout ".center(80, "-"))
print(stdout)
if stderr:
print(f" Process {i} stderr ".center(80, "-"))
print(stderr)
print(" Done detailed logs ".center(80, "="), flush=True)
for i, (retval, stderr) in enumerate(zip(retvals, stderrs)):
if retval != 0:
if expect_failures_with_regex is not None:
assert re.search(
expect_failures_with_regex, stderr
), f"process {i} failed, expected regex: {expect_failures_with_regex}"
else:
assert retval == 0, f"process {i} failed, return value: {retval}"
| GracefulKiller |
python | pydantic__pydantic | pydantic-core/python/pydantic_core/core_schema.py | {
"start": 24275,
"end": 26311
} | class ____(TypedDict, total=False):
type: Required[Literal['float']]
allow_inf_nan: bool # whether 'NaN', '+inf', '-inf' should be forbidden. default: True
multiple_of: float
le: float
ge: float
lt: float
gt: float
strict: bool
ref: str
metadata: dict[str, Any]
serialization: SerSchema
def float_schema(
*,
allow_inf_nan: bool | None = None,
multiple_of: float | None = None,
le: float | None = None,
ge: float | None = None,
lt: float | None = None,
gt: float | None = None,
strict: bool | None = None,
ref: str | None = None,
metadata: dict[str, Any] | None = None,
serialization: SerSchema | None = None,
) -> FloatSchema:
"""
Returns a schema that matches a float value, e.g.:
```py
from pydantic_core import SchemaValidator, core_schema
schema = core_schema.float_schema(le=0.8, ge=0.2)
v = SchemaValidator(schema)
assert v.validate_python('0.5') == 0.5
```
Args:
allow_inf_nan: Whether to allow inf and nan values
multiple_of: The value must be a multiple of this number
le: The value must be less than or equal to this number
ge: The value must be greater than or equal to this number
lt: The value must be strictly less than this number
gt: The value must be strictly greater than this number
strict: Whether the value should be a float or a value that can be converted to a float
ref: optional unique identifier of the schema, used to reference the schema in other places
metadata: Any other information you want to include with the schema, not used by pydantic-core
serialization: Custom serialization schema
"""
return _dict_not_none(
type='float',
allow_inf_nan=allow_inf_nan,
multiple_of=multiple_of,
le=le,
ge=ge,
lt=lt,
gt=gt,
strict=strict,
ref=ref,
metadata=metadata,
serialization=serialization,
)
| FloatSchema |
python | doocs__leetcode | solution/1300-1399/1394.Find Lucky Integer in an Array/Solution.py | {
"start": 0,
"end": 161
} | class ____:
def findLucky(self, arr: List[int]) -> int:
cnt = Counter(arr)
return max((x for x, v in cnt.items() if x == v), default=-1)
| Solution |
python | plotly__plotly.py | plotly/graph_objs/histogram/_unselected.py | {
"start": 233,
"end": 3380
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "histogram"
_path_str = "histogram.unselected"
_valid_props = {"marker", "textfont"}
@property
def marker(self):
"""
The 'marker' property is an instance of Marker
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.unselected.Marker`
- A dict of string/value properties that will be passed
to the Marker constructor
Returns
-------
plotly.graph_objs.histogram.unselected.Marker
"""
return self["marker"]
@marker.setter
def marker(self, val):
self["marker"] = val
@property
def textfont(self):
"""
The 'textfont' property is an instance of Textfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.histogram.unselected.Textfont`
- A dict of string/value properties that will be passed
to the Textfont constructor
Returns
-------
plotly.graph_objs.histogram.unselected.Textfont
"""
return self["textfont"]
@textfont.setter
def textfont(self, val):
self["textfont"] = val
@property
def _prop_descriptions(self):
return """\
marker
:class:`plotly.graph_objects.histogram.unselected.Marke
r` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.histogram.unselected.Textf
ont` instance or dict with compatible properties
"""
def __init__(self, arg=None, marker=None, textfont=None, **kwargs):
"""
Construct a new Unselected object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.histogram.Unselected`
marker
:class:`plotly.graph_objects.histogram.unselected.Marke
r` instance or dict with compatible properties
textfont
:class:`plotly.graph_objects.histogram.unselected.Textf
ont` instance or dict with compatible properties
Returns
-------
Unselected
"""
super().__init__("unselected")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.histogram.Unselected
constructor must be a dict or
an instance of :class:`plotly.graph_objs.histogram.Unselected`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("marker", arg, marker)
self._set_property("textfont", arg, textfont)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Unselected |
python | pypa__warehouse | tests/unit/accounts/test_services.py | {
"start": 69257,
"end": 69873
} | class ____:
def test_verify_service(self):
assert verifyClass(IEmailBreachedService, services.NullEmailBreachedService)
def test_check_email(self):
svc = services.NullEmailBreachedService()
assert svc.get_email_breach_count("foo@example.com") == 0
def test_factory(self):
context = pretend.stub()
request = pretend.stub()
svc = services.NullEmailBreachedService.create_service(context, request)
assert isinstance(svc, services.NullEmailBreachedService)
assert svc.get_email_breach_count("foo@example.com") == 0
| TestNullEmailBreachedService |
python | more-itertools__more-itertools | tests/test_more.py | {
"start": 182692,
"end": 183847
} | class ____(TestCase):
def test_invalid_n(self):
with self.assertRaises(ValueError):
list(mi.unique_in_window([], 0))
def test_basic(self):
for iterable, n, expected in [
(range(9), 10, list(range(9))),
(range(20), 10, list(range(20))),
([1, 2, 3, 4, 4, 4], 1, [1, 2, 3, 4, 4, 4]),
([1, 2, 3, 4, 4, 4], 2, [1, 2, 3, 4]),
([1, 2, 3, 4, 4, 4], 3, [1, 2, 3, 4]),
([1, 2, 3, 4, 4, 4], 4, [1, 2, 3, 4]),
([1, 2, 3, 4, 4, 4], 5, [1, 2, 3, 4]),
(
[0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 2, 3, 4, 2],
2,
[0, 1, 0, 2, 3, 4, 2],
),
]:
with self.subTest(expected=expected):
actual = list(mi.unique_in_window(iterable, n))
self.assertEqual(actual, expected)
def test_key(self):
iterable = [0, 1, 3, 4, 5, 6, 7, 8, 9]
n = 3
key = lambda x: x // 3
actual = list(mi.unique_in_window(iterable, n, key=key))
expected = [0, 3, 6, 9]
self.assertEqual(actual, expected)
| UniqueInWindowTests |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 266663,
"end": 267194
} | class ____(sgqlc.types.Input):
"""Ways in which lists of issues can be ordered upon return."""
__schema__ = github_schema
__field_names__ = ("field", "direction")
field = sgqlc.types.Field(sgqlc.types.non_null(PullRequestOrderField), graphql_name="field")
"""The field in which to order pull requests by."""
direction = sgqlc.types.Field(sgqlc.types.non_null(OrderDirection), graphql_name="direction")
"""The direction in which to order pull requests by the specified
field.
"""
| PullRequestOrder |
python | ZoranPandovski__al-go-rithms | puzzles/Rubik's Cube/rubik's cube simulator.py | {
"start": 5477,
"end": 5840
} | class ____:
def __init__(self, id):
self.id = id
self.piece = None
locations1D[id] = self
def setPiece(self, piece):
self.piece = piece
piece.location = self
return self
@staticmethod
def getLocation(id):
if id in locations1D:
return locations1D[id]
return None
| Location |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol41.py | {
"start": 1335,
"end": 1602
} | class ____:
@overload
def to_csv(self, p: WriteBuffer[bytes]) -> None: ...
@overload
def to_csv(self, p: None = ...) -> str: ...
def to_csv(self, p: Any = None) -> Any: ...
def test3(b: BytesIO) -> None:
df = NDFrame()
df.to_csv(b)
| NDFrame |
python | allegroai__clearml | clearml/utilities/gpu/pynvml.py | {
"start": 165286,
"end": 165884
} | class ____(Structure):
_fields_ = [
("lowPwrThreshold", c_uint),
]
def nvmlDeviceSetNvLinkDeviceLowPowerThreshold(device, l1threshold):
c_info = c_nvmlNvLinkPowerThres_t()
c_info.lowPwrThreshold = l1threshold
fn = _nvmlGetFunctionPointer("nvmlDeviceSetNvLinkDeviceLowPowerThreshold")
ret = fn(device, byref(c_info))
_nvmlCheckReturn(ret)
return ret
_nvmlGpuFabricState_t = c_uint
NVML_GPU_FABRIC_STATE_NOT_SUPPORTED = 0
NVML_GPU_FABRIC_STATE_NOT_STARTED = 1
NVML_GPU_FABRIC_STATE_IN_PROGRESS = 2
NVML_GPU_FABRIC_STATE_COMPLETED = 3
| c_nvmlNvLinkPowerThres_t |
python | dagster-io__dagster | python_modules/automation/automation/dagster_docs/validator.py | {
"start": 10525,
"end": 15347
} | class ____:
"""Handles dynamic importing of Python symbols from dotted paths."""
@staticmethod
def import_symbol(dotted_path: str) -> SymbolInfo:
"""Import a symbol from a dotted path like 'dagster.asset' or 'dagster.OpDefinition'.
Args:
dotted_path: The full dotted path to the symbol
Returns:
SymbolInfo object containing the imported symbol and metadata
Raises:
ImportError: If the symbol cannot be imported
AttributeError: If the symbol doesn't exist in the module
"""
parts = dotted_path.split(".")
# Try progressively longer module paths until we find one that imports
for i in range(len(parts), 0, -1):
module_path = ".".join(parts[:i])
try:
module = importlib.import_module(module_path)
except ImportError:
# This module path doesn't exist, try the next shorter one
continue
# If we imported the entire path as a module, return the module itself
if i == len(parts):
return SymbolInfo.create(module, dotted_path)
# Otherwise, walk the remaining attribute path
symbol = module
for attr_name in parts[i:]:
symbol = getattr(symbol, attr_name)
return SymbolInfo.create(symbol, dotted_path)
raise ImportError(f"Could not import symbol '{dotted_path}'")
@staticmethod
def get_all_exported_symbols(module_path: str) -> list[SymbolInfo]:
"""Get all top-level exported symbols from a module (those not starting with '_').
Args:
module_path: The dotted path to the module
Returns:
List of SymbolInfo objects for all top-level exported symbols
"""
module = importlib.import_module(module_path)
symbols = []
for name in dir(module):
if not name.startswith("_"):
symbol = getattr(module, name)
full_path = f"{module_path}.{name}"
symbols.append(SymbolInfo.create(symbol, full_path))
return symbols
@staticmethod
def get_all_public_annotated_methods(module_path: str) -> list[SymbolInfo]:
"""Get all @public-annotated methods from top-level exported classes.
Args:
module_path: The dotted path to the module
Returns:
List of SymbolInfo objects for all @public-annotated methods on top-level exported classes
"""
module = importlib.import_module(module_path)
methods = []
for name in dir(module):
if not name.startswith("_"):
symbol = getattr(module, name)
# Check if this symbol is a top-level exported class with @public annotation
if inspect.isclass(symbol) and is_public(symbol):
# Get all @public-annotated methods from this top-level exported class
class_dotted_path = f"{module_path}.{name}"
method_paths = get_public_methods_from_class(symbol, class_dotted_path)
for method_path in method_paths:
# Extract method name and get the actual method object
method_name = method_path.split(".")[-1]
method = getattr(symbol, method_name)
method_info = SymbolInfo.create(method, method_path)
methods.append(method_info)
return methods
@staticmethod
def get_all_public_symbols(module_path: str) -> list[SymbolInfo]:
"""Get all public symbols from a module (exported symbols marked @public + public-annotated methods).
Args:
module_path: The dotted path to the module
Returns:
List of SymbolInfo objects for all public symbols in the module.
Includes only top-level exported symbols that are marked with @public decorator
and all @public-annotated methods from @public classes.
Raises:
ImportError: If the module cannot be imported
"""
public_symbols = []
# Get top-level exported symbols that are also marked with @public
exported_symbols = SymbolImporter.get_all_exported_symbols(module_path)
# Filter to only include symbols that are marked as @public
for symbol_info in exported_symbols:
if is_public(symbol_info.symbol):
public_symbols.append(symbol_info)
# Get all @public-annotated methods from @public classes
method_symbols = SymbolImporter.get_all_public_annotated_methods(module_path)
public_symbols.extend(method_symbols)
return public_symbols
| SymbolImporter |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_emr.py | {
"start": 7817,
"end": 8639
} | class ____:
def test_serialization(self):
application_id = "test_application_id"
waiter_delay = 30
waiter_max_attempts = 60
aws_conn_id = "aws_default"
trigger = EmrServerlessStopApplicationTrigger(
application_id=application_id,
waiter_delay=waiter_delay,
waiter_max_attempts=waiter_max_attempts,
aws_conn_id=aws_conn_id,
)
classpath, kwargs = trigger.serialize()
assert classpath == "airflow.providers.amazon.aws.triggers.emr.EmrServerlessStopApplicationTrigger"
assert kwargs == {
"application_id": "test_application_id",
"waiter_delay": 30,
"waiter_max_attempts": 60,
"aws_conn_id": "aws_default",
}
| TestEmrServerlessStopApplicationTrigger |
python | mlflow__mlflow | dev/clint/src/clint/config.py | {
"start": 842,
"end": 2798
} | class ____:
select: set[str] = field(default_factory=set)
exclude: list[str] = field(default_factory=list)
# Path -> List of modules that should not be imported globally under that path
forbidden_top_level_imports: dict[str, list[str]] = field(default_factory=dict)
typing_extensions_allowlist: list[str] = field(default_factory=list)
example_rules: list[str] = field(default_factory=list)
# Compiled regex pattern -> Set of rule names to ignore for files matching the pattern
per_file_ignores: dict[re.Pattern[str], set[str]] = field(default_factory=dict)
@classmethod
def load(cls) -> Self:
repo_root = get_repo_root()
pyproject = repo_root / "pyproject.toml"
if not pyproject.exists():
return cls()
with pyproject.open("rb") as f:
data = tomli.load(f)
clint = data.get("tool", {}).get("clint", {})
if not clint:
return cls()
per_file_ignores_raw = clint.get("per-file-ignores", {})
per_file_ignores: dict[re.Pattern[str], set[str]] = {}
for pattern, rules in per_file_ignores_raw.items():
per_file_ignores[re.compile(pattern)] = set(rules)
select = clint.get("select")
if select is None:
select = ALL_RULES
else:
if unknown_rules := set(select) - ALL_RULES:
raise ValueError(f"Unknown rules in 'select': {unknown_rules}")
select = set(select)
exclude_paths = clint.get("exclude", [])
_validate_exclude_paths(exclude_paths)
return cls(
select=select,
exclude=exclude_paths,
forbidden_top_level_imports=clint.get("forbidden-top-level-imports", {}),
typing_extensions_allowlist=clint.get("typing-extensions-allowlist", []),
example_rules=clint.get("example-rules", []),
per_file_ignores=per_file_ignores,
)
| Config |
python | run-llama__llama_index | llama-index-integrations/retrievers/llama-index-retrievers-pathway/llama_index/retrievers/pathway/base.py | {
"start": 3722,
"end": 5111
} | class ____(BaseRetriever):
"""
Pathway retriever.
Pathway is an open data processing framework.
It allows you to easily develop data transformation pipelines
that work with live data sources and changing data.
This is the client that implements Retriever API for PathwayVectorServer.
"""
def __init__(
self,
host: Optional[str] = None,
port: Optional[int] = None,
url: Optional[str] = None,
similarity_top_k: int = DEFAULT_SIMILARITY_TOP_K,
callback_manager: Optional[CallbackManager] = None,
) -> None:
"""Initializing the Pathway retriever client."""
self.client = _VectorStoreClient(host, port, url)
self.similarity_top_k = similarity_top_k
super().__init__(callback_manager)
def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:
"""Retrieve."""
rets = self.client(query=query_bundle.query_str, k=self.similarity_top_k)
items = [
NodeWithScore(
node=TextNode(text=ret["text"], extra_info=ret["metadata"]),
# Transform cosine distance into a similairty score
# (higher is more similar)
score=1 - ret["dist"],
)
for ret in rets
]
return sorted(items, key=lambda x: x.score or 0.0, reverse=True)
| PathwayRetriever |
python | plotly__plotly.py | tests/test_core/test_update_objects/test_update_layout.py | {
"start": 66,
"end": 2067
} | class ____(TestCase):
def setUp(self):
import plotly.io as pio
pio.templates.default = None
def test_update_layout_kwargs(self):
# Create initial figure
fig = go.Figure()
fig.layout.title.font.size = 10
# Grab copy of original figure
orig_fig = go.Figure(fig)
fig.update_layout(title_font_family="Courier New")
orig_fig.layout.update(title_font_family="Courier New")
self.assertEqual(fig, orig_fig)
def test_update_layout_dict(self):
# Create initial figure
fig = go.Figure()
fig.layout.title.font.size = 10
# Grab copy of original figure
orig_fig = go.Figure(fig)
fig.update_layout(dict(title=dict(font=dict(family="Courier New"))))
orig_fig.layout.update(title_font_family="Courier New")
self.assertEqual(fig, orig_fig)
def test_update_layout_overwrite(self):
fig = go.Figure(
layout=go.Layout(
annotations=[
go.layout.Annotation(text="one"),
go.layout.Annotation(text="two"),
]
)
)
fig.update_layout(
overwrite=True,
annotations=[
go.layout.Annotation(width=10),
go.layout.Annotation(width=20),
go.layout.Annotation(width=30),
go.layout.Annotation(width=40),
go.layout.Annotation(width=50),
],
)
expected = {
"annotations": [
{"width": 10},
{"width": 20},
{"width": 30},
{"width": 40},
{"width": 50},
]
}
fig.layout.pop("template")
self.assertEqual(fig.layout.to_plotly_json(), expected)
# Remove all annotations
fig.update_layout(overwrite=True, annotations=None)
self.assertEqual(fig.layout.annotations, ())
| TestUpdateLayout |
python | Netflix__metaflow | test/test_config/helloconfig.py | {
"start": 575,
"end": 818
} | class ____(FlowMutator):
def mutate(self, mutable_flow):
for name, s in mutable_flow.steps:
if name in mutable_flow.config.run_on_titus:
s.add_decorator(titus, cpu=mutable_flow.config.cpu_count)
| TitusOrNot |
python | doocs__leetcode | solution/3000-3099/3044.Most Frequent Prime/Solution.py | {
"start": 0,
"end": 975
} | class ____:
def mostFrequentPrime(self, mat: List[List[int]]) -> int:
def is_prime(x: int) -> int:
return all(x % i != 0 for i in range(2, isqrt(x) + 1))
m, n = len(mat), len(mat[0])
cnt = Counter()
for i in range(m):
for j in range(n):
for a in range(-1, 2):
for b in range(-1, 2):
if a == 0 and b == 0:
continue
x, y, v = i + a, j + b, mat[i][j]
while 0 <= x < m and 0 <= y < n:
v = v * 10 + mat[x][y]
if is_prime(v):
cnt[v] += 1
x, y = x + a, y + b
ans, mx = -1, 0
for v, x in cnt.items():
if mx < x:
mx = x
ans = v
elif mx == x:
ans = max(ans, v)
return ans
| Solution |
python | google__jax | tests/pallas/mosaic_gpu_test.py | {
"start": 193440,
"end": 200853
} | class ____(PallasTest, jtu.CudaArchSpecificTest):
def test_multiple_wg(self):
@functools.partial(
self.kernel,
out_shape=jnp.zeros((2, 128), np.int32),
num_threads=2,
thread_name="wg",
)
def kernel(o_ref):
wg_idx = jax.lax.axis_index("wg")
o_ref[wg_idx] = jnp.broadcast_to(wg_idx, (128,))
np.testing.assert_array_equal(
kernel(), np.repeat(np.arange(2), 128).reshape(2, 128)
)
def test_multiple_wg_with_grid(self):
@functools.partial(
self.kernel,
out_shape=jnp.zeros((4, 2, 128), np.int32),
grid=(2, 2),
grid_names=("x", "y"),
num_threads=2,
thread_name="wg",
)
def kernel(o_ref):
xy_idx = jax.lax.axis_index(("x", "y"))
yx_idx = jax.lax.axis_index(("y", "x"))
wg_idx = jax.lax.axis_index("wg")
num_wgs = jax.lax.axis_size("wg")
o_ref[xy_idx, wg_idx] = jnp.broadcast_to(
yx_idx * num_wgs + wg_idx, (128,)
)
np.testing.assert_array_equal(
kernel(), np.repeat([0, 1, 4, 5, 2, 3, 6, 7], 128).reshape(4, 2, 128)
)
def test_multiple_wg_with_squashed_grid(self):
# Tests whether a grid with >3 logical dimensions is correctly squashed to
# 3 CUDA grid dimensions.
b = 4
x_dim = 3
y_dim = 5
z_dim = 7
num_threads = 2
@functools.partial(
self.kernel,
out_shape=jnp.zeros(
(b, x_dim, y_dim, z_dim, num_threads, 128), np.int32
),
grid=(b, x_dim, y_dim, z_dim),
grid_names=("b", "x", "y", "z"),
num_threads=num_threads,
thread_name="wg",
)
def kernel(o_ref):
b_idx = jax.lax.axis_index("b")
x_idx = jax.lax.axis_index("x")
y_idx = jax.lax.axis_index("y")
z_idx = jax.lax.axis_index("z")
wg_idx = jax.lax.axis_index("wg")
bxyzw_idx = jax.lax.axis_index(("b", "x", "y", "z", "wg"))
o_ref[b_idx, x_idx, y_idx, z_idx, wg_idx] = jnp.broadcast_to(
bxyzw_idx, (128,)
)
result = kernel()[:, :, :, :, :, 0]
ref = np.arange(b * x_dim * y_dim * z_dim * num_threads).reshape(
result.shape
)
np.testing.assert_array_equal(result, ref)
def test_cross_wg_barrier(self):
@functools.partial(
self.kernel,
out_shape=jnp.zeros((2, 128), np.int32),
# Each warpgroup is a single logical thread!
scratch_shapes=[plgpu.Barrier(num_arrivals=2)],
num_threads=2,
thread_name="wg",
)
def kernel(o_ref, barrier):
plgpu.barrier_arrive(barrier)
plgpu.barrier_wait(barrier)
wg_idx = jax.lax.axis_index("wg")
o_ref[wg_idx] = jnp.broadcast_to(wg_idx, (128,))
np.testing.assert_array_equal(
kernel(), np.repeat([0, 1], 128).reshape(2, 128)
)
def test_cluster(self):
@functools.partial(
self.kernel,
out_shape=jnp.zeros(128, np.int32),
grid=(2,),
grid_names=("x",),
cluster=(2,),
cluster_names=("cluster",),
)
def kernel(ref):
block_idx = jax.lax.axis_index("x")
cluster_idx = jax.lax.axis_index("cluster")
pl.debug_print("block: {} cluster: {}", block_idx, cluster_idx)
ref[...] = ref[...]
with self.capture_stdout() as output:
jax.block_until_ready(kernel())
self.assertEqual(
set(output().splitlines()),
{
"block: 0 cluster: 0",
"block: 1 cluster: 0",
"block: 0 cluster: 1",
"block: 1 cluster: 1",
},
)
def test_realistic_matmul_with_cluster(self):
self.skip_unless_sm90a() # Requires WGMMA.
dtype = jnp.float16
swizzle = 128
elems_128b = swizzle // jnp.dtype(dtype).itemsize
grid_m, grid_k, grid_n = 132, 10, 32
# TODO(slebedev): Remove ``grid_tile_n`` to simplify the test.
grid_tile_n = 4
assert grid_n % grid_tile_n == 0
cluster_m = 2
cluster_n = 2
cluster_tile_n = min(cluster_n, grid_tile_n)
tile_m = tile_n = 128
assert tile_m % elems_128b == 0
tile_k = elems_128b
m, k, n = grid_m * tile_m, grid_k * tile_k, grid_n * tile_n
transforms = self.default_transforms(dtype=dtype)
max_concurrent_steps = 2
delay_release = 1
@functools.partial(
self.kernel,
out_shape=jax.ShapeDtypeStruct((m, n), dtype),
scratch_shapes=[
plgpu.SMEM(
(max_concurrent_steps, tile_m, tile_k),
dtype,
transforms=transforms,
),
plgpu.SMEM(
(max_concurrent_steps, tile_k, tile_n),
dtype,
transforms=transforms,
),
plgpu.SMEM((tile_m, tile_n), dtype, transforms=transforms),
plgpu.ACC((tile_m, tile_n), jnp.float32),
plgpu.Barrier(num_arrivals=2, num_barriers=max_concurrent_steps),
plgpu.ClusterBarrier(
collective_axes=(("x", "z"), "y"),
num_barriers=max_concurrent_steps,
),
],
grid=(grid_tile_n, grid_m, grid_n // grid_tile_n),
grid_names=("tile_n", "m", "n"),
cluster=(cluster_tile_n, cluster_m, cluster_n // cluster_tile_n),
cluster_names=("x", "y", "z"),
)
def kernel(
a_gmem,
b_gmem,
o_gmem,
a_smem,
b_smem,
o_smem,
acc,
barrier,
cluster_barrier,
):
m_slice = pl.ds(lax.axis_index("m") * tile_m, tile_m)
n_slice = pl.ds(
(lax.axis_index("tile_n") + lax.axis_index("n") * grid_tile_n)
* tile_n,
tile_n,
)
def fetch(step, slot):
if not isinstance(slot, int): # Skip in initialization.
plgpu.barrier_arrive(cluster_barrier.at[slot])
plgpu.barrier_wait(cluster_barrier.at[slot])
k_slice = pl.ds(step * tile_k, tile_k)
plgpu.copy_gmem_to_smem(
a_gmem.at[m_slice, k_slice],
a_smem.at[slot],
barrier.at[slot],
collective_axes=("x", "z"),
)
plgpu.copy_gmem_to_smem(
b_gmem.at[k_slice, n_slice],
b_smem.at[slot],
barrier.at[slot],
collective_axes="y",
)
# Initialize the pipeline.
for slot in range(min(max_concurrent_steps, grid_k)):
fetch(slot, slot)
def body(step, _):
slot = step % max_concurrent_steps
plgpu.barrier_wait(barrier.at[slot])
plgpu.wgmma(acc, a_smem.at[slot], b_smem.at[slot])
plgpu.wgmma_wait(delay_release)
fetch_step = step + (max_concurrent_steps - delay_release)
fetch_slot = lax.rem(fetch_step, max_concurrent_steps)
jax.lax.cond(
lax.bitwise_and(step >= delay_release, fetch_step < grid_k),
lambda: fetch(fetch_step, fetch_slot),
lambda: None,
)
return ()
jax.lax.fori_loop(0, grid_k, body, ())
# Finalize the pipeline.
o_smem[...] = acc[...].astype(dtype)
plgpu.commit_smem()
plgpu.copy_smem_to_gmem(o_smem, o_gmem.at[m_slice, n_slice])
plgpu.wait_smem_to_gmem(0)
key1, key2 = jax.random.split(jax.random.key(42), 2)
a = jax.random.uniform(key1, shape=(m, k), dtype=dtype)
b = jax.random.uniform(key2, shape=(k, n), dtype=dtype)
np.testing.assert_array_equal(kernel(a, b), a @ b)
| CoreMapTest |
python | mlflow__mlflow | mlflow/haystack/autolog.py | {
"start": 2571,
"end": 8091
} | class ____(SimpleSpanProcessor):
def __init__(self):
self.span_exporter = SpanExporter()
self._pipeline_io: dict[str, tuple[dict[str, Any], dict[str, Any]]] = {}
def on_start(self, span: OTelSpan, parent_context: Context | None = None):
tracer = _get_tracer(__name__)
tracer.span_processor.on_start(span, parent_context)
trace_id = generate_trace_id_v3(span)
mlflow_span = create_mlflow_span(span, trace_id)
InMemoryTraceManager.get_instance().register_span(mlflow_span)
def on_end(self, span: OTelReadableSpan) -> None:
mlflow_span = get_mlflow_span_for_otel_span(span)
if mlflow_span is None:
_logger.debug("Span not found in the map. Skipping end.")
return
with _bypass_attribute_guard(mlflow_span._span):
if span.name in ("haystack.pipeline.run", "haystack.async_pipeline.run"):
self.set_pipeline_info(mlflow_span, span)
elif span.name in ("haystack.component.run"):
self.set_component_info(mlflow_span, span)
tracer = _get_tracer(__name__)
tracer.span_processor.on_end(span)
def set_component_info(self, mlflow_span: LiveSpan, span: OTelReadableSpan) -> None:
comp_alias = span.attributes.get("haystack.component.name")
comp_type = span.attributes.get("haystack.component.type")
mlflow_span.set_span_type(_infer_span_type_from_haystack(comp_type, comp_alias, span))
# Haystack spans originally have name='haystack.component.run'. We need to update both the
# _name field of the Otel span and the _original_name field of the MLflow span to
# customize the span name here, as otherwise it would be overwritten in the
# deduplication process
span_name = comp_type or comp_alias or span.name
mlflow_span._span._name = span_name
mlflow_span._original_name = span_name
if (inputs := span.attributes.get("haystack.component.input")) is not None:
try:
mlflow_span.set_inputs(json.loads(inputs))
except Exception:
mlflow_span.set_inputs(inputs)
if (outputs := span.attributes.get("haystack.component.output")) is not None:
try:
mlflow_span.set_outputs(json.loads(outputs))
except Exception:
mlflow_span.set_outputs(outputs)
if usage := _parse_token_usage(mlflow_span.outputs):
mlflow_span.set_attribute(SpanAttributeKey.CHAT_USAGE, usage)
if parent_id := mlflow_span.parent_id:
key = comp_alias or comp_type or mlflow_span.name
inputs_agg, outputs_agg = self._pipeline_io.setdefault(parent_id, ({}, {}))
if mlflow_span.inputs is not None:
inputs_agg[key] = mlflow_span.inputs
if mlflow_span.outputs is not None:
outputs_agg[key] = mlflow_span.outputs
def set_pipeline_info(self, mlflow_span: LiveSpan, span: OTelReadableSpan) -> None:
# Pipelines are CHAINs
mlflow_span.set_span_type(SpanType.CHAIN)
if pipe_name := span.attributes.get("haystack.pipeline.name"):
mlflow_span._span._name = pipe_name
if (inputs := span.attributes.get("haystack.pipeline.input")) is not None:
try:
mlflow_span.set_inputs(json.loads(inputs))
except Exception:
mlflow_span.set_inputs(inputs)
if (outputs := span.attributes.get("haystack.pipeline.output")) is not None:
try:
mlflow_span.set_outputs(json.loads(outputs))
except Exception:
mlflow_span.set_outputs(outputs)
if mlflow_span.span_id in self._pipeline_io:
inputs_agg, outputs_agg = self._pipeline_io.pop(mlflow_span.span_id)
if mlflow_span.inputs is None and inputs_agg:
mlflow_span.set_inputs(inputs_agg)
if mlflow_span.outputs is None and outputs_agg:
mlflow_span.set_outputs(outputs_agg)
def _parse_token_usage(outputs: Any) -> dict[str, int] | None:
try:
if not isinstance(outputs, dict):
return None
replies = outputs.get("replies")
if isinstance(replies, list) and len(replies) > 0:
usage = (
replies[0].get("meta", {}).get("usage", {}) if isinstance(replies[0], dict) else {}
)
meta = outputs.get("meta")
if isinstance(meta, list) and len(meta) > 0:
usage = meta[0].get("usage", {}) if isinstance(meta[0], dict) else {}
if isinstance(usage, dict):
in_tok = usage.get("prompt_tokens", 0)
out_tok = usage.get("completion_tokens", 0)
tot_tok = usage.get("total_tokens", 0)
return {
TokenUsageKey.INPUT_TOKENS: in_tok,
TokenUsageKey.OUTPUT_TOKENS: out_tok,
TokenUsageKey.TOTAL_TOKENS: tot_tok,
}
except Exception:
_logger.debug("Failed to parse token usage from outputs.", exc_info=True)
def teardown_haystack_tracing():
provider = get_tracer_provider()
if isinstance(provider, SDKTracerProvider):
span_processors = getattr(provider._active_span_processor, "_span_processors", ())
provider._active_span_processor._span_processors = tuple(
p for p in span_processors if not isinstance(p, HaystackSpanProcessor)
)
| HaystackSpanProcessor |
python | doocs__leetcode | lcof/面试题11. 旋转数组的最小数字/Solution.py | {
"start": 0,
"end": 349
} | class ____:
def minArray(self, numbers: List[int]) -> int:
l, r = 0, len(numbers) - 1
while l < r:
m = (l + r) >> 1
if numbers[m] > numbers[r]:
l = m + 1
elif numbers[m] < numbers[r]:
r = m
else:
r -= 1
return numbers[l]
| Solution |
python | huggingface__transformers | src/transformers/models/vit_mae/modeling_vit_mae.py | {
"start": 20851,
"end": 21393
} | class ____(nn.Module):
def __init__(self, config: ViTMAEConfig):
super().__init__()
self.config = config
self.layer = nn.ModuleList([ViTMAELayer(config) for _ in range(config.num_hidden_layers)])
self.gradient_checkpointing = False
def forward(self, hidden_states: torch.Tensor) -> BaseModelOutput:
for i, layer_module in enumerate(self.layer):
hidden_states = layer_module(hidden_states)
return BaseModelOutput(last_hidden_state=hidden_states)
@auto_docstring
| ViTMAEEncoder |
python | pypa__pipenv | pipenv/vendor/tomlkit/container.py | {
"start": 25928,
"end": 29694
} | class ____(_CustomDict):
@staticmethod
def validate(container: Container, indices: tuple[int, ...]) -> None:
"""Validate out of order tables in the given container"""
# Append all items to a temp container to see if there is any error
temp_container = Container(True)
for i in indices:
_, item = container._body[i]
if isinstance(item, Table):
for k, v in item.value.body:
temp_container.append(k, v, validate=False)
temp_container._validate_out_of_order_table()
def __init__(self, container: Container, indices: tuple[int, ...]) -> None:
self._container = container
self._internal_container = Container(True)
self._tables = []
self._tables_map = {}
for i in indices:
_, item = self._container._body[i]
if isinstance(item, Table):
self._tables.append(item)
table_idx = len(self._tables) - 1
for k, v in item.value.body:
self._internal_container._raw_append(k, v)
self._tables_map.setdefault(k, []).append(table_idx)
if k is not None:
dict.__setitem__(self, k.key, v)
self._internal_container._validate_out_of_order_table()
def unwrap(self) -> str:
return self._internal_container.unwrap()
@property
def value(self):
return self._internal_container.value
def __getitem__(self, key: Key | str) -> Any:
if key not in self._internal_container:
raise NonExistentKey(key)
return self._internal_container[key]
def __setitem__(self, key: Key | str, item: Any) -> None:
if key in self._tables_map:
# Overwrite the first table and remove others
indices = self._tables_map[key]
while len(indices) > 1:
table = self._tables[indices.pop()]
self._remove_table(table)
self._tables[indices[0]][key] = item
elif self._tables:
table = self._tables[0]
table[key] = item
else:
self._container[key] = item
self._internal_container[key] = item
if key is not None:
dict.__setitem__(self, key, item)
def _remove_table(self, table: Table) -> None:
"""Remove table from the parent container"""
self._tables.remove(table)
for idx, item in enumerate(self._container._body):
if item[1] is table:
self._container._remove_at(idx)
break
def __delitem__(self, key: Key | str) -> None:
if key not in self._tables_map:
raise NonExistentKey(key)
for i in reversed(self._tables_map[key]):
table = self._tables[i]
del table[key]
if not table and len(self._tables) > 1:
self._remove_table(table)
del self._tables_map[key]
del self._internal_container[key]
if key is not None:
dict.__delitem__(self, key)
def __iter__(self) -> Iterator[str]:
return iter(dict.keys(self))
def __len__(self) -> int:
return dict.__len__(self)
def setdefault(self, key: Key | str, default: Any) -> Any:
super().setdefault(key, default=default)
return self[key]
def ends_with_whitespace(it: Any) -> bool:
"""Returns ``True`` if the given item ``it`` is a ``Table`` or ``AoT`` object
ending with a ``Whitespace``.
"""
return (
isinstance(it, Table) and isinstance(it.value._previous_item(), Whitespace)
) or (isinstance(it, AoT) and len(it) > 0 and isinstance(it[-1], Whitespace))
| OutOfOrderTableProxy |
python | encode__starlette | starlette/applications.py | {
"start": 789,
"end": 10347
} | class ____:
"""Creates an Starlette application."""
def __init__(
self: AppType,
debug: bool = False,
routes: Sequence[BaseRoute] | None = None,
middleware: Sequence[Middleware] | None = None,
exception_handlers: Mapping[Any, ExceptionHandler] | None = None,
on_startup: Sequence[Callable[[], Any]] | None = None,
on_shutdown: Sequence[Callable[[], Any]] | None = None,
lifespan: Lifespan[AppType] | None = None,
) -> None:
"""Initializes the application.
Parameters:
debug: Boolean indicating if debug tracebacks should be returned on errors.
routes: A list of routes to serve incoming HTTP and WebSocket requests.
middleware: A list of middleware to run for every request. A starlette
application will always automatically include two middleware classes.
`ServerErrorMiddleware` is added as the very outermost middleware, to handle
any uncaught errors occurring anywhere in the entire stack.
`ExceptionMiddleware` is added as the very innermost middleware, to deal
with handled exception cases occurring in the routing or endpoints.
exception_handlers: A mapping of either integer status codes,
or exception class types onto callables which handle the exceptions.
Exception handler callables should be of the form
`handler(request, exc) -> response` and may be either standard functions, or
async functions.
on_startup: A list of callables to run on application startup.
Startup handler callables do not take any arguments, and may be either
standard functions, or async functions.
on_shutdown: A list of callables to run on application shutdown.
Shutdown handler callables do not take any arguments, and may be either
standard functions, or async functions.
lifespan: A lifespan context function, which can be used to perform
startup and shutdown tasks. This is a newer style that replaces the
`on_startup` and `on_shutdown` handlers. Use one or the other, not both.
"""
# The lifespan context function is a newer style that replaces
# on_startup / on_shutdown handlers. Use one or the other, not both.
assert lifespan is None or (on_startup is None and on_shutdown is None), (
"Use either 'lifespan' or 'on_startup'/'on_shutdown', not both."
)
self.debug = debug
self.state = State()
self.router = Router(routes, on_startup=on_startup, on_shutdown=on_shutdown, lifespan=lifespan)
self.exception_handlers = {} if exception_handlers is None else dict(exception_handlers)
self.user_middleware = [] if middleware is None else list(middleware)
self.middleware_stack: ASGIApp | None = None
def build_middleware_stack(self) -> ASGIApp:
debug = self.debug
error_handler = None
exception_handlers: dict[Any, ExceptionHandler] = {}
for key, value in self.exception_handlers.items():
if key in (500, Exception):
error_handler = value
else:
exception_handlers[key] = value
middleware = (
[Middleware(ServerErrorMiddleware, handler=error_handler, debug=debug)]
+ self.user_middleware
+ [Middleware(ExceptionMiddleware, handlers=exception_handlers, debug=debug)]
)
app = self.router
for cls, args, kwargs in reversed(middleware):
app = cls(app, *args, **kwargs)
return app
@property
def routes(self) -> list[BaseRoute]:
return self.router.routes
def url_path_for(self, name: str, /, **path_params: Any) -> URLPath:
return self.router.url_path_for(name, **path_params)
async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
scope["app"] = self
if self.middleware_stack is None:
self.middleware_stack = self.build_middleware_stack()
await self.middleware_stack(scope, receive, send)
def on_event(self, event_type: str) -> Callable: # type: ignore[type-arg]
return self.router.on_event(event_type) # pragma: no cover
def mount(self, path: str, app: ASGIApp, name: str | None = None) -> None:
self.router.mount(path, app=app, name=name) # pragma: no cover
def host(self, host: str, app: ASGIApp, name: str | None = None) -> None:
self.router.host(host, app=app, name=name) # pragma: no cover
def add_middleware(
self,
middleware_class: _MiddlewareFactory[P],
*args: P.args,
**kwargs: P.kwargs,
) -> None:
if self.middleware_stack is not None: # pragma: no cover
raise RuntimeError("Cannot add middleware after an application has started")
self.user_middleware.insert(0, Middleware(middleware_class, *args, **kwargs))
def add_exception_handler(
self,
exc_class_or_status_code: int | type[Exception],
handler: ExceptionHandler,
) -> None: # pragma: no cover
self.exception_handlers[exc_class_or_status_code] = handler
def add_event_handler(
self,
event_type: str,
func: Callable, # type: ignore[type-arg]
) -> None: # pragma: no cover
self.router.add_event_handler(event_type, func)
def add_route(
self,
path: str,
route: Callable[[Request], Awaitable[Response] | Response],
methods: list[str] | None = None,
name: str | None = None,
include_in_schema: bool = True,
) -> None: # pragma: no cover
self.router.add_route(path, route, methods=methods, name=name, include_in_schema=include_in_schema)
def add_websocket_route(
self,
path: str,
route: Callable[[WebSocket], Awaitable[None]],
name: str | None = None,
) -> None: # pragma: no cover
self.router.add_websocket_route(path, route, name=name)
def exception_handler(self, exc_class_or_status_code: int | type[Exception]) -> Callable: # type: ignore[type-arg]
warnings.warn(
"The `exception_handler` decorator is deprecated, and will be removed in version 1.0.0. "
"Refer to https://starlette.dev/exceptions/ for the recommended approach.",
DeprecationWarning,
)
def decorator(func: Callable) -> Callable: # type: ignore[type-arg]
self.add_exception_handler(exc_class_or_status_code, func)
return func
return decorator
def route(
self,
path: str,
methods: list[str] | None = None,
name: str | None = None,
include_in_schema: bool = True,
) -> Callable: # type: ignore[type-arg]
"""
We no longer document this decorator style API, and its usage is discouraged.
Instead you should use the following approach:
>>> routes = [Route(path, endpoint=...), ...]
>>> app = Starlette(routes=routes)
"""
warnings.warn(
"The `route` decorator is deprecated, and will be removed in version 1.0.0. "
"Refer to https://starlette.dev/routing/ for the recommended approach.",
DeprecationWarning,
)
def decorator(func: Callable) -> Callable: # type: ignore[type-arg]
self.router.add_route(
path,
func,
methods=methods,
name=name,
include_in_schema=include_in_schema,
)
return func
return decorator
def websocket_route(self, path: str, name: str | None = None) -> Callable: # type: ignore[type-arg]
"""
We no longer document this decorator style API, and its usage is discouraged.
Instead you should use the following approach:
>>> routes = [WebSocketRoute(path, endpoint=...), ...]
>>> app = Starlette(routes=routes)
"""
warnings.warn(
"The `websocket_route` decorator is deprecated, and will be removed in version 1.0.0. "
"Refer to https://starlette.dev/routing/#websocket-routing for the recommended approach.",
DeprecationWarning,
)
def decorator(func: Callable) -> Callable: # type: ignore[type-arg]
self.router.add_websocket_route(path, func, name=name)
return func
return decorator
def middleware(self, middleware_type: str) -> Callable: # type: ignore[type-arg]
"""
We no longer document this decorator style API, and its usage is discouraged.
Instead you should use the following approach:
>>> middleware = [Middleware(...), ...]
>>> app = Starlette(middleware=middleware)
"""
warnings.warn(
"The `middleware` decorator is deprecated, and will be removed in version 1.0.0. "
"Refer to https://starlette.dev/middleware/#using-middleware for recommended approach.",
DeprecationWarning,
)
assert middleware_type == "http", 'Currently only middleware("http") is supported.'
def decorator(func: Callable) -> Callable: # type: ignore[type-arg]
self.add_middleware(BaseHTTPMiddleware, dispatch=func)
return func
return decorator
| Starlette |
python | django__django | django/contrib/gis/db/backends/spatialite/features.py | {
"start": 231,
"end": 876
} | class ____(BaseSpatialFeatures, SQLiteDatabaseFeatures):
can_alter_geometry_field = False # Not implemented
supports_3d_storage = True
@cached_property
def supports_area_geodetic(self):
return bool(self.connection.ops.geom_lib_version())
@cached_property
def django_test_skips(self):
skips = super().django_test_skips
skips.update(
{
"SpatiaLite doesn't support distance lookups with Distance objects.": {
"gis_tests.geogapp.tests.GeographyTest.test02_distance_lookup",
},
}
)
return skips
| DatabaseFeatures |
python | sphinx-doc__sphinx | sphinx/util/cfamily.py | {
"start": 5402,
"end": 6047
} | class ____(ASTAttribute):
def __init__(self, attrs: list[ASTGnuAttribute]) -> None:
self.attrs = attrs
def __eq__(self, other: object) -> bool:
if not isinstance(other, ASTGnuAttributeList):
return NotImplemented
return self.attrs == other.attrs
def __hash__(self) -> int:
return hash(self.attrs)
def _stringify(self, transform: StringifyTransform) -> str:
attrs = ', '.join(map(transform, self.attrs))
return f'__attribute__(({attrs}))'
def describe_signature(self, signode: TextElement) -> None:
signode.append(nodes.Text(str(self)))
| ASTGnuAttributeList |
python | getlogbook__logbook | src/logbook/ticketing.py | {
"start": 452,
"end": 1624
} | class ____:
"""Represents a ticket from the database."""
level_name = level_name_property()
def __init__(self, db, row):
self.db = db
self.__dict__.update(row._mapping)
@cached_property
def last_occurrence(self):
"""The last occurrence."""
if rv := self.get_occurrences(limit=1):
return rv[0]
def get_occurrences(self, order_by="-time", limit=50, offset=0):
"""Returns the occurrences for this ticket."""
return self.db.get_occurrences(self.ticket_id, order_by, limit, offset)
def solve(self):
"""Marks this ticket as solved."""
self.db.solve_ticket(self.ticket_id)
self.solved = True
def delete(self):
"""Deletes the ticket from the database."""
self.db.delete_ticket(self.ticket_id)
# Silence DeprecationWarning
__hash__ = None
def __eq__(self, other):
equal = True
for key in self.__dict__.keys():
if getattr(self, key) != getattr(other, key):
equal = False
break
return equal
def __ne__(self, other):
return not self.__eq__(other)
| Ticket |
python | jupyterlab__jupyterlab | jupyterlab/utils.py | {
"start": 137,
"end": 303
} | class ____(Warning): # noqa
"""Create our own deprecation class, since Python >= 2.7
silences deprecations by default.
"""
pass
| jupyterlab_deprecation |
python | scrapy__scrapy | tests/test_spidermiddleware_httperror.py | {
"start": 2001,
"end": 3212
} | class ____:
@pytest.fixture
def mw(self) -> HttpErrorMiddleware:
crawler = get_crawler(DefaultSpider)
crawler.spider = crawler._create_spider()
return HttpErrorMiddleware.from_crawler(crawler)
def test_process_spider_input(
self, mw: HttpErrorMiddleware, res200: Response, res404: Response
) -> None:
mw.process_spider_input(res200)
with pytest.raises(HttpError):
mw.process_spider_input(res404)
def test_process_spider_exception(
self, mw: HttpErrorMiddleware, res404: Response
) -> None:
assert mw.process_spider_exception(res404, HttpError(res404)) == []
assert mw.process_spider_exception(res404, Exception()) is None
def test_handle_httpstatus_list(
self, mw: HttpErrorMiddleware, res404: Response
) -> None:
request = Request(
"http://scrapytest.org", meta={"handle_httpstatus_list": [404]}
)
res = _response(request, 404)
mw.process_spider_input(res)
assert mw.crawler.spider
mw.crawler.spider.handle_httpstatus_list = [404] # type: ignore[attr-defined]
mw.process_spider_input(res404)
| TestHttpErrorMiddleware |
python | numba__numba | numba/cuda/tests/cudapy/test_sync.py | {
"start": 2231,
"end": 7837
} | class ____(CUDATestCase):
def _test_useless(self, kernel):
compiled = cuda.jit("void(int32[::1])")(kernel)
nelem = 10
ary = np.empty(nelem, dtype=np.int32)
exp = np.arange(nelem, dtype=np.int32)
compiled[1, nelem](ary)
np.testing.assert_equal(ary, exp)
def test_useless_syncthreads(self):
self._test_useless(useless_syncthreads)
@skip_on_cudasim("syncwarp not implemented on cudasim")
def test_useless_syncwarp(self):
self._test_useless(useless_syncwarp)
@skip_on_cudasim("syncwarp not implemented on cudasim")
@unittest.skipUnless(_safe_cc_check((7, 0)),
"Partial masks require CC 7.0 or greater")
def test_useless_syncwarp_with_mask(self):
self._test_useless(useless_syncwarp_with_mask)
@skip_on_cudasim("syncwarp not implemented on cudasim")
@unittest.skipUnless(_safe_cc_check((7, 0)),
"Partial masks require CC 7.0 or greater")
def test_coop_syncwarp(self):
# coop_syncwarp computes the sum of all integers from 0 to 31 (496)
# using a single warp
expected = 496
nthreads = 32
nblocks = 1
compiled = cuda.jit("void(int32[::1])")(coop_syncwarp)
res = np.zeros(1, dtype=np.int32)
compiled[nblocks, nthreads](res)
np.testing.assert_equal(expected, res[0])
def test_simple_smem(self):
compiled = cuda.jit("void(int32[::1])")(simple_smem)
nelem = 100
ary = np.empty(nelem, dtype=np.int32)
compiled[1, nelem](ary)
self.assertTrue(np.all(ary == np.arange(nelem, dtype=np.int32)))
def test_coop_smem2d(self):
compiled = cuda.jit("void(float32[:,::1])")(coop_smem2d)
shape = 10, 20
ary = np.empty(shape, dtype=np.float32)
compiled[1, shape](ary)
exp = np.empty_like(ary)
for i in range(ary.shape[0]):
for j in range(ary.shape[1]):
exp[i, j] = (i + 1) / (j + 1)
self.assertTrue(np.allclose(ary, exp))
def test_dyn_shared_memory(self):
compiled = cuda.jit("void(float32[::1])")(dyn_shared_memory)
shape = 50
ary = np.empty(shape, dtype=np.float32)
compiled[1, shape, 0, ary.size * 4](ary)
self.assertTrue(np.all(ary == 2 * np.arange(ary.size, dtype=np.int32)))
def test_threadfence_codegen(self):
# Does not test runtime behavior, just the code generation.
sig = (int32[:],)
compiled = cuda.jit(sig)(use_threadfence)
ary = np.zeros(10, dtype=np.int32)
compiled[1, 1](ary)
self.assertEqual(123 + 321, ary[0])
if not ENABLE_CUDASIM:
self.assertIn("membar.gl;", compiled.inspect_asm(sig))
def test_threadfence_block_codegen(self):
# Does not test runtime behavior, just the code generation.
sig = (int32[:],)
compiled = cuda.jit(sig)(use_threadfence_block)
ary = np.zeros(10, dtype=np.int32)
compiled[1, 1](ary)
self.assertEqual(123 + 321, ary[0])
if not ENABLE_CUDASIM:
self.assertIn("membar.cta;", compiled.inspect_asm(sig))
def test_threadfence_system_codegen(self):
# Does not test runtime behavior, just the code generation.
sig = (int32[:],)
compiled = cuda.jit(sig)(use_threadfence_system)
ary = np.zeros(10, dtype=np.int32)
compiled[1, 1](ary)
self.assertEqual(123 + 321, ary[0])
if not ENABLE_CUDASIM:
self.assertIn("membar.sys;", compiled.inspect_asm(sig))
def _test_syncthreads_count(self, in_dtype):
compiled = cuda.jit(use_syncthreads_count)
ary_in = np.ones(72, dtype=in_dtype)
ary_out = np.zeros(72, dtype=np.int32)
ary_in[31] = 0
ary_in[42] = 0
compiled[1, 72](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 70))
def test_syncthreads_count(self):
self._test_syncthreads_count(np.int32)
def test_syncthreads_count_upcast(self):
self._test_syncthreads_count(np.int16)
def test_syncthreads_count_downcast(self):
self._test_syncthreads_count(np.int64)
def _test_syncthreads_and(self, in_dtype):
compiled = cuda.jit(use_syncthreads_and)
nelem = 100
ary_in = np.ones(nelem, dtype=in_dtype)
ary_out = np.zeros(nelem, dtype=np.int32)
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 1))
ary_in[31] = 0
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 0))
def test_syncthreads_and(self):
self._test_syncthreads_and(np.int32)
def test_syncthreads_and_upcast(self):
self._test_syncthreads_and(np.int16)
def test_syncthreads_and_downcast(self):
self._test_syncthreads_and(np.int64)
def _test_syncthreads_or(self, in_dtype):
compiled = cuda.jit(use_syncthreads_or)
nelem = 100
ary_in = np.zeros(nelem, dtype=in_dtype)
ary_out = np.zeros(nelem, dtype=np.int32)
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 0))
ary_in[31] = 1
compiled[1, nelem](ary_in, ary_out)
self.assertTrue(np.all(ary_out == 1))
def test_syncthreads_or(self):
self._test_syncthreads_or(np.int32)
def test_syncthreads_or_upcast(self):
self._test_syncthreads_or(np.int16)
def test_syncthreads_or_downcast(self):
self._test_syncthreads_or(np.int64)
if __name__ == '__main__':
unittest.main()
| TestCudaSync |
python | sympy__sympy | sympy/holonomic/holonomic.py | {
"start": 3129,
"end": 5333
} | class ____:
r"""
An Ore Algebra is a set of noncommutative polynomials in the
intermediate ``Dx`` and coefficients in a base polynomial ring :math:`A`.
It follows the commutation rule:
.. math ::
Dxa = \sigma(a)Dx + \delta(a)
for :math:`a \subset A`.
Where :math:`\sigma: A \Rightarrow A` is an endomorphism and :math:`\delta: A \rightarrow A`
is a skew-derivation i.e. :math:`\delta(ab) = \delta(a) b + \sigma(a) \delta(b)`.
If one takes the sigma as identity map and delta as the standard derivation
then it becomes the algebra of Differential Operators also called
a Weyl Algebra i.e. an algebra whose elements are Differential Operators.
This class represents a Weyl Algebra and serves as the parent ring for
Differential Operators.
Examples
========
>>> from sympy import ZZ
>>> from sympy import symbols
>>> from sympy.holonomic.holonomic import DifferentialOperators
>>> x = symbols('x')
>>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
>>> R
Univariate Differential Operator Algebra in intermediate Dx over the base ring
ZZ[x]
See Also
========
DifferentialOperator
"""
def __init__(self, base, generator):
# the base polynomial ring for the algebra
self.base = base
# the operator representing differentiation i.e. `Dx`
self.derivative_operator = DifferentialOperator(
[base.zero, base.one], self)
if generator is None:
self.gen_symbol = Symbol('Dx', commutative=False)
else:
if isinstance(generator, str):
self.gen_symbol = Symbol(generator, commutative=False)
elif isinstance(generator, Symbol):
self.gen_symbol = generator
def __str__(self):
string = 'Univariate Differential Operator Algebra in intermediate '\
+ sstr(self.gen_symbol) + ' over the base ring ' + \
(self.base).__str__()
return string
__repr__ = __str__
def __eq__(self, other):
return self.base == other.base and \
self.gen_symbol == other.gen_symbol
| DifferentialOperatorAlgebra |
python | django__django | tests/admin_views/models.py | {
"start": 2554,
"end": 2905
} | class ____(models.Model):
title = models.CharField(max_length=100, verbose_name="¿Title?")
content = models.TextField()
book = models.ForeignKey(Book, models.CASCADE)
class Meta:
# Use a utf-8 bytestring to ensure it works (see #11710)
verbose_name = "¿Chapter?"
def __str__(self):
return self.title
| Chapter |
python | Netflix__metaflow | metaflow/_vendor/yaml/constructor.py | {
"start": 18086,
"end": 27622
} | class ____(SafeConstructor):
# 'extend' is blacklisted because it is used by
# construct_python_object_apply to add `listitems` to a newly generate
# python instance
def get_state_keys_blacklist(self):
return ['^extend$', '^__.*__$']
def get_state_keys_blacklist_regexp(self):
if not hasattr(self, 'state_keys_blacklist_regexp'):
self.state_keys_blacklist_regexp = re.compile('(' + '|'.join(self.get_state_keys_blacklist()) + ')')
return self.state_keys_blacklist_regexp
def construct_python_str(self, node):
return self.construct_scalar(node)
def construct_python_unicode(self, node):
return self.construct_scalar(node)
def construct_python_bytes(self, node):
try:
value = self.construct_scalar(node).encode('ascii')
except UnicodeEncodeError as exc:
raise ConstructorError(None, None,
"failed to convert base64 data into ascii: %s" % exc,
node.start_mark)
try:
if hasattr(base64, 'decodebytes'):
return base64.decodebytes(value)
else:
return base64.decodestring(value)
except binascii.Error as exc:
raise ConstructorError(None, None,
"failed to decode base64 data: %s" % exc, node.start_mark)
def construct_python_long(self, node):
return self.construct_yaml_int(node)
def construct_python_complex(self, node):
return complex(self.construct_scalar(node))
def construct_python_tuple(self, node):
return tuple(self.construct_sequence(node))
def find_python_module(self, name, mark, unsafe=False):
if not name:
raise ConstructorError("while constructing a Python module", mark,
"expected non-empty name appended to the tag", mark)
if unsafe:
try:
__import__(name)
except ImportError as exc:
raise ConstructorError("while constructing a Python module", mark,
"cannot find module %r (%s)" % (name, exc), mark)
if name not in sys.modules:
raise ConstructorError("while constructing a Python module", mark,
"module %r is not imported" % name, mark)
return sys.modules[name]
def find_python_name(self, name, mark, unsafe=False):
if not name:
raise ConstructorError("while constructing a Python object", mark,
"expected non-empty name appended to the tag", mark)
if '.' in name:
module_name, object_name = name.rsplit('.', 1)
else:
module_name = 'builtins'
object_name = name
if unsafe:
try:
__import__(module_name)
except ImportError as exc:
raise ConstructorError("while constructing a Python object", mark,
"cannot find module %r (%s)" % (module_name, exc), mark)
if module_name not in sys.modules:
raise ConstructorError("while constructing a Python object", mark,
"module %r is not imported" % module_name, mark)
module = sys.modules[module_name]
if not hasattr(module, object_name):
raise ConstructorError("while constructing a Python object", mark,
"cannot find %r in the module %r"
% (object_name, module.__name__), mark)
return getattr(module, object_name)
def construct_python_name(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python name", node.start_mark,
"expected the empty value, but found %r" % value, node.start_mark)
return self.find_python_name(suffix, node.start_mark)
def construct_python_module(self, suffix, node):
value = self.construct_scalar(node)
if value:
raise ConstructorError("while constructing a Python module", node.start_mark,
"expected the empty value, but found %r" % value, node.start_mark)
return self.find_python_module(suffix, node.start_mark)
def make_python_instance(self, suffix, node,
args=None, kwds=None, newobj=False, unsafe=False):
if not args:
args = []
if not kwds:
kwds = {}
cls = self.find_python_name(suffix, node.start_mark)
if not (unsafe or isinstance(cls, type)):
raise ConstructorError("while constructing a Python instance", node.start_mark,
"expected a class, but found %r" % type(cls),
node.start_mark)
if newobj and isinstance(cls, type):
return cls.__new__(cls, *args, **kwds)
else:
return cls(*args, **kwds)
def set_python_instance_state(self, instance, state, unsafe=False):
if hasattr(instance, '__setstate__'):
instance.__setstate__(state)
else:
slotstate = {}
if isinstance(state, tuple) and len(state) == 2:
state, slotstate = state
if hasattr(instance, '__dict__'):
if not unsafe and state:
for key in state.keys():
self.check_state_key(key)
instance.__dict__.update(state)
elif state:
slotstate.update(state)
for key, value in slotstate.items():
if not unsafe:
self.check_state_key(key)
setattr(instance, key, value)
def construct_python_object(self, suffix, node):
# Format:
# !!python/object:module.name { ... state ... }
instance = self.make_python_instance(suffix, node, newobj=True)
yield instance
deep = hasattr(instance, '__setstate__')
state = self.construct_mapping(node, deep=deep)
self.set_python_instance_state(instance, state)
def construct_python_object_apply(self, suffix, node, newobj=False):
# Format:
# !!python/object/apply # (or !!python/object/new)
# args: [ ... arguments ... ]
# kwds: { ... keywords ... }
# state: ... state ...
# listitems: [ ... listitems ... ]
# dictitems: { ... dictitems ... }
# or short format:
# !!python/object/apply [ ... arguments ... ]
# The difference between !!python/object/apply and !!python/object/new
# is how an object is created, check make_python_instance for details.
if isinstance(node, SequenceNode):
args = self.construct_sequence(node, deep=True)
kwds = {}
state = {}
listitems = []
dictitems = {}
else:
value = self.construct_mapping(node, deep=True)
args = value.get('args', [])
kwds = value.get('kwds', {})
state = value.get('state', {})
listitems = value.get('listitems', [])
dictitems = value.get('dictitems', {})
instance = self.make_python_instance(suffix, node, args, kwds, newobj)
if state:
self.set_python_instance_state(instance, state)
if listitems:
instance.extend(listitems)
if dictitems:
for key in dictitems:
instance[key] = dictitems[key]
return instance
def construct_python_object_new(self, suffix, node):
return self.construct_python_object_apply(suffix, node, newobj=True)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/none',
FullConstructor.construct_yaml_null)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/bool',
FullConstructor.construct_yaml_bool)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/str',
FullConstructor.construct_python_str)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/unicode',
FullConstructor.construct_python_unicode)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/bytes',
FullConstructor.construct_python_bytes)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/int',
FullConstructor.construct_yaml_int)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/long',
FullConstructor.construct_python_long)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/float',
FullConstructor.construct_yaml_float)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/complex',
FullConstructor.construct_python_complex)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/list',
FullConstructor.construct_yaml_seq)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/tuple',
FullConstructor.construct_python_tuple)
FullConstructor.add_constructor(
'tag:yaml.org,2002:python/dict',
FullConstructor.construct_yaml_map)
FullConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/name:',
FullConstructor.construct_python_name)
FullConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/module:',
FullConstructor.construct_python_module)
FullConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/object:',
FullConstructor.construct_python_object)
FullConstructor.add_multi_constructor(
'tag:yaml.org,2002:python/object/new:',
FullConstructor.construct_python_object_new)
| FullConstructor |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-google-genai/tests/test_llms_google_genai.py | {
"start": 1241,
"end": 1445
} | class ____(BaseModel):
"""A model of a table in a database."""
name: str = Field(description="Table name field")
columns: List[Column] = Field(description="List of random Column objects")
| Table |
python | getsentry__sentry | tests/sentry/users/models/test_authenticator.py | {
"start": 617,
"end": 2724
} | class ____(TestCase):
def test_user_has_2fa(self) -> None:
user = self.create_user("foo@example.com")
assert user.has_2fa() is False
assert Authenticator.objects.filter(user=user).count() == 0
RecoveryCodeInterface().enroll(user)
assert user.has_2fa() is False
assert Authenticator.objects.filter(user=user).count() == 1
TotpInterface().enroll(user)
assert user.has_2fa() is True
assert Authenticator.objects.filter(user=user).count() == 2
def test_bulk_users_have_2fa(self) -> None:
user1 = self.create_user("foo1@example.com")
user2 = self.create_user("foo2@example.com")
TotpInterface().enroll(user1)
assert Authenticator.objects.bulk_users_have_2fa([user1.id, user2.id, 9999]) == {
user1.id: True,
user2.id: False,
9999: False,
}
@django_db_all
def test_authenticator_config_compatibility() -> None:
field_json = AuthenticatorConfig()
value = {
"devices": [
{
"binding": {
"publicKey": "publickey",
"keyHandle": "aowerkoweraowerkkro",
"appId": "https://dev.getsentry.net:8000/auth/2fa/u2fappid.json",
},
"name": "Sentry",
"ts": 1512505334,
},
{
"name": "Alert Escargot",
"ts": 1512505334,
"binding": AuthenticatorData.create(
sha256(b"test"),
0x41,
1,
create_credential_object(
{
"publicKey": "webauthn",
"keyHandle": "webauthn",
}
),
),
},
]
}
encoded = field_json.get_db_prep_value(value, connection=connection)
encoded_s = encoded.dumps(encoded.adapted)
assert field_json.from_db_value(encoded_s, Expression("config"), connection) == value
| AuthenticatorTest |
python | numba__numba | numba/core/typing/mathdecl.py | {
"start": 3118,
"end": 3190
} | class ____(Math_predicate):
pass
@infer_global(math.pow)
| Math_isfinite |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/eq_without_hash.py | {
"start": 1395,
"end": 1525
} | class ____:
match ...:
case int(): ...
case _:
def __eq__(self, other): ...
| MaybeEqMatchCaseWildcard |
python | apache__airflow | providers/http/src/airflow/providers/http/sensors/http.py | {
"start": 1713,
"end": 8563
} | class ____(BaseSensorOperator):
"""
Execute HTTP GET statement; return False on failure 404 Not Found or `response_check` returning False.
HTTP Error codes other than 404 (like 403) or Connection Refused Error
would raise an exception and fail the sensor itself directly (no more poking).
To avoid failing the task for other codes than 404, the argument ``response_error_codes_allowlist``
can be passed with the list containing all the allowed error status codes, like ``["404", "503"]``
To skip error status code check at all, the argument ``extra_option``
can be passed with the value ``{'check_response': False}``. It will make the ``response_check``
be execute for any http status code.
The response check can access the template context to the operator:
.. code-block:: python
def response_check(response, task_instance):
# The task_instance is injected, so you can pull data form xcom
# Other context variables such as dag, ds, logical_date are also available.
xcom_data = task_instance.xcom_pull(task_ids="pushing_task")
# In practice you would do something more sensible with this data..
print(xcom_data)
return True
HttpSensor(task_id="my_http_sensor", ..., response_check=response_check)
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:HttpSensor`
:param http_conn_id: The :ref:`http connection<howto/connection:http>` to run the
sensor against
:param method: The HTTP request method to use
:param endpoint: The relative part of the full url
:param request_params: The parameters to be added to the GET url
:param headers: The HTTP headers to be added to the GET request
:param response_error_codes_allowlist: An allowlist to return False on poke(), not to raise exception.
If the ``None`` value comes in, it is assigned ["404"] by default, for backward compatibility.
When you also want ``404 Not Found`` to raise the error, explicitly deliver the blank list ``[]``.
:param response_check: A check against the 'requests' response object.
The callable takes the response object as the first positional argument
and optionally any number of keyword arguments available in the context dictionary.
It should return True for 'pass' and False otherwise.
:param extra_options: Extra options for the 'requests' library, see the
'requests' documentation (options to modify timeout, ssl, etc.)
:param tcp_keep_alive: Enable TCP Keep Alive for the connection.
:param tcp_keep_alive_idle: The TCP Keep Alive Idle parameter (corresponds to ``socket.TCP_KEEPIDLE``).
:param tcp_keep_alive_count: The TCP Keep Alive count parameter (corresponds to ``socket.TCP_KEEPCNT``)
:param tcp_keep_alive_interval: The TCP Keep Alive interval parameter (corresponds to
``socket.TCP_KEEPINTVL``)
:param deferrable: If waiting for completion, whether to defer the task until done,
default is ``False``
"""
template_fields: Sequence[str] = ("endpoint", "request_params", "headers")
def __init__(
self,
*,
endpoint: str,
http_conn_id: str = "http_default",
method: str = "GET",
request_params: dict[str, Any] | None = None,
request_kwargs: dict[str, Any] | None = None,
headers: dict[str, Any] | None = None,
response_error_codes_allowlist: list[str] | None = None,
response_check: Callable[..., bool | PokeReturnValue] | None = None,
extra_options: dict[str, Any] | None = None,
tcp_keep_alive: bool = True,
tcp_keep_alive_idle: int = 120,
tcp_keep_alive_count: int = 20,
tcp_keep_alive_interval: int = 30,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
**kwargs: Any,
) -> None:
super().__init__(**kwargs)
self.endpoint = endpoint
self.http_conn_id = http_conn_id
self.method = method
self.response_error_codes_allowlist = (
("404",) if response_error_codes_allowlist is None else tuple(response_error_codes_allowlist)
)
self.request_params = request_params or {}
self.headers = headers or {}
self.extra_options = extra_options or {}
self.response_check = response_check
self.tcp_keep_alive = tcp_keep_alive
self.tcp_keep_alive_idle = tcp_keep_alive_idle
self.tcp_keep_alive_count = tcp_keep_alive_count
self.tcp_keep_alive_interval = tcp_keep_alive_interval
self.deferrable = deferrable
self.request_kwargs = request_kwargs or {}
def poke(self, context: Context) -> bool | PokeReturnValue:
from airflow.utils.operator_helpers import determine_kwargs
hook = HttpHook(
method=self.method,
http_conn_id=self.http_conn_id,
tcp_keep_alive=self.tcp_keep_alive,
tcp_keep_alive_idle=self.tcp_keep_alive_idle,
tcp_keep_alive_count=self.tcp_keep_alive_count,
tcp_keep_alive_interval=self.tcp_keep_alive_interval,
)
self.log.info("Poking: %s", self.endpoint)
try:
response = hook.run(
self.endpoint,
data=self.request_params,
headers=self.headers,
extra_options=self.extra_options,
**self.request_kwargs,
)
if self.response_check:
kwargs = determine_kwargs(self.response_check, [response], context)
return self.response_check(response, **kwargs)
except AirflowException as exc:
if str(exc).startswith(self.response_error_codes_allowlist):
return False
raise exc
return True
def execute(self, context: Context) -> Any:
if not self.deferrable or self.response_check:
return super().execute(context=context)
if not self.poke(context):
self.defer(
timeout=timedelta(seconds=self.timeout),
trigger=HttpSensorTrigger(
endpoint=self.endpoint,
http_conn_id=self.http_conn_id,
data=self.request_params,
headers=self.headers,
method=self.method,
extra_options=self.extra_options,
poke_interval=self.poke_interval,
),
method_name="execute_complete",
)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> None:
self.log.info("%s completed successfully.", self.task_id)
| HttpSensor |
python | apache__airflow | airflow-ctl/src/airflowctl/api/datamodels/generated.py | {
"start": 58413,
"end": 58684
} | class ____(BaseModel):
"""
Queued Event Collection serializer for responses.
"""
queued_events: Annotated[list[QueuedEventResponse], Field(title="Queued Events")]
total_entries: Annotated[int, Field(title="Total Entries")]
| QueuedEventCollectionResponse |
python | doocs__leetcode | solution/2200-2299/2250.Count Number of Rectangles Containing Each Point/Solution.py | {
"start": 0,
"end": 494
} | class ____:
def countRectangles(
self, rectangles: List[List[int]], points: List[List[int]]
) -> List[int]:
d = defaultdict(list)
for x, y in rectangles:
d[y].append(x)
for y in d.keys():
d[y].sort()
ans = []
for x, y in points:
cnt = 0
for h in range(y, 101):
xs = d[h]
cnt += len(xs) - bisect_left(xs, x)
ans.append(cnt)
return ans
| Solution |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 54533,
"end": 54649
} | class ____(Elemwise):
_parameters = ["frame"]
operation = M.notnull
_projection_passthrough = True
| NotNull |
python | spyder-ide__spyder | spyder/plugins/variableexplorer/widgets/texteditor.py | {
"start": 1186,
"end": 7012
} | class ____(BaseDialog, SpyderWidgetMixin, SpyderFontsMixin):
"""Array Editor Dialog"""
CONF_SECTION = 'variable_explorer'
def __init__(self, text, title='', parent=None, readonly=False):
super().__init__(parent)
# Destroying the C++ object right after closing the dialog box,
# otherwise it may be garbage-collected in another QThread
# (e.g. the editor's analysis thread in Spyder), thus leading to
# a segmentation fault on UNIX or an application crash on Windows
self.setAttribute(Qt.WA_DeleteOnClose)
self.text = None
self.btn_save_and_close = None
self.close_action = self.create_action(
name=TextEditorActions.Close,
icon=self.create_icon('close_pane'),
text=_('Close'),
triggered=self.reject,
shortcut=self.get_shortcut(TextEditorActions.Close),
register_action=False,
register_shortcut=True
)
self.register_shortcut_for_widget(name='close', triggered=self.reject)
# Display text as unicode if it comes as bytes, so users see
# its right representation
if isinstance(text, bytes):
self.is_binary = True
text = str(text, 'utf8')
else:
self.is_binary = False
self.layout = QVBoxLayout()
self.setLayout(self.layout)
self.toolbar = self.create_toolbar(
TextEditorWidgets.Toolbar,
register=False
)
self.layout.addWidget(self.toolbar)
# Text edit
self.edit = QTextEdit(parent)
self.edit.setReadOnly(readonly)
self.edit.textChanged.connect(self.text_changed)
self.edit.setPlainText(text)
font = self.get_font(SpyderFontType.MonospaceInterface)
self.edit.setFont(font)
self.layout.addWidget(self.edit)
# Buttons configuration
btn_layout = QHBoxLayout()
btn_layout.addStretch()
if not readonly:
self.btn_save_and_close = QPushButton(_('Save and Close'))
self.btn_save_and_close.setDisabled(True)
self.btn_save_and_close.clicked.connect(self.accept)
btn_layout.addWidget(self.btn_save_and_close)
self.btn_close = QPushButton(_('Close'))
self.btn_close.setAutoDefault(True)
self.btn_close.setDefault(True)
self.btn_close.clicked.connect(self.reject)
btn_layout.addWidget(self.btn_close)
self.layout.addLayout(btn_layout)
# Make the dialog act as a window
if sys.platform == 'darwin':
# See spyder-ide/spyder#12825
self.setWindowFlags(Qt.Tool)
else:
# Make the dialog act as a window
self.setWindowFlags(Qt.Window)
self.setWindowIcon(ima.icon('edit'))
if title:
try:
unicode_title = str(title)
except UnicodeEncodeError:
unicode_title = u''
else:
unicode_title = u''
self.setWindowTitle(_("Text editor") + \
u"%s" % (u" - " + unicode_title
if unicode_title else u""))
stretcher = self.create_stretcher(
TextEditorWidgets.ToolbarStretcher
)
options_menu = self.create_menu(
TextEditorMenus.Options,
register=False
)
for item in [self.close_action]:
self.add_item_to_menu(item, options_menu)
options_button = self.create_toolbutton(
name=TextEditorWidgets.OptionsToolButton,
text=_('Options'),
icon=ima.icon('tooloptions'),
register=False
)
options_button.setPopupMode(QToolButton.InstantPopup)
options_button.setMenu(options_menu)
self.toolbar.clear()
self.toolbar._section_items.clear()
self.toolbar._item_map.clear()
for item in [stretcher, options_button]:
self.add_item_to_toolbar(
item,
self.toolbar,
section=TextEditorToolbarSections.Copy
)
self.toolbar.render()
@Slot()
def text_changed(self):
"""Text has changed"""
# Save text as bytes, if it was initially bytes
if self.is_binary:
self.text = bytes(self.edit.toPlainText(), 'utf8')
else:
self.text = str(self.edit.toPlainText())
if self.btn_save_and_close:
self.btn_save_and_close.setEnabled(True)
self.btn_save_and_close.setAutoDefault(True)
self.btn_save_and_close.setDefault(True)
def get_value(self):
"""Return modified text"""
# It is import to avoid accessing Qt C++ object as it has probably
# already been destroyed, due to the Qt.WA_DeleteOnClose attribute
return self.text
def setup_and_check(self, value):
"""Verify if TextEditor is able to display strings passed to it."""
try:
if not isinstance(value, str):
str(value, 'utf8')
return True
except Exception:
return False
#==============================================================================
# Tests
#==============================================================================
def test():
"""Text editor demo"""
from spyder.utils.qthelpers import qapplication
_app = qapplication() # analysis:ignore
text = """01234567890123456789012345678901234567890123456789012345678901234567890123456789
dedekdh elkd ezd ekjd lekdj elkdfjelfjk e"""
dialog = TextEditor(text)
dialog.exec_()
dlg_text = dialog.get_value()
assert text == dlg_text
if __name__ == "__main__":
test()
| TextEditor |
python | pytorch__pytorch | test/dynamo/test_functions.py | {
"start": 83596,
"end": 84877
} | class ____(torch.nn.Module):
def forward(self, s9: "Sym(s9)", L_lambda0_keywords_y_: "f32[s9, s9]"):
l_lambda0_keywords_y_ = L_lambda0_keywords_y_
mul: "f32[s9, s9]" = l_lambda0_keywords_y_ * l_lambda0_keywords_y_
add: "f32[s9, s9]" = l_lambda0_keywords_y_ + l_lambda0_keywords_y_; l_lambda0_keywords_y_ = None
mul_1: "f32[s9, s9]" = torch.mul(mul, add); mul = add = None
return (mul_1,)
""",
)
def test_partials_graph_break_reconstruct_mix_no_source(self):
def fn(udf_mul_0, x):
udf_add_1 = lambda x, y: x + y
lambda0 = functools.partial(udf_mul_0, y=x)
lambda1 = functools.partial(udf_add_1, x)
print("break")
return torch.mul(lambda0(x), lambda1(x))
backend = EagerAndRecordGraphs()
cnts = CompileCounterWithBackend(backend)
x = torch.randn(2, 2)
dynamo_result = torch.compile(fn, backend=cnts)(udf_mul, x)
eager_result = fn(udf_mul, x)
self.assertEqual(eager_result, dynamo_result)
if torch._dynamo.config.assume_static_by_default:
self.assertExpectedInline(
normalize_gm(backend.graphs[0].print_readable(print_output=False)),
"""\
| GraphModule |
python | fastai__fastai | fastai/vision/augment.py | {
"start": 42466,
"end": 43393
} | class ____(RandTransform):
"Apply `fs` to the logits"
order = 40
def __init__(self,
fs:Callable|MutableSequence, # Transformation functions applying in a space
space_fn:Callable, # Function converting rgb to a space and back to rgb after appying `fs`
**kwargs
):
super().__init__(**kwargs)
self.space_fn=space_fn
self.fs=L(fs)
def before_call(self,
b,
split_idx:int, # Index of the train/valid dataset
):
self.do = True
while isinstance(b, tuple): b = b[0]
for t in self.fs: t.before_call(b)
def compose(self,
tfm:Callable # Transformation function to compose
):
"Compose `self` with another `LightingTransform`"
self.fs += tfm.fs
def encodes(self,x:TensorImage): return self.space_fn(x,partial(compose_tfms, tfms=self.fs))
# %% ../../nbs/09_vision.augment.ipynb 201
| SpaceTfm |
python | doocs__leetcode | solution/2300-2399/2337.Move Pieces to Obtain a String/Solution2.py | {
"start": 0,
"end": 592
} | class ____:
def canChange(self, start: str, target: str) -> bool:
n = len(start)
i = j = 0
while 1:
while i < n and start[i] == '_':
i += 1
while j < n and target[j] == '_':
j += 1
if i >= n and j >= n:
return True
if i >= n or j >= n or start[i] != target[j]:
return False
if start[i] == 'L' and i < j:
return False
if start[i] == 'R' and i > j:
return False
i, j = i + 1, j + 1
| Solution |
python | django__django | tests/migrations/test_add_many_to_many_field_initial/0002_initial.py | {
"start": 43,
"end": 351
} | class ____(migrations.Migration):
initial = True
dependencies = [
("migrations", "0001_initial"),
]
operations = [
migrations.AddField(
model_name="task",
name="projects",
field=models.ManyToManyField(to="Project"),
),
]
| Migration |
python | pytorch__pytorch | torch/testing/_internal/common_optimizers.py | {
"start": 2403,
"end": 6972
} | class ____:
"""Optimizer information to be used in testing."""
def __init__(
self,
optim_cls: Optimizer, # Class object for the Optimizer under test
*,
# Function to generate optimizer inputs EXCLUDING params. We delegate params responsibility
# to the test using the OptimizerInfo. OptimizerInput.params is likely None.
# Can optionally take in device to filter out certain unsupported configs
optim_inputs_func,
# Tuple of lambdas to generate LRScheduler instances to run with the optimizer for the
# LRScheduler tests like test_forloop_goes_right_direction with_lrsched.
# We DO NOT expect to thoroughly test LRSchedulers through the optimizers, so not every
# LRScheduler configuration will be included. See test_lrscheduler.py for that instead.
# A few optimizers like SGD and Adam will test more LRSchedulers.
scheduler_inputs=(
[
lambda opt: StepLR(opt, gamma=0.9, step_size=10),
lambda opt: ReduceLROnPlateau(opt),
],
),
# A subset of the global-cliquey flags (fused, foreach, differentiable) the optimizer
# supports. See NOTE: [optimizer kwarg categories] for what global-cliquey means.
supported_impls: tuple[str, ...] = ("foreach", "differentiable"),
# A subset of all flags, signifying which ones were only supported after the
# original optimizer had already been released. aka impls where we need to check BC.
not_og_supported_flags: tuple[str, ...] = (
"foreach",
"differentiable",
"maximize",
"capturable",
),
# the optim supports passing in sparse gradients as well as dense grads
supports_sparse: bool = False,
# the optimizer constructor supports passing in capturable as a kwarg
has_capturable_arg: bool = False,
# the optim only supports one config: sparse grads w/ dense params, see SparseAdam
only_supports_sparse_grads: bool = False,
# Tuple of (optimizer kwargs, schedulers_constructors) specifically for sparse tests,
# with especially tuned hyperparameters. These only apply if the optimizer supports
# sparse parameters or grads.
metadata_for_sparse=({}, []),
# the optim supports complex parameters
supports_complex: bool = True,
# whether the optimizer.step() function requires a closure to be passed
step_requires_closure: bool = False,
# whether the optimizer supports per-param options with parameter groups
supports_param_groups: bool = True,
# whether the optimizer supports parameters on multiple devices
supports_multiple_devices: bool = True,
skips=(), # Indicates which tests to skip
decorators=None, # Additional decorators to apply to generated tests
optim_error_inputs_func=None, # Function to generate optim inputs that error
supports_fused_on: tuple[str, ...] = (),
):
self.optim_cls = optim_cls
self.optim_inputs_func = optim_inputs_func
self.scheduler_inputs = scheduler_inputs
self.supported_impls = supported_impls
self.not_og_supported_flags = not_og_supported_flags
self.supports_sparse = supports_sparse
self.has_capturable_arg = has_capturable_arg
self.metadata_for_sparse = metadata_for_sparse
self.only_supports_sparse_grads = only_supports_sparse_grads
self.supports_complex = supports_complex
self.step_requires_closure = step_requires_closure
self.supports_param_groups = supports_param_groups
self.supports_multiple_devices = supports_multiple_devices
self.decorators = (
*(decorators if decorators else []),
*(skips if skips else []),
)
self.optim_error_inputs_func = optim_error_inputs_func
self.supports_fused_on = supports_fused_on
def get_decorators(self, test_class, test_name, device, dtype, param_kwargs):
result = []
for decorator in self.decorators:
if isinstance(decorator, DecorateInfo):
if decorator.is_active(
test_class, test_name, device, dtype, param_kwargs
):
result.extend(decorator.decorators)
else:
result.append(decorator)
return result
@property
def name(self):
return self.optim_cls.__name__
| OptimizerInfo |
python | langchain-ai__langchain | libs/langchain/langchain_classic/chains/summarize/chain.py | {
"start": 861,
"end": 8361
} | class ____(Protocol):
"""Interface for loading the combine documents chain."""
def __call__(
self,
llm: BaseLanguageModel,
**kwargs: Any,
) -> BaseCombineDocumentsChain:
"""Callable to load the combine documents chain."""
def _load_stuff_chain(
llm: BaseLanguageModel,
*,
prompt: BasePromptTemplate = stuff_prompt.PROMPT,
document_variable_name: str = "text",
verbose: bool | None = None,
**kwargs: Any,
) -> StuffDocumentsChain:
llm_chain = LLMChain(llm=llm, prompt=prompt, verbose=verbose)
"""Load a StuffDocumentsChain for summarization.
Args:
llm: Language Model to use in the chain.
prompt: Prompt template that controls how the documents are formatted and
passed into the LLM.
document_variable_name: Variable name in the prompt template where the
document text will be inserted.
verbose: Whether to log progress and intermediate steps.
**kwargs: Additional keyword arguments passed to the StuffDocumentsChain.
Returns:
A StuffDocumentsChain that takes in documents, formats them with the
given prompt, and runs the chain on the provided LLM.
"""
return StuffDocumentsChain(
llm_chain=llm_chain,
document_variable_name=document_variable_name,
verbose=verbose,
**kwargs,
)
def _load_map_reduce_chain(
llm: BaseLanguageModel,
*,
map_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
combine_prompt: BasePromptTemplate = map_reduce_prompt.PROMPT,
combine_document_variable_name: str = "text",
map_reduce_document_variable_name: str = "text",
collapse_prompt: BasePromptTemplate | None = None,
reduce_llm: BaseLanguageModel | None = None,
collapse_llm: BaseLanguageModel | None = None,
verbose: bool | None = None,
token_max: int = 3000,
callbacks: Callbacks = None,
collapse_max_retries: int | None = None,
**kwargs: Any,
) -> MapReduceDocumentsChain:
map_chain = LLMChain(
llm=llm,
prompt=map_prompt,
verbose=verbose,
callbacks=callbacks,
)
_reduce_llm = reduce_llm or llm
reduce_chain = LLMChain(
llm=_reduce_llm,
prompt=combine_prompt,
verbose=verbose,
callbacks=callbacks,
)
"""Load a MapReduceDocumentsChain for summarization.
This chain first applies a "map" step to summarize each document,
then applies a "reduce" step to combine the summaries into a
final result. Optionally, a "collapse" step can be used to handle
long intermediate results.
Args:
llm: Language Model to use for map and reduce steps.
map_prompt: Prompt used to summarize each document in the map step.
combine_prompt: Prompt used to combine summaries in the reduce step.
combine_document_variable_name: Variable name in the `combine_prompt` where
the mapped summaries are inserted.
map_reduce_document_variable_name: Variable name in the `map_prompt`
where document text is inserted.
collapse_prompt: Optional prompt used to collapse intermediate summaries
if they exceed the token limit (`token_max`).
reduce_llm: Optional separate LLM for the reduce step.
which uses the same model as the map step.
collapse_llm: Optional separate LLM for the collapse step.
which uses the same model as the map step.
verbose: Whether to log progress and intermediate steps.
token_max: Token threshold that triggers the collapse step during reduction.
callbacks: Optional callbacks for logging and tracing.
collapse_max_retries: Maximum retries for the collapse step if it fails.
**kwargs: Additional keyword arguments passed to the MapReduceDocumentsChain.
Returns:
A MapReduceDocumentsChain that maps each document to a summary,
then reduces all summaries into a single cohesive result.
"""
combine_documents_chain = StuffDocumentsChain(
llm_chain=reduce_chain,
document_variable_name=combine_document_variable_name,
verbose=verbose,
callbacks=callbacks,
)
if collapse_prompt is None:
collapse_chain = None
if collapse_llm is not None:
msg = (
"collapse_llm provided, but collapse_prompt was not: please "
"provide one or stop providing collapse_llm."
)
raise ValueError(msg)
else:
_collapse_llm = collapse_llm or llm
collapse_chain = StuffDocumentsChain(
llm_chain=LLMChain(
llm=_collapse_llm,
prompt=collapse_prompt,
verbose=verbose,
callbacks=callbacks,
),
document_variable_name=combine_document_variable_name,
)
reduce_documents_chain = ReduceDocumentsChain(
combine_documents_chain=combine_documents_chain,
collapse_documents_chain=collapse_chain,
token_max=token_max,
verbose=verbose,
callbacks=callbacks,
collapse_max_retries=collapse_max_retries,
)
return MapReduceDocumentsChain(
llm_chain=map_chain,
reduce_documents_chain=reduce_documents_chain,
document_variable_name=map_reduce_document_variable_name,
verbose=verbose,
callbacks=callbacks,
**kwargs,
)
def _load_refine_chain(
llm: BaseLanguageModel,
*,
question_prompt: BasePromptTemplate = refine_prompts.PROMPT,
refine_prompt: BasePromptTemplate = refine_prompts.REFINE_PROMPT,
document_variable_name: str = "text",
initial_response_name: str = "existing_answer",
refine_llm: BaseLanguageModel | None = None,
verbose: bool | None = None,
**kwargs: Any,
) -> RefineDocumentsChain:
initial_chain = LLMChain(llm=llm, prompt=question_prompt, verbose=verbose)
_refine_llm = refine_llm or llm
refine_chain = LLMChain(llm=_refine_llm, prompt=refine_prompt, verbose=verbose)
return RefineDocumentsChain(
initial_llm_chain=initial_chain,
refine_llm_chain=refine_chain,
document_variable_name=document_variable_name,
initial_response_name=initial_response_name,
verbose=verbose,
**kwargs,
)
def load_summarize_chain(
llm: BaseLanguageModel,
chain_type: str = "stuff",
verbose: bool | None = None, # noqa: FBT001
**kwargs: Any,
) -> BaseCombineDocumentsChain:
"""Load summarizing chain.
Args:
llm: Language Model to use in the chain.
chain_type: Type of document combining chain to use. Should be one of "stuff",
"map_reduce", and "refine".
verbose: Whether chains should be run in verbose mode or not. Note that this
applies to all chains that make up the final chain.
**kwargs: Additional keyword arguments.
Returns:
A chain to use for summarizing.
"""
loader_mapping: Mapping[str, LoadingCallable] = {
"stuff": _load_stuff_chain,
"map_reduce": _load_map_reduce_chain,
"refine": _load_refine_chain,
}
if chain_type not in loader_mapping:
msg = (
f"Got unsupported chain type: {chain_type}. "
f"Should be one of {loader_mapping.keys()}"
)
raise ValueError(msg)
return loader_mapping[chain_type](llm, verbose=verbose, **kwargs)
| LoadingCallable |
python | davidhalter__jedi | jedi/inference/value/instance.py | {
"start": 16249,
"end": 17882
} | class ____(FunctionMixin, ValueWrapper):
def __init__(self, instance, class_context, function):
super().__init__(function)
self.instance = instance
self._class_context = class_context
def is_bound_method(self):
return True
@property
def name(self):
return FunctionNameInClass(
self._class_context,
super().name
)
def py__class__(self):
c, = values_from_qualified_names(self.inference_state, 'types', 'MethodType')
return c
def _get_arguments(self, arguments):
assert arguments is not None
return InstanceArguments(self.instance, arguments)
def _as_context(self, arguments=None):
if arguments is None:
return AnonymousMethodExecutionContext(self.instance, self)
arguments = self._get_arguments(arguments)
return MethodExecutionContext(self.instance, self, arguments)
def py__call__(self, arguments):
if isinstance(self._wrapped_value, OverloadedFunctionValue):
return self._wrapped_value.py__call__(self._get_arguments(arguments))
function_execution = self.as_context(arguments)
return function_execution.infer()
def get_signature_functions(self):
return [
BoundMethod(self.instance, self._class_context, f)
for f in self._wrapped_value.get_signature_functions()
]
def get_signatures(self):
return [sig.bind(self) for sig in super().get_signatures()]
def __repr__(self):
return '<%s: %s>' % (self.__class__.__name__, self._wrapped_value)
| BoundMethod |
python | google__pytype | pytype/pytd/optimize.py | {
"start": 21852,
"end": 22439
} | class ____(visitors.Visitor):
"""Simplifies containers whose type parameters are all Any.
For example, this will change
def f() -> List[any]
to
def f() -> list
Note that we don't simplify TupleType or CallableType, since they have
variable-length parameters, and the parameter length is meaningful even when
the parameters are all Any.
"""
def _Simplify(self, t):
if all(isinstance(p, pytd.AnythingType) for p in t.parameters):
return t.base_type
else:
return t
def VisitGenericType(self, t):
return self._Simplify(t)
| SimplifyContainers |
python | kamyu104__LeetCode-Solutions | Python/find-score-of-an-array-after-marking-all-elements.py | {
"start": 64,
"end": 609
} | class ____(object):
def findScore(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
idxs = range(len(nums))
idxs.sort(key=lambda x: (nums[x], x))
lookup = [False]*len(nums)
result = 0
for i in idxs:
if lookup[i]:
continue
lookup[i] = True
if i-1 >= 0:
lookup[i-1] = True
if i+1 < len(lookup):
lookup[i+1] = True
result += nums[i]
return result
| Solution |
python | huggingface__transformers | tests/models/kosmos2_5/test_modeling_kosmos2_5.py | {
"start": 4009,
"end": 6498
} | class ____:
def __init__(
self,
parent,
batch_size=6,
seq_length=7,
is_training=True,
use_input_mask=True,
use_labels=True,
vocab_size=99,
hidden_size=32,
ffn_dim=64,
num_hidden_layers=2,
num_attention_heads=4,
dropout=0,
attention_dropout=0,
max_position_embeddings=512,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.is_training = is_training
self.use_input_mask = use_input_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.ffn_dim = ffn_dim
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.attention_dropout = attention_dropout
self.max_position_embeddings = max_position_embeddings
self.scope = scope
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
input_mask = None
if self.use_input_mask:
input_mask = random_attention_mask([self.batch_size, self.seq_length])
if input_mask is not None:
batch_size, seq_length = input_mask.shape
rnd_start_indices = np.random.randint(1, seq_length - 1, size=(batch_size,))
for batch_idx, start_index in enumerate(rnd_start_indices):
input_mask[batch_idx, :start_index] = 1
input_mask[batch_idx, start_index:] = 0
config = self.get_config()
return config, input_ids, input_mask
def get_config(self):
return Kosmos2_5TextConfig(
vocab_size=self.vocab_size,
embed_dim=self.hidden_size,
ffn_dim=self.ffn_dim,
layers=self.num_hidden_layers,
attention_heads=self.num_attention_heads,
dropout=self.dropout,
attention_dropout=self.attention_dropout,
max_position_embeddings=self.max_position_embeddings,
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, input_ids, input_mask = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
| Kosmos2_5TextModelTester |
python | cython__cython | Cython/Compiler/Symtab.py | {
"start": 55049,
"end": 59077
} | class ____(Scope):
# The builtin namespace.
is_builtin_scope = True
def __init__(self):
if Options.pre_import is None:
Scope.__init__(self, "__builtin__", None, None)
else:
Scope.__init__(self, "__builtin__", PreImportScope(), None)
self.type_names = {}
def lookup(self, name, language_level=None):
# 'language_level' is passed by ModuleScope
if name == 'unicode' or name == 'basestring':
# Keep recognising 'unicode' and 'basestring' in legacy code but map them to 'str'.
name = 'str'
elif name == 'long' and language_level == 2:
# Keep recognising 'long' in legacy Py2 code but map it to 'int'.
name = 'int'
return Scope.lookup(self, name)
def declare_builtin(self, name, pos):
if name not in Code.KNOWN_PYTHON_BUILTINS:
if self.outer_scope is not None:
return self.outer_scope.declare_builtin(name, pos)
else:
if Options.error_on_unknown_names:
error(pos, "undeclared name not builtin: %s" % name)
else:
warning(pos, "undeclared name not builtin: %s" % name, 2)
def declare_builtin_cfunction(self, name, type, cname, python_equiv=None, utility_code=None, specialiser=None):
# If python_equiv == "*", the Python equivalent has the same name
# as the entry, otherwise it has the name specified by python_equiv.
name = EncodedString(name)
entry = self.declare_cfunction(name, type, None, cname, visibility='extern', utility_code=utility_code)
if specialiser is not None:
entry.specialiser = specialiser
if python_equiv:
if python_equiv == "*":
python_equiv = name
else:
python_equiv = EncodedString(python_equiv)
var_entry = Entry(python_equiv, python_equiv, py_object_type)
var_entry.qualified_name = self.qualify_name(name)
var_entry.is_variable = 1
var_entry.is_builtin = 1
var_entry.utility_code = utility_code
var_entry.scope = entry.scope
entry.as_variable = var_entry
return entry
def declare_builtin_type(self, name, cname,
objstruct_cname=None, type_class=PyrexTypes.BuiltinObjectType,
utility_code=None):
name = EncodedString(name)
type = type_class(name, cname, objstruct_cname)
scope = CClassScope(name, outer_scope=None, visibility='extern', parent_type=type)
scope.directives = {}
type.set_scope(scope)
self.type_names[name] = 1
entry = self.declare_type(name, type, None, visibility='extern')
if utility_code:
entry.utility_code = utility_code
if name == 'range' and 'xrange' not in self.entries:
# Keep supporting legacy Py2 'xrange' because it's still in use.
self.entries['xrange'] = entry
var_entry = Entry(
name=entry.name,
type=self.lookup('type').type, # make sure "type" is the first type declared...
pos=entry.pos,
cname=entry.type.typeptr_cname,
)
var_entry.qualified_name = self.qualify_name(name)
var_entry.is_variable = 1
var_entry.is_cglobal = 1
var_entry.is_readonly = 1
var_entry.is_builtin = 1
var_entry.scope = self
if Options.cache_builtins:
var_entry.is_const = True
if utility_code:
var_entry.utility_code = utility_code
entry.as_variable = var_entry
return type
def builtin_scope(self):
return self
def handle_already_declared_name(self, name, cname, type, pos, visibility, copy_entry=False):
# Overriding is OK in the builtin scope
return None
const_counter = 1 # As a temporary solution for compiling code in pxds
| BuiltinScope |
python | pytorch__pytorch | torch/_higher_order_ops/invoke_subgraph.py | {
"start": 17208,
"end": 30690
} | class ____(torch.autograd.Function):
"""
Saves the subgraph, i.e. original callable, in the forward method. And then
traces out a joint graph in the backward. This delaying of tracing in
backward, also called as lazy backward, ensures that the assumptions about
the grad_out strides and tensor-subclass-ness are already accounted for.
"""
@staticmethod
# pyrefly: ignore [bad-override]
def forward(
ctx,
subgraph,
identifier,
output_metadata,
*operands,
):
# We want to delay the backward graph construction until the backward.
# So in forward, we just run the fw callable as is. And save all the
# information necessary to construct the backward graph in the ctx.
ctx._subgraph = subgraph
ctx._identifier = identifier
ctx._output_metadata = output_metadata
# We snapshot the dispatch keys in forward for materializing the
# the bw_graph in backward.
ctx._fw_include_key_set = torch._C._dispatch_tls_local_include_set()
ctx._fw_exclude_key_set = torch._C._dispatch_tls_local_exclude_set()
save_tensors_and_symints_for_backward(ctx, operands)
with torch._C._AutoDispatchBelowAutograd():
out = invoke_subgraph(
subgraph,
f"fw_{identifier}",
*operands,
)
# Check that int (coming from symint) is at expected indexes.
for idx, o in enumerate(out):
if isinstance(o, int):
assert idx in output_metadata.indexes_with_symint
return out
@staticmethod
def backward(
ctx,
*grad_outs,
):
from torch._dynamo.utils import dynamo_timed
subgraph = ctx._subgraph
identifier = ctx._identifier
output_metadata = ctx._output_metadata
primals = saved_tensors_and_symints(ctx)
# Filter out grads that are None or do not require_grad. This was
# the assumption we made during the tracing of joint_graph.
filtered_grad_outs = []
for idx, o in enumerate(grad_outs):
if o is None:
assert idx in output_metadata.indexes_with_symint
elif idx in output_metadata.indexes_with_no_grad:
# Deliberately skip over the grad_outs which we know should be
# None because the corresponding fwd_out does not require_grad.
pass
else:
filtered_grad_outs.append(o)
filtered_grad_outs = tuple(filtered_grad_outs)
# Important note - Even though the forward graph can be same for
# different invoke_subgraphs, the backward graph can be different
# because the tangent strides can be different. So, here we cache on
# tangent_metadata in addition to identifier
from torch._guards import detect_fake_mode
from torch._subclasses._fake_tensor_utils import _CacheKeyState
from torch._subclasses.fake_tensor import extract_tensor_metadata
fake_mode = detect_fake_mode(primals + filtered_grad_outs)
assert fake_mode is not None, "fake_mode should be enabled for HOPs"
state = _CacheKeyState(fake_mode.shape_env)
tangent_metadata: list[object] = []
for tangent in filtered_grad_outs:
metadata = extract_tensor_metadata(tangent)
metadata._flatten_into(tangent_metadata, fake_mode, state)
# pyrefly: ignore [bad-assignment]
tangent_metadata = tuple(tangent_metadata)
# bw_graph is a joint graph with signature (*primals_and_tangents) and
# returns (*grads_and_fw_outs). To get the grads, we use the num_fw_outs
# to extract the grads.
primals_and_tangents = primals + filtered_grad_outs
# Check if we have already traced the bwd subgraph.
bw_graph = None
suffix = None
invoke_subgraph_cache = get_invoke_subgraph_cache()
cache_hit = False
if invoke_subgraph_cache:
bw_graph, suffix = invoke_subgraph_cache.get_lazy_bwd_entry(
identifier, tangent_metadata
)
cache_hit = bw_graph is not None
if bw_graph is None:
assert suffix is None
with dynamo_timed(
"invoke_subgraph_trace_joint_graph", log_pt2_compile_event=True
):
bw_graph = trace_joint_graph_as_bwd(
subgraph,
len(primals),
primals_and_tangents,
ctx._fw_include_key_set,
ctx._fw_exclude_key_set,
)
if invoke_subgraph_cache and not cache_hit:
suffix = invoke_subgraph_cache.add_lazy_bwd_entry(
identifier, tangent_metadata, bw_graph
)
grads = invoke_subgraph(
bw_graph, f"bw_{identifier}_{suffix}", *primals_and_tangents
)[: -output_metadata.num_fw_outs]
return None, None, None, *grads
@invoke_subgraph.py_autograd_impl
def _(subgraph, identifier, *operands):
# Check if we have already traced the subgraph.
invoke_subgraph_cache = get_invoke_subgraph_cache()
if invoke_subgraph_cache:
if saved_autograd_fn := invoke_subgraph_cache.get_autograd_key_entry(
identifier
):
return saved_autograd_fn(*operands)
output_metadata = get_output_metadata(subgraph, *operands)
def autograd_fn_callable(*args):
return InvokeSubgraphAutogradOp.apply(
subgraph, identifier, output_metadata, *args
)
# Save the autograd_fn_callable in the dispatch set cache.
if invoke_subgraph_cache:
invoke_subgraph_cache.add_autograd_key_entry(identifier, autograd_fn_callable)
return autograd_fn_callable(*operands)
@invoke_subgraph.py_impl(DispatchKey.CompositeExplicitAutograd)
def _(subgraph, identifier, *operands):
from torch.utils._python_dispatch import _get_current_dispatch_mode
mode = _get_current_dispatch_mode()
assert mode is None, "Mode should never be enabled for CPU/CUDA key"
return subgraph(*operands)
@invoke_subgraph.py_functionalize_impl
def _(ctx, subgraph, identifier, *operands):
from torch._higher_order_ops.auto_functionalize import (
can_auto_functionalize,
do_auto_functionalize_v2,
)
# (in the functionalization metadata phase) Capture tokens before
tokens_before = dict(ctx.mode._tokens)
# Check if this subgraph has effects stored in the cache
invoke_subgraph_cache = get_invoke_subgraph_cache()
effects = None
if invoke_subgraph_cache:
effects = invoke_subgraph_cache.get_effects(identifier)
if effects:
assert len(effects) == 1, "Multiple effects within a subgraph NYI"
tokens = ctx.mode._tokens
effects = next(iter(effects))
token_input = tokens[effects]
operands = (token_input, *operands)
def wrap_subgraph(subgraph):
def wrapped_subgraph(token, *args):
res = subgraph(*args)
return ctx.unwrap_tensors(ctx.mode._tokens[effects]), *res
return wrapped_subgraph
subgraph = wrap_subgraph(subgraph)
unwrapped_operands = ctx.unwrap_tensors(operands)
hop_instance = HopInstance.create(invoke_subgraph, subgraph, identifier, *operands)
if can_auto_functionalize(hop_instance):
# NOTE: [auto_functionalize x invoke_subgraph caching]
# We call auto_functionalized_v2 to support input mutation of invoke_subgraph.
# See NOTE [Support input mutation of hops] for the overall design.
#
# invoke_subgraph is special because of its identifier based caching mechanism.
# In invoke_subgraph's functionalization key implementation, we create a new
# identifier because the subgraph is replaced by FunctionWithNoFreeVars in a
# functional + epilogue form.
assert isinstance(identifier, str), identifier
return do_auto_functionalize_v2(
ctx.mode,
hop_instance,
(subgraph, "auto_functionalized_" + identifier, *operands),
{},
)
with ctx.redispatch_to_next():
# NB: There is an assumption that subgraph does not mutate inputs and
# there is no aliasing. Its Dynamo responsibility to prevent formation
# of invoke_subgraph ops if input aliasing/mutation is detected.
functionalized_subgraph = FunctionalizeCtxWrapper(ctx, subgraph)
out = invoke_subgraph(functionalized_subgraph, identifier, *unwrapped_operands)
if effects:
(new_token, *out) = out
ctx.mode._tokens[effects] = new_token
# (in the functionalization metadata phase) Capture tokens after and see if
# there are any differences (there are new effects or the token value for an
# effect type has changed)
tokens_after = dict(ctx.mode._tokens)
discovered_effects = set()
for effect_type, token in tokens_after.items():
if effect_type not in tokens_before or tokens_before[effect_type] is not token:
discovered_effects.add(effect_type)
if discovered_effects:
assert ctx.mode._allow_token_discovery, (
f"Number of tokens changed by {len(discovered_effects)} when tracing subgraph {subgraph}."
)
# Store discovered effects in the cache by identifier
if invoke_subgraph_cache:
invoke_subgraph_cache.add_effects(identifier, discovered_effects)
return ctx.wrap_tensors(out)
# Register the hop fake fn. This will be called in the fake_tensor _dispatch_impl.
@register_fake(invoke_subgraph)
def _(subgraph, identifier, *operands):
from torch._dynamo.utils import dynamo_timed
with dynamo_timed("invoke_subgraph_fake_tensor", log_pt2_compile_event=True):
return subgraph(*operands)
@invoke_subgraph.py_impl(ProxyTorchDispatchMode)
def _(proxy_mode: ProxyTorchDispatchMode, subgraph, identifier, *operands):
# Check if we have already traced the subgraph.
graph = None
invoke_subgraph_cache = get_invoke_subgraph_cache()
if invoke_subgraph_cache:
graph = invoke_subgraph_cache.get_proxy_dispatch_entry(identifier)
if graph is None:
from torch._dynamo.utils import dynamo_timed
with dynamo_timed("invoke_subgraph_proxy_tensor", log_pt2_compile_event=True):
graph = reenter_make_fx(subgraph)(*operands)
from torch._guards import detect_fake_mode
fake_mode = detect_fake_mode(operands)
assert fake_mode is not None and fake_mode.shape_env is not None
insert_deferred_runtime_asserts(
graph,
fake_mode.shape_env,
"invoke_subgraph_proxy_torch_dispatch_mode",
export=True,
)
graph.recompile()
assert isinstance(proxy_mode.tracer, torch.fx.Tracer)
if invoke_subgraph_cache:
invoke_subgraph_cache.add_proxy_dispatch_entry(identifier, graph)
node_args = (graph, identifier, *operands)
def _unwrap_proxy(arg):
if isinstance(arg, torch.fx.GraphModule):
# NOTE: [invoke_subgraph proxy_mode x auto_functionalize]
# Previously, we assumed that `invoke_subgraph` would always be traced with the same tracer.
# This allowed us to cache modules by their identifiers, assuming they were already registered.
#
# However, this assumption no longer holds when we auto-functionalize `invoke_subgraph`.
# auto_functionalize functionalizes the subgraph and wrap it with `FunctionWithNoFreeVars`.
# In the proxy mode implementation of `auto_functionalized_v2`, we need to materialize `FunctionWithNoFreeVars`
# input as a graph module. To do this, we re-trace the `invoke_subgraph` hop, which starts a new sub-tracer
# (see NOTE [materialize callable inputs as graph]). # When the new sub-tracer traces the `invoke_subgraph`
# with a previously cached identifier, the corresponding graph module might not
# exist as a submodule in the new tracer's root. Therefore, we register it as a submodule below.
#
# The alternative is to give a new identifier when we re-trace the invoke_subgraph but this will increase
# the compilatoin time, which defeats the purpose of caching.
registered_before = False
for (
_,
submod,
) in proxy_mode.tracer.root.named_modules(): # type: ignore[union-attr]
if arg is submod:
registered_before = True
if not registered_before:
qualname = proxy_mode.tracer.get_fresh_qualname("repeated_subgraph") # type: ignore[union-attr]
proxy_mode.tracer.root.register_module(qualname, arg) # type: ignore[union-attr]
return proxy_mode.tracer.unwrap_proxy(arg) # type: ignore[union-attr]
proxy_args = pytree.tree_map(_unwrap_proxy, node_args) # type: ignore[union-attr]
out_proxy = proxy_mode.tracer.create_proxy(
"call_function", invoke_subgraph, proxy_args, {}
)
example_out = invoke_subgraph(graph, identifier, *operands)
return track_tensor_tree(
example_out, out_proxy, constant=None, tracer=proxy_mode.tracer
)
| InvokeSubgraphAutogradOp |
python | spack__spack | lib/spack/spack/vendor/attr/exceptions.py | {
"start": 1185,
"end": 1371
} | class ____(RuntimeError):
"""
A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type
annotation.
.. versionadded:: 17.3.0
"""
| UnannotatedAttributeError |
python | getsentry__sentry | src/sentry/issues/escalating/escalating_issues_alg.py | {
"start": 136,
"end": 221
} | class ____(TypedDict):
forecasted_date: str
forecasted_value: int
| IssueForecast |
python | kamyu104__LeetCode-Solutions | Python/maximum-containers-on-a-ship.py | {
"start": 36,
"end": 259
} | class ____(object):
def maxContainers(self, n, w, maxWeight):
"""
:type n: int
:type w: int
:type maxWeight: int
:rtype: int
"""
return min(maxWeight//w, n*n)
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/autoVariance3.py | {
"start": 3243,
"end": 3804
} | class ____(dict[K, V]):
pass
# This should generate an error based on variance.
vinv3_1: ShouldBeInvariant3[float, str] = ShouldBeInvariant3[int, str]()
# This should generate an error based on variance.
vinv3_2: ShouldBeInvariant3[int, str] = ShouldBeInvariant3[float, str]()
# This should generate an error based on variance.
vinv3_3: ShouldBeInvariant3[str, float] = ShouldBeInvariant3[str, int]()
# This should generate an error based on variance.
vinv3_4: ShouldBeInvariant3[str, int] = ShouldBeInvariant3[str, float]()
@dataclass
| ShouldBeInvariant3 |
python | pytorch__pytorch | test/inductor/test_kernel_optimization.py | {
"start": 305,
"end": 892
} | class ____(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(
self,
input: torch.Tensor,
weights: torch.Tensor,
bias: torch.Tensor,
input2: torch.Tensor,
weights2: torch.Tensor,
bias2: torch.Tensor,
) -> torch.Tensor:
output = torch.functional.einsum("bni, nio -> bno", input, weights)
add1 = output.add(bias)
output2 = torch.functional.einsum("bni, bnio -> bno", input2, weights2)
add2 = output2 + bias2
return add1 + add2
| TestEinsumtoPointwise |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 58387,
"end": 58598
} | class ____(_PrintableStructure):
_fields_ = [
('bar1Total', c_ulonglong),
('bar1Free', c_ulonglong),
('bar1Used', c_ulonglong),
]
_fmt_ = {'<default>': "%d B"}
| c_nvmlBAR1Memory_t |
python | fastapi__sqlmodel | docs_src/tutorial/code_structure/tutorial002_py310/hero_model.py | {
"start": 149,
"end": 488
} | class ____(SQLModel, table=True):
id: int | None = Field(default=None, primary_key=True)
name: str = Field(index=True)
secret_name: str
age: int | None = Field(default=None, index=True)
team_id: int | None = Field(default=None, foreign_key="team.id")
team: Optional["Team"] = Relationship(back_populates="heroes")
| Hero |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 55274,
"end": 55612
} | class ____(BaseModel):
usage: Optional["Usage"] = Field(default=None, description="")
time: Optional[float] = Field(default=None, description="Time spent to process this request")
status: Optional[str] = Field(default=None, description="")
result: Optional["Record"] = Field(default=None, description="")
| InlineResponse20012 |
python | django__django | django/db/models/functions/datetime.py | {
"start": 4946,
"end": 5079
} | class ____(Extract):
"""Return Monday=1 through Sunday=7, based on ISO-8601."""
lookup_name = "iso_week_day"
| ExtractIsoWeekDay |
python | ray-project__ray | python/ray/util/actor_group.py | {
"start": 662,
"end": 1806
} | class ____:
def __init__(self, actor_group: "ActorGroup", method_name: str):
self.actor_group = weakref.ref(actor_group)
self._method_name = method_name
def __call__(self, *args, **kwargs):
raise TypeError(
"ActorGroup methods cannot be called directly. "
"Instead "
f"of running 'object.{self._method_name}()', try "
f"'object.{self._method_name}.remote()'."
)
def remote(self, *args, **kwargs):
return [
getattr(a.actor, self._method_name).remote(*args, **kwargs)
for a in self.actor_group().actors
]
@Deprecated(
message="For stateless/task processing, use ray.util.multiprocessing, see details "
f"in https://docs.ray.io/en/{get_ray_doc_version()}/ray-more-libs/multiprocessing.html. " # noqa: E501
"For stateful/actor processing such as batch prediction, use "
"Datasets.map_batches(compute=ActorPoolStrategy, ...), see details in "
f"https://docs.ray.io/en/{get_ray_doc_version()}/data/api/dataset.html#ray.data.Dataset.map_batches.", # noqa: E501
warning=True,
)
| ActorGroupMethod |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.