language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | numba__numba | numba/tests/test_enums.py | {
"start": 3131,
"end": 4988
} | class ____(BaseEnumTest, TestCase):
"""
Tests for IntEnum classes and members.
"""
values = [Shape.circle, Shape.square]
pairs = [
(Shape.circle, Shape.circle),
(Shape.circle, Shape.square),
(RequestError.not_found, RequestError.not_found),
(RequestError.internal_error, RequestError.not_found),
]
def test_int_coerce(self):
pyfunc = int_coerce_usecase
cfunc = jit(nopython=True)(pyfunc)
for arg in [300, 450, 550]:
self.assertPreciseEqual(pyfunc(arg), cfunc(arg))
def test_int_cast(self):
pyfunc = int_cast_usecase
cfunc = jit(nopython=True)(pyfunc)
for arg in [300, 450, 550]:
self.assertPreciseEqual(pyfunc(arg), cfunc(arg))
def test_vectorize(self):
cfunc = vectorize(nopython=True)(vectorize_usecase)
arg = np.array([2, 404, 500, 404])
sol = np.array([vectorize_usecase(i) for i in arg], dtype=arg.dtype)
self.assertPreciseEqual(sol, cfunc(arg))
def test_hash(self):
def pyfun(x):
return hash(x)
cfunc = jit(nopython=True)(pyfun)
for member in IntEnumWithNegatives:
self.assertPreciseEqual(pyfun(member), cfunc(member))
def test_int_shape_cast(self):
def pyfun_empty(x):
return np.empty((x, x), dtype='int64').fill(-1)
def pyfun_zeros(x):
return np.zeros((x, x), dtype='int64')
def pyfun_ones(x):
return np.ones((x, x), dtype='int64')
for pyfun in [pyfun_empty, pyfun_zeros, pyfun_ones]:
cfunc = jit(nopython=True)(pyfun)
for member in IntEnumWithNegatives:
if member >= 0:
self.assertPreciseEqual(pyfun(member), cfunc(member))
if __name__ == '__main__':
unittest.main()
| TestIntEnum |
python | pyca__cryptography | tests/hazmat/primitives/test_hash_vectors.py | {
"start": 4409,
"end": 4770
} | class ____:
test_sha3_256 = generate_hash_test(
load_hash_vectors,
os.path.join("hashes", "SHA3"),
["SHA3_256LongMsg.rsp", "SHA3_256ShortMsg.rsp"],
hashes.SHA3_256(),
)
@pytest.mark.supported(
only_if=lambda backend: backend.hash_supported(hashes.SHA3_384()),
skip_message="Does not support SHA3_384",
)
| TestSHA3256 |
python | viewflow__viewflow | tests/_cases/test_workflow_undo_tasks.py | {
"start": 2670,
"end": 2958
} | class ____(flow.Flow):
start = flow.Start(views.CreateProcessView.as_view(fields=[])).Next(this.task)
task = flow.View(views.UpdateProcessView.as_view(fields=[])).Next(this.end)
end = flow.End()
urlpatterns = [path("workflow/", flow.FlowViewset(TestUndoFlow).urls)]
| TestUndoFlow |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-cohere/llama_index/embeddings/cohere/base.py | {
"start": 1111,
"end": 3521
} | class ____(str, Enum):
START = "START"
END = "END"
NONE = "NONE"
# convenient shorthand
CAMN = CohereAIModelName
CAIT = CohereAIInputType
CAT = CohereAITruncate
# This list would be used for model name and input type validation
VALID_MODEL_INPUT_TYPES = {
CAMN.EMBED_V4: [
None,
CAIT.SEARCH_QUERY,
CAIT.SEARCH_DOCUMENT,
CAIT.CLASSIFICATION,
CAIT.CLUSTERING,
],
CAMN.ENGLISH_V3: [
None,
CAIT.SEARCH_QUERY,
CAIT.SEARCH_DOCUMENT,
CAIT.CLASSIFICATION,
CAIT.CLUSTERING,
],
CAMN.ENGLISH_LIGHT_V3: [
None,
CAIT.SEARCH_QUERY,
CAIT.SEARCH_DOCUMENT,
CAIT.CLASSIFICATION,
CAIT.CLUSTERING,
],
CAMN.MULTILINGUAL_V3: [
None,
CAIT.SEARCH_QUERY,
CAIT.SEARCH_DOCUMENT,
CAIT.CLASSIFICATION,
CAIT.CLUSTERING,
],
CAMN.MULTILINGUAL_LIGHT_V3: [
None,
CAIT.SEARCH_QUERY,
CAIT.SEARCH_DOCUMENT,
CAIT.CLASSIFICATION,
CAIT.CLUSTERING,
],
CAMN.ENGLISH_V2: [None],
CAMN.ENGLISH_LIGHT_V2: [None],
CAMN.MULTILINGUAL_V2: [None],
}
# v4 models support input type parameter
V4_MODELS = [CAMN.EMBED_V4]
# v3 models require an input_type field
# supported models for multimodal embeddings
V3_MODELS = [
CAMN.ENGLISH_V3,
CAMN.ENGLISH_LIGHT_V3,
CAMN.MULTILINGUAL_V3,
CAMN.MULTILINGUAL_LIGHT_V3,
]
# This list would be used for model name and embedding types validation
# Embedding type can be float/ int8/ uint8/ binary/ ubinary based on model.
VALID_MODEL_EMBEDDING_TYPES = {
CAMN.EMBED_V4: ["float", "int8", "uint8", "binary", "ubinary"],
CAMN.ENGLISH_V3: ["float", "int8", "uint8", "binary", "ubinary"],
CAMN.ENGLISH_LIGHT_V3: ["float", "int8", "uint8", "binary", "ubinary"],
CAMN.MULTILINGUAL_V3: ["float", "int8", "uint8", "binary", "ubinary"],
CAMN.MULTILINGUAL_LIGHT_V3: ["float", "int8", "uint8", "binary", "ubinary"],
CAMN.ENGLISH_V2: ["float"],
CAMN.ENGLISH_LIGHT_V2: ["float"],
CAMN.MULTILINGUAL_V2: ["float"],
}
VALID_TRUNCATE_OPTIONS = [CAT.START, CAT.END, CAT.NONE]
# supported image formats
SUPPORTED_IMAGE_FORMATS = {"png", "jpeg", "jpg", "webp", "gif"}
# Maximum batch size for Cohere API
MAX_EMBED_BATCH_SIZE = 96
# Assuming BaseEmbedding is a Pydantic model and handles its own initializations
| CohereAITruncate |
python | apache__airflow | providers/discord/tests/unit/discord/hooks/test_discord_webhook.py | {
"start": 3600,
"end": 4819
} | class ____:
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id="default-discord-webhook",
conn_type="discord",
host="https://discordapp.com/api/",
extra='{"webhook_endpoint": "webhooks/00000/some-discord-token_000"}',
)
)
def test_get_webhook_endpoint_manual_token(self):
# Given
provided_endpoint = "webhooks/11111/some-discord-token_111"
hook = DiscordWebhookHook(webhook_endpoint=provided_endpoint)
# When
webhook_endpoint = hook._get_webhook_endpoint(None, provided_endpoint)
# Then
assert webhook_endpoint == provided_endpoint
def test_get_webhook_endpoint_conn_id(self):
# Given
conn_id = "default-discord-webhook"
hook = DiscordWebhookHook(http_conn_id=conn_id)
expected_webhook_endpoint = "webhooks/00000/some-discord-token_000"
# When
webhook_endpoint = hook._get_webhook_endpoint(conn_id, None)
# Then
assert webhook_endpoint == expected_webhook_endpoint
| TestDiscordWebhookHook |
python | PyCQA__pylint | doc/data/messages/i/invalid-length-hint-returned/bad.py | {
"start": 0,
"end": 151
} | class ____:
"""__length_hint__ returns non-int"""
def __length_hint__(self): # [invalid-length-hint-returned]
return 3.0
| CustomLengthHint |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_dataproc.py | {
"start": 4300,
"end": 13222
} | class ____:
def test_async_cluster_trigger_serialization_should_execute_successfully(self, cluster_trigger):
classpath, kwargs = cluster_trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger"
assert kwargs == {
"cluster_name": TEST_CLUSTER_NAME,
"project_id": TEST_PROJECT_ID,
"region": TEST_REGION,
"gcp_conn_id": TEST_GCP_CONN_ID,
"impersonation_chain": None,
"polling_interval_seconds": TEST_POLL_INTERVAL,
"delete_on_error": True,
}
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.get_async_hook")
async def test_async_cluster_triggers_on_success_should_execute_successfully(
self, mock_get_async_hook, cluster_trigger
):
future = asyncio.Future()
future.set_result(TEST_RUNNING_CLUSTER)
mock_get_async_hook.return_value.get_cluster.return_value = future
generator = cluster_trigger.run()
actual_event = await generator.asend(None)
expected_event = TriggerEvent(
{
"cluster_name": TEST_CLUSTER_NAME,
"cluster_state": ClusterStatus.State(ClusterStatus.State.RUNNING).name,
"cluster": actual_event.payload["cluster"],
}
)
assert expected_event == actual_event
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.fetch_cluster")
@mock.patch(
"airflow.providers.google.cloud.hooks.dataproc.DataprocAsyncHook.delete_cluster",
return_value=asyncio.Future(),
)
@mock.patch("google.auth.default")
async def test_async_cluster_trigger_run_returns_error_event(
self, mock_auth, mock_delete_cluster, mock_fetch_cluster, cluster_trigger, async_get_cluster, caplog
):
mock_credentials = mock.MagicMock()
mock_credentials.universe_domain = "googleapis.com"
mock_auth.return_value = (mock_credentials, "project-id")
mock_delete_cluster.return_value = asyncio.Future()
mock_delete_cluster.return_value.set_result(None)
mock_fetch_cluster.return_value = TEST_ERROR_CLUSTER
caplog.set_level(logging.INFO)
trigger_event = None
async for event in cluster_trigger.run():
trigger_event = event
assert trigger_event.payload["cluster_name"] == TEST_CLUSTER_NAME
assert (
trigger_event.payload["cluster_state"] == ClusterStatus.State(ClusterStatus.State.DELETING).name
)
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.get_async_hook")
@mock.patch.object(DataprocClusterTrigger, "log")
async def test_cluster_run_loop_is_still_running(self, mock_log, mock_get_async_hook, cluster_trigger):
mock_cluster = mock.MagicMock()
mock_cluster.status = ClusterStatus(state=ClusterStatus.State.CREATING)
future = asyncio.Future()
future.set_result(mock_cluster)
mock_get_async_hook.return_value.get_cluster.return_value = future
task = asyncio.create_task(cluster_trigger.run().__anext__())
await asyncio.sleep(0.5)
assert not task.done()
mock_log.info.assert_called()
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.get_async_hook")
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.get_sync_hook")
@mock.patch.object(DataprocClusterTrigger, "log")
async def test_cluster_trigger_cancellation_handling(
self, mock_log, mock_get_sync_hook, mock_get_async_hook
):
cluster = Cluster(status=ClusterStatus(state=ClusterStatus.State.RUNNING))
mock_get_async_hook.return_value.get_cluster.return_value = asyncio.Future()
mock_get_async_hook.return_value.get_cluster.return_value.set_result(cluster)
mock_delete_cluster = mock.MagicMock()
mock_get_sync_hook.return_value.delete_cluster = mock_delete_cluster
cluster_trigger = DataprocClusterTrigger(
cluster_name="cluster_name",
project_id="project-id",
region="region",
gcp_conn_id="google_cloud_default",
impersonation_chain=None,
polling_interval_seconds=5,
delete_on_error=True,
)
cluster_trigger_gen = cluster_trigger.run()
try:
await cluster_trigger_gen.__anext__()
await cluster_trigger_gen.aclose()
except asyncio.CancelledError:
# Verify that cancellation was handled as expected
if cluster_trigger.delete_on_error:
mock_get_sync_hook.assert_called_once()
mock_delete_cluster.assert_called_once_with(
region=cluster_trigger.region,
cluster_name=cluster_trigger.cluster_name,
project_id=cluster_trigger.project_id,
)
mock_log.info.assert_called()
else:
mock_delete_cluster.assert_not_called()
except Exception as e:
pytest.fail(f"Unexpected exception raised: {e}")
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.get_async_hook")
async def test_fetch_cluster_status(self, mock_get_async_hook, cluster_trigger):
mock_cluster = mock.MagicMock()
mock_cluster.status = ClusterStatus(state=ClusterStatus.State.RUNNING)
future = asyncio.Future()
future.set_result(mock_cluster)
mock_get_async_hook.return_value.get_cluster.return_value = future
cluster = await cluster_trigger.fetch_cluster()
assert cluster.status.state == ClusterStatus.State.RUNNING, "The cluster state should be RUNNING"
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.get_async_hook")
@mock.patch.object(DataprocClusterTrigger, "log")
async def test_delete_when_error_occurred(self, mock_log, mock_get_async_hook, cluster_trigger):
mock_cluster = mock.MagicMock(spec=Cluster)
type(mock_cluster).status = mock.PropertyMock(
return_value=mock.MagicMock(state=ClusterStatus.State.ERROR)
)
mock_delete_future = asyncio.Future()
mock_delete_future.set_result(None)
mock_get_async_hook.return_value.delete_cluster.return_value = mock_delete_future
cluster_trigger.delete_on_error = True
await cluster_trigger.delete_when_error_occurred(mock_cluster)
mock_get_async_hook.return_value.delete_cluster.assert_called_once_with(
region=cluster_trigger.region,
cluster_name=cluster_trigger.cluster_name,
project_id=cluster_trigger.project_id,
)
mock_get_async_hook.return_value.delete_cluster.reset_mock()
cluster_trigger.delete_on_error = False
await cluster_trigger.delete_when_error_occurred(mock_cluster)
mock_get_async_hook.return_value.delete_cluster.assert_not_called()
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.get_async_hook")
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.get_sync_hook")
@mock.patch("airflow.providers.google.cloud.triggers.dataproc.DataprocClusterTrigger.safe_to_cancel")
@mock.patch.object(DataprocClusterTrigger, "log")
async def test_cluster_trigger_run_cancelled_not_safe_to_cancel(
self, mock_log, mock_safe_to_cancel, mock_get_sync_hook, mock_get_async_hook, cluster_trigger
):
"""Test the trigger's cancellation behavior when it is not safe to cancel."""
mock_safe_to_cancel.return_value = False
cluster = Cluster(status=ClusterStatus(state=ClusterStatus.State.RUNNING))
future_cluster = asyncio.Future()
future_cluster.set_result(cluster)
mock_get_async_hook.return_value.get_cluster.return_value = future_cluster
mock_delete_cluster = mock.MagicMock()
mock_get_sync_hook.return_value.delete_cluster = mock_delete_cluster
cluster_trigger.delete_on_error = True
async_gen = cluster_trigger.run()
task = asyncio.create_task(async_gen.__anext__())
await sleep(0)
task.cancel()
with contextlib.suppress(CancelledError):
await task
assert mock_delete_cluster.call_count == 0
mock_delete_cluster.assert_not_called()
| TestDataprocClusterTrigger |
python | tensorflow__tensorflow | tensorflow/compiler/tests/xla_test.py | {
"start": 3731,
"end": 14537
} | class ____(test.TestCase):
"""XLA test cases are parameterized test cases."""
def __init__(self, method_name='runTest'):
super(XLATestCase, self).__init__(method_name)
if 'XLA' in FLAGS.test_device:
context.context().enable_xla_devices()
# Check if the mlir bridge has been explicitly enabled or disabled. If
# is_mlir_bridge_enabled() returns None, the user did not explicitly enable
# or disable the bridge so do not update enable_mlir_bridge.
if test_util.is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
elif test_util.is_mlir_bridge_enabled() is not None:
context.context().enable_mlir_bridge = False
self.device = FLAGS.test_device
self.has_custom_call = self.device == 'XLA_CPU'
# Some tests (e.g. ftrl_ops) only work if the program goes through the
# _TPUCompileMLIR op. They will set this flag to True.
# TODO(kramm): Flip to true (and enable MLIR bridge) for more tests.
self.rewrite_ops_for_tpu = False
self._all_tf_types = set([
dtypes.as_dtype(types_pb2.DataType.Value(name))
for name in FLAGS.types.split(',')
])
self.int_tf_types = set(
[dtype for dtype in self._all_tf_types if dtype.is_integer]
)
self._float_tf_types = set(
[dtype for dtype in self._all_tf_types if dtype.is_floating]
)
self.complex_tf_types = set(
[dtype for dtype in self._all_tf_types if dtype.is_complex]
)
self._numeric_tf_types = set(
self.int_tf_types | self._float_tf_types | self.complex_tf_types
)
self.quantized_tf_types = set(
dtype for dtype in self._all_tf_types if dtype.is_quantized
)
# Quantized types don't have a numpy equivalent, include them in
# all_tf_types but not in all_types.
# TODO(b/115960798): Parametrize tests on TF types instead of numpy types
# and remove all_types.
self._all_types = set(
dtype.as_numpy_dtype
for dtype in self._all_tf_types
if not dtype.is_quantized
)
self._int_types = set([dtype.as_numpy_dtype for dtype in self.int_tf_types])
self.signed_int_types = set(
dtype.as_numpy_dtype
for dtype in self.int_tf_types
if not dtype.is_unsigned
)
self.unsigned_int_types = set(
dtype.as_numpy_dtype for dtype in self.int_tf_types if dtype.is_unsigned
)
self._float_types = set(
[dtype.as_numpy_dtype for dtype in self._float_tf_types]
)
self.complex_types = set(
[dtype.as_numpy_dtype for dtype in self.complex_tf_types]
)
self._numeric_types = set(
self._int_types | self._float_types | self.complex_types
)
# Parse the manifest file, if any, into a regex identifying tests to
# disable
# TODO(xpan): Make it text proto if it doesn't scale.
# Each line of the manifest file specifies an entry. The entry can be
# 1) TestNameRegex // E.g. CumprodTest.* Or
# 2) TestName TypeName // E.g. AdamOptimizerTest.testSharing DT_BFLOAT16
# The 1) disables the entire test. While 2) only filter some numeric types
# so that they are not used in those tests.
self.disabled_regex = None
self._method_types_filter = {}
if FLAGS.disabled_manifest is not None:
with open(FLAGS.disabled_manifest, 'r') as manifest_file:
disabled_regex, self._method_types_filter = parse_disabled_manifest(
manifest_file.read()
)
if disabled_regex:
self.disabled_regex = re.compile(disabled_regex)
if FLAGS.tf_xla_flags is not None:
os.environ['TF_XLA_FLAGS'] = FLAGS.tf_xla_flags
@property
def all_tf_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
tf_types = set(
[dtypes.as_dtype(t) for t in self._method_types_filter.get(name, set())]
)
return self._all_tf_types - tf_types
@property
def float_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._float_types - self._method_types_filter.get(name, set())
@property
def float_tf_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._float_tf_types - self._method_types_filter.get(name, set())
@property
def int_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._int_types - self._method_types_filter.get(name, set())
@property
def numeric_tf_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
tf_types = set(
[dtypes.as_dtype(t) for t in self._method_types_filter.get(name, set())]
)
return self._numeric_tf_types - tf_types
@property
def numeric_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._numeric_types - self._method_types_filter.get(name, set())
@property
def all_types(self):
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
return self._all_types - self._method_types_filter.get(name, set())
def setUp(self):
super(XLATestCase, self).setUp()
name = '{}.{}'.format(type(self).__name__, self._testMethodName)
if self.disabled_regex is not None and self.disabled_regex.match(name):
logging.info('Disabled test case: %s', name)
self.skipTest('{} is disabled by manifest.'.format(name))
return
logging.info('Start test case: %s', name)
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
super(XLATestCase, self).tearDown()
logging.info('End test case: %s', self._testMethodName)
@contextlib.contextmanager
def session(self) -> Iterator[session.Session]:
"""Custom implementation of session() for XLA tests.
We override the standard Tensorflow session() since it is too
specific to CPU and GPU tests. In particular, we want to disable soft
placement and explicitly assign ops to devices under test.
Yields:
A session to use when running a test case.
"""
graph = ops.Graph()
config = context.context().config
# Grappler can constant fold TensorListFromTensor ops into DT_VARIANT
# constants which XLA does not understand. So disable constant folding in
# these tests.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF
)
if self.rewrite_ops_for_tpu:
session_type = TPURewriteSession
else:
session_type = session.Session
with session_type(graph=graph, config=config) as sess, graph.as_default():
yield sess
def cached_session(self):
raise NotImplementedError(
'cached_session not supported on XLATestCase, please use session'
)
def test_session(self):
raise NotImplementedError(
'test_session not supported on XLATestCase, please use session'
)
@contextlib.contextmanager
def device_scope(self):
"""Scope that runs tests on `self.device`.
Yields:
A scope to apply to the operators under test.
"""
with ops.device('device:{}:0'.format(self.device)):
yield
def assert_op_output_matches_expected(
self, op, inp, expected, local_session,
equality_test=None, rtol=1e-3, atol=1e-5
):
"""Verifies that 'op' produces 'expected' when fed input 'inp' .
Args:
op: operator to test
inp: numpy input array to use as input to 'op'.
expected: numpy array representing the expected output of 'op'.
local_session: The session to use for the test.
equality_test: either None, or a function that tests two numpy arrays for
equality. If None, self.assertAllClose is used.
rtol: relative tolerance for equality test.
atol: absolute tolerance for equality test.
"""
with self.test_scope():
pinp = array_ops.placeholder(
dtypes.as_dtype(inp.dtype), inp.shape, name='a'
)
output = op(pinp)
result = local_session.run(output, {pinp: inp})
if equality_test is None:
self.assertEqual(output.dtype, expected.dtype)
self.assertAllCloseAccordingToType(
expected, result, rtol=rtol, atol=atol, bfloat16_rtol=0.03
)
else:
equality_test(result, expected, rtol=rtol, atol=atol)
def test_scope(self):
"""Deprecated alias of `device_scope`.
This should be avoided as the name starts with `test`, so test runners
treat it as a test. This interferes with class decorators that operate on
each test method.
"""
return self.device_scope()
def Benchmark(
tf_bench, builder_fn, use_xla_jit, device, separate_compiled_gradients=False
):
"""Build a graph and run benchmarks against it, with or without XLA.
Args:
tf_bench: An instance of tf.test.Benchmark, used to run the benchmark.
builder_fn: A function that builds a graph when invoked, and returns (name,
fetches), where name is the name of the test, and fetches is a list of
tensors to fetch as output.
use_xla_jit: If true compile with the XLA JIT, otherwise use regular TF.
device: The tensorflow device to run on, e.g. "cpu", "gpu".
separate_compiled_gradients: If true put each gradient subgraph into a
separate compilation scope. This gives fine-grained control over which
portions of the graph will be compiled as a single unit. Compiling
gradients separately may yield better performance for some graphs. The
scope is named based on the scope of the forward computation as well as
the name of the gradients. As a result, the gradients will be compiled in
a scope that is separate from both the forward computation, and from other
gradients.
"""
with ops.Graph().as_default():
name = None
targets = []
with ops.device(device):
fetches = []
jit_scope = jit.experimental_jit_scope
with jit_scope(
compile_ops=use_xla_jit,
separate_compiled_gradients=separate_compiled_gradients,
):
name, fetches = builder_fn()
# We only want to benchmark the operations themselves, and not the data
# transfer of the result(s). Non-compiled identity ops ensure XLA
# doesn't know we're dropping the results, otherwise it might compile
# away the entire computation.
for fetch in fetches:
targets.append(array_ops.identity(fetch).op)
# TODO(b/132430685): Should we allow soft placement here?
config = config_pb2.ConfigProto(allow_soft_placement=True)
with session.Session(config=config) as sess:
sess.run(variables.global_variables_initializer())
xla = 'xla_' if use_xla_jit else ''
tf_bench.run_op_benchmark(
sess, targets, name='%s_%s%s' % (name, xla, device)
)
| XLATestCase |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_base_classes_2.py | {
"start": 273,
"end": 313
} | class ____(C):
x: pyproj.Transformer
| D |
python | huggingface__transformers | src/transformers/models/canine/modeling_canine.py | {
"start": 1821,
"end": 4075
} | class ____(ModelOutput):
r"""
last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Sequence of hidden-states at the output of the last layer of the model (i.e. the output of the final
shallow Transformer encoder).
pooler_output (`torch.FloatTensor` of shape `(batch_size, hidden_size)`):
Hidden-state of the first token of the sequence (classification token) at the last layer of the deep
Transformer encoder, further processed by a Linear layer and a Tanh activation function. The Linear layer
weights are trained from the next sentence prediction (classification) objective during pretraining.
hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the input to each encoder + one for the output of each layer of each
encoder) of shape `(batch_size, sequence_length, hidden_size)` and `(batch_size, sequence_length //
config.downsampling_rate, hidden_size)`. Hidden-states of the model at the output of each layer plus the
initial input to each Transformer encoder. The hidden states of the shallow encoders have length
`sequence_length`, but the hidden states of the deep encoder have length `sequence_length` //
`config.downsampling_rate`.
attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of the 3 Transformer encoders of shape `(batch_size,
num_heads, sequence_length, sequence_length)` and `(batch_size, num_heads, sequence_length //
config.downsampling_rate, sequence_length // config.downsampling_rate)`. Attentions weights after the
attention softmax, used to compute the weighted average in the self-attention heads.
"""
last_hidden_state: Optional[torch.FloatTensor] = None
pooler_output: Optional[torch.FloatTensor] = None
hidden_states: Optional[tuple[torch.FloatTensor]] = None
attentions: Optional[tuple[torch.FloatTensor]] = None
| CanineModelOutputWithPooling |
python | psf__black | tests/data/cases/backslash_before_indent.py | {
"start": 32,
"end": 59
} | class ____:
\
pass
| Plotter |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/linalg/linear_operator_addition_test.py | {
"start": 15895,
"end": 16778
} | class ____(test.TestCase):
def setUp(self):
self._adder = linear_operator_addition._AddAndReturnMatrix()
@test_util.run_deprecated_v1
def test_diag_plus_diag(self):
diag1 = linalg.LinearOperatorDiag([1., 2.])
diag2 = linalg.LinearOperatorDiag([-1., 3.])
hints = linear_operator_addition._Hints(
is_positive_definite=False, is_non_singular=False)
self.assertTrue(self._adder.can_add(diag1, diag2))
operator = self._adder.add(diag1, diag2, "my_operator", hints)
self.assertIsInstance(operator, linalg.LinearOperatorFullMatrix)
with self.cached_session():
self.assertAllClose([[0., 0.], [0., 5.]], operator.to_dense())
self.assertFalse(operator.is_positive_definite)
self.assertFalse(operator.is_non_singular)
self.assertEqual("my_operator", operator.name)
if __name__ == "__main__":
test.main()
| AddAndReturnMatrixTest |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/asset/__init__.py | {
"start": 9625,
"end": 15811
} | class ____(os.PathLike, BaseAsset):
"""A representation of data asset dependencies between workflows."""
name: str = attrs.field(
validator=[_validate_asset_name],
)
uri: str = attrs.field(
validator=[_validate_non_empty_identifier],
converter=_sanitize_uri,
)
group: str = attrs.field(
default=attrs.Factory(operator.attrgetter("asset_type"), takes_self=True),
validator=[_validate_identifier],
)
extra: dict[str, JsonValue] = attrs.field(
factory=dict,
converter=_set_extra_default,
)
watchers: list[AssetWatcher | SerializedAssetWatcher] = attrs.field(
factory=list,
)
asset_type: ClassVar[str] = "asset"
__version__: ClassVar[int] = 1
@overload
def __init__(
self,
name: str,
uri: str | ObjectStoragePath,
*,
group: str = ...,
extra: dict[str, JsonValue] | None = None,
watchers: list[AssetWatcher | SerializedAssetWatcher] = ...,
) -> None:
"""Canonical; both name and uri are provided."""
@overload
def __init__(
self,
name: str,
*,
group: str = ...,
extra: dict[str, JsonValue] | None = None,
watchers: list[AssetWatcher | SerializedAssetWatcher] = ...,
) -> None:
"""It's possible to only provide the name, either by keyword or as the only positional argument."""
@overload
def __init__(
self,
*,
uri: str | ObjectStoragePath,
group: str = ...,
extra: dict[str, JsonValue] | None = None,
watchers: list[AssetWatcher | SerializedAssetWatcher] = ...,
) -> None:
"""It's possible to only provide the URI as a keyword argument."""
def __init__(
self,
name: str | None = None,
uri: str | ObjectStoragePath | None = None,
*,
group: str | None = None,
extra: dict[str, JsonValue] | None = None,
watchers: list[AssetWatcher | SerializedAssetWatcher] | None = None,
) -> None:
if name is None and uri is None:
raise TypeError("Asset() requires either 'name' or 'uri'")
if name is None:
name = str(uri)
elif uri is None:
uri = name
if TYPE_CHECKING:
assert name is not None
assert uri is not None
# attrs default (and factory) does not kick in if any value is given to
# the argument. We need to exclude defaults from the custom ___init___.
kwargs: dict[str, Any] = {}
if group is not None:
kwargs["group"] = group
if extra is not None:
kwargs["extra"] = extra
if watchers is not None:
kwargs["watchers"] = watchers
self.__attrs_init__(name=name, uri=uri, **kwargs)
@overload
@staticmethod
def ref(*, name: str) -> AssetNameRef: ...
@overload
@staticmethod
def ref(*, uri: str) -> AssetUriRef: ...
@staticmethod
def ref(*, name: str = "", uri: str = "") -> AssetRef:
if name and uri:
raise TypeError("Asset reference must be made to either name or URI, not both")
if name:
return AssetNameRef(name)
if uri:
return AssetUriRef(uri)
raise TypeError("Asset reference expects keyword argument 'name' or 'uri'")
def __fspath__(self) -> str:
return self.uri
def __eq__(self, other: Any) -> bool:
# The Asset class can be subclassed, and we don't want fields added by a
# subclass to break equality. This explicitly filters out only fields
# defined by the Asset class for comparison.
if not isinstance(other, Asset):
return NotImplemented
f = attrs.filters.include(*attrs.fields_dict(Asset))
return attrs.asdict(self, filter=f) == attrs.asdict(other, filter=f)
def __hash__(self):
f = attrs.filters.include(*attrs.fields_dict(Asset))
return hash(attrs.asdict(self, filter=f))
@property
def normalized_uri(self) -> str | None:
"""
Returns the normalized and AIP-60 compliant URI whenever possible.
If we can't retrieve the scheme from URI or no normalizer is provided or if parsing fails,
it returns None.
If a normalizer for the scheme exists and parsing is successful we return the normalizer result.
"""
if not (normalized_scheme := _get_normalized_scheme(self.uri)):
return None
if (normalizer := _get_uri_normalizer(normalized_scheme)) is None:
return None
parsed = urllib.parse.urlsplit(self.uri)
try:
normalized_uri = normalizer(parsed)
return urllib.parse.urlunsplit(normalized_uri)
except ValueError:
return None
def as_expression(self) -> Any:
"""
Serialize the asset into its scheduling expression.
:meta private:
"""
return {"asset": {"uri": self.uri, "name": self.name, "group": self.group}}
def iter_assets(self) -> Iterator[tuple[AssetUniqueKey, Asset]]:
yield AssetUniqueKey.from_asset(self), self
def iter_asset_aliases(self) -> Iterator[tuple[str, AssetAlias]]:
return iter(())
def iter_asset_refs(self) -> Iterator[AssetRef]:
return iter(())
def iter_dag_dependencies(self, *, source: str, target: str) -> Iterator[DagDependency]:
"""
Iterate an asset as dag dependency.
:meta private:
"""
yield DagDependency(
source=source or "asset",
target=target or "asset",
label=self.name,
dependency_type="asset",
# We can't get asset id at this stage.
# This will be updated when running SerializedDagModel.get_dag_dependencies
dependency_id=AssetUniqueKey.from_asset(self).to_str(),
)
def asprofile(self) -> AssetProfile:
"""
Profiles Asset to AssetProfile.
:meta private:
"""
return AssetProfile(name=self.name or None, uri=self.uri or None, type=Asset.__name__)
| Asset |
python | gevent__gevent | _setuputils.py | {
"start": 12127,
"end": 12388
} | class ____(Exception):
pass
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError # pylint:disable=no-name-in-module,import-error
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError)
| BuildFailed |
python | PrefectHQ__prefect | tests/deployment/test_base.py | {
"start": 2477,
"end": 4574
} | class ____:
async def test_initialize_project_works(self):
files = initialize_project()
assert len(files) >= 2
for file in files:
assert Path(file).exists()
# test defaults
with open("prefect.yaml", "r") as f:
contents = yaml.safe_load(f)
assert contents["name"] is not None
assert contents["prefect-version"] == prefect.__version__
async def test_initialize_project_with_name(self):
files = initialize_project(name="my-test-its-a-test")
assert len(files) >= 2
with open("prefect.yaml", "r") as f:
contents = yaml.safe_load(f)
assert contents["name"] == "my-test-its-a-test"
async def test_initialize_project_with_recipe(self):
files = initialize_project(recipe="docker-git")
assert len(files) >= 2
with open("prefect.yaml", "r") as f:
contents = yaml.safe_load(f)
clone_step = contents["pull"][0]
assert "prefect.deployments.steps.git_clone" in clone_step
build_step = contents["build"][0]
assert "prefect_docker.deployments.steps.build_docker_image" in build_step
@pytest.mark.parametrize(
"recipe",
[
d.absolute().name
for d in Path(
prefect.__development_base_path__
/ "src"
/ "prefect"
/ "deployments"
/ "recipes"
).iterdir()
if d.is_dir() and "docker" in d.absolute().name
],
)
async def test_initialize_project_with_docker_recipe_default_image(self, recipe):
files = initialize_project(recipe=recipe)
assert len(files) >= 2
with open("prefect.yaml", "r") as f:
contents = yaml.safe_load(f)
build_step = contents["build"][0]
assert "prefect_docker.deployments.steps.build_docker_image" in build_step
assert (
contents["deployments"][0]["work_pool"]["job_variables"]["image"]
== "{{ build_image.image }}"
)
| TestInitProject |
python | tensorflow__tensorflow | tensorflow/python/autograph/operators/data_structures.py | {
"start": 10001,
"end": 11744
} | class ____(
collections.namedtuple('ListStackOpts',
('element_dtype', 'original_call'))):
pass
def list_stack(list_, opts):
"""The list stack function.
This does not have a direct correspondent in Python. The closest idiom to
this is tf.append or np.stack. It's different from those in the sense that it
accepts a Tensor list, rather than a list of tensors. It can also accept
TensorArray. When the target is anything else, the dispatcher will rely on
ctx.original_call for fallback.
Args:
list_: An entity that supports append semantics.
opts: A ListStackOpts object.
Returns:
The output of the stack operation, typically a Tensor.
"""
assert isinstance(opts, ListStackOpts)
if isinstance(list_, tensor_array_ops.TensorArray):
return _tf_tensorarray_stack(list_)
elif tensor_util.is_tf_type(list_):
if list_.dtype == dtypes.variant:
return _tf_tensor_list_stack(list_, opts)
else:
# No-op for primitive Tensor arguments.
return list_
else:
return _py_list_stack(list_, opts)
def _tf_tensorarray_stack(list_):
"""Overload of list_stack that stages a TensorArray stack."""
return list_.stack()
def _tf_tensor_list_stack(list_, opts):
"""Overload of list_stack that stages a Tensor list write."""
if opts.element_dtype is None:
raise ValueError('cannot stack a list without knowing its element type;'
' use set_element_type to annotate it')
return list_ops.tensor_list_stack(list_, element_dtype=opts.element_dtype)
def _py_list_stack(list_, opts):
"""Overload of list_stack that executes a Python list append."""
# Revert to the original call.
return opts.original_call(list_)
| ListStackOpts |
python | ray-project__ray | python/ray/serve/_private/common.py | {
"start": 30533,
"end": 32976
} | class ____:
"""Report from a deployment handle on queued and ongoing requests.
Args:
deployment_id: The deployment ID of the deployment handle.
handle_id: The handle ID of the deployment handle.
actor_id: If the deployment handle (from which this metric was
sent) lives on an actor, the ID of that actor.
handle_source: Describes what kind of entity holds this
deployment handle: a Serve proxy, a Serve replica, or
unknown.
aggregated_queued_requests: average number of queued requests at the
handle over the past look_back_period_s seconds.
queued_requests: list of values of queued requests at the
handle over the past look_back_period_s seconds. This is a list because
we take multiple measurements over time.
aggregated_metrics: A map of metric name to the aggregated value over the past
look_back_period_s seconds at the handle for each replica.
metrics: A map of metric name to the list of values running at that handle for each replica
over the past look_back_period_s seconds. This is a list because
we take multiple measurements over time.
timestamp: The time at which this report was created.
"""
deployment_id: DeploymentID
handle_id: str
actor_id: str
handle_source: DeploymentHandleSource
aggregated_queued_requests: float
queued_requests: TimeSeries
aggregated_metrics: Dict[str, Dict[ReplicaID, float]]
metrics: Dict[str, Dict[ReplicaID, TimeSeries]]
timestamp: float
@property
def total_requests(self) -> float:
"""Total number of queued and running requests."""
return self.aggregated_queued_requests + sum(
self.aggregated_metrics.get(RUNNING_REQUESTS_KEY, {}).values()
)
@property
def is_serve_component_source(self) -> bool:
"""Whether the handle source is a Serve actor.
More specifically, this returns whether a Serve actor tracked
by the controller holds the deployment handle that sent this
report. If the deployment handle lives on a driver, a Ray task,
or an actor that's not a Serve replica, then this returns False.
"""
return self.handle_source in [
DeploymentHandleSource.PROXY,
DeploymentHandleSource.REPLICA,
]
@dataclass
| HandleMetricReport |
python | encode__django-rest-framework | tests/schemas/views.py | {
"start": 6002,
"end": 6381
} | class ____(generics.GenericAPIView):
serializer_class = ExampleSerializerModel
schema = AutoSchema(component_name="Ulysses")
def get(self, *args, **kwargs):
from datetime import datetime
now = datetime.now()
serializer = self.get_serializer(data=now.date(), datetime=now)
return Response(serializer.data)
| ExampleAutoSchemaComponentName |
python | tiangolo__fastapi | fastapi/exceptions.py | {
"start": 1877,
"end": 4081
} | class ____(StarletteWebSocketException):
"""
A WebSocket exception you can raise in your own code to show errors to the client.
This is for client errors, invalid authentication, invalid data, etc. Not for server
errors in your code.
Read more about it in the
[FastAPI docs for WebSockets](https://fastapi.tiangolo.com/advanced/websockets/).
## Example
```python
from typing import Annotated
from fastapi import (
Cookie,
FastAPI,
WebSocket,
WebSocketException,
status,
)
app = FastAPI()
@app.websocket("/items/{item_id}/ws")
async def websocket_endpoint(
*,
websocket: WebSocket,
session: Annotated[str | None, Cookie()] = None,
item_id: str,
):
if session is None:
raise WebSocketException(code=status.WS_1008_POLICY_VIOLATION)
await websocket.accept()
while True:
data = await websocket.receive_text()
await websocket.send_text(f"Session cookie is: {session}")
await websocket.send_text(f"Message text was: {data}, for item ID: {item_id}")
```
"""
def __init__(
self,
code: Annotated[
int,
Doc(
"""
A closing code from the
[valid codes defined in the specification](https://datatracker.ietf.org/doc/html/rfc6455#section-7.4.1).
"""
),
],
reason: Annotated[
Union[str, None],
Doc(
"""
The reason to close the WebSocket connection.
It is UTF-8-encoded data. The interpretation of the reason is up to the
application, it is not specified by the WebSocket specification.
It could contain text that could be human-readable or interpretable
by the client code, etc.
"""
),
] = None,
) -> None:
super().__init__(code=code, reason=reason)
RequestErrorModel: Type[BaseModel] = create_model("Request")
WebSocketErrorModel: Type[BaseModel] = create_model("WebSocket")
| WebSocketException |
python | getsentry__sentry | src/sentry/integrations/api/bases/external_actor.py | {
"start": 7264,
"end": 7477
} | class ____(OrganizationPermission):
scope_map = {
"POST": ["org:write", "org:admin"],
"PUT": ["org:write", "org:admin"],
"DELETE": ["org:write", "org:admin"],
}
| ExternalUserPermission |
python | bokeh__bokeh | tests/unit/bokeh/core/property/test_constraints.py | {
"start": 1207,
"end": 1247
} | class ____(HasProps, Local):
pass
| Child0 |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 126847,
"end": 127018
} | class ____:
xlMaximized = -4137 # from enum XlWindowState
xlMinimized = -4140 # from enum XlWindowState
xlNormal = -4143 # from enum XlWindowState
| WindowState |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/tasks.py | {
"start": 10054,
"end": 13302
} | class ____(GoogleCloudBaseOperator):
"""
Gets a queue from Cloud Tasks.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudTasksQueueGetOperator`
:param location: The location name in which the queue was created.
:param queue_name: The queue's name.
:param project_id: (Optional) The ID of the Google Cloud project that owns the Cloud Tasks.
If set to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: (Optional) A retry object used to retry requests.
If None is specified, requests will not be retried.
:param timeout: (Optional) The amount of time, in seconds, to wait for the request
to complete. Note that if retry is specified, the timeout applies to each
individual attempt.
:param metadata: (Optional) Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"queue_name",
"project_id",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (CloudTasksQueueLink(),)
def __init__(
self,
*,
location: str,
queue_name: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: MetaData = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.queue_name = queue_name
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudTasksHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
queue = hook.get_queue(
location=self.location,
queue_name=self.queue_name,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
CloudTasksQueueLink.persist(
context=context,
queue_name=queue.name,
)
return Queue.to_dict(queue)
| CloudTasksQueueGetOperator |
python | python__mypy | mypyc/build.py | {
"start": 1446,
"end": 28287
} | class ____(NamedTuple):
module: str
c_files: list[str]
other_files: list[str]
include_dirs: list[str]
LIBRT_MODULES = [
ModDesc("librt.internal", ["librt_internal.c"], [], []),
ModDesc("librt.strings", ["librt_strings.c"], [], []),
ModDesc(
"librt.base64",
[
"librt_base64.c",
"base64/lib.c",
"base64/codec_choose.c",
"base64/tables/tables.c",
"base64/arch/generic/codec.c",
"base64/arch/ssse3/codec.c",
"base64/arch/sse41/codec.c",
"base64/arch/sse42/codec.c",
"base64/arch/avx/codec.c",
"base64/arch/avx2/codec.c",
"base64/arch/avx512/codec.c",
"base64/arch/neon32/codec.c",
"base64/arch/neon64/codec.c",
],
[
"base64/arch/avx/enc_loop_asm.c",
"base64/arch/avx2/enc_loop.c",
"base64/arch/avx2/enc_loop_asm.c",
"base64/arch/avx2/enc_reshuffle.c",
"base64/arch/avx2/enc_translate.c",
"base64/arch/avx2/dec_loop.c",
"base64/arch/avx2/dec_reshuffle.c",
"base64/arch/generic/32/enc_loop.c",
"base64/arch/generic/64/enc_loop.c",
"base64/arch/generic/32/dec_loop.c",
"base64/arch/generic/enc_head.c",
"base64/arch/generic/enc_tail.c",
"base64/arch/generic/dec_head.c",
"base64/arch/generic/dec_tail.c",
"base64/arch/ssse3/dec_reshuffle.c",
"base64/arch/ssse3/dec_loop.c",
"base64/arch/ssse3/enc_loop_asm.c",
"base64/arch/ssse3/enc_translate.c",
"base64/arch/ssse3/enc_reshuffle.c",
"base64/arch/ssse3/enc_loop.c",
"base64/arch/neon64/dec_loop.c",
"base64/arch/neon64/enc_loop_asm.c",
"base64/codecs.h",
"base64/env.h",
"base64/tables/tables.h",
"base64/tables/table_dec_32bit.h",
"base64/tables/table_enc_12bit.h",
],
["base64"],
),
]
try:
# Import setuptools so that it monkey-patch overrides distutils
import setuptools
except ImportError:
pass
if TYPE_CHECKING:
if sys.version_info >= (3, 12):
from setuptools import Extension
else:
from distutils.core import Extension as _distutils_Extension
from typing import TypeAlias
from setuptools import Extension as _setuptools_Extension
Extension: TypeAlias = _setuptools_Extension | _distutils_Extension
if sys.version_info >= (3, 12):
# From setuptools' monkeypatch
from distutils import ccompiler, sysconfig # type: ignore[import-not-found]
else:
from distutils import ccompiler, sysconfig
def get_extension() -> type[Extension]:
# We can work with either setuptools or distutils, and pick setuptools
# if it has been imported.
use_setuptools = "setuptools" in sys.modules
extension_class: type[Extension]
if sys.version_info < (3, 12) and not use_setuptools:
import distutils.core
extension_class = distutils.core.Extension
else:
if not use_setuptools:
sys.exit("error: setuptools not installed")
extension_class = setuptools.Extension
return extension_class
def setup_mypycify_vars() -> None:
"""Rewrite a bunch of config vars in pretty dubious ways."""
# There has to be a better approach to this.
# The vars can contain ints but we only work with str ones
vars = cast(dict[str, str], sysconfig.get_config_vars())
if sys.platform == "darwin":
# Disable building 32-bit binaries, since we generate too much code
# for a 32-bit Mach-O object. There has to be a better way to do this.
vars["LDSHARED"] = vars["LDSHARED"].replace("-arch i386", "")
vars["LDFLAGS"] = vars["LDFLAGS"].replace("-arch i386", "")
vars["CFLAGS"] = vars["CFLAGS"].replace("-arch i386", "")
def fail(message: str) -> NoReturn:
# TODO: Is there something else we should do to fail?
sys.exit(message)
def emit_messages(options: Options, messages: list[str], dt: float, serious: bool = False) -> None:
# ... you know, just in case.
if options.junit_xml:
py_version = f"{options.python_version[0]}_{options.python_version[1]}"
write_junit_xml(
dt,
serious,
{None: messages} if messages else {},
options.junit_xml,
py_version,
options.platform,
)
if messages:
print("\n".join(messages))
def get_mypy_config(
mypy_options: list[str],
only_compile_paths: Iterable[str] | None,
compiler_options: CompilerOptions,
fscache: FileSystemCache | None,
) -> tuple[list[BuildSource], list[BuildSource], Options]:
"""Construct mypy BuildSources and Options from file and options lists"""
all_sources, options = process_options(mypy_options, fscache=fscache)
if only_compile_paths is not None:
paths_set = set(only_compile_paths)
mypyc_sources = [s for s in all_sources if s.path in paths_set]
else:
mypyc_sources = all_sources
if compiler_options.separate:
mypyc_sources = [
src for src in mypyc_sources if src.path and not src.path.endswith("__init__.py")
]
if not mypyc_sources:
return mypyc_sources, all_sources, options
# Override whatever python_version is inferred from the .ini file,
# and set the python_version to be the currently used version.
options.python_version = sys.version_info[:2]
if options.python_version[0] == 2:
fail("Python 2 not supported")
if not options.strict_optional:
fail("Disabling strict optional checking not supported")
options.show_traceback = True
# Needed to get types for all AST nodes
options.export_types = True
# We use mypy incremental mode when doing separate/incremental mypyc compilation
options.incremental = compiler_options.separate
options.preserve_asts = True
for source in mypyc_sources:
options.per_module_options.setdefault(source.module, {})["mypyc"] = True
return mypyc_sources, all_sources, options
def generate_c_extension_shim(
full_module_name: str, module_name: str, dir_name: str, group_name: str
) -> str:
"""Create a C extension shim with a passthrough PyInit function.
Arguments:
full_module_name: the dotted full module name
module_name: the final component of the module name
dir_name: the directory to place source code
group_name: the name of the group
"""
cname = "%s.c" % full_module_name.replace(".", os.sep)
cpath = os.path.join(dir_name, cname)
if IS_FREE_THREADED:
# We use multi-phase init in free-threaded builds to enable free threading.
shim_name = "module_shim_no_gil_multiphase.tmpl"
else:
shim_name = "module_shim.tmpl"
# We load the C extension shim template from a file.
# (So that the file could be reused as a bazel template also.)
with open(os.path.join(include_dir(), shim_name)) as f:
shim_template = f.read()
write_file(
cpath,
shim_template.format(
modname=module_name,
libname=shared_lib_name(group_name),
full_modname=exported_name(full_module_name),
),
)
return cpath
def group_name(modules: list[str]) -> str:
"""Produce a probably unique name for a group from a list of module names."""
if len(modules) == 1:
return modules[0]
h = hashlib.sha1()
h.update(",".join(modules).encode())
return h.hexdigest()[:20]
def include_dir() -> str:
"""Find the path of the lib-rt dir that needs to be included"""
return os.path.join(os.path.abspath(os.path.dirname(__file__)), "lib-rt")
def generate_c(
sources: list[BuildSource],
options: Options,
groups: emitmodule.Groups,
fscache: FileSystemCache,
compiler_options: CompilerOptions,
) -> tuple[list[list[tuple[str, str]]], str]:
"""Drive the actual core compilation step.
The groups argument describes how modules are assigned to C
extension modules. See the comments on the Groups type in
mypyc.emitmodule for details.
Returns the C source code and (for debugging) the pretty printed IR.
"""
t0 = time.time()
try:
result = emitmodule.parse_and_typecheck(
sources, options, compiler_options, groups, fscache
)
except CompileError as e:
emit_messages(options, e.messages, time.time() - t0, serious=(not e.use_stdout))
sys.exit(1)
t1 = time.time()
if result.errors:
emit_messages(options, result.errors, t1 - t0)
sys.exit(1)
if compiler_options.verbose:
print(f"Parsed and typechecked in {t1 - t0:.3f}s")
errors = Errors(options)
modules, ctext, mapper = emitmodule.compile_modules_to_c(
result, compiler_options=compiler_options, errors=errors, groups=groups
)
t2 = time.time()
emit_messages(options, errors.new_messages(), t2 - t1)
if errors.num_errors:
# No need to stop the build if only warnings were emitted.
sys.exit(1)
if compiler_options.verbose:
print(f"Compiled to C in {t2 - t1:.3f}s")
if options.mypyc_annotation_file:
generate_annotated_html(options.mypyc_annotation_file, result, modules, mapper)
return ctext, "\n".join(format_modules(modules))
def build_using_shared_lib(
sources: list[BuildSource],
group_name: str,
cfiles: list[str],
deps: list[str],
build_dir: str,
extra_compile_args: list[str],
) -> list[Extension]:
"""Produce the list of extension modules when a shared library is needed.
This creates one shared library extension module that all the
others import, and one shim extension module for each
module in the build. Each shim simply calls an initialization function
in the shared library.
The shared library (which lib_name is the name of) is a Python
extension module that exports the real initialization functions in
Capsules stored in module attributes.
"""
extensions = [
get_extension()(
shared_lib_name(group_name),
sources=cfiles,
include_dirs=[include_dir(), build_dir],
depends=deps,
extra_compile_args=extra_compile_args,
)
]
for source in sources:
module_name = source.module.split(".")[-1]
shim_file = generate_c_extension_shim(source.module, module_name, build_dir, group_name)
# We include the __init__ in the "module name" we stick in the Extension,
# since this seems to be needed for it to end up in the right place.
full_module_name = source.module
assert source.path
if os.path.split(source.path)[1] == "__init__.py":
full_module_name += ".__init__"
extensions.append(
get_extension()(
full_module_name, sources=[shim_file], extra_compile_args=extra_compile_args
)
)
return extensions
def build_single_module(
sources: list[BuildSource], cfiles: list[str], extra_compile_args: list[str]
) -> list[Extension]:
"""Produce the list of extension modules for a standalone extension.
This contains just one module, since there is no need for a shared module.
"""
return [
get_extension()(
sources[0].module,
sources=cfiles,
include_dirs=[include_dir()],
extra_compile_args=extra_compile_args,
)
]
def write_file(path: str, contents: str) -> None:
"""Write data into a file.
If the file already exists and has the same contents we
want to write, skip writing so as to preserve the mtime
and avoid triggering recompilation.
"""
# We encode it ourselves and open the files as binary to avoid windows
# newline translation
encoded_contents = contents.encode("utf-8")
try:
with open(path, "rb") as f:
old_contents: bytes | None = f.read()
except OSError:
old_contents = None
if old_contents != encoded_contents:
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "wb") as g:
g.write(encoded_contents)
# Fudge the mtime forward because otherwise when two builds happen close
# together (like in a test) setuptools might not realize the source is newer
# than the new artifact.
# XXX: This is bad though.
new_mtime = os.stat(path).st_mtime + 1
os.utime(path, times=(new_mtime, new_mtime))
def construct_groups(
sources: list[BuildSource],
separate: bool | list[tuple[list[str], str | None]],
use_shared_lib: bool,
group_name_override: str | None,
) -> emitmodule.Groups:
"""Compute Groups given the input source list and separate configs.
separate is the user-specified configuration for how to assign
modules to compilation groups (see mypycify docstring for details).
This takes that and expands it into our internal representation of
group configuration, documented in mypyc.emitmodule's definition
of Group.
"""
if separate is True:
groups: emitmodule.Groups = [([source], None) for source in sources]
elif isinstance(separate, list):
groups = []
used_sources = set()
for files, name in separate:
group_sources = [src for src in sources if src.path in files]
groups.append((group_sources, name))
used_sources.update(group_sources)
unused_sources = [src for src in sources if src not in used_sources]
if unused_sources:
groups.extend([([source], None) for source in unused_sources])
else:
groups = [(sources, None)]
# Generate missing names
for i, (group, name) in enumerate(groups):
if use_shared_lib and not name:
if group_name_override is not None:
name = group_name_override
else:
name = group_name([source.module for source in group])
groups[i] = (group, name)
return groups
def get_header_deps(cfiles: list[tuple[str, str]]) -> list[str]:
"""Find all the headers used by a group of cfiles.
We do this by just regexping the source, which is a bit simpler than
properly plumbing the data through.
Arguments:
cfiles: A list of (file name, file contents) pairs.
"""
headers: set[str] = set()
for _, contents in cfiles:
headers.update(re.findall(r'#include "(.*)"', contents))
return sorted(headers)
def mypyc_build(
paths: list[str],
compiler_options: CompilerOptions,
*,
separate: bool | list[tuple[list[str], str | None]] = False,
only_compile_paths: Iterable[str] | None = None,
skip_cgen_input: Any | None = None,
always_use_shared_lib: bool = False,
) -> tuple[emitmodule.Groups, list[tuple[list[str], list[str]]]]:
"""Do the front and middle end of mypyc building, producing and writing out C source."""
fscache = FileSystemCache()
mypyc_sources, all_sources, options = get_mypy_config(
paths, only_compile_paths, compiler_options, fscache
)
# We generate a shared lib if there are multiple modules or if any
# of the modules are in package. (Because I didn't want to fuss
# around with making the single module code handle packages.)
use_shared_lib = (
len(mypyc_sources) > 1
or any("." in x.module for x in mypyc_sources)
or always_use_shared_lib
)
groups = construct_groups(mypyc_sources, separate, use_shared_lib, compiler_options.group_name)
if compiler_options.group_name is not None:
assert len(groups) == 1, "If using custom group_name, only one group is expected"
# We let the test harness just pass in the c file contents instead
# so that it can do a corner-cutting version without full stubs.
if not skip_cgen_input:
group_cfiles, ops_text = generate_c(
all_sources, options, groups, fscache, compiler_options=compiler_options
)
# TODO: unique names?
write_file(os.path.join(compiler_options.target_dir, "ops.txt"), ops_text)
else:
group_cfiles = skip_cgen_input
# Write out the generated C and collect the files for each group
# Should this be here??
group_cfilenames: list[tuple[list[str], list[str]]] = []
for cfiles in group_cfiles:
cfilenames = []
for cfile, ctext in cfiles:
cfile = os.path.join(compiler_options.target_dir, cfile)
if not options.mypyc_skip_c_generation:
write_file(cfile, ctext)
if os.path.splitext(cfile)[1] == ".c":
cfilenames.append(cfile)
deps = [os.path.join(compiler_options.target_dir, dep) for dep in get_header_deps(cfiles)]
group_cfilenames.append((cfilenames, deps))
return groups, group_cfilenames
def mypycify(
paths: list[str],
*,
only_compile_paths: Iterable[str] | None = None,
verbose: bool = False,
opt_level: str = "3",
debug_level: str = "1",
strip_asserts: bool = False,
multi_file: bool = False,
separate: bool | list[tuple[list[str], str | None]] = False,
skip_cgen_input: Any | None = None,
target_dir: str | None = None,
include_runtime_files: bool | None = None,
strict_dunder_typing: bool = False,
group_name: str | None = None,
log_trace: bool = False,
depends_on_librt_internal: bool = False,
install_librt: bool = False,
experimental_features: bool = False,
) -> list[Extension]:
"""Main entry point to building using mypyc.
This produces a list of Extension objects that should be passed as the
ext_modules parameter to setup.
Arguments:
paths: A list of file paths to build. It may also contain mypy options.
only_compile_paths: If not None, an iterable of paths that are to be
the only modules compiled, even if other modules
appear in the mypy command line given to paths.
(These modules must still be passed to paths.)
verbose: Should mypyc be more verbose. Defaults to false.
opt_level: The optimization level, as a string. Defaults to '3' (meaning '-O3').
debug_level: The debug level, as a string. Defaults to '1' (meaning '-g1').
strip_asserts: Should asserts be stripped from the generated code.
multi_file: Should each Python module be compiled into its own C source file.
This can reduce compile time and memory requirements at the likely
cost of runtime performance of compiled code. Defaults to false.
separate: Should compiled modules be placed in separate extension modules.
If False, all modules are placed in a single shared library.
If True, every module is placed in its own library.
Otherwise, separate should be a list of
(file name list, optional shared library name) pairs specifying
groups of files that should be placed in the same shared library
(while all other modules will be placed in its own library).
Each group can be compiled independently, which can
speed up compilation, but calls between groups can
be slower than calls within a group and can't be
inlined.
target_dir: The directory to write C output files. Defaults to 'build'.
include_runtime_files: If not None, whether the mypyc runtime library
should be directly #include'd instead of linked
separately in order to reduce compiler invocations.
Defaults to False in multi_file mode, True otherwise.
strict_dunder_typing: If True, force dunder methods to have the return type
of the method strictly, which can lead to more
optimization opportunities. Defaults to False.
group_name: If set, override the default group name derived from
the hash of module names. This is used for the names of the
output C files and the shared library. This is only supported
if there is a single group. [Experimental]
log_trace: If True, compiled code writes a trace log of events in
mypyc_trace.txt (derived from executed operations). This is
useful for performance analysis, such as analyzing which
primitive ops are used the most and on which lines.
depends_on_librt_internal: This is True only for mypy itself.
install_librt: If True, also build the librt extension modules. Normally,
those are build and published on PyPI separately, but during
tests, we want to use their development versions (i.e. from
current commit).
experimental_features: Enable experimental features (install_librt=True is
also needed if using experimental librt features). These
have no backward compatibility guarantees!
"""
# Figure out our configuration
compiler_options = CompilerOptions(
strip_asserts=strip_asserts,
multi_file=multi_file,
verbose=verbose,
separate=separate is not False,
target_dir=target_dir,
include_runtime_files=include_runtime_files,
strict_dunder_typing=strict_dunder_typing,
group_name=group_name,
log_trace=log_trace,
depends_on_librt_internal=depends_on_librt_internal,
experimental_features=experimental_features,
)
# Generate all the actual important C code
groups, group_cfilenames = mypyc_build(
paths,
only_compile_paths=only_compile_paths,
compiler_options=compiler_options,
separate=separate,
skip_cgen_input=skip_cgen_input,
)
# Mess around with setuptools and actually get the thing built
setup_mypycify_vars()
# Create a compiler object so we can make decisions based on what
# compiler is being used. typeshed is missing some attributes on the
# compiler object so we give it type Any
compiler: Any = ccompiler.new_compiler()
sysconfig.customize_compiler(compiler)
build_dir = compiler_options.target_dir
cflags: list[str] = []
if compiler.compiler_type == "unix":
cflags += [
f"-O{opt_level}",
f"-g{debug_level}",
"-Werror",
"-Wno-unused-function",
"-Wno-unused-label",
"-Wno-unreachable-code",
"-Wno-unused-variable",
"-Wno-unused-command-line-argument",
"-Wno-unknown-warning-option",
"-Wno-unused-but-set-variable",
"-Wno-ignored-optimization-argument",
# Disables C Preprocessor (cpp) warnings
# See https://github.com/mypyc/mypyc/issues/956
"-Wno-cpp",
]
if log_trace:
cflags.append("-DMYPYC_LOG_TRACE")
if experimental_features:
cflags.append("-DMYPYC_EXPERIMENTAL")
elif compiler.compiler_type == "msvc":
# msvc doesn't have levels, '/O2' is full and '/Od' is disable
if opt_level == "0":
opt_level = "d"
elif opt_level in ("1", "2", "3"):
opt_level = "2"
if debug_level == "0":
debug_level = "NONE"
elif debug_level == "1":
debug_level = "FASTLINK"
elif debug_level in ("2", "3"):
debug_level = "FULL"
cflags += [
f"/O{opt_level}",
f"/DEBUG:{debug_level}",
"/wd4102", # unreferenced label
"/wd4101", # unreferenced local variable
"/wd4146", # negating unsigned int
]
if multi_file:
# Disable whole program optimization in multi-file mode so
# that we actually get the compilation speed and memory
# use wins that multi-file mode is intended for.
cflags += ["/GL-", "/wd9025"] # warning about overriding /GL
if log_trace:
cflags.append("/DMYPYC_LOG_TRACE")
if experimental_features:
cflags.append("/DMYPYC_EXPERIMENTAL")
# If configured to (defaults to yes in multi-file mode), copy the
# runtime library in. Otherwise it just gets #included to save on
# compiler invocations.
shared_cfilenames = []
if not compiler_options.include_runtime_files:
for name in RUNTIME_C_FILES:
rt_file = os.path.join(build_dir, name)
with open(os.path.join(include_dir(), name), encoding="utf-8") as f:
write_file(rt_file, f.read())
shared_cfilenames.append(rt_file)
extensions = []
for (group_sources, lib_name), (cfilenames, deps) in zip(groups, group_cfilenames):
if lib_name:
extensions.extend(
build_using_shared_lib(
group_sources,
lib_name,
cfilenames + shared_cfilenames,
deps,
build_dir,
cflags,
)
)
else:
extensions.extend(
build_single_module(group_sources, cfilenames + shared_cfilenames, cflags)
)
if install_librt:
for name in RUNTIME_C_FILES:
rt_file = os.path.join(build_dir, name)
with open(os.path.join(include_dir(), name), encoding="utf-8") as f:
write_file(rt_file, f.read())
for mod, file_names, addit_files, includes in LIBRT_MODULES:
for file_name in file_names + addit_files:
rt_file = os.path.join(build_dir, file_name)
with open(os.path.join(include_dir(), file_name), encoding="utf-8") as f:
write_file(rt_file, f.read())
extensions.append(
get_extension()(
mod,
sources=[
os.path.join(build_dir, file) for file in file_names + RUNTIME_C_FILES
],
include_dirs=[include_dir()]
+ [os.path.join(include_dir(), d) for d in includes],
extra_compile_args=cflags,
)
)
return extensions
| ModDesc |
python | getsentry__sentry | src/sentry/snuba/metrics/naming_layer/public.py | {
"start": 933,
"end": 2387
} | class ____(Enum):
"""
These are the public facing names of the API and only the session fields listed here are
queryable in the API.
"""
DURATION = "session.duration"
ALL = "session.all"
ABNORMAL = "session.abnormal"
UNHANDLED = "session.unhandled"
CRASHED = "session.crashed"
CRASH_FREE = "session.crash_free"
ERRORED = "session.errored"
ERRORED_PREAGGREGATED = "session.errored_preaggregated"
HEALTHY = "session.healthy"
CRASH_RATE = "session.crash_rate"
CRASH_FREE_RATE = "session.crash_free_rate"
ALL_USER = "session.all_user"
ABNORMAL_USER = "session.abnormal_user"
CRASHED_USER = "session.crashed_user"
UNHANDLED_USER = "session.unhandled_user"
CRASH_FREE_USER = "session.crash_free_user"
ERRORED_USER = "session.errored_user"
HEALTHY_USER = "session.healthy_user"
CRASH_USER_RATE = "session.crash_user_rate"
CRASH_FREE_USER_RATE = "session.crash_free_user_rate"
ERRORED_SET = "sessions.errored.unique"
ANR_RATE = "session.anr_rate"
FOREGROUND_ANR_RATE = "session.foreground_anr_rate"
ABNORMAL_RATE = "session.abnormal_rate"
ABNORMAL_USER_RATE = "session.abnormal_user_rate"
ERRORED_RATE = "session.errored_rate"
ERRORED_USER_RATE = "session.errored_user_rate"
UNHANDLED_RATE = "session.unhandled_rate"
UNHANDLED_USER_RATE = "session.unhandled_user_rate"
UNHEALTHY_RATE = "session.unhealthy_rate"
| SessionMetricKey |
python | keras-team__keras | keras/src/layers/preprocessing/normalization.py | {
"start": 337,
"end": 15723
} | class ____(DataLayer):
"""A preprocessing layer that normalizes continuous features.
This layer will shift and scale inputs into a distribution centered around
0 with standard deviation 1. It accomplishes this by precomputing the mean
and variance of the data, and calling `(input - mean) / sqrt(var)` at
runtime.
The mean and variance values for the layer must be either supplied on
construction or learned via `adapt()`. `adapt()` will compute the mean and
variance of the data and store them as the layer's weights. `adapt()` should
be called before `fit()`, `evaluate()`, or `predict()`.
**Note:** This layer is safe to use inside a `tf.data` or `grain` pipeline
(independently of which backend you're using).
Args:
axis: Integer, tuple of integers, or None. The axis or axes that should
have a separate mean and variance for each index in the shape.
For example, if shape is `(None, 5)` and `axis=1`, the layer will
track 5 separate mean and variance values for the last axis.
If `axis` is set to `None`, the layer will normalize
all elements in the input by a scalar mean and variance.
When `-1`, the last axis of the input is assumed to be a
feature dimension and is normalized per index.
Note that in the specific case of batched scalar inputs where
the only axis is the batch axis, the default will normalize
each index in the batch separately.
In this case, consider passing `axis=None`. Defaults to `-1`.
mean: The mean value(s) to use during normalization. The passed value(s)
will be broadcast to the shape of the kept axes above;
if the value(s) cannot be broadcast, an error will be raised when
this layer's `build()` method is called.
`mean` and `variance` must be specified together.
variance: The variance value(s) to use during normalization. The passed
value(s) will be broadcast to the shape of the kept axes above;
if the value(s) cannot be broadcast, an error will be raised when
this layer's `build()` method is called.
`mean` and `variance` must be specified together.
invert: If `True`, this layer will apply the inverse transformation
to its inputs: it would turn a normalized input back into its
original form.
Examples:
Calculate a global mean and variance by analyzing the dataset in `adapt()`.
>>> adapt_data = np.array([1., 2., 3., 4., 5.], dtype='float32')
>>> input_data = np.array([1., 2., 3.], dtype='float32')
>>> layer = keras.layers.Normalization(axis=None)
>>> layer.adapt(adapt_data)
>>> layer(input_data)
array([-1.4142135, -0.70710677, 0.], dtype=float32)
Calculate a mean and variance for each index on the last axis.
>>> adapt_data = np.array([[0., 7., 4.],
... [2., 9., 6.],
... [0., 7., 4.],
... [2., 9., 6.]], dtype='float32')
>>> input_data = np.array([[0., 7., 4.]], dtype='float32')
>>> layer = keras.layers.Normalization(axis=-1)
>>> layer.adapt(adapt_data)
>>> layer(input_data)
array([-1., -1., -1.], dtype=float32)
Pass the mean and variance directly.
>>> input_data = np.array([[1.], [2.], [3.]], dtype='float32')
>>> layer = keras.layers.Normalization(mean=3., variance=2.)
>>> layer(input_data)
array([[-1.4142135 ],
[-0.70710677],
[ 0. ]], dtype=float32)
Use the layer to de-normalize inputs (after adapting the layer).
>>> adapt_data = np.array([[0., 7., 4.],
... [2., 9., 6.],
... [0., 7., 4.],
... [2., 9., 6.]], dtype='float32')
>>> input_data = np.array([[1., 2., 3.]], dtype='float32')
>>> layer = keras.layers.Normalization(axis=-1, invert=True)
>>> layer.adapt(adapt_data)
>>> layer(input_data)
array([2., 10., 8.], dtype=float32)
"""
def __init__(
self, axis=-1, mean=None, variance=None, invert=False, **kwargs
):
super().__init__(**kwargs)
# Standardize `axis` to a tuple.
if axis is None:
axis = ()
elif isinstance(axis, int):
axis = (axis,)
else:
axis = tuple(axis)
self.axis = axis
self.input_mean = mean
self.input_variance = variance
self.invert = invert
self.supports_masking = True
self._build_input_shape = None
self.mean = None
# Set `mean` and `variance` if passed.
if (mean is not None) != (variance is not None):
raise ValueError(
"When setting values directly, both `mean` and `variance` "
f"must be set. Received: mean={mean} and variance={variance}"
)
def build(self, input_shape):
if input_shape is None:
return
ndim = len(input_shape)
self._build_input_shape = input_shape
if any(a < -ndim or a >= ndim for a in self.axis):
raise ValueError(
"All `axis` values must be in the range [-ndim, ndim). "
f"Received inputs with ndim={ndim}, while axis={self.axis}"
)
# Axes to be kept, replacing negative values with positive equivalents.
# Sorted to avoid transposing axes.
self._keep_axis = tuple(
sorted([d if d >= 0 else d + ndim for d in self.axis])
)
# All axes to be kept should have known shape.
for d in self._keep_axis:
if input_shape[d] is None:
raise ValueError(
"All `axis` values to be kept must have a known shape. "
f"Received axis={self.axis}, "
f"inputs.shape={input_shape}, "
f"with unknown axis at index {d}"
)
# Axes to be reduced.
self._reduce_axis = tuple(
d for d in range(ndim) if d not in self._keep_axis
)
# 1 if an axis should be reduced, 0 otherwise.
self._reduce_axis_mask = [
0 if d in self._keep_axis else 1 for d in range(ndim)
]
# Broadcast any reduced axes.
self._broadcast_shape = [
input_shape[d] if d in self._keep_axis else 1 for d in range(ndim)
]
mean_and_var_shape = tuple(input_shape[d] for d in self._keep_axis)
self._mean_and_var_shape = mean_and_var_shape
if self.input_mean is None:
self.adapt_mean = self.add_weight(
name="mean",
shape=mean_and_var_shape,
initializer="zeros",
trainable=False,
)
self.adapt_variance = self.add_weight(
name="variance",
shape=mean_and_var_shape,
initializer="ones",
trainable=False,
)
# For backwards compatibility with older saved models.
self.count = self.add_weight(
name="count",
shape=(),
dtype="int",
initializer="zeros",
trainable=False,
)
self.built = True
self.finalize_state()
else:
# In the no adapt case, make constant tensors for mean and variance
# with proper broadcast shape for use during call.
mean = ops.convert_to_tensor(self.input_mean)
variance = ops.convert_to_tensor(self.input_variance)
mean = ops.broadcast_to(mean, self._broadcast_shape)
variance = ops.broadcast_to(variance, self._broadcast_shape)
self.mean = ops.cast(mean, dtype=self.compute_dtype)
self.variance = ops.cast(variance, dtype=self.compute_dtype)
def adapt(self, data):
"""Computes the mean and variance of values in a dataset.
Calling `adapt()` on a `Normalization` layer is an alternative to
passing in `mean` and `variance` arguments during layer construction. A
`Normalization` layer should always either be adapted over a dataset or
passed `mean` and `variance`.
During `adapt()`, the layer will compute a `mean` and `variance`
separately for each position in each axis specified by the `axis`
argument. To calculate a single `mean` and `variance` over the input
data, simply pass `axis=None` to the layer.
Arg:
data: The data to train on. It can be passed either as a
`tf.data.Dataset`, as a NumPy array, or as a backend-native
eager tensor.
If a dataset, *it must be batched*. Keras will assume that the
data is batched, and if that assumption doesn't hold, the mean
and variance may be incorrectly computed.
"""
if isinstance(data, np.ndarray) or backend.is_tensor(data):
input_shape = data.shape
elif isinstance(data, tf.data.Dataset):
input_shape = tuple(data.element_spec.shape)
if len(input_shape) == 1:
# Batch dataset if it isn't batched
data = data.batch(128)
input_shape = tuple(data.element_spec.shape)
elif isinstance(data, PyDataset):
data = data[0]
if isinstance(data, tuple):
# handling (x, y) or (x, y, sample_weight)
data = data[0]
input_shape = data.shape
else:
raise TypeError(
f"Unsupported data type: {type(data)}. `adapt` supports "
f"`np.ndarray`, backend tensors, `tf.data.Dataset`, and "
f"`keras.utils.PyDataset`."
)
if not self.built:
self.build(input_shape)
else:
for d in self._keep_axis:
if input_shape[d] != self._build_input_shape[d]:
raise ValueError(
"The layer was built with "
f"input_shape={self._build_input_shape}, "
"but adapt() is being called with data with "
f"an incompatible shape, data.shape={input_shape}"
)
if isinstance(data, np.ndarray):
total_mean = np.mean(data, axis=self._reduce_axis)
total_var = np.var(data, axis=self._reduce_axis)
elif backend.is_tensor(data):
total_mean = ops.mean(data, axis=self._reduce_axis)
total_var = ops.var(data, axis=self._reduce_axis)
elif isinstance(data, (tf.data.Dataset, PyDataset)):
total_mean = ops.zeros(self._mean_and_var_shape)
total_var = ops.zeros(self._mean_and_var_shape)
total_count = 0
for batch in data:
batch = backend.convert_to_tensor(
batch, dtype=self.compute_dtype
)
batch_mean = ops.mean(batch, axis=self._reduce_axis)
batch_var = ops.var(batch, axis=self._reduce_axis)
if self._reduce_axis:
batch_reduce_shape = (
batch.shape[d] for d in self._reduce_axis
)
batch_count = math.prod(batch_reduce_shape)
else:
batch_count = 1
total_count += batch_count
batch_weight = float(batch_count) / total_count
existing_weight = 1.0 - batch_weight
new_total_mean = (
total_mean * existing_weight + batch_mean * batch_weight
)
# The variance is computed using the lack-of-fit sum of squares
# formula (see
# https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares).
total_var = (
total_var + (total_mean - new_total_mean) ** 2
) * existing_weight + (
batch_var + (batch_mean - new_total_mean) ** 2
) * batch_weight
total_mean = new_total_mean
else:
raise NotImplementedError(f"Unsupported data type: {type(data)}")
self.adapt_mean.assign(total_mean)
self.adapt_variance.assign(total_var)
self.finalize_state()
def finalize_state(self):
if self.input_mean is not None or not self.built:
return
# In the adapt case, we make constant tensors for mean and variance with
# proper broadcast shape and dtype each time `finalize_state` is called.
self.mean = ops.reshape(self.adapt_mean, self._broadcast_shape)
self.mean = ops.cast(self.mean, self.compute_dtype)
self.variance = ops.reshape(self.adapt_variance, self._broadcast_shape)
self.variance = ops.cast(self.variance, self.compute_dtype)
def call(self, inputs):
# This layer can be called in tf.data
# even with another backend after it has been adapted.
# However it must use backend-native logic for adapt().
if self.mean is None:
# May happen when in tf.data when mean/var was passed explicitly
raise ValueError(
"You must call `.build(input_shape)` "
"on the layer before using it."
)
inputs = self.backend.core.convert_to_tensor(
inputs, dtype=self.compute_dtype
)
# Ensure the weights are in the correct backend. Without this, it is
# possible to cause breakage when using this layer in tf.data.
mean = self.convert_weight(self.mean)
variance = self.convert_weight(self.variance)
if self.invert:
return self.backend.numpy.add(
mean,
self.backend.numpy.multiply(
inputs,
self.backend.numpy.maximum(
self.backend.numpy.sqrt(variance), backend.epsilon()
),
),
)
else:
return self.backend.numpy.divide(
self.backend.numpy.subtract(inputs, mean),
self.backend.numpy.maximum(
self.backend.numpy.sqrt(variance), backend.epsilon()
),
)
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
config = super().get_config()
config.update(
{
"axis": self.axis,
"invert": self.invert,
"mean": np.array(self.input_mean).tolist(),
"variance": np.array(self.input_variance).tolist(),
}
)
return config
def load_own_variables(self, store):
super().load_own_variables(store)
# Ensure that we call finalize_state after variable loading.
self.finalize_state()
def get_build_config(self):
if self._build_input_shape:
return {"input_shape": self._build_input_shape}
def build_from_config(self, config):
if config:
self.build(config["input_shape"])
| Normalization |
python | huggingface__transformers | tests/models/encodec/test_modeling_encodec.py | {
"start": 4841,
"end": 63312
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (EncodecModel,) if is_torch_available() else ()
is_encoder_decoder = True
test_resize_embeddings = False
pipeline_model_mapping = {"feature-extraction": EncodecModel} if is_torch_available() else {}
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
# model does not have attention and does not support returning hidden states
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if "output_attentions" in inputs_dict:
inputs_dict.pop("output_attentions")
if "output_hidden_states" in inputs_dict:
inputs_dict.pop("output_hidden_states")
return inputs_dict
def setUp(self):
self.model_tester = EncodecModelTester(self)
self.config_tester = ConfigTester(
self, config_class=EncodecConfig, hidden_size=37, common_properties=[], has_text_modality=False
)
def test_config(self):
self.config_tester.run_common_tests()
def test_model_forward(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_forward(*config_and_inputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ["input_values", "padding_mask", "bandwidth"]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
@unittest.skip(reason="The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="The EncodecModel is not transformers based, thus it does not have `inputs_embeds` logics")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(
reason="The EncodecModel is not transformers based, thus it does not have the usual `attention` logic"
)
def test_retain_grad_hidden_states_attentions(self):
pass
@unittest.skip(
reason="The EncodecModel is not transformers based, thus it does not have the usual `attention` logic"
)
def test_attention_outputs(self):
pass
def test_feed_forward_chunking(self):
(original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common()
# original_config.norm_type = "time_group_norm"
for model_class in self.all_model_classes:
torch.manual_seed(0)
config = copy.deepcopy(original_config)
config.chunk_length_s = None
config.overlap = None
config.sampling_rate = 20
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
inputs["input_values"] = inputs["input_values"].repeat(1, 1, 10)
hidden_states_no_chunk = model(**inputs)[1]
torch.manual_seed(0)
config.chunk_length_s = 2
config.overlap = 0
config.sampling_rate = 20
model = model_class(config)
model.to(torch_device)
model.eval()
hidden_states_with_chunk = model(**inputs)[1]
torch.testing.assert_close(hidden_states_no_chunk, hidden_states_with_chunk, rtol=1e-1, atol=1e-2)
@unittest.skip(
reason="The EncodecModel is not transformers based, thus it does not have the usual `hidden_states` logic"
)
def test_hidden_states_output(self):
pass
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_determinism(first, second):
# outputs are not tensors but list (since each sequence don't have the same frame_length)
out_1 = first.cpu().numpy()
out_2 = second.cpu().numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
first = model(**self._prepare_for_class(inputs_dict, model_class))[0]
second = model(**self._prepare_for_class(inputs_dict, model_class))[0]
if isinstance(first, tuple) and isinstance(second, tuple):
for tensor1, tensor2 in zip(first, second):
check_determinism(tensor1, tensor2)
else:
check_determinism(first, second)
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t):
t[t != t] = 0
return t
def assert_nested_tensors_close(a, b):
if isinstance(a, (tuple, list)) and isinstance(b, (tuple, list)):
assert len(a) == len(b), f"Length mismatch: {len(a)} vs {len(b)}"
for i, (x, y) in enumerate(zip(a, b)):
assert_nested_tensors_close(x, y)
elif torch.is_tensor(a) and torch.is_tensor(b):
a_clean = set_nan_tensor_to_zero(a)
b_clean = set_nan_tensor_to_zero(b)
assert torch.allclose(a_clean, b_clean, atol=1e-5), (
"Tuple and dict output are not equal. Difference:"
f" Max diff: {torch.max(torch.abs(a_clean - b_clean))}. "
f"Tuple has nan: {torch.isnan(a).any()} and inf: {torch.isinf(a)}. "
f"Dict has nan: {torch.isnan(b).any()} and inf: {torch.isinf(b)}."
)
else:
raise ValueError(f"Mismatch between {a} vs {b}")
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
with torch.no_grad():
tuple_output = model(**tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(**dict_inputs, return_dict=True, **additional_kwargs)
self.assertTrue(isinstance(tuple_output, tuple))
self.assertTrue(isinstance(dict_output, dict))
# cast dict_output.values() to list as it is a odict_values object
assert_nested_tensors_close(tuple_output, list(dict_output.values()))
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
def test_identity_shortcut(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs()
config.use_conv_shortcut = False
self.model_tester.create_and_check_model_forward(config, inputs_dict)
def test_model_forward_with_normalization(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_normalization()
self.model_tester.create_and_check_model_forward(config, inputs_dict)
def normalize(arr):
norm = np.linalg.norm(arr)
normalized_arr = arr / norm
return normalized_arr
def compute_rmse(arr1, arr2):
    """Root-mean-square error between two tensors after unit-norm scaling.

    Both tensors are moved to CPU, squeezed, truncated to the shorter last
    dimension, normalized to unit L2 norm, and compared.
    """
    common_len = min(arr1.shape[-1], arr2.shape[-1])
    a = arr1.cpu().numpy().squeeze()[..., :common_len]
    b = arr2.cpu().numpy().squeeze()[..., :common_len]
    diff = normalize(a) - normalize(b)
    return np.sqrt((diff**2).mean())
"""
Integration tests for the Encodec model.
Code for expected output can be found below:
- test_integration: https://gist.github.com/ebezzam/2a34e249e729881130d1f5a42229d31f#file-test_encodec-py
- test_batch: https://gist.github.com/ebezzam/2a34e249e729881130d1f5a42229d31f#file-test_encodec_batch-py
"""
# fmt: off
# outer key: model id on the Hugging Face Hub; inner key: target bandwidth
# -- test_integration
EXPECTED_ENCODER_CODES = {
"facebook/encodec_24khz": {
"1.5": torch.tensor([[[ 62, 835, 835, 835, 835, 835, 835, 835, 408, 408],
[1007, 1007, 1007, 544, 424, 424, 1007, 424, 302, 424]]]),
"3.0": torch.tensor(
[
[
[62, 835, 835, 835, 835, 835, 835, 835, 408, 408],
[1007, 1007, 1007, 544, 424, 424, 1007, 424, 302, 424],
[786, 678, 821, 786, 36, 36, 786, 212, 937, 937],
[741, 741, 741, 993, 741, 1018, 993, 919, 741, 741],
],
]
),
"6.0": torch.tensor(
[
[
[62, 835, 835, 835, 835, 835, 835, 835, 408, 408],
[1007, 1007, 1007, 544, 424, 424, 1007, 424, 302, 424],
[786, 678, 821, 786, 36, 36, 786, 212, 937, 937],
[741, 741, 741, 993, 741, 1018, 993, 919, 741, 741],
[528, 446, 198, 190, 446, 622, 646, 448, 646, 448],
[1011, 140, 185, 986, 683, 986, 435, 41, 140, 939],
[896, 772, 562, 772, 485, 528, 896, 853, 562, 772],
[899, 975, 468, 468, 468, 701, 1013, 828, 518, 899],
],
]
),
"12.0": torch.tensor(
[
[
[62, 835, 835, 835, 835, 835, 835, 835, 408, 408],
[1007, 1007, 1007, 544, 424, 424, 1007, 424, 302, 424],
[786, 678, 821, 786, 36, 36, 786, 212, 937, 937],
[741, 741, 741, 993, 741, 1018, 993, 919, 741, 741],
[528, 446, 198, 190, 446, 622, 646, 448, 646, 448],
[1011, 140, 185, 986, 683, 986, 435, 41, 140, 939],
[896, 772, 562, 772, 485, 528, 896, 853, 562, 772],
[899, 975, 468, 468, 468, 701, 1013, 828, 518, 899],
[827, 807, 938, 320, 699, 470, 909, 628, 301, 827],
[963, 801, 630, 477, 717, 354, 205, 359, 874, 744],
[1000, 1000, 388, 1000, 408, 740, 568, 364, 709, 843],
[413, 835, 382, 840, 742, 1019, 375, 962, 835, 742],
[971, 410, 998, 485, 798, 410, 351, 485, 485, 920],
[848, 694, 662, 784, 848, 427, 1022, 848, 920, 694],
[420, 911, 889, 911, 993, 776, 948, 477, 911, 911],
[587, 755, 834, 962, 860, 425, 982, 982, 425, 461],
],
]
),
"24.0": torch.tensor(
[
[
[62, 835, 835, 835, 835, 835, 835, 835, 408, 408],
[1007, 1007, 1007, 544, 424, 424, 1007, 424, 302, 424],
[786, 678, 821, 786, 36, 36, 786, 212, 937, 937],
[741, 741, 741, 993, 741, 1018, 993, 919, 741, 741],
[528, 446, 198, 190, 446, 622, 646, 448, 646, 448],
[1011, 140, 185, 986, 683, 986, 435, 41, 140, 939],
[896, 772, 562, 772, 485, 528, 896, 853, 562, 772],
[899, 975, 468, 468, 468, 701, 1013, 828, 518, 899],
[827, 807, 938, 320, 699, 470, 909, 628, 301, 827],
[963, 801, 630, 477, 717, 354, 205, 359, 874, 744],
[1000, 1000, 388, 1000, 408, 740, 568, 364, 709, 843],
[413, 835, 382, 840, 742, 1019, 375, 962, 835, 742],
[971, 410, 998, 485, 798, 410, 351, 485, 485, 920],
[848, 694, 662, 784, 848, 427, 1022, 848, 920, 694],
[420, 911, 889, 911, 993, 776, 948, 477, 911, 911],
[587, 755, 834, 962, 860, 425, 982, 982, 425, 461],
[270, 160, 26, 131, 597, 506, 670, 637, 248, 160],
[ 15, 215, 134, 69, 215, 155, 1012, 1009, 260, 417],
[580, 561, 686, 896, 497, 637, 580, 245, 896, 264],
[511, 239, 560, 691, 571, 627, 571, 571, 258, 619],
[591, 942, 591, 251, 250, 250, 857, 486, 295, 295],
[565, 546, 654, 301, 301, 623, 639, 568, 565, 282],
[539, 317, 639, 539, 651, 539, 538, 640, 615, 615],
[637, 556, 637, 582, 640, 515, 515, 632, 254, 613],
[305, 643, 500, 550, 522, 500, 550, 561, 522, 305],
[954, 456, 584, 755, 505, 782, 661, 671, 497, 505],
[577, 464, 637, 647, 552, 552, 624, 647, 624, 647],
[728, 748, 931, 608, 538, 1015, 294, 294, 666, 538],
[602, 535, 666, 665, 655, 979, 574, 535, 571, 781],
[321, 620, 557, 566, 511, 910, 672, 623, 853, 674],
[621, 556, 947, 474, 610, 752, 1002, 597, 474, 474],
[605, 948, 657, 588, 485, 633, 459, 968, 939, 325],
],
]
),
},
"facebook/encodec_48khz": {
"3.0": torch.tensor([[[214, 214, 214, 214, 214, 118, 214, 214, 214, 214],
[989, 989, 611, 77, 77, 989, 976, 976, 976, 77]]]),
"6.0": torch.tensor([[[ 214, 214, 214, 214, 214, 118, 214, 214, 214, 214],
[ 989, 989, 611, 77, 77, 989, 976, 976, 976, 77],
[ 977, 1009, 538, 925, 925, 977, 1022, 1022, 1022, 925],
[ 376, 1012, 1023, 725, 725, 1023, 376, 962, 376, 847]]]),
"12.0": torch.tensor([[[ 214, 214, 214, 214, 214, 118, 214, 214, 214, 214],
[ 989, 989, 611, 77, 77, 989, 976, 976, 976, 77],
[ 977, 1009, 538, 925, 925, 977, 1022, 1022, 1022, 925],
[ 376, 1012, 1023, 725, 725, 1023, 376, 962, 376, 847],
[ 979, 1012, 323, 695, 1018, 1023, 979, 1023, 979, 650],
[ 945, 762, 528, 865, 824, 945, 945, 945, 957, 957],
[ 904, 973, 1014, 681, 582, 1014, 1014, 1014, 1014, 681],
[ 229, 392, 796, 392, 977, 1017, 250, 1017, 250, 1017]]]),
"24.0": torch.tensor([[[ 214, 214, 214, 214, 214, 118, 214, 214, 214, 214],
[ 989, 989, 611, 77, 77, 989, 976, 976, 976, 77],
[ 977, 1009, 538, 925, 925, 977, 1022, 1022, 1022, 925],
[ 376, 1012, 1023, 725, 725, 1023, 376, 962, 376, 847],
[ 979, 1012, 323, 695, 1018, 1023, 979, 1023, 979, 650],
[ 945, 762, 528, 865, 824, 945, 945, 945, 957, 957],
[ 904, 973, 1014, 681, 582, 1014, 1014, 1014, 1014, 681],
[ 229, 392, 796, 392, 977, 1017, 250, 1017, 250, 1017],
[ 902, 436, 935, 1011, 1023, 1023, 1023, 154, 1023, 392],
[ 982, 878, 961, 832, 629, 431, 919, 629, 919, 792],
[ 727, 727, 401, 727, 979, 587, 727, 487, 413, 201],
[ 928, 924, 965, 934, 840, 480, 924, 920, 924, 486],
[ 10, 625, 712, 552, 712, 259, 394, 131, 726, 516],
[ 882, 1022, 32, 524, 267, 861, 974, 882, 108, 521],
[ 304, 841, 306, 415, 69, 376, 928, 510, 381, 104],
[ 0, 0, 0, 484, 83, 0, 307, 262, 0, 0]]])
}
}
# Expected encoder scale outputs per model/bandwidth. The 24 kHz entries are
# all None (no scales returned); the 48 kHz entries are identical across
# bandwidths — presumably one scale per encoded chunk; verify against the model.
EXPECTED_ENCODER_SCALES = {
    "facebook/encodec_24khz": {
        "1.5": None,
        "3.0": None,
        "6.0": None,
        "12.0": None,
        "24.0": None
    },
    "facebook/encodec_48khz": {
        "3.0": torch.tensor([5.365404e-02, 8.153407e-02, 6.266369e-02, 6.688326e-02, 5.458422e-02,
                             4.483359e-02, 1.000000e-08]),
        "6.0": torch.tensor([5.365404e-02, 8.153407e-02, 6.266369e-02, 6.688326e-02, 5.458422e-02,
                             4.483359e-02, 1.000000e-08]),
        "12.0": torch.tensor([5.365404e-02, 8.153407e-02, 6.266369e-02, 6.688326e-02, 5.458422e-02,
                              4.483359e-02, 1.000000e-08]),
        "24.0": torch.tensor([5.365404e-02, 8.153407e-02, 6.266369e-02, 6.688326e-02, 5.458422e-02,
                              4.483359e-02, 1.000000e-08])
    }
}
EXPECTED_DECODER_OUTPUTS = {
"facebook/encodec_24khz": {
"1.5": torch.tensor(
[[ 0.0003, -0.0002, -0.0000, -0.0004, 0.0004, 0.0003, -0.0000, 0.0001, 0.0005, 0.0001, -0.0015, -0.0007, -0.0002, -0.0018, -0.0003, 0.0013, 0.0011, 0.0008, 0.0008, 0.0008, 0.0008, 0.0002, -0.0003, -0.0004, -0.0006, -0.0009, -0.0010, -0.0012, -0.0011, -0.0006, -0.0006, -0.0005, 0.0000, 0.0001, 0.0003, 0.0002, -0.0001, -0.0002, -0.0008, -0.0012, -0.0011, -0.0012, -0.0013, -0.0003, 0.0002, 0.0006, 0.0006, 0.0006, 0.0009, 0.0010]]
),
"3.0": torch.tensor(
[[ 0.0003, -0.0002, -0.0000, -0.0004, 0.0004, 0.0003, -0.0000, 0.0001, 0.0006, 0.0002, -0.0015, -0.0008, -0.0002, -0.0018, -0.0003, 0.0013, 0.0011, 0.0008, 0.0008, 0.0008, 0.0008, 0.0002, -0.0003, -0.0004, -0.0005, -0.0008, -0.0010, -0.0012, -0.0011, -0.0006, -0.0006, -0.0005, -0.0000, 0.0001, 0.0003, 0.0002, -0.0001, -0.0002, -0.0008, -0.0013, -0.0011, -0.0013, -0.0014, -0.0003, 0.0002, 0.0006, 0.0006, 0.0006, 0.0009, 0.0010]]
),
"6.0": torch.tensor(
[[ 0.0004, -0.0001, 0.0001, -0.0003, 0.0004, 0.0003, 0.0000, 0.0001, 0.0007, 0.0002, -0.0013, -0.0007, -0.0002, -0.0015, -0.0001, 0.0014, 0.0014, 0.0011, 0.0010, 0.0010, 0.0009, 0.0004, 0.0000, 0.0000, 0.0000, -0.0000, -0.0001, -0.0004, -0.0004, -0.0001, -0.0002, -0.0002, 0.0002, 0.0005, 0.0009, 0.0010, 0.0008, 0.0007, 0.0002, -0.0003, -0.0004, -0.0008, -0.0008, 0.0000, 0.0006, 0.0010, 0.0012, 0.0012, 0.0013, 0.0014]]
),
"12.0": torch.tensor(
[[ 0.0004, -0.0001, 0.0001, -0.0004, 0.0003, 0.0002, -0.0000, 0.0001, 0.0006, 0.0002, -0.0013, -0.0006, -0.0001, -0.0014, 0.0001, 0.0018, 0.0018, 0.0014, 0.0012, 0.0013, 0.0011, 0.0006, 0.0000, 0.0000, -0.0000, -0.0001, -0.0001, -0.0004, -0.0004, -0.0000, -0.0000, -0.0000, 0.0005, 0.0007, 0.0011, 0.0011, 0.0009, 0.0007, 0.0002, -0.0003, -0.0004, -0.0007, -0.0007, 0.0002, 0.0009, 0.0013, 0.0015, 0.0014, 0.0015, 0.0016]]
),
"24.0": torch.tensor(
[[ 0.0005, 0.0001, 0.0004, -0.0001, 0.0003, 0.0002, 0.0000, 0.0001, 0.0007, 0.0005, -0.0011, -0.0005, -0.0001, -0.0018, -0.0000, 0.0021, 0.0019, 0.0013, 0.0011, 0.0012, 0.0012, 0.0006, -0.0000, -0.0001, -0.0000, -0.0000, -0.0001, -0.0004, -0.0004, -0.0000, -0.0001, -0.0002, 0.0003, 0.0004, 0.0008, 0.0007, 0.0006, 0.0007, 0.0001, -0.0004, -0.0003, -0.0006, -0.0008, 0.0004, 0.0011, 0.0015, 0.0016, 0.0015, 0.0016, 0.0018]]
)
},
"facebook/encodec_48khz": {
"3.0": torch.tensor(
[
[0.0034, 0.0028, 0.0037, 0.0041, 0.0029, 0.0022, 0.0021, 0.0020, 0.0021, 0.0023, 0.0021, 0.0018, 0.0019, 0.0020, 0.0020, 0.0020, 0.0021, 0.0023, 0.0025, 0.0022, 0.0017, 0.0015, 0.0017, 0.0020, 0.0024, 0.0031, 0.0039, 0.0045, 0.0046, 0.0042, 0.0034, 0.0027, 0.0023, 0.0022, 0.0023, 0.0024, 0.0022, 0.0023, 0.0024, 0.0027, 0.0027, 0.0027, 0.0025, 0.0024, 0.0024, 0.0026, 0.0028, 0.0027, 0.0024, 0.0022],
[ -0.0031, -0.0027, -0.0018, -0.0017, -0.0024, -0.0029, -0.0030, -0.0026, -0.0021, -0.0018, -0.0018, -0.0019, -0.0017, -0.0014, -0.0012, -0.0010, -0.0008, -0.0004, -0.0001, -0.0004, -0.0012, -0.0015, -0.0014, -0.0013, -0.0011, -0.0005, 0.0002, 0.0007, 0.0008, 0.0004, -0.0003, -0.0010, -0.0012, -0.0011, -0.0009, -0.0009, -0.0009, -0.0008, -0.0006, -0.0005, -0.0005, -0.0005, -0.0006, -0.0008, -0.0008, -0.0006, -0.0005, -0.0007, -0.0010, -0.0012],
]
),
"6.0": torch.tensor(
[
[0.0052, 0.0049, 0.0057, 0.0058, 0.0048, 0.0043, 0.0042, 0.0041, 0.0041, 0.0042, 0.0040, 0.0038, 0.0038, 0.0038, 0.0037, 0.0037, 0.0037, 0.0037, 0.0038, 0.0037, 0.0035, 0.0034, 0.0036, 0.0039, 0.0043, 0.0047, 0.0053, 0.0057, 0.0057, 0.0055, 0.0050, 0.0046, 0.0043, 0.0041, 0.0042, 0.0042, 0.0041, 0.0041, 0.0042, 0.0043, 0.0043, 0.0043, 0.0041, 0.0040, 0.0040, 0.0041, 0.0042, 0.0042, 0.0040, 0.0039],
[ 0.0001, 0.0006, 0.0013, 0.0011, 0.0005, 0.0001, -0.0001, 0.0001, 0.0003, 0.0005, 0.0005, 0.0005, 0.0006, 0.0007, 0.0008, 0.0009, 0.0010, 0.0013, 0.0015, 0.0014, 0.0010, 0.0008, 0.0010, 0.0012, 0.0015, 0.0019, 0.0023, 0.0026, 0.0026, 0.0024, 0.0020, 0.0016, 0.0013, 0.0013, 0.0014, 0.0015, 0.0015, 0.0016, 0.0017, 0.0017, 0.0017, 0.0016, 0.0015, 0.0013, 0.0013, 0.0013, 0.0013, 0.0012, 0.0010, 0.0009],
]
),
"12.0": torch.tensor(
[
[0.0014, 0.0012, 0.0021, 0.0024, 0.0017, 0.0013, 0.0012, 0.0011, 0.0011, 0.0012, 0.0011, 0.0010, 0.0009, 0.0009, 0.0008, 0.0008, 0.0009, 0.0010, 0.0012, 0.0012, 0.0009, 0.0008, 0.0010, 0.0013, 0.0017, 0.0024, 0.0031, 0.0036, 0.0036, 0.0033, 0.0028, 0.0023, 0.0020, 0.0020, 0.0022, 0.0022, 0.0022, 0.0022, 0.0023, 0.0024, 0.0024, 0.0023, 0.0021, 0.0021, 0.0021, 0.0023, 0.0024, 0.0024, 0.0022, 0.0021],
[ -0.0034, -0.0029, -0.0020, -0.0020, -0.0024, -0.0027, -0.0030, -0.0030, -0.0028, -0.0025, -0.0025, -0.0025, -0.0025, -0.0025, -0.0023, -0.0022, -0.0020, -0.0017, -0.0013, -0.0014, -0.0017, -0.0019, -0.0018, -0.0015, -0.0011, -0.0006, 0.0000, 0.0005, 0.0005, 0.0002, -0.0003, -0.0008, -0.0010, -0.0009, -0.0007, -0.0006, -0.0006, -0.0005, -0.0005, -0.0005, -0.0005, -0.0007, -0.0008, -0.0009, -0.0009, -0.0008, -0.0007, -0.0008, -0.0010, -0.0011],
]
),
"24.0": torch.tensor(
[
[ 0.0010, 0.0008, 0.0018, 0.0021, 0.0014, 0.0011, 0.0009, 0.0007, 0.0006, 0.0006, 0.0005, 0.0003, 0.0003, 0.0002, 0.0001, 0.0001, 0.0001, 0.0002, 0.0002, 0.0001, -0.0002, -0.0004, -0.0003, 0.0000, 0.0005, 0.0011, 0.0018, 0.0022, 0.0022, 0.0018, 0.0012, 0.0007, 0.0004, 0.0003, 0.0004, 0.0006, 0.0006, 0.0007, 0.0007, 0.0009, 0.0008, 0.0007, 0.0005, 0.0004, 0.0004, 0.0006, 0.0007, 0.0007, 0.0005, 0.0004],
[-0.0039, -0.0035, -0.0027, -0.0026, -0.0028, -0.0031, -0.0035, -0.0035, -0.0034, -0.0033, -0.0032, -0.0032, -0.0031, -0.0031, -0.0029, -0.0028, -0.0026, -0.0024, -0.0021, -0.0021, -0.0024, -0.0025, -0.0024, -0.0021, -0.0017, -0.0011, -0.0006, -0.0002, -0.0002, -0.0004, -0.0009, -0.0013, -0.0015, -0.0015, -0.0014, -0.0013, -0.0012, -0.0011, -0.0010, -0.0010, -0.0011, -0.0012, -0.0014, -0.0015, -0.0015, -0.0014, -0.0013, -0.0014, -0.0016, -0.0017],
]
)
}
}
# Expected end-to-end codec reconstruction error per model/bandwidth.
# Error decreases as bandwidth increases, as the values below show.
EXPECTED_CODEC_ERROR = {
    "facebook/encodec_24khz": {
        "1.5": 0.0022229827009141445,
        "3.0": 0.001862662611529231,
        "6.0": 0.0015231302240863442,
        "12.0": 0.0013,
        "24.0": 0.0012,
    },
    "facebook/encodec_48khz": {
        "3.0": 0.000840399123262614,
        "6.0": 0.0006692984024994075,
        "12.0": 0.0005328940460458398,
        "24.0": 0.0004473362350836396,
    }
}
# -- test_batch
EXPECTED_ENCODER_CODES_BATCH = {
"facebook/encodec_24khz": {
"1.5": torch.tensor(
[
[
[62, 106, 475, 475, 404, 404, 475, 404, 404, 475, 475, 404, 475, 475, 475, 835, 475, 475, 835, 835,
106, 106, 738, 106, 738, 106, 408, 408, 738, 408, 408, 408, 738, 408, 408, 408, 408, 738, 408,
1017, 604, 64, 303, 394, 5, 570, 991, 570, 969, 814],
[424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007, 729, 1007, 961, 1007, 1007, 961, 969, 1007,
1007, 424, 518, 1007, 544, 1007, 518, 913, 424, 424, 544, 424, 518, 518, 518, 302, 424, 424, 424,
544, 424, 114, 200, 787, 931, 343, 434, 315, 487, 872, 769, 463],
],
[
[835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 835, 408, 835, 738, 408, 408, 408, 408, 408,
408, 738, 408, 408, 408, 408, 408, 408, 408, 408, 738, 408, 408, 408, 408, 408, 408, 408, 408, 408,
339, 834, 819, 875, 957, 670, 811, 670, 237, 53],
[857, 857, 544, 518, 937, 518, 913, 913, 518, 913, 518, 913, 518, 518, 544, 424, 424, 518, 424, 424,
424, 544, 424, 424, 424, 518, 424, 518, 518, 937, 544, 424, 518, 302, 518, 424, 424, 518, 424, 424,
913, 857, 841, 363, 463, 78, 176, 645, 255, 571],
],
]
),
"3.0": torch.tensor(
[
[
[62, 106, 475, 475, 404, 404, 475, 404, 404, 475],
[424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007],
[212, 832, 212, 36, 36, 36, 767, 653, 982, 1016],
[956, 741, 838, 1019, 739, 780, 838, 1019, 1014, 1019],
],
[
[835, 835, 835, 835, 835, 835, 835, 835, 835, 835],
[857, 857, 544, 518, 937, 518, 913, 913, 518, 913],
[705, 989, 934, 989, 678, 934, 934, 786, 934, 786],
[366, 1018, 398, 398, 398, 398, 673, 741, 398, 741],
],
]
),
"6.0": torch.tensor(
[
[
[62, 106, 475, 475, 404, 404, 475, 404, 404, 475],
[424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007],
[212, 832, 212, 36, 36, 36, 767, 653, 982, 1016],
[956, 741, 838, 1019, 739, 780, 838, 1019, 1014, 1019],
[712, 862, 712, 448, 528, 646, 446, 373, 694, 373],
[939, 881, 939, 19, 334, 881, 1005, 763, 632, 781],
[853, 464, 772, 782, 782, 983, 890, 874, 983, 782],
[899, 475, 173, 701, 701, 947, 468, 1019, 882, 518],
],
[
[835, 835, 835, 835, 835, 835, 835, 835, 835, 835],
[857, 857, 544, 518, 937, 518, 913, 913, 518, 913],
[705, 989, 934, 989, 678, 934, 934, 786, 934, 786],
[366, 1018, 398, 398, 398, 398, 673, 741, 398, 741],
[373, 373, 375, 373, 373, 222, 862, 373, 190, 373],
[293, 949, 435, 435, 435, 293, 949, 881, 632, 986],
[800, 528, 528, 853, 782, 485, 772, 900, 528, 853],
[916, 237, 828, 701, 518, 835, 948, 315, 948, 315],
],
]
),
"12.0": torch.tensor(
[
[
[62, 106, 475, 475, 404, 404, 475, 404, 404, 475],
[424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007],
[212, 832, 212, 36, 36, 36, 767, 653, 982, 1016],
[956, 741, 838, 1019, 739, 780, 838, 1019, 1014, 1019],
[712, 862, 712, 448, 528, 646, 446, 373, 694, 373],
[939, 881, 939, 19, 334, 881, 1005, 763, 632, 781],
[853, 464, 772, 782, 782, 983, 890, 874, 983, 782],
[899, 475, 173, 701, 701, 947, 468, 1019, 882, 518],
[817, 470, 588, 675, 675, 588, 960, 927, 909, 466],
[953, 776, 717, 630, 359, 717, 861, 630, 861, 359],
[623, 740, 1000, 388, 420, 388, 740, 818, 958, 743],
[413, 835, 742, 249, 892, 352, 190, 498, 866, 890],
[817, 351, 804, 751, 938, 535, 434, 879, 351, 971],
[792, 495, 935, 848, 792, 795, 942, 935, 723, 531],
[622, 681, 477, 713, 752, 871, 713, 514, 993, 777],
[928, 799, 962, 1005, 860, 439, 312, 922, 982, 922],
],
[
[835, 835, 835, 835, 835, 835, 835, 835, 835, 835],
[857, 857, 544, 518, 937, 518, 913, 913, 518, 913],
[705, 989, 934, 989, 678, 934, 934, 786, 934, 786],
[366, 1018, 398, 398, 398, 398, 673, 741, 398, 741],
[373, 373, 375, 373, 373, 222, 862, 373, 190, 373],
[293, 949, 435, 435, 435, 293, 949, 881, 632, 986],
[800, 528, 528, 853, 782, 485, 772, 900, 528, 853],
[916, 237, 828, 701, 518, 835, 948, 315, 948, 315],
[420, 628, 918, 628, 628, 628, 248, 628, 909, 811],
[736, 717, 994, 974, 477, 874, 963, 979, 355, 979],
[1002, 1002, 894, 875, 388, 709, 534, 408, 881, 709],
[735, 828, 763, 742, 640, 835, 828, 375, 840, 375],
[898, 938, 556, 658, 410, 951, 486, 658, 877, 877],
[ 0, 797, 428, 694, 428, 920, 1022, 1022, 809, 797],
[622, 421, 422, 776, 911, 911, 958, 421, 776, 421],
[1005, 312, 922, 755, 834, 461, 461, 702, 597, 974],
],
]
),
"24.0": torch.tensor(
[
[
[62, 106, 475, 475, 404, 404, 475, 404, 404, 475],
[424, 969, 913, 1007, 544, 1007, 1007, 1007, 969, 1007],
[212, 832, 212, 36, 36, 36, 767, 653, 982, 1016],
[956, 741, 838, 1019, 739, 780, 838, 1019, 1014, 1019],
[712, 862, 712, 448, 528, 646, 446, 373, 694, 373],
[939, 881, 939, 19, 334, 881, 1005, 763, 632, 781],
[853, 464, 772, 782, 782, 983, 890, 874, 983, 782],
[899, 475, 173, 701, 701, 947, 468, 1019, 882, 518],
[817, 470, 588, 675, 675, 588, 960, 927, 909, 466],
[953, 776, 717, 630, 359, 717, 861, 630, 861, 359],
[623, 740, 1000, 388, 420, 388, 740, 818, 958, 743],
[413, 835, 742, 249, 892, 352, 190, 498, 866, 890],
[817, 351, 804, 751, 938, 535, 434, 879, 351, 971],
[792, 495, 935, 848, 792, 795, 942, 935, 723, 531],
[622, 681, 477, 713, 752, 871, 713, 514, 993, 777],
[928, 799, 962, 1005, 860, 439, 312, 922, 982, 922],
[939, 637, 861, 506, 861, 61, 475, 264, 1019, 260],
[166, 215, 69, 69, 890, 69, 284, 828, 396, 180],
[561, 896, 841, 144, 580, 659, 886, 514, 686, 451],
[691, 691, 239, 735, 62, 287, 383, 972, 550, 505],
[451, 811, 238, 251, 250, 841, 734, 329, 551, 846],
[313, 601, 494, 763, 811, 565, 748, 441, 601, 480],
[653, 242, 630, 572, 701, 973, 632, 374, 561, 521],
[984, 987, 419, 454, 386, 507, 532, 636, 515, 671],
[647, 550, 515, 292, 876, 1011, 719, 549, 691, 911],
[683, 536, 656, 603, 698, 867, 987, 857, 886, 491],
[444, 937, 826, 555, 585, 710, 466, 852, 655, 591],
[658, 952, 903, 508, 739, 596, 420, 721, 464, 306],
[665, 334, 765, 532, 618, 278, 836, 838, 517, 597],
[613, 674, 596, 904, 987, 977, 938, 615, 672, 776],
[689, 386, 749, 658, 250, 869, 957, 806, 750, 659],
[652, 509, 910, 826, 566, 622, 951, 696, 900, 895],
],
[
[835, 835, 835, 835, 835, 835, 835, 835, 835, 835],
[857, 857, 544, 518, 937, 518, 913, 913, 518, 913],
[705, 989, 934, 989, 678, 934, 934, 786, 934, 786],
[366, 1018, 398, 398, 398, 398, 673, 741, 398, 741],
[373, 373, 375, 373, 373, 222, 862, 373, 190, 373],
[293, 949, 435, 435, 435, 293, 949, 881, 632, 986],
[800, 528, 528, 853, 782, 485, 772, 900, 528, 853],
[916, 237, 828, 701, 518, 835, 948, 315, 948, 315],
[420, 628, 918, 628, 628, 628, 248, 628, 909, 811],
[736, 717, 994, 974, 477, 874, 963, 979, 355, 979],
[1002, 1002, 894, 875, 388, 709, 534, 408, 881, 709],
[735, 828, 763, 742, 640, 835, 828, 375, 840, 375],
[898, 938, 556, 658, 410, 951, 486, 658, 877, 877],
[ 0, 797, 428, 694, 428, 920, 1022, 1022, 809, 797],
[622, 421, 422, 776, 911, 911, 958, 421, 776, 421],
[1005, 312, 922, 755, 834, 461, 461, 702, 597, 974],
[248, 248, 637, 248, 977, 506, 546, 270, 670, 506],
[547, 447, 15, 134, 1009, 215, 134, 396, 260, 160],
[635, 497, 686, 765, 264, 497, 244, 675, 624, 656],
[864, 571, 616, 511, 588, 781, 525, 258, 674, 503],
[449, 757, 857, 451, 658, 486, 299, 299, 251, 596],
[809, 628, 255, 568, 623, 301, 639, 546, 617, 623],
[551, 497, 908, 539, 661, 710, 640, 539, 646, 315],
[689, 507, 875, 515, 613, 637, 527, 515, 662, 637],
[983, 686, 456, 768, 601, 561, 768, 653, 500, 688],
[493, 566, 664, 782, 683, 683, 721, 603, 323, 497],
[1015, 552, 411, 423, 607, 646, 687, 1018, 689, 607],
[516, 293, 471, 294, 293, 294, 608, 538, 803, 717],
[974, 994, 952, 637, 637, 927, 535, 571, 602, 535],
[776, 789, 476, 944, 652, 959, 589, 679, 321, 623],
[776, 931, 720, 1009, 676, 731, 386, 676, 701, 676],
[684, 543, 716, 392, 661, 517, 792, 588, 922, 676],
],
]
)
},
"facebook/encodec_48khz": {
"3.0": torch.tensor([[[790, 790, 790, 214, 214, 214, 799, 214, 214, 214],
[989, 989, 77, 546, 989, 546, 989, 160, 546, 989]],
[[214, 214, 214, 214, 214, 214, 214, 214, 214, 214],
[289, 289, 989, 764, 289, 289, 882, 882, 882, 882]]]),
"6.0": torch.tensor([[[ 790, 790, 790, 214, 214, 214, 799, 214, 214, 214],
[ 989, 989, 77, 546, 989, 546, 989, 160, 546, 989],
[ 977, 977, 977, 977, 538, 977, 977, 960, 977, 977],
[ 376, 376, 962, 962, 607, 962, 963, 896, 962, 376]],
[[ 214, 214, 214, 214, 214, 214, 214, 214, 214, 214],
[ 289, 289, 989, 764, 289, 289, 882, 882, 882, 882],
[1022, 1022, 471, 925, 821, 821, 267, 925, 925, 267],
[ 979, 992, 914, 921, 0, 0, 1023, 963, 963, 1023]]]),
"12.0": torch.tensor([[[ 790, 790, 790, 214, 214, 214, 799, 214, 214, 214],
[ 989, 989, 77, 546, 989, 546, 989, 160, 546, 989],
[ 977, 977, 977, 977, 538, 977, 977, 960, 977, 977],
[ 376, 376, 962, 962, 607, 962, 963, 896, 962, 376],
[ 979, 979, 979, 1012, 979, 1012, 921, 0, 1002, 695],
[ 824, 1018, 762, 957, 824, 762, 762, 1007, 957, 336],
[ 681, 973, 973, 452, 211, 681, 802, 679, 547, 884],
[ 950, 1017, 1016, 1017, 986, 1017, 229, 607, 1017, 689]],
[[ 214, 214, 214, 214, 214, 214, 214, 214, 214, 214],
[ 289, 289, 989, 764, 289, 289, 882, 882, 882, 882],
[1022, 1022, 471, 925, 821, 821, 267, 925, 925, 267],
[ 979, 992, 914, 921, 0, 0, 1023, 963, 963, 1023],
[ 403, 940, 976, 1018, 677, 1002, 979, 677, 677, 677],
[1018, 794, 762, 444, 485, 485, 974, 548, 548, 1018],
[ 679, 243, 679, 1005, 1005, 973, 1014, 1005, 1005, 1014],
[ 810, 13, 1017, 537, 522, 702, 202, 1017, 1017, 15]]]),
"24.0": torch.tensor(
[
[
[790, 790, 790, 214, 214, 214, 799, 214, 214, 214],
[989, 989, 77, 546, 989, 546, 989, 160, 546, 989],
[977, 977, 977, 977, 538, 977, 977, 960, 977, 977],
[376, 376, 962, 962, 607, 962, 963, 896, 962, 376],
[979, 979, 979, 1012, 979, 1012, 921, 0, 1002, 695],
[824, 1018, 762, 957, 824, 762, 762, 1007, 957, 336],
[681, 973, 973, 452, 211, 681, 802, 679, 547, 884],
[950, 1017, 1016, 1017, 986, 1017, 229, 607, 1017, 689],
[1004, 1011, 669, 1023, 1023, 1023, 905, 297, 810, 970],
[982, 681, 982, 629, 662, 919, 878, 476, 629, 982],
[727, 727, 959, 959, 979, 959, 530, 959, 337, 961],
[924, 456, 924, 486, 924, 959, 102, 924, 805, 924],
[649, 542, 993, 993, 949, 787, 56, 886, 949, 405],
[864, 1022, 1022, 1022, 460, 753, 805, 309, 1022, 32],
[953, 0, 0, 180, 352, 10, 581, 516, 322, 452],
[300, 0, 1020, 307, 0, 543, 924, 627, 258, 262],
],
[
[214, 214, 214, 214, 214, 214, 214, 214, 214, 214],
[289, 289, 989, 764, 289, 289, 882, 882, 882, 882],
[1022, 1022, 471, 925, 821, 821, 267, 925, 925, 267],
[979, 992, 914, 921, 0, 0, 1023, 963, 963, 1023],
[403, 940, 976, 1018, 677, 1002, 979, 677, 677, 677],
[1018, 794, 762, 444, 485, 485, 974, 548, 548, 1018],
[679, 243, 679, 1005, 1005, 973, 1014, 1005, 1005, 1014],
[810, 13, 1017, 537, 522, 702, 202, 1017, 1017, 15],
[728, 252, 970, 984, 971, 950, 673, 902, 1011, 810],
[332, 1014, 476, 854, 1014, 861, 332, 411, 411, 408],
[959, 727, 611, 979, 611, 727, 999, 497, 821, 0],
[995, 698, 924, 688, 102, 510, 924, 970, 344, 961],
[ 81, 516, 847, 924, 10, 240, 1005, 726, 993, 378],
[467, 496, 484, 496, 456, 1022, 337, 600, 456, 1022],
[789, 65, 937, 976, 159, 953, 343, 764, 179, 159],
[ 10, 790, 483, 10, 1020, 352, 848, 333, 83, 848],
],
]
)
}
}
EXPECTED_ENCODER_SCALES_BATCH = {
"facebook/encodec_24khz": {
"1.5": None,
"3.0": None,
"6.0": None,
"12.0": None,
"24.0": None
},
"facebook/encodec_48khz": {
"3.0": torch.tensor([[[1.027247e-01],
[7.877284e-02]],
[[1.014922e-01],
[8.696266e-02]],
[[6.308002e-02],
[7.748771e-02]],
[[6.899278e-02],
[1.045912e-01]],
[[6.440169e-02],
[8.843135e-02]],
[[4.139878e-02],
[1.000000e-08]],
[[5.848629e-02],
[1.000000e-08]],
[[2.329416e-04],
[1.000000e-08]],
[[1.000000e-08],
[1.000000e-08]]]),
"6.0": torch.tensor([[[1.027247e-01],
[7.877284e-02]],
[[1.014922e-01],
[8.696266e-02]],
[[6.308002e-02],
[7.748771e-02]],
[[6.899278e-02],
[1.045912e-01]],
[[6.440169e-02],
[8.843135e-02]],
[[4.139878e-02],
[1.000000e-08]],
[[5.848629e-02],
[1.000000e-08]],
[[2.329416e-04],
[1.000000e-08]],
[[1.000000e-08],
[1.000000e-08]]]),
"12.0": torch.tensor([[[1.027247e-01],
[7.877284e-02]],
[[1.014922e-01],
[8.696266e-02]],
[[6.308002e-02],
[7.748771e-02]],
[[6.899278e-02],
[1.045912e-01]],
[[6.440169e-02],
[8.843135e-02]],
[[4.139878e-02],
[1.000000e-08]],
[[5.848629e-02],
[1.000000e-08]],
[[2.329416e-04],
[1.000000e-08]],
[[1.000000e-08],
[1.000000e-08]]]),
"24.0": torch.tensor([[[1.027247e-01],
[7.877284e-02]],
[[1.014922e-01],
[8.696266e-02]],
[[6.308002e-02],
[7.748771e-02]],
[[6.899278e-02],
[1.045912e-01]],
[[6.440169e-02],
[8.843135e-02]],
[[4.139878e-02],
[1.000000e-08]],
[[5.848629e-02],
[1.000000e-08]],
[[2.329416e-04],
[1.000000e-08]],
[[1.000000e-08],
[1.000000e-08]]])
}
}
EXPECTED_DECODER_OUTPUTS_BATCH = {
"facebook/encodec_24khz": {
"1.5": torch.tensor(
[
[[ 0.0010, 0.0004, 0.0005, 0.0002, 0.0005, -0.0001, -0.0003, -0.0001, 0.0003, 0.0001, -0.0014, -0.0009, -0.0007, -0.0023, -0.0009, 0.0008, 0.0007, 0.0003, 0.0001, 0.0001, 0.0003, -0.0001, -0.0003, -0.0004, -0.0005, -0.0007, -0.0009, -0.0011, -0.0010, -0.0006, -0.0007, -0.0007, -0.0005, -0.0005, -0.0003, -0.0002, -0.0002, -0.0001, -0.0005, -0.0008, -0.0005, -0.0007, -0.0009, -0.0002, 0.0003, 0.0005, 0.0004, 0.0001, 0.0003, 0.0004]],
[[ -0.0001, -0.0000, 0.0003, 0.0001, 0.0005, 0.0001, -0.0006, -0.0002, 0.0002, 0.0002, -0.0031, -0.0004, 0.0006, -0.0066, -0.0032, 0.0044, 0.0025, -0.0019, -0.0017, 0.0001, 0.0019, -0.0010, -0.0014, -0.0009, -0.0007, -0.0009, -0.0019, -0.0024, -0.0019, -0.0001, -0.0017, -0.0022, -0.0004, 0.0005, -0.0014, -0.0023, 0.0002, 0.0015, -0.0022, -0.0033, 0.0024, 0.0009, -0.0041, 0.0000, 0.0030, 0.0020, -0.0015, -0.0018, 0.0014, 0.0007]],
]
),
"3.0": torch.tensor(
[
[[ 0.0013, 0.0007, 0.0009, 0.0005, 0.0006, 0.0002, -0.0001, 0.0000, 0.0005, 0.0003, -0.0012, -0.0006, -0.0003, -0.0019, -0.0003, 0.0015, 0.0013, 0.0009, 0.0008, 0.0007, 0.0008, 0.0004, 0.0001, -0.0000, -0.0001, -0.0002, -0.0003, -0.0004, -0.0004, 0.0001, -0.0000, -0.0000, 0.0003, 0.0003, 0.0005, 0.0005, 0.0004, 0.0005, 0.0001, -0.0003, -0.0002, -0.0004, -0.0006, 0.0003, 0.0009, 0.0012, 0.0013, 0.0012, 0.0014, 0.0015]],
[[ 0.0000, -0.0003, 0.0005, 0.0004, 0.0011, 0.0013, 0.0002, 0.0005, 0.0002, 0.0006, -0.0025, -0.0005, 0.0004, -0.0069, -0.0027, 0.0038, 0.0013, -0.0015, -0.0005, 0.0003, 0.0014, -0.0006, -0.0002, -0.0010, -0.0008, -0.0001, -0.0006, -0.0012, -0.0016, 0.0010, 0.0001, -0.0010, -0.0002, 0.0013, -0.0002, -0.0017, 0.0005, 0.0019, -0.0019, -0.0035, 0.0022, -0.0001, -0.0040, 0.0012, 0.0015, 0.0012, 0.0001, -0.0010, 0.0005, 0.0004]],
]
),
"6.0": torch.tensor(
[
[[ 0.0010, 0.0005, 0.0007, 0.0001, 0.0003, -0.0000, -0.0002, -0.0001, 0.0003, 0.0001, -0.0014, -0.0007, -0.0004, -0.0019, -0.0004, 0.0013, 0.0012, 0.0008, 0.0007, 0.0007, 0.0008, 0.0003, 0.0001, 0.0001, -0.0000, -0.0001, -0.0001, -0.0002, -0.0001, 0.0002, 0.0002, 0.0001, 0.0005, 0.0005, 0.0008, 0.0008, 0.0007, 0.0008, 0.0004, 0.0001, 0.0002, -0.0001, -0.0002, 0.0006, 0.0012, 0.0015, 0.0016, 0.0014, 0.0016, 0.0017]],
[[ -0.0005, -0.0001, 0.0003, 0.0001, 0.0010, 0.0012, 0.0002, 0.0004, 0.0012, 0.0003, -0.0023, -0.0003, -0.0005, -0.0063, -0.0026, 0.0040, 0.0024, -0.0018, -0.0005, 0.0016, 0.0004, -0.0008, 0.0009, 0.0002, -0.0015, -0.0003, 0.0004, -0.0011, -0.0013, 0.0012, 0.0001, -0.0019, 0.0007, 0.0021, -0.0009, -0.0016, 0.0015, 0.0013, -0.0022, -0.0015, 0.0016, -0.0014, -0.0033, 0.0017, 0.0025, -0.0004, -0.0005, 0.0010, 0.0005, 0.0001]],
]
),
"12.0": torch.tensor(
[
[[ 0.0003, 0.0002, 0.0004, -0.0004, -0.0003, -0.0007, -0.0008, -0.0006, -0.0001, -0.0002, -0.0016, -0.0009, -0.0004, -0.0021, -0.0003, 0.0015, 0.0016, 0.0012, 0.0011, 0.0010, 0.0010, 0.0005, 0.0002, 0.0001, 0.0000, -0.0001, -0.0002, -0.0004, -0.0004, 0.0000, -0.0000, -0.0002, 0.0001, 0.0001, 0.0004, 0.0003, 0.0002, 0.0004, -0.0001, -0.0005, -0.0004, -0.0006, -0.0007, 0.0003, 0.0009, 0.0013, 0.0015, 0.0015, 0.0017, 0.0018]],
[[ -0.0008, -0.0003, 0.0003, -0.0001, 0.0008, 0.0013, 0.0004, 0.0008, 0.0015, 0.0006, -0.0021, -0.0001, -0.0003, -0.0062, -0.0022, 0.0043, 0.0028, -0.0013, -0.0002, 0.0017, 0.0010, -0.0001, 0.0008, 0.0001, -0.0010, 0.0003, 0.0008, -0.0006, -0.0007, 0.0012, 0.0003, -0.0013, 0.0007, 0.0019, -0.0002, -0.0013, 0.0011, 0.0016, -0.0016, -0.0017, 0.0014, -0.0006, -0.0029, 0.0011, 0.0028, 0.0006, -0.0004, 0.0005, 0.0008, 0.0003]],
]
),
"24.0": torch.tensor(
[
[[ 0.0009, 0.0004, 0.0007, 0.0002, 0.0004, -0.0001, -0.0003, -0.0002, 0.0002, 0.0001, -0.0015, -0.0009, -0.0006, -0.0024, -0.0005, 0.0016, 0.0014, 0.0010, 0.0009, 0.0008, 0.0008, 0.0004, 0.0001, 0.0000, -0.0001, -0.0002, -0.0003, -0.0006, -0.0006, -0.0003, -0.0005, -0.0006, -0.0003, -0.0004, -0.0001, -0.0002, -0.0003, -0.0001, -0.0006, -0.0011, -0.0008, -0.0010, -0.0012, -0.0000, 0.0007, 0.0011, 0.0012, 0.0011, 0.0013, 0.0014]],
[[ -0.0009, -0.0004, 0.0001, -0.0003, 0.0007, 0.0012, 0.0003, 0.0006, 0.0017, 0.0008, -0.0020, 0.0001, -0.0002, -0.0064, -0.0023, 0.0047, 0.0029, -0.0016, -0.0004, 0.0019, 0.0010, -0.0002, 0.0007, -0.0001, -0.0013, 0.0005, 0.0012, -0.0007, -0.0008, 0.0013, -0.0001, -0.0022, 0.0004, 0.0020, -0.0004, -0.0014, 0.0017, 0.0020, -0.0018, -0.0016, 0.0015, -0.0015, -0.0036, 0.0014, 0.0030, 0.0004, 0.0002, 0.0015, 0.0011, 0.0007]],
]
)
},
"facebook/encodec_48khz": {
"3.0": torch.tensor([[[ 0.005083, 0.004669, 0.005723, 0.005600, 0.004231, 0.003830,
0.003684, 0.003349, 0.003032, 0.003055, 0.002768, 0.002370,
0.002384, 0.002450, 0.002391, 0.002363, 0.002357, 0.002435,
0.002568, 0.002463, 0.002137, 0.002092, 0.002440, 0.002772,
0.003035, 0.003473, 0.003963, 0.004288, 0.004315, 0.004087,
0.003618, 0.003166, 0.002874, 0.002775, 0.002820, 0.002758,
0.002565, 0.002498, 0.002583, 0.002671, 0.002656, 0.002613,
0.002433, 0.002236, 0.002215, 0.002302, 0.002287, 0.002113,
0.001909, 0.001767],
[-0.003928, -0.002733, -0.001330, -0.001914, -0.002927, -0.003272,
-0.003677, -0.003615, -0.003341, -0.002907, -0.002764, -0.002742,
-0.002593, -0.002308, -0.002024, -0.001856, -0.001672, -0.001256,
-0.000929, -0.001217, -0.001864, -0.002118, -0.002025, -0.001932,
-0.001816, -0.001572, -0.001214, -0.000885, -0.000829, -0.000976,
-0.001417, -0.001874, -0.002030, -0.001952, -0.001858, -0.001863,
-0.001895, -0.001843, -0.001801, -0.001792, -0.001812, -0.001865,
-0.002008, -0.002120, -0.002132, -0.002093, -0.002170, -0.002370,
-0.002587, -0.002749]],
[[ 0.004229, 0.003422, 0.005044, 0.006059, 0.005242, 0.004623,
0.004231, 0.004050, 0.004314, 0.004701, 0.004559, 0.004105,
0.003874, 0.003713, 0.003355, 0.003055, 0.003235, 0.003927,
0.004500, 0.004195, 0.003328, 0.002804, 0.002628, 0.002456,
0.002693, 0.003883, 0.005604, 0.006791, 0.006702, 0.005427,
0.003622, 0.002328, 0.002173, 0.002871, 0.003505, 0.003410,
0.002851, 0.002511, 0.002534, 0.002685, 0.002714, 0.002538,
0.002110, 0.001697, 0.001786, 0.002415, 0.002940, 0.002856,
0.002348, 0.001883],
[-0.003444, -0.002916, -0.000590, 0.000157, -0.000702, -0.001472,
-0.002032, -0.001891, -0.001283, -0.000670, -0.000590, -0.000875,
-0.001090, -0.001095, -0.001172, -0.001287, -0.000907, 0.000111,
0.000858, 0.000471, -0.000532, -0.001127, -0.001463, -0.001853,
-0.001762, -0.000666, 0.000964, 0.002054, 0.001914, 0.000743,
-0.000876, -0.001990, -0.001951, -0.001042, -0.000229, -0.000171,
-0.000558, -0.000752, -0.000704, -0.000609, -0.000594, -0.000723,
-0.001085, -0.001455, -0.001374, -0.000795, -0.000350, -0.000480,
-0.000993, -0.001432]]]),
"6.0": torch.tensor([[[ 5.892794e-03, 5.767163e-03, 7.065284e-03, 7.068626e-03,
5.825328e-03, 5.601424e-03, 5.582351e-03, 5.209565e-03,
4.829186e-03, 4.809568e-03, 4.663883e-03, 4.402087e-03,
4.337528e-03, 4.311915e-03, 4.236566e-03, 4.209972e-03,
4.179818e-03, 4.196202e-03, 4.309553e-03, 4.267083e-03,
4.052189e-03, 4.068719e-03, 4.381632e-03, 4.692366e-03,
4.998885e-03, 5.466312e-03, 5.895300e-03, 6.115717e-03,
6.055626e-03, 5.773376e-03, 5.316667e-03, 4.826934e-03,
4.450697e-03, 4.315911e-03, 4.310716e-03, 4.202125e-03,
4.008702e-03, 3.957694e-03, 4.017603e-03, 4.060654e-03,
4.036821e-03, 3.923071e-03, 3.659022e-03, 3.427053e-03,
3.387271e-03, 3.462438e-03, 3.434755e-03, 3.247944e-03,
3.009581e-03, 2.800536e-03],
[-1.867314e-03, -6.082351e-04, 9.374358e-04, 5.555808e-04,
-3.020080e-04, -5.281629e-04, -9.364292e-04, -1.057594e-03,
-9.703087e-04, -6.292185e-04, -4.193477e-04, -3.605868e-04,
-2.948678e-04, -1.198237e-04, 4.924605e-05, 1.602105e-04,
3.162385e-04, 6.700790e-04, 9.868707e-04, 8.484383e-04,
4.327767e-04, 3.108105e-04, 4.244343e-04, 5.422112e-04,
7.239584e-04, 1.008546e-03, 1.265120e-03, 1.447669e-03,
1.436084e-03, 1.271058e-03, 8.684017e-04, 4.149990e-04,
2.143449e-04, 2.508474e-04, 3.018488e-04, 2.782424e-04,
2.369677e-04, 3.040710e-04, 3.242530e-04, 2.599912e-04,
2.211208e-04, 1.311762e-04, -9.807519e-05, -2.752687e-04,
-3.114068e-04, -2.832832e-04, -3.900219e-04, -6.142824e-04,
-8.507833e-04, -1.055882e-03]],
[[ 3.971702e-04, -2.164055e-04, 1.562327e-03, 2.695718e-03,
2.374928e-03, 2.145125e-03, 1.870762e-03, 1.852614e-03,
2.074345e-03, 2.312302e-03, 2.222824e-03, 1.876336e-03,
1.609606e-03, 1.420574e-03, 1.193270e-03, 9.592943e-04,
1.132237e-03, 1.776782e-03, 2.258269e-03, 1.945908e-03,
9.930646e-04, 1.733529e-04, -2.533881e-04, -3.138177e-04,
3.226010e-04, 1.859203e-03, 3.879325e-03, 5.267750e-03,
5.101699e-03, 3.609465e-03, 1.653315e-03, 2.709297e-04,
-3.190451e-05, 5.129501e-04, 1.224789e-03, 1.397457e-03,
1.110794e-03, 8.736057e-04, 8.860155e-04, 1.055910e-03,
1.100855e-03, 8.834896e-04, 3.825913e-04, -3.267327e-05,
6.586456e-05, 7.147206e-04, 1.394876e-03, 1.535393e-03,
1.192172e-03, 7.061819e-04],
[-6.897163e-03, -6.407891e-03, -4.015491e-03, -3.082125e-03,
-3.434983e-03, -3.885052e-03, -4.456392e-03, -4.296550e-03,
-3.861045e-03, -3.553474e-03, -3.547473e-03, -3.800863e-03,
-4.123025e-03, -4.237277e-03, -4.244958e-03, -4.263899e-03,
-3.808572e-03, -2.811858e-03, -2.147519e-03, -2.516703e-03,
-3.550721e-03, -4.353373e-03, -4.846224e-03, -4.960613e-03,
-4.273535e-03, -2.714785e-03, -7.043980e-04, 6.689885e-04,
5.069164e-04, -9.122533e-04, -2.816979e-03, -4.124952e-03,
-4.235019e-03, -3.491365e-03, -2.676077e-03, -2.381226e-03,
-2.492559e-03, -2.634424e-03, -2.632524e-03, -2.528266e-03,
-2.536691e-03, -2.746170e-03, -3.187869e-03, -3.553530e-03,
-3.462211e-03, -2.862707e-03, -2.273719e-03, -2.201617e-03,
-2.565818e-03, -3.044683e-03]]]),
"12.0": torch.tensor([[[ 2.237194e-03, 2.508208e-03, 3.986347e-03, 4.020395e-03,
2.889890e-03, 2.733388e-03, 2.684146e-03, 2.251372e-03,
1.787451e-03, 1.720550e-03, 1.689184e-03, 1.495478e-03,
1.321027e-03, 1.185375e-03, 1.098422e-03, 1.055453e-03,
9.591801e-04, 9.328910e-04, 1.026154e-03, 1.031992e-03,
9.155220e-04, 9.732856e-04, 1.282264e-03, 1.624059e-03,
1.920021e-03, 2.333685e-03, 2.730524e-03, 2.919153e-03,
2.856711e-03, 2.632692e-03, 2.256703e-03, 1.901129e-03,
1.684760e-03, 1.638201e-03, 1.644909e-03, 1.569378e-03,
1.448412e-03, 1.478291e-03, 1.580583e-03, 1.633777e-03,
1.597190e-03, 1.475462e-03, 1.242885e-03, 1.065243e-03,
1.052842e-03, 1.103825e-03, 1.059115e-03, 9.251673e-04,
7.235570e-04, 5.053390e-04],
[-4.534880e-03, -3.111026e-03, -1.486247e-03, -1.739966e-03,
-2.399862e-03, -2.583335e-03, -3.157276e-03, -3.517166e-03,
-3.598212e-03, -3.303007e-03, -3.037215e-03, -2.982930e-03,
-3.026671e-03, -2.958387e-03, -2.836909e-03, -2.775315e-03,
-2.719575e-03, -2.431532e-03, -2.090512e-03, -2.095603e-03,
-2.366266e-03, -2.404480e-03, -2.235661e-03, -2.063206e-03,
-1.888533e-03, -1.640449e-03, -1.407782e-03, -1.250053e-03,
-1.275359e-03, -1.373277e-03, -1.601508e-03, -1.838720e-03,
-1.876643e-03, -1.736149e-03, -1.622051e-03, -1.578928e-03,
-1.564748e-03, -1.455850e-03, -1.391748e-03, -1.418254e-03,
-1.462577e-03, -1.554713e-03, -1.730076e-03, -1.829485e-03,
-1.816249e-03, -1.772218e-03, -1.855736e-03, -2.013720e-03,
-2.196174e-03, -2.378810e-03]],
[[ 8.993230e-04, 6.808847e-04, 2.595528e-03, 3.586462e-03,
3.023965e-03, 2.479527e-03, 1.868662e-03, 1.565682e-03,
1.563900e-03, 1.666364e-03, 1.715061e-03, 1.609638e-03,
1.294764e-03, 8.647116e-04, 5.122397e-04, 2.899101e-04,
3.817413e-04, 8.303743e-04, 1.253686e-03, 1.179640e-03,
6.591807e-04, 1.167982e-04, -3.405492e-04, -5.258832e-04,
-4.165239e-05, 1.393227e-03, 3.473584e-03, 4.953051e-03,
4.779391e-03, 3.182305e-03, 1.140233e-03, -2.133392e-04,
-4.233644e-04, 2.426380e-04, 1.126914e-03, 1.557022e-03,
1.490265e-03, 1.264647e-03, 1.170405e-03, 1.237709e-03,
1.112253e-03, 6.990263e-04, 1.700171e-04, -1.761244e-04,
1.852706e-05, 8.140961e-04, 1.621285e-03, 1.813497e-03,
1.394625e-03, 7.860070e-04],
[-4.677887e-03, -3.966209e-03, -1.634288e-03, -8.592710e-04,
-1.395248e-03, -2.189968e-03, -3.198638e-03, -3.410639e-03,
-3.241918e-03, -3.051681e-03, -2.845973e-03, -2.786646e-03,
-3.078280e-03, -3.367662e-03, -3.450923e-03, -3.427895e-03,
-3.058358e-03, -2.258006e-03, -1.607386e-03, -1.647450e-03,
-2.164357e-03, -2.647080e-03, -3.110953e-03, -3.304542e-03,
-2.798792e-03, -1.407999e-03, 5.630683e-04, 1.961336e-03,
1.813856e-03, 3.529640e-04, -1.526076e-03, -2.695498e-03,
-2.702039e-03, -1.889018e-03, -9.337939e-04, -3.885011e-04,
-2.970786e-04, -4.415356e-04, -5.492531e-04, -5.430978e-04,
-7.051138e-04, -1.102020e-03, -1.577104e-03, -1.846151e-03,
-1.623901e-03, -8.853760e-04, -1.772702e-04, -4.866864e-05,
-4.633263e-04, -1.017192e-03]]]),
"24.0": torch.tensor(
[
[
[0.0004, 0.0008, 0.0024, 0.0024, 0.0013, 0.0013, 0.0013, 0.0009, 0.0005, 0.0005, 0.0006, 0.0005, 0.0005, 0.0003, 0.0003, 0.0003, 0.0002, 0.0002, 0.0003, 0.0004, 0.0003, 0.0004, 0.0008, 0.0012, 0.0015, 0.0018, 0.0021, 0.0022, 0.0021, 0.0019, 0.0016, 0.0014, 0.0012, 0.0011, 0.0012, 0.0012, 0.0012, 0.0012, 0.0013, 0.0014, 0.0014, 0.0013, 0.0011, 0.0009, 0.0009, 0.0010, 0.0010, 0.0010, 0.0009, 0.0007],
[ -0.0055, -0.0040, -0.0024, -0.0026, -0.0031, -0.0031, -0.0036, -0.0039, -0.0039, -0.0035, -0.0031, -0.0029, -0.0028, -0.0027, -0.0026, -0.0024, -0.0023, -0.0020, -0.0017, -0.0016, -0.0017, -0.0017, -0.0015, -0.0012, -0.0010, -0.0008, -0.0006, -0.0004, -0.0004, -0.0005, -0.0006, -0.0007, -0.0006, -0.0004, -0.0002, -0.0001, 0.0001, 0.0002, 0.0003, 0.0004, 0.0004, 0.0003, 0.0001, 0.0001, 0.0000, 0.0001, 0.0000, -0.0001, -0.0002, -0.0004],
],
[
[-0.0024, -0.0029, -0.0009, 0.0002, -0.0002, -0.0007, -0.0012, -0.0013, -0.0012, -0.0011, -0.0011, -0.0012, -0.0016, -0.0021, -0.0024, -0.0026, -0.0024, -0.0018, -0.0013, -0.0015, -0.0022, -0.0029, -0.0035, -0.0038, -0.0031, -0.0015, 0.0008, 0.0025, 0.0023, 0.0006, -0.0016, -0.0030, -0.0032, -0.0024, -0.0015, -0.0010, -0.0009, -0.0011, -0.0010, -0.0009, -0.0010, -0.0014, -0.0020, -0.0023, -0.0020, -0.0011, -0.0001, 0.0001, -0.0003, -0.0009],
[-0.0086, -0.0081, -0.0059, -0.0050, -0.0053, -0.0061, -0.0071, -0.0071, -0.0069, -0.0067, -0.0066, -0.0066, -0.0070, -0.0073, -0.0074, -0.0073, -0.0069, -0.0060, -0.0053, -0.0055, -0.0061, -0.0067, -0.0072, -0.0074, -0.0067, -0.0052, -0.0031, -0.0015, -0.0016, -0.0029, -0.0048, -0.0059, -0.0059, -0.0051, -0.0041, -0.0036, -0.0034, -0.0034, -0.0034, -0.0033, -0.0035, -0.0039, -0.0043, -0.0046, -0.0043, -0.0035, -0.0027, -0.0025, -0.0029, -0.0034],
],
]
)
}
}
# ---- error over whole batch
EXPECTED_CODEC_ERROR_BATCH = {
"facebook/encodec_24khz": {
"1.5": 0.0011174238752573729,
"3.0": 0.0009308119188062847,
"6.0": 0.0008,
"12.0": 0.0006830253987573087,
"24.0": 0.000642190920189023,
},
"facebook/encodec_48khz": {
"3.0": 0.00039895583176985383,
"6.0": 0.0003249854489695281,
"12.0": 0.0002540576097089797,
"24.0": 0.00021899679268244654,
}
}
# fmt: on
@slow
@require_torch
| EncodecModelTest |
python | django__django | tests/composite_pk/test_names_to_path.py | {
"start": 167,
"end": 4624
} | class ____(TestCase):
def test_id(self):
query = Query(User)
path, final_field, targets, rest = query.names_to_path(["id"], User._meta)
self.assertEqual(path, [])
self.assertEqual(final_field, User._meta.get_field("id"))
self.assertEqual(targets, (User._meta.get_field("id"),))
self.assertEqual(rest, [])
def test_pk(self):
query = Query(User)
path, final_field, targets, rest = query.names_to_path(["pk"], User._meta)
self.assertEqual(path, [])
self.assertEqual(final_field, User._meta.get_field("pk"))
self.assertEqual(targets, (User._meta.get_field("pk"),))
self.assertEqual(rest, [])
def test_tenant_id(self):
query = Query(User)
path, final_field, targets, rest = query.names_to_path(
["tenant", "id"], User._meta
)
self.assertEqual(
path,
[
PathInfo(
from_opts=User._meta,
to_opts=Tenant._meta,
target_fields=(Tenant._meta.get_field("id"),),
join_field=User._meta.get_field("tenant"),
m2m=False,
direct=True,
filtered_relation=None,
),
],
)
self.assertEqual(final_field, Tenant._meta.get_field("id"))
self.assertEqual(targets, (Tenant._meta.get_field("id"),))
self.assertEqual(rest, [])
def test_user_id(self):
query = Query(Comment)
path, final_field, targets, rest = query.names_to_path(
["user", "id"], Comment._meta
)
self.assertEqual(
path,
[
PathInfo(
from_opts=Comment._meta,
to_opts=User._meta,
target_fields=(
User._meta.get_field("tenant"),
User._meta.get_field("id"),
),
join_field=Comment._meta.get_field("user"),
m2m=False,
direct=True,
filtered_relation=None,
),
],
)
self.assertEqual(final_field, User._meta.get_field("id"))
self.assertEqual(targets, (User._meta.get_field("id"),))
self.assertEqual(rest, [])
def test_user_tenant_id(self):
query = Query(Comment)
path, final_field, targets, rest = query.names_to_path(
["user", "tenant", "id"], Comment._meta
)
self.assertEqual(
path,
[
PathInfo(
from_opts=Comment._meta,
to_opts=User._meta,
target_fields=(
User._meta.get_field("tenant"),
User._meta.get_field("id"),
),
join_field=Comment._meta.get_field("user"),
m2m=False,
direct=True,
filtered_relation=None,
),
PathInfo(
from_opts=User._meta,
to_opts=Tenant._meta,
target_fields=(Tenant._meta.get_field("id"),),
join_field=User._meta.get_field("tenant"),
m2m=False,
direct=True,
filtered_relation=None,
),
],
)
self.assertEqual(final_field, Tenant._meta.get_field("id"))
self.assertEqual(targets, (Tenant._meta.get_field("id"),))
self.assertEqual(rest, [])
def test_comments(self):
query = Query(User)
path, final_field, targets, rest = query.names_to_path(["comments"], User._meta)
self.assertEqual(
path,
[
PathInfo(
from_opts=User._meta,
to_opts=Comment._meta,
target_fields=(Comment._meta.get_field("pk"),),
join_field=User._meta.get_field("comments"),
m2m=True,
direct=False,
filtered_relation=None,
),
],
)
self.assertEqual(final_field, User._meta.get_field("comments"))
self.assertEqual(targets, (Comment._meta.get_field("pk"),))
self.assertEqual(rest, [])
| NamesToPathTests |
python | mlflow__mlflow | tests/gateway/tools.py | {
"start": 2625,
"end": 3392
} | class ____:
def __init__(self, data: dict[str, Any], status: int = 200):
# Extract status and headers from data, if present
self.status = status
self.headers = data.pop("headers", {"Content-Type": "application/json"})
# Save the rest of the data as content
self._content = data
def raise_for_status(self) -> None:
if 400 <= self.status < 600:
raise aiohttp.ClientResponseError(None, None, status=self.status)
async def json(self) -> dict[str, Any]:
return self._content
async def text(self) -> str:
return json.dumps(self._content)
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, traceback):
pass
| MockAsyncResponse |
python | pytorch__pytorch | torch/_inductor/distributed_autotune.py | {
"start": 1240,
"end": 3576
} | class ____:
index: int
local: bool
def get_autotune_pg() -> dist.ProcessGroup | None:
if dist.is_available() and dist.is_initialized():
global _AUTOTUNE_PG
if _AUTOTUNE_PG is None:
_AUTOTUNE_PG = dist.distributed_c10d._new_group_with_tag(
pg_tag="pt2_distributed_autotune_pg"
)
return _AUTOTUNE_PG
return None
def schedule(scheduler: torch._inductor.scheduler.Scheduler) -> None:
"""
Finish the distributed autotuning by propagating the autotuning results
between the ranks and then replacing the placeholder with the real Buffer.
"""
assert config.distributed_max_autotune_gemm
autotune_results = _autotune_local_nodes(scheduler)
choices_by_index = _sync(autotune_results)
_autotune_remote_nodes(scheduler, choices_by_index)
@contextlib.contextmanager
def graph_context() -> Generator[None, None, None]:
"""
Wrapped around processing a graph, sets up figuring out which ranks tune
which shapes.
"""
assert not isinstance(
V.get_distributed_autotune_state(check_poisoned=False), # type: ignore[call-arg]
_DistributedAutotuneState,
)
V.set_distributed_autotune_state(_DistributedAutotuneState())
try:
yield
finally:
V.set_distributed_autotune_state(NullHandler())
def maybe_autotune_remote(
name: str, choices: list[ChoiceCaller], inputs: list[Buffer], layout: Layout
) -> TensorBox | ShapeAsConstantBuffer | None:
"""
Used by an op (like `mm`) to determine if the op should be autotuned
locally (returns None) or remotely (returns a placeholder Buffer).
"""
if not config.distributed_max_autotune_gemm:
return None
if not (autotune_pg := get_autotune_pg()):
return None
if len(choices) <= 1:
return None
state = V.distributed_autotune_state
index = state.autotuned_index
state.autotuned_index += 1
local = index % autotune_pg.size() == autotune_pg.rank()
V.current_node.meta[_DISTRIBUTED_AUTOTUNE_KEY] = _DistributedAutotuneInfo(
index, local
)
if local:
state.autotuned_local_count += 1
return None
return torch._inductor.ir.TensorBox.create(
_DistributedAutotuneBuffer(name, inputs, layout)
)
| _DistributedAutotuneInfo |
python | getsentry__sentry | tests/sentry/api/serializers/rest_framework/test_base.py | {
"start": 1321,
"end": 1463
} | class ____(CamelSnakeModelSerializer):
class Meta:
model = ContentType
fields = ["app_label", "model"]
| ContentTypeSerializer |
python | apache__avro | lang/py/avro/test/test_io.py | {
"start": 1113,
"end": 9501
} | class ____(TypedDict):
H: object
SCHEMAS_TO_VALIDATE = tuple(
(json.dumps(schema), datum)
for schema, datum in (
("null", None),
("boolean", True),
("string", "adsfasdf09809dsf-=adsf"),
("bytes", b"12345abcd"),
("int", 1234),
("long", 1234),
("float", 1234.0),
("double", 1234.0),
({"type": "fixed", "name": "Test", "size": 1}, b"B"),
(
{
"type": "fixed",
"logicalType": "decimal",
"name": "Test",
"size": 8,
"precision": 5,
"scale": 4,
},
decimal.Decimal("3.1415"),
),
(
{
"type": "fixed",
"logicalType": "decimal",
"name": "Test",
"size": 8,
"precision": 5,
"scale": 4,
},
decimal.Decimal("-3.1415"),
),
(
{
"type": "fixed",
"logicalType": "decimal",
"name": "Test",
"size": 8,
"precision": 1,
},
decimal.Decimal("3"),
),
(
{"type": "bytes", "logicalType": "decimal", "precision": 5, "scale": 4},
decimal.Decimal("3.1415"),
),
(
{"type": "bytes", "logicalType": "decimal", "precision": 5, "scale": 4},
decimal.Decimal("-3.1415"),
),
(
{"type": "bytes", "logicalType": "decimal", "precision": 1},
decimal.Decimal("3"),
),
({"type": "enum", "name": "Test", "symbols": ["A", "B"]}, "B"),
({"type": "array", "items": "long"}, [1, 3, 2]),
({"type": "map", "values": "long"}, {"a": 1, "b": 3, "c": 2}),
(["string", "null", "long"], None),
({"type": "int", "logicalType": "date"}, datetime.date(2000, 1, 1)),
(
{"type": "int", "logicalType": "time-millis"},
datetime.time(23, 59, 59, 999000),
),
({"type": "int", "logicalType": "time-millis"}, datetime.time(0, 0, 0, 000000)),
(
{"type": "long", "logicalType": "time-micros"},
datetime.time(23, 59, 59, 999999),
),
(
{"type": "long", "logicalType": "time-micros"},
datetime.time(0, 0, 0, 000000),
),
(
{"type": "long", "logicalType": "timestamp-millis"},
datetime.datetime(1000, 1, 1, 0, 0, 0, 000000, tzinfo=avro.timezones.utc),
),
(
{"type": "long", "logicalType": "timestamp-millis"},
datetime.datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=avro.timezones.utc),
),
(
{"type": "long", "logicalType": "timestamp-millis"},
datetime.datetime(2000, 1, 18, 2, 2, 1, 100000, tzinfo=avro.timezones.tst),
),
(
{"type": "long", "logicalType": "timestamp-micros"},
datetime.datetime(1000, 1, 1, 0, 0, 0, 000000, tzinfo=avro.timezones.utc),
),
(
{"type": "long", "logicalType": "timestamp-micros"},
datetime.datetime(9999, 12, 31, 23, 59, 59, 999999, tzinfo=avro.timezones.utc),
),
(
{"type": "long", "logicalType": "timestamp-micros"},
datetime.datetime(2000, 1, 18, 2, 2, 1, 123499, tzinfo=avro.timezones.tst),
),
(
{"type": "string", "logicalType": "uuid"},
"a4818e1c-8e59-11eb-8dcd-0242ac130003",
), # UUID1
(
{"type": "string", "logicalType": "uuid"},
"570feebe-2bbc-4937-98df-285944e1dbbd",
), # UUID4
({"type": "string", "logicalType": "unknown-logical-type"}, "12345abcd"),
({"type": "string", "logicalType": "timestamp-millis"}, "12345abcd"),
(
{
"type": "record",
"name": "Test",
"fields": [{"name": "f", "type": "long"}],
},
{"f": 5},
),
(
{
"type": "record",
"name": "Lisp",
"fields": [
{
"name": "value",
"type": [
"null",
"string",
{
"type": "record",
"name": "Cons",
"fields": [
{"name": "car", "type": "Lisp"},
{"name": "cdr", "type": "Lisp"},
],
},
],
}
],
},
{"value": {"car": {"value": "head"}, "cdr": {"value": None}}},
),
(
{"type": "record", "name": "record", "fields": [{"name": "value", "type": "int"}, {"name": "next", "type": ["null", "record"]}]},
{"value": 0, "next": {"value": 1, "next": None}},
),
(
{"type": "record", "name": "ns.long", "fields": [{"name": "value", "type": "int"}, {"name": "next", "type": ["null", "ns.long"]}]},
{"value": 0, "next": {"value": 1, "next": None}},
),
# Optional logical types.
(
[{"logicalType": "uuid", "type": "string"}, "null"],
None,
),
(
[{"logicalType": "uuid", "type": "string"}, "null"],
uuid.uuid4().hex,
),
(
[{"type": "long", "logicalType": "timestamp-millis"}, "null"],
datetime.datetime(1000, 1, 1, 0, 0, 0, 0, tzinfo=avro.timezones.utc),
),
(
[{"type": "long", "logicalType": "timestamp-millis"}, "null"],
None,
),
)
)
BINARY_ENCODINGS = (
(0, b"00"),
(-1, b"01"),
(1, b"02"),
(-2, b"03"),
(2, b"04"),
(-64, b"7f"),
(64, b"80 01"),
(8192, b"80 80 01"),
(-8193, b"81 80 01"),
)
DEFAULT_VALUE_EXAMPLES = (
("null", None),
("boolean", True),
("string", "foo"),
("bytes", "\xff\xff"),
("int", 5),
("long", 5),
("float", 1.1),
("double", 1.1),
({"type": "fixed", "name": "F", "size": 2}, "\xff\xff"),
({"type": "enum", "name": "F", "symbols": ["FOO", "BAR"]}, "FOO"),
({"type": "array", "items": "int"}, [1, 2, 3]),
({"type": "map", "values": "int"}, {"a": 1, "b": 2}),
(["int", "null"], 5),
(
{"type": "record", "name": "F", "fields": [{"name": "A", "type": "int"}]},
{"A": 5},
),
)
LONG_RECORD_SCHEMA = avro.schema.parse(
json.dumps(
{
"type": "record",
"name": "Test",
"fields": [
{"name": "A", "type": "int"},
{"name": "B", "type": "int"},
{"name": "C", "type": "int"},
{"name": "D", "type": "int"},
{"name": "E", "type": "int"},
{"name": "F", "type": "int"},
{"name": "G", "type": "int"},
],
}
)
)
LONG_RECORD_DATUM = {"A": 1, "B": 2, "C": 3, "D": 4, "E": 5, "F": 6, "G": 7}
def avro_hexlify(reader: BinaryIO) -> bytes:
"""Return the hex value, as a string, of a binary-encoded int or long."""
b = []
current_byte = reader.read(1)
b.append(binascii.hexlify(current_byte))
while (ord(current_byte) & 0x80) != 0:
current_byte = reader.read(1)
b.append(binascii.hexlify(current_byte))
return b" ".join(b)
def write_datum(datum: object, writers_schema: avro.schema.Schema) -> Tuple[io.BytesIO, avro.io.BinaryEncoder, avro.io.DatumWriter]:
writer = io.BytesIO()
encoder = avro.io.BinaryEncoder(writer)
datum_writer = avro.io.DatumWriter(writers_schema)
datum_writer.write(datum, encoder)
return writer, encoder, datum_writer
def read_datum(buffer: io.BytesIO, writers_schema: avro.schema.Schema, readers_schema: Optional[avro.schema.Schema] = None) -> object:
reader = io.BytesIO(buffer.getvalue())
decoder = avro.io.BinaryDecoder(reader)
datum_reader = avro.io.DatumReader(writers_schema, readers_schema)
return datum_reader.read(decoder)
| DefaultValueTestCaseType |
python | kamyu104__LeetCode-Solutions | Python/find-the-last-marked-nodes-in-tree.py | {
"start": 2881,
"end": 4611
} | class ____(object):
def lastMarkedNodes(self, edges):
"""
:type edges: List[List[int]]
:rtype: List[int]
"""
def increase(x):
return (x[0]+1, x[1])
def topological_traversal():
p = [-2]*len(adj)
p[0] = -1
topological_order = [0]
for u in topological_order:
for v in reversed(adj[u]):
if p[v] != -2:
continue
p[v] = u
topological_order.append(v)
dp = [[(0, u)]*2 for u in xrange(len(adj))]
for u in reversed(topological_order):
for v in adj[u]:
if v == p[u]:
continue
curr = increase(dp[v][0])
for i in xrange(len(dp[u])):
if curr > dp[u][i]:
curr, dp[u][i] = dp[u][i], curr
return dp
def bfs():
result = [-1]*len(adj)
q = [(0, -1, (0, -1))]
while q:
new_q = []
for u, p, curr in q:
result[u] = max(dp[u][0], curr)[1]
for v in adj[u]:
if v == p:
continue
new_q.append((v, u, increase(max(dp[u][dp[u][0][1] == dp[v][0][1]], curr))))
q = new_q
return result
adj = [[] for _ in xrange(len(edges)+1)]
for u, v in edges:
adj[u].append(v)
adj[v].append(u)
dp = topological_traversal()
return bfs()
# Time: O(n)
# Space: O(n)
# iterative dfs, tree dp
| Solution3 |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/executors/aws_lambda/utils.py | {
"start": 1304,
"end": 1596
} | class ____:
"""Represents a Lambda task that is queued. The task will be run in the next heartbeat."""
key: TaskInstanceKey
command: CommandType
queue: str
executor_config: ExecutorConfigType
attempt_number: int
next_attempt_time: datetime.datetime
| LambdaQueuedTask |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/core.py | {
"start": 22037,
"end": 25222
} | class ____(Layer):
"""Flattens the input. Does not affect the batch size.
Note: If inputs are shaped `(batch,)` without a feature axis, then
flattening adds an extra channel dimension and output shape is `(batch, 1)`.
Args:
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch, ..., channels)` while `channels_first` corresponds to
inputs with shape `(batch, channels, ...)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Example:
>>> model = tf.keras.Sequential()
>>> model.add(tf.keras.layers.Conv2D(64, 3, 3, input_shape=(3, 32, 32)))
>>> model.output_shape
(None, 1, 10, 64)
>>> model.add(Flatten())
>>> model.output_shape
(None, 640)
"""
def __init__(self, data_format=None, **kwargs):
super(Flatten, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
self.input_spec = InputSpec(min_ndim=1)
self._channels_first = self.data_format == 'channels_first'
def call(self, inputs):
if self._channels_first:
rank = inputs.shape.rank
if rank and rank > 1:
# Switch to channels-last format.
permutation = [0]
permutation.extend(range(2, rank))
permutation.append(1)
inputs = array_ops.transpose(inputs, perm=permutation)
if context.executing_eagerly():
# Full static shape is guaranteed to be available.
# Performance: Using `constant_op` is much faster than passing a list.
flattened_shape = constant_op.constant([inputs.shape[0], -1])
return array_ops.reshape(inputs, flattened_shape)
else:
input_shape = inputs.shape
rank = input_shape.rank
if rank == 1:
return array_ops.expand_dims_v2(inputs, axis=1)
else:
batch_dim = tensor_shape.dimension_value(input_shape[0])
non_batch_dims = input_shape[1:]
# Reshape in a way that preserves as much shape info as possible.
if non_batch_dims.is_fully_defined():
last_dim = int(functools.reduce(operator.mul, non_batch_dims))
flattened_shape = constant_op.constant([-1, last_dim])
elif batch_dim is not None:
flattened_shape = constant_op.constant([int(batch_dim), -1])
else:
flattened_shape = [array_ops.shape_v2(inputs)[0], -1]
return array_ops.reshape(inputs, flattened_shape)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
if not input_shape:
output_shape = tensor_shape.TensorShape([1])
else:
output_shape = [input_shape[0]]
if np.all(input_shape[1:]):
output_shape += [np.prod(input_shape[1:], dtype=int)]
else:
output_shape += [None]
return tensor_shape.TensorShape(output_shape)
def get_config(self):
config = super(Flatten, self).get_config()
config.update({'data_format': self.data_format})
return config
| Flatten |
python | huggingface__transformers | tests/models/align/test_modeling_align.py | {
"start": 18581,
"end": 19658
} | class ____(unittest.TestCase):
@slow
def test_inference(self):
model_name = "kakaobrain/align-base"
model = AlignModel.from_pretrained(model_name).to(torch_device)
processor = AlignProcessor.from_pretrained(model_name)
image = prepare_img()
texts = ["a photo of a cat", "a photo of a dog"]
inputs = processor(images=image, text=texts, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
# verify the logits
self.assertEqual(
outputs.logits_per_image.shape,
torch.Size((inputs.pixel_values.shape[0], inputs.input_ids.shape[0])),
)
self.assertEqual(
outputs.logits_per_text.shape,
torch.Size((inputs.input_ids.shape[0], inputs.pixel_values.shape[0])),
)
expected_logits = torch.tensor([[9.7093, 3.4679]], device=torch_device)
torch.testing.assert_close(outputs.logits_per_image, expected_logits, rtol=1e-3, atol=1e-3)
| AlignModelIntegrationTest |
python | Unity-Technologies__ml-agents | ml-agents/mlagents/trainers/torch_entities/action_log_probs.py | {
"start": 281,
"end": 952
} | class ____(_ActionTupleBase):
"""
An object whose fields correspond to the log probs of actions of different types.
Continuous and discrete are numpy arrays
Dimensions are of (n_agents, continuous_size) and (n_agents, discrete_size),
respectively. Note, this also holds when continuous or discrete size is
zero.
"""
@property
def discrete_dtype(self) -> np.dtype:
"""
The dtype of a discrete log probability.
"""
return np.float32
@staticmethod
def empty_log_probs() -> "LogProbsTuple":
"""
Generates a dummy LogProbsTuple
"""
return LogProbsTuple()
| LogProbsTuple |
python | plotly__plotly.py | plotly/figure_factory/_distplot.py | {
"start": 8029,
"end": 14099
} | class ____(object):
"""
Refer to TraceFactory.create_distplot() for docstring
"""
def __init__(
self,
hist_data,
histnorm,
group_labels,
bin_size,
curve_type,
colors,
rug_text,
show_hist,
show_curve,
):
self.hist_data = hist_data
self.histnorm = histnorm
self.group_labels = group_labels
self.bin_size = bin_size
self.show_hist = show_hist
self.show_curve = show_curve
self.trace_number = len(hist_data)
if rug_text:
self.rug_text = rug_text
else:
self.rug_text = [None] * self.trace_number
self.start = []
self.end = []
if colors:
self.colors = colors
else:
self.colors = [
"rgb(31, 119, 180)",
"rgb(255, 127, 14)",
"rgb(44, 160, 44)",
"rgb(214, 39, 40)",
"rgb(148, 103, 189)",
"rgb(140, 86, 75)",
"rgb(227, 119, 194)",
"rgb(127, 127, 127)",
"rgb(188, 189, 34)",
"rgb(23, 190, 207)",
]
self.curve_x = [None] * self.trace_number
self.curve_y = [None] * self.trace_number
for trace in self.hist_data:
self.start.append(min(trace) * 1.0)
self.end.append(max(trace) * 1.0)
def make_hist(self):
"""
Makes the histogram(s) for FigureFactory.create_distplot().
:rtype (list) hist: list of histogram representations
"""
hist = [None] * self.trace_number
for index in range(self.trace_number):
hist[index] = dict(
type="histogram",
x=self.hist_data[index],
xaxis="x1",
yaxis="y1",
histnorm=self.histnorm,
name=self.group_labels[index],
legendgroup=self.group_labels[index],
marker=dict(color=self.colors[index % len(self.colors)]),
autobinx=False,
xbins=dict(
start=self.start[index],
end=self.end[index],
size=self.bin_size[index],
),
opacity=0.7,
)
return hist
def make_kde(self):
"""
Makes the kernel density estimation(s) for create_distplot().
This is called when curve_type = 'kde' in create_distplot().
:rtype (list) curve: list of kde representations
"""
curve = [None] * self.trace_number
for index in range(self.trace_number):
self.curve_x[index] = [
self.start[index] + x * (self.end[index] - self.start[index]) / 500
for x in range(500)
]
self.curve_y[index] = scipy_stats.gaussian_kde(self.hist_data[index])(
self.curve_x[index]
)
if self.histnorm == ALTERNATIVE_HISTNORM:
self.curve_y[index] *= self.bin_size[index]
for index in range(self.trace_number):
curve[index] = dict(
type="scatter",
x=self.curve_x[index],
y=self.curve_y[index],
xaxis="x1",
yaxis="y1",
mode="lines",
name=self.group_labels[index],
legendgroup=self.group_labels[index],
showlegend=False if self.show_hist else True,
marker=dict(color=self.colors[index % len(self.colors)]),
)
return curve
def make_normal(self):
"""
Makes the normal curve(s) for create_distplot().
This is called when curve_type = 'normal' in create_distplot().
:rtype (list) curve: list of normal curve representations
"""
curve = [None] * self.trace_number
mean = [None] * self.trace_number
sd = [None] * self.trace_number
for index in range(self.trace_number):
mean[index], sd[index] = scipy_stats.norm.fit(self.hist_data[index])
self.curve_x[index] = [
self.start[index] + x * (self.end[index] - self.start[index]) / 500
for x in range(500)
]
self.curve_y[index] = scipy_stats.norm.pdf(
self.curve_x[index], loc=mean[index], scale=sd[index]
)
if self.histnorm == ALTERNATIVE_HISTNORM:
self.curve_y[index] *= self.bin_size[index]
for index in range(self.trace_number):
curve[index] = dict(
type="scatter",
x=self.curve_x[index],
y=self.curve_y[index],
xaxis="x1",
yaxis="y1",
mode="lines",
name=self.group_labels[index],
legendgroup=self.group_labels[index],
showlegend=False if self.show_hist else True,
marker=dict(color=self.colors[index % len(self.colors)]),
)
return curve
def make_rug(self):
"""
Makes the rug plot(s) for create_distplot().
:rtype (list) rug: list of rug plot representations
"""
rug = [None] * self.trace_number
for index in range(self.trace_number):
rug[index] = dict(
type="scatter",
x=self.hist_data[index],
y=([self.group_labels[index]] * len(self.hist_data[index])),
xaxis="x1",
yaxis="y2",
mode="markers",
name=self.group_labels[index],
legendgroup=self.group_labels[index],
showlegend=(False if self.show_hist or self.show_curve else True),
text=self.rug_text[index],
marker=dict(
color=self.colors[index % len(self.colors)], symbol="line-ns-open"
),
)
return rug
| _Distplot |
python | tiangolo__fastapi | tests/test_dependency_contextmanager.py | {
"start": 671,
"end": 11787
} | class ____(Exception):
pass
async def asyncgen_state(state: Dict[str, str] = Depends(get_state)):
state["/async"] = "asyncgen started"
yield state["/async"]
state["/async"] = "asyncgen completed"
def generator_state(state: Dict[str, str] = Depends(get_state)):
state["/sync"] = "generator started"
yield state["/sync"]
state["/sync"] = "generator completed"
async def asyncgen_state_try(state: Dict[str, str] = Depends(get_state)):
state["/async_raise"] = "asyncgen raise started"
try:
yield state["/async_raise"]
except AsyncDependencyError:
errors.append("/async_raise")
raise
finally:
state["/async_raise"] = "asyncgen raise finalized"
def generator_state_try(state: Dict[str, str] = Depends(get_state)):
state["/sync_raise"] = "generator raise started"
try:
yield state["/sync_raise"]
except SyncDependencyError:
errors.append("/sync_raise")
raise
finally:
state["/sync_raise"] = "generator raise finalized"
async def context_a(state: dict = Depends(get_state)):
state["context_a"] = "started a"
try:
yield state
finally:
state["context_a"] = "finished a"
async def context_b(state: dict = Depends(context_a)):
state["context_b"] = "started b"
try:
yield state
finally:
state["context_b"] = f"finished b with a: {state['context_a']}"
@app.get("/async")
async def get_async(state: str = Depends(asyncgen_state)):
return state
@app.get("/sync")
async def get_sync(state: str = Depends(generator_state)):
return state
@app.get("/async_raise")
async def get_async_raise(state: str = Depends(asyncgen_state_try)):
assert state == "asyncgen raise started"
raise AsyncDependencyError()
@app.get("/sync_raise")
async def get_sync_raise(state: str = Depends(generator_state_try)):
assert state == "generator raise started"
raise SyncDependencyError()
@app.get("/async_raise_other")
async def get_async_raise_other(state: str = Depends(asyncgen_state_try)):
assert state == "asyncgen raise started"
raise OtherDependencyError()
@app.get("/sync_raise_other")
async def get_sync_raise_other(state: str = Depends(generator_state_try)):
assert state == "generator raise started"
raise OtherDependencyError()
@app.get("/context_b")
async def get_context_b(state: dict = Depends(context_b)):
return state
@app.get("/context_b_raise")
async def get_context_b_raise(state: dict = Depends(context_b)):
assert state["context_b"] == "started b"
assert state["context_a"] == "started a"
raise OtherDependencyError()
@app.get("/context_b_bg")
async def get_context_b_bg(tasks: BackgroundTasks, state: dict = Depends(context_b)):
async def bg(state: dict):
state["bg"] = f"bg set - b: {state['context_b']} - a: {state['context_a']}"
tasks.add_task(bg, state)
return state
# Sync versions
@app.get("/sync_async")
def get_sync_async(state: str = Depends(asyncgen_state)):
return state
@app.get("/sync_sync")
def get_sync_sync(state: str = Depends(generator_state)):
return state
@app.get("/sync_async_raise")
def get_sync_async_raise(state: str = Depends(asyncgen_state_try)):
assert state == "asyncgen raise started"
raise AsyncDependencyError()
@app.get("/sync_sync_raise")
def get_sync_sync_raise(state: str = Depends(generator_state_try)):
assert state == "generator raise started"
raise SyncDependencyError()
@app.get("/sync_async_raise_other")
def get_sync_async_raise_other(state: str = Depends(asyncgen_state_try)):
assert state == "asyncgen raise started"
raise OtherDependencyError()
@app.get("/sync_sync_raise_other")
def get_sync_sync_raise_other(state: str = Depends(generator_state_try)):
assert state == "generator raise started"
raise OtherDependencyError()
@app.get("/sync_context_b")
def get_sync_context_b(state: dict = Depends(context_b)):
return state
@app.get("/sync_context_b_raise")
def get_sync_context_b_raise(state: dict = Depends(context_b)):
assert state["context_b"] == "started b"
assert state["context_a"] == "started a"
raise OtherDependencyError()
@app.get("/sync_context_b_bg")
async def get_sync_context_b_bg(
tasks: BackgroundTasks, state: dict = Depends(context_b)
):
async def bg(state: dict):
state["sync_bg"] = (
f"sync_bg set - b: {state['context_b']} - a: {state['context_a']}"
)
tasks.add_task(bg, state)
return state
@app.middleware("http")
async def middleware(request, call_next):
response: StreamingResponse = await call_next(request)
response.headers["x-state"] = json.dumps(state.copy())
return response
client = TestClient(app)
def test_async_state():
assert state["/async"] == "asyncgen not started"
response = client.get("/async")
assert response.status_code == 200, response.text
assert response.json() == "asyncgen started"
assert state["/async"] == "asyncgen completed"
def test_sync_state():
assert state["/sync"] == "generator not started"
response = client.get("/sync")
assert response.status_code == 200, response.text
assert response.json() == "generator started"
assert state["/sync"] == "generator completed"
def test_async_raise_other():
assert state["/async_raise"] == "asyncgen raise not started"
with pytest.raises(OtherDependencyError):
client.get("/async_raise_other")
assert state["/async_raise"] == "asyncgen raise finalized"
assert "/async_raise" not in errors
def test_sync_raise_other():
assert state["/sync_raise"] == "generator raise not started"
with pytest.raises(OtherDependencyError):
client.get("/sync_raise_other")
assert state["/sync_raise"] == "generator raise finalized"
assert "/sync_raise" not in errors
def test_async_raise_raises():
with pytest.raises(AsyncDependencyError):
client.get("/async_raise")
assert state["/async_raise"] == "asyncgen raise finalized"
assert "/async_raise" in errors
errors.clear()
def test_async_raise_server_error():
client = TestClient(app, raise_server_exceptions=False)
response = client.get("/async_raise")
assert response.status_code == 500, response.text
assert state["/async_raise"] == "asyncgen raise finalized"
assert "/async_raise" in errors
errors.clear()
def test_context_b():
response = client.get("/context_b")
data = response.json()
assert data["context_b"] == "started b"
assert data["context_a"] == "started a"
assert state["context_b"] == "finished b with a: started a"
assert state["context_a"] == "finished a"
def test_context_b_raise():
with pytest.raises(OtherDependencyError):
client.get("/context_b_raise")
assert state["context_b"] == "finished b with a: started a"
assert state["context_a"] == "finished a"
def test_background_tasks():
response = client.get("/context_b_bg")
data = response.json()
assert data["context_b"] == "started b"
assert data["context_a"] == "started a"
assert data["bg"] == "not set"
middleware_state = json.loads(response.headers["x-state"])
assert middleware_state["context_b"] == "started b"
assert middleware_state["context_a"] == "started a"
assert middleware_state["bg"] == "not set"
assert state["context_b"] == "finished b with a: started a"
assert state["context_a"] == "finished a"
assert state["bg"] == "bg set - b: started b - a: started a"
def test_sync_raise_raises():
with pytest.raises(SyncDependencyError):
client.get("/sync_raise")
assert state["/sync_raise"] == "generator raise finalized"
assert "/sync_raise" in errors
errors.clear()
def test_sync_raise_server_error():
client = TestClient(app, raise_server_exceptions=False)
response = client.get("/sync_raise")
assert response.status_code == 500, response.text
assert state["/sync_raise"] == "generator raise finalized"
assert "/sync_raise" in errors
errors.clear()
def test_sync_async_state():
response = client.get("/sync_async")
assert response.status_code == 200, response.text
assert response.json() == "asyncgen started"
assert state["/async"] == "asyncgen completed"
def test_sync_sync_state():
response = client.get("/sync_sync")
assert response.status_code == 200, response.text
assert response.json() == "generator started"
assert state["/sync"] == "generator completed"
def test_sync_async_raise_other():
with pytest.raises(OtherDependencyError):
client.get("/sync_async_raise_other")
assert state["/async_raise"] == "asyncgen raise finalized"
assert "/async_raise" not in errors
def test_sync_sync_raise_other():
with pytest.raises(OtherDependencyError):
client.get("/sync_sync_raise_other")
assert state["/sync_raise"] == "generator raise finalized"
assert "/sync_raise" not in errors
def test_sync_async_raise_raises():
with pytest.raises(AsyncDependencyError):
client.get("/sync_async_raise")
assert state["/async_raise"] == "asyncgen raise finalized"
assert "/async_raise" in errors
errors.clear()
def test_sync_async_raise_server_error():
client = TestClient(app, raise_server_exceptions=False)
response = client.get("/sync_async_raise")
assert response.status_code == 500, response.text
assert state["/async_raise"] == "asyncgen raise finalized"
assert "/async_raise" in errors
errors.clear()
def test_sync_sync_raise_raises():
with pytest.raises(SyncDependencyError):
client.get("/sync_sync_raise")
assert state["/sync_raise"] == "generator raise finalized"
assert "/sync_raise" in errors
errors.clear()
def test_sync_sync_raise_server_error():
client = TestClient(app, raise_server_exceptions=False)
response = client.get("/sync_sync_raise")
assert response.status_code == 500, response.text
assert state["/sync_raise"] == "generator raise finalized"
assert "/sync_raise" in errors
errors.clear()
def test_sync_context_b():
response = client.get("/sync_context_b")
data = response.json()
assert data["context_b"] == "started b"
assert data["context_a"] == "started a"
assert state["context_b"] == "finished b with a: started a"
assert state["context_a"] == "finished a"
def test_sync_context_b_raise():
with pytest.raises(OtherDependencyError):
client.get("/sync_context_b_raise")
assert state["context_b"] == "finished b with a: started a"
assert state["context_a"] == "finished a"
def test_sync_background_tasks():
response = client.get("/sync_context_b_bg")
data = response.json()
assert data["context_b"] == "started b"
assert data["context_a"] == "started a"
assert data["sync_bg"] == "not set"
assert state["context_b"] == "finished b with a: started a"
assert state["context_a"] == "finished a"
assert state["sync_bg"] == "sync_bg set - b: started b - a: started a"
| OtherDependencyError |
python | huggingface__transformers | src/transformers/models/jetmoe/modeling_jetmoe.py | {
"start": 32488,
"end": 35386
} | class ____(JetMoePreTrainedModel, GenerationMixin):
_tied_weights_keys = {"lm_head.weight": "model.embed_tokens.weight"}
def __init__(self, config):
super().__init__(config)
self.model = JetMoeModel(config)
self.vocab_size = config.vocab_size
self.aux_loss_coef = config.aux_loss_coef
self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.tie_word_embeddings = config.tie_word_embeddings
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
logits_to_keep: Union[int, torch.Tensor] = 0,
output_router_logits: Optional[bool] = False,
**kwargs,
) -> MoeCausalLMOutputWithPast:
outputs: MoeModelOutputWithPast = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
hidden_states = outputs.last_hidden_state
# Only compute necessary logits, and do not upcast them to float if we are not computing the loss
slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
logits = self.lm_head(hidden_states[:, slice_indices, :])
loss = None
if labels is not None:
loss = self.loss_function(
logits,
labels,
vocab_size=self.config.vocab_size,
**kwargs,
)
aux_loss = None
if output_router_logits:
aux_loss = load_balancing_loss_func(
outputs.router_logits,
self.num_experts,
self.num_experts_per_tok,
attention_mask,
)
if labels is not None:
loss += self.aux_loss_coef * aux_loss.to(loss.device) # make sure to reside in the same device
return MoeCausalLMOutputWithPast(
loss=loss,
aux_loss=aux_loss,
logits=logits,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
router_logits=outputs.router_logits,
)
| JetMoeForCausalLM |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 483041,
"end": 483479
} | class ____(sgqlc.types.Type):
"""The Common Vulnerability Scoring System"""
__schema__ = github_schema
__field_names__ = ("score", "vector_string")
score = sgqlc.types.Field(sgqlc.types.non_null(Float), graphql_name="score")
"""The CVSS score associated with this advisory"""
vector_string = sgqlc.types.Field(String, graphql_name="vectorString")
"""The CVSS vector string associated with this advisory"""
| CVSS |
python | django-import-export__django-import-export | tests/core/tests/test_command_utils.py | {
"start": 316,
"end": 1038
} | class ____(TestCase):
def test_load_by_model(self):
resource_class = get_resource_class("core.Book")
self.assertIsNotNone(resource_class)
self.assertEqual(resource_class.Meta.model, Book)
def test_load_by_resource(self):
resource_class = get_resource_class("core.admin.BookResource")
self.assertEqual(resource_class, BookResource)
def test_invalid_name(self):
invalid_name = "invalid.model"
with self.assertRaises(CommandError) as context:
get_resource_class(invalid_name)
self.assertEqual(
str(context.exception),
f"Cannot import '{invalid_name}' as a resource class or model.",
)
| GetResourceClassTest |
python | tensorflow__tensorflow | tensorflow/lite/toco/logging/gen_html_test.py | {
"start": 1095,
"end": 4634
} | class ____(test_util.TensorFlowTestCase):
def test_generate_html(self):
toco_conversion_log_before = _toco_conversion_log_pb2.TocoConversionLog()
toco_conversion_log_after = _toco_conversion_log_pb2.TocoConversionLog()
toco_conversion_log_before.op_list.extend([
"Conv1", "Conv2", "Identity", "Reshape", "Dense", "Dense", "CustomOp",
"AvgPool3D", "Softmax"
])
toco_conversion_log_before.model_size = 9
toco_conversion_log_after.op_list.extend([
"Conv1", "Conv2", "Dense", "Dense", "CustomOp", "AvgPool3D", "Softmax"
])
toco_conversion_log_after.built_in_ops["Conv1"] = 1
toco_conversion_log_after.built_in_ops["Conv2"] = 1
toco_conversion_log_after.built_in_ops["Dense"] = 2
toco_conversion_log_after.built_in_ops["Softmax"] = 1
toco_conversion_log_after.custom_ops["CustomOp"] = 1
toco_conversion_log_after.select_ops["AvgPool3D"] = 1
toco_conversion_log_after.model_size = 7
export_path = os.path.join(self.get_temp_dir(), "generated.html")
html_generator = gen_html.HTMLGenerator(
html_template_path=resource_loader.get_path_to_datafile(
"template.html"),
export_report_path=export_path)
html_generator.generate(toco_conversion_log_before,
toco_conversion_log_after, True,
"digraph {a -> b}", "digraph {a -> b}", "",
"/path/to/flatbuffer")
with _file_io.FileIO(export_path, "r") as f_export, _file_io.FileIO(
resource_loader.get_path_to_datafile("testdata/generated.html"),
"r") as f_expect:
expected = f_expect.read()
exported = f_export.read()
self.assertEqual(exported, expected)
def test_gen_conversion_log_html(self):
# Copies all required data files into a temporary folder for testing.
export_path = self.get_temp_dir()
toco_log_before_path = resource_loader.get_path_to_datafile(
"testdata/toco_log_before.pb")
toco_log_after_path = resource_loader.get_path_to_datafile(
"testdata/toco_log_after.pb")
dot_before = resource_loader.get_path_to_datafile(
"testdata/toco_tf_graph.dot")
dot_after = resource_loader.get_path_to_datafile(
"testdata/toco_tflite_graph.dot")
shutil.copy(toco_log_before_path, export_path)
shutil.copy(toco_log_after_path, export_path)
shutil.copy(dot_before, export_path)
shutil.copy(dot_after, export_path)
# Generate HTML content based on files in the test folder.
gen_html.gen_conversion_log_html(export_path, True, "/path/to/flatbuffer")
result_html = os.path.join(export_path, "toco_conversion_summary.html")
with _file_io.FileIO(result_html, "r") as f_export, _file_io.FileIO(
resource_loader.get_path_to_datafile("testdata/generated.html"),
"r") as f_expect:
expected = f_expect.read()
exported = f_export.read()
self.assertEqual(exported, expected)
def test_get_input_type_from_signature(self):
op_signatures = [
("INPUT:[1,73,73,160]::float::[64,1,1,160]::float::[64]::float::"
"OUTPUT:[1,73,73,64]::float::NAME:Conv::VERSION:1")
]
expect_input_types = [
("shape:[1,73,73,160],type:float,shape:[64,1,1,160],type:float,"
"shape:[64],type:float")
]
for i in range(len(op_signatures)):
self.assertEqual(
gen_html.get_input_type_from_signature(op_signatures[i]),
expect_input_types[i])
if __name__ == "__main__":
test.main()
| GenHtmlTest |
python | hyperopt__hyperopt | hyperopt/anneal.py | {
"start": 817,
"end": 13953
} | class ____(SuggestAlgo):
"""
This simple annealing algorithm begins by sampling from the prior,
but tends over time to sample from points closer and closer to the best
ones observed.
In addition to the value of this algorithm as a baseline optimization
strategy, it is a simple starting point for implementing new algorithms.
# The Annealing Algorithm
The annealing algorithm is to choose one of the previous trial points
as a starting point, and then to sample each hyperparameter from a similar
distribution to the one specified in the prior, but whose density is more
concentrated around the trial point we selected.
This algorithm is a simple variation on random search that leverages
smoothness in the response surface. The annealing rate is not adaptive.
## Choosing a Best Trial
The algorithm formalizes the notion of "one of the best trials" by
sampling a position from a geometric distribution whose mean is the
`avg_best_idx` parameter. The "best trial" is the trial thus selected
from the set of all trials (`self.trials`).
It may happen that in the process of ancestral sampling, we may find that
the best trial at some ancestral point did not use the hyperparameter we
need to draw. In such a case, this algorithm will draw a new "runner up"
best trial, and use that one as if it had been chosen as the best trial.
The set of best trials, and runner-up best trials obtained during the
process of choosing all hyperparameters is kept sorted by the validation
loss, and at each point where the best trial does not define a
required hyperparameter value, we actually go through all the list of
runners-up too, before giving up and adding a new runner-up trial.
## Concentrating Prior Distributions
To sample a hyperparameter X within a search space, we look at
what kind of hyperparameter it is (what kind of distribution it's from)
and the previous successful values of that hyperparameter, and make
a new proposal for that hyperparameter independently of other
hyperparameters (except technically any choice nodes that led us to use
this current hyperparameter in the first place).
For example, if X is a uniform-distributed hyperparameters drawn from
`U(l, h)`, we look at the value `x` of the hyperparameter in the selected
trial, and draw from a new uniform density `U(x - w/2, x + w/2)`, where w
is related to the initial range, and the number of observations we have for
X so far. If W is the initial range, and T is the number of observations
we have, then w = W / (1 + T * shrink_coef). If the resulting range would
extend either below l or above h, we shift it to fit into the original
bounds.
"""
def __init__(self, domain, trials, seed, avg_best_idx=2.0, shrink_coef=0.1):
"""
Parameters
----------
avg_best_idx: float
Mean of geometric distribution over which trial to explore around,
selecting from trials sorted by score (0 is best)
shrink_coef: float
Rate of reduction in the size of sampling neighborhood as more
points have been explored.
"""
SuggestAlgo.__init__(self, domain, trials, seed=seed)
self.avg_best_idx = avg_best_idx
self.shrink_coef = shrink_coef
doc_by_tid = {}
for doc in trials.trials:
# get either this docs own tid or the one that it's from
tid = doc["tid"]
loss = domain.loss(doc["result"], doc["spec"])
# -- associate infinite loss to new/running/failed jobs
loss = float("inf" if loss is None else loss)
doc_by_tid[tid] = (doc, loss)
self.tid_docs_losses = sorted(doc_by_tid.items())
self.tids = np.asarray([t for (t, (d, l)) in self.tid_docs_losses])
self.losses = np.asarray([l for (t, (d, l)) in self.tid_docs_losses])
self.tid_losses_dct = dict(list(zip(self.tids, self.losses)))
# node_tids: dict from hp label -> trial ids (tids) using that hyperparam
# node_vals: dict from hp label -> values taken by that hyperparam
self.node_tids, self.node_vals = miscs_to_idxs_vals(
[d["misc"] for (tid, (d, l)) in self.tid_docs_losses],
keys=list(domain.params.keys()),
)
self.best_tids = []
def shrinking(self, label):
"""Return fraction of original search width
Parameters
----------
label: string
the name of a hyperparameter
"""
T = len(self.node_vals[label])
return 1 / (1 + T * self.shrink_coef)
def choose_ltv(self, label, size):
"""Returns (loss, tid, val) of best/runner-up trial"""
tids = self.node_tids[label]
vals = self.node_vals[label]
losses = [self.tid_losses_dct[tid] for tid in tids]
if size == 1:
# -- try to return the value corresponding to one of the
# trials that was previously chosen (non-independence
# of hyperparameter values)
# This doesn't really make sense if we're sampling a lot of
# points at a time.
tid_set = set(tids)
for tid in self.best_tids:
if tid in tid_set:
idx = tids.index(tid)
rval = losses[idx], tid, vals[idx]
return rval
# -- choose a new good seed point
good_idx = self.rng.geometric(1 / self.avg_best_idx, size=size) - 1
good_idx = np.clip(good_idx, 0, len(tids) - 1).astype("int32")
picks = np.argsort(losses)[good_idx]
picks_loss = np.asarray(losses)[picks]
picks_tids = np.asarray(tids)[picks]
picks_vals = np.asarray(vals)[picks]
if size == 1:
self.best_tids.append(int(picks_tids[0]))
return picks_loss, picks_tids, picks_vals
def on_node_hyperparameter(self, memo, node, label):
"""
Return a new value for one hyperparameter.
Parameters:
-----------
memo - a partially-filled dictionary of node -> list-of-values
for the nodes in a vectorized representation of the
original search space.
node - an Apply instance in the vectorized search space,
which corresponds to a hyperparameter
label - a string, the name of the hyperparameter
Returns: a list with one value in it: the suggested value for this
hyperparameter
Notes
-----
This function works by delegating to self.hp_HPTYPE functions to
handle each of the kinds of hyperparameters in hyperopt.pyll_utils.
Other search algorithms can implement this function without
delegating based on the hyperparameter type, but it's a pattern
I've used a few times so I show it here.
"""
n_observations = len(self.node_vals[label])
if n_observations > 0:
# -- Pick a previous trial on which to base the new sample
size = memo[node.arg["size"]]
loss, tid, val = self.choose_ltv(label, size=size)
try:
handler = getattr(self, "hp_%s" % node.name)
except AttributeError:
raise NotImplementedError("Annealing", node.name)
return handler(memo, node, label, tid, val)
else:
# -- Draw the new sample from the prior
return ExprEvaluator.on_node(self, memo, node)
def hp_uniform(
self,
memo,
node,
label,
tid,
val,
log_scale=False,
pass_q=False,
uniform_like=uniform,
):
"""
Return a new value for a uniform hyperparameter.
Parameters:
-----------
memo - (see on_node_hyperparameter)
node - (see on_node_hyperparameter)
label - (see on_node_hyperparameter)
tid - trial-identifier of the model trial on which to base a new sample
val - the value of this hyperparameter on the model trial
Returns: a list with one value in it: the suggested value for this
hyperparameter
"""
midpt = np.log(val) if log_scale else val
high = memo[node.arg["high"]]
low = memo[node.arg["low"]]
width = (high - low) * self.shrinking(label)
half = 0.5 * width
min_midpt = low + half
max_midpt = high - half
clipped_midpt = np.clip(midpt, min_midpt, max_midpt)
if pass_q:
return uniform_like(
low=clipped_midpt - half,
high=clipped_midpt + half,
rng=self.rng,
q=memo[node.arg["q"]],
size=memo[node.arg["size"]],
)
else:
return uniform_like(
low=clipped_midpt - half,
high=clipped_midpt + half,
rng=self.rng,
size=memo[node.arg["size"]],
)
def hp_quniform(self, *args, **kwargs):
return self.hp_uniform(pass_q=True, uniform_like=quniform, *args, **kwargs)
def hp_loguniform(self, *args, **kwargs):
return self.hp_uniform(
log_scale=True, pass_q=False, uniform_like=loguniform, *args, **kwargs
)
def hp_qloguniform(self, *args, **kwargs):
return self.hp_uniform(
log_scale=True, pass_q=True, uniform_like=qloguniform, *args, **kwargs
)
def hp_randint(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
low = memo[node.arg["low"]]
high = memo.get(node.arg["high"])
# if high is None, the domain is [0, low), else it is [low, high)
domain_size = low if high is None else high - low
offset = 0 if high is None else low
val1 = np.atleast_1d(val)
if val1.size:
counts = bincount(val1, offset=offset, minlength=domain_size) / val1.size
else:
counts = np.zeros(domain_size)
prior = self.shrinking(label)
p = (1 - prior) * counts + prior / domain_size
rval = categorical(p=p, rng=self.rng, size=memo[node.arg["size"]]) + offset
return rval
def hp_categorical(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
size = memo[node.arg["size"]]
if size == 0:
return []
val1 = np.atleast_1d(val)
p = p_orig = np.asarray(memo[node.arg["p"]])
if p.ndim == 2:
if len(p) not in (1, len(val1)):
print(node)
print(p)
print(np.asarray(p).shape)
assert len(p) in (1, len(val1))
else:
assert p.ndim == 1
p = p[np.newaxis, :]
if val1.size:
counts = np.bincount(val1, minlength=p.size) / val1.size
prior = self.shrinking(label)
else:
counts = np.zeros(p.size)
prior = 1.0
new_p = (1 - prior) * counts + prior * p
assert new_p.ndim == 2
rval = categorical(p=new_p, rng=self.rng, size=size)
if p_orig.ndim == 1:
assert len(rval) == 1
return rval[0]
return rval
def hp_normal(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
return normal(
mu=val,
sigma=memo[node.arg["sigma"]] * self.shrinking(label),
rng=self.rng,
size=memo[node.arg["size"]],
)
def hp_lognormal(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
return lognormal(
mu=np.log(val),
sigma=memo[node.arg["sigma"]] * self.shrinking(label),
rng=self.rng,
size=memo[node.arg["size"]],
)
def hp_qlognormal(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
return qlognormal(
# -- prevent log(0) without messing up algo
mu=np.log(1e-16 + val),
sigma=memo[node.arg["sigma"]] * self.shrinking(label),
q=memo[node.arg["q"]],
rng=self.rng,
size=memo[node.arg["size"]],
)
def hp_qnormal(self, memo, node, label, tid, val):
"""
Parameters: See `hp_uniform`
"""
return qnormal(
mu=val,
sigma=memo[node.arg["sigma"]] * self.shrinking(label),
q=memo[node.arg["q"]],
rng=self.rng,
size=memo[node.arg["size"]],
)
def suggest(new_ids, domain, trials, seed, *args, **kwargs):
(new_id,) = new_ids
return AnnealingAlgo(domain, trials, seed, *args, **kwargs)(new_id)
def suggest_batch(new_ids, domain, trials, seed, *args, **kwargs):
return AnnealingAlgo(domain, trials, seed, *args, **kwargs).batch(new_ids)
# -- flake-8 abhors blank line EOF
| AnnealingAlgo |
python | django__django | tests/serializers/models/base.py | {
"start": 4041,
"end": 4210
} | class ____(models.Model):
field1 = models.CharField(max_length=10)
field2 = models.CharField(max_length=10)
field3 = models.CharField(max_length=10)
| ComplexModel |
python | kubernetes-client__python | kubernetes/client/models/v1_persistent_volume_status.py | {
"start": 383,
"end": 7067
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'last_phase_transition_time': 'datetime',
'message': 'str',
'phase': 'str',
'reason': 'str'
}
attribute_map = {
'last_phase_transition_time': 'lastPhaseTransitionTime',
'message': 'message',
'phase': 'phase',
'reason': 'reason'
}
def __init__(self, last_phase_transition_time=None, message=None, phase=None, reason=None, local_vars_configuration=None): # noqa: E501
"""V1PersistentVolumeStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._last_phase_transition_time = None
self._message = None
self._phase = None
self._reason = None
self.discriminator = None
if last_phase_transition_time is not None:
self.last_phase_transition_time = last_phase_transition_time
if message is not None:
self.message = message
if phase is not None:
self.phase = phase
if reason is not None:
self.reason = reason
@property
def last_phase_transition_time(self):
"""Gets the last_phase_transition_time of this V1PersistentVolumeStatus. # noqa: E501
lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. # noqa: E501
:return: The last_phase_transition_time of this V1PersistentVolumeStatus. # noqa: E501
:rtype: datetime
"""
return self._last_phase_transition_time
@last_phase_transition_time.setter
def last_phase_transition_time(self, last_phase_transition_time):
"""Sets the last_phase_transition_time of this V1PersistentVolumeStatus.
lastPhaseTransitionTime is the time the phase transitioned from one to another and automatically resets to current time everytime a volume phase transitions. # noqa: E501
:param last_phase_transition_time: The last_phase_transition_time of this V1PersistentVolumeStatus. # noqa: E501
:type: datetime
"""
self._last_phase_transition_time = last_phase_transition_time
@property
def message(self):
"""Gets the message of this V1PersistentVolumeStatus. # noqa: E501
message is a human-readable message indicating details about why the volume is in this state. # noqa: E501
:return: The message of this V1PersistentVolumeStatus. # noqa: E501
:rtype: str
"""
return self._message
@message.setter
def message(self, message):
"""Sets the message of this V1PersistentVolumeStatus.
message is a human-readable message indicating details about why the volume is in this state. # noqa: E501
:param message: The message of this V1PersistentVolumeStatus. # noqa: E501
:type: str
"""
self._message = message
@property
def phase(self):
"""Gets the phase of this V1PersistentVolumeStatus. # noqa: E501
phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase # noqa: E501
:return: The phase of this V1PersistentVolumeStatus. # noqa: E501
:rtype: str
"""
return self._phase
@phase.setter
def phase(self, phase):
"""Sets the phase of this V1PersistentVolumeStatus.
phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase # noqa: E501
:param phase: The phase of this V1PersistentVolumeStatus. # noqa: E501
:type: str
"""
self._phase = phase
@property
def reason(self):
"""Gets the reason of this V1PersistentVolumeStatus. # noqa: E501
reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. # noqa: E501
:return: The reason of this V1PersistentVolumeStatus. # noqa: E501
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason):
"""Sets the reason of this V1PersistentVolumeStatus.
reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI. # noqa: E501
:param reason: The reason of this V1PersistentVolumeStatus. # noqa: E501
:type: str
"""
self._reason = reason
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolumeStatus):
return True
return self.to_dict() != other.to_dict()
| V1PersistentVolumeStatus |
python | django__django | tests/admin_views/admin.py | {
"start": 29609,
"end": 29689
} | class ____(admin.TabularInline):
model = InlineReference
| InlineReferenceInline |
python | PyCQA__bandit | bandit/core/extension_loader.py | {
"start": 167,
"end": 4039
} | class ____:
# These IDs are for bandit built in tests
builtin = ["B001"] # Built in blacklist test
def __init__(
self,
formatters_namespace="bandit.formatters",
plugins_namespace="bandit.plugins",
blacklists_namespace="bandit.blacklists",
):
# Cache the extension managers, loaded extensions, and extension names
self.load_formatters(formatters_namespace)
self.load_plugins(plugins_namespace)
self.load_blacklists(blacklists_namespace)
def load_formatters(self, formatters_namespace):
self.formatters_mgr = extension.ExtensionManager(
namespace=formatters_namespace,
invoke_on_load=False,
verify_requirements=False,
)
self.formatters = list(self.formatters_mgr)
self.formatter_names = self.formatters_mgr.names()
def load_plugins(self, plugins_namespace):
self.plugins_mgr = extension.ExtensionManager(
namespace=plugins_namespace,
invoke_on_load=False,
verify_requirements=False,
)
def test_has_id(plugin):
if not hasattr(plugin.plugin, "_test_id"):
# logger not setup yet, so using print
print(
f"WARNING: Test '{plugin.name}' has no ID, skipping.",
file=sys.stderr,
)
return False
return True
self.plugins = list(filter(test_has_id, list(self.plugins_mgr)))
self.plugin_names = [plugin.name for plugin in self.plugins]
self.plugins_by_id = {p.plugin._test_id: p for p in self.plugins}
self.plugins_by_name = {p.name: p for p in self.plugins}
def get_test_id(self, test_name):
if test_name in self.plugins_by_name:
return self.plugins_by_name[test_name].plugin._test_id
if test_name in self.blacklist_by_name:
return self.blacklist_by_name[test_name]["id"]
return None
def load_blacklists(self, blacklist_namespace):
self.blacklists_mgr = extension.ExtensionManager(
namespace=blacklist_namespace,
invoke_on_load=False,
verify_requirements=False,
)
self.blacklist = {}
blacklist = list(self.blacklists_mgr)
for item in blacklist:
for key, val in item.plugin().items():
utils.check_ast_node(key)
self.blacklist.setdefault(key, []).extend(val)
self.blacklist_by_id = {}
self.blacklist_by_name = {}
for val in self.blacklist.values():
for b in val:
self.blacklist_by_id[b["id"]] = b
self.blacklist_by_name[b["name"]] = b
def validate_profile(self, profile):
"""Validate that everything in the configured profiles looks good."""
for inc in profile["include"]:
if not self.check_id(inc):
LOG.warning(f"Unknown test found in profile: {inc}")
for exc in profile["exclude"]:
if not self.check_id(exc):
LOG.warning(f"Unknown test found in profile: {exc}")
union = set(profile["include"]) & set(profile["exclude"])
if len(union) > 0:
raise ValueError(
f"Non-exclusive include/exclude test sets: {union}"
)
def check_id(self, test):
return (
test in self.plugins_by_id
or test in self.blacklist_by_id
or test in self.builtin
)
# Using entry-points and pkg_resources *can* be expensive. So let's load these
# once, store them on the object, and have a module global object for
# accessing them. After the first time this module is imported, it should save
# this attribute on the module and not have to reload the entry-points.
MANAGER = Manager()
| Manager |
python | doocs__leetcode | solution/1500-1599/1574.Shortest Subarray to be Removed to Make Array Sorted/Solution2.py | {
"start": 0,
"end": 509
} | class ____:
def findLengthOfShortestSubarray(self, arr: List[int]) -> int:
n = len(arr)
i, j = 0, n - 1
while i + 1 < n and arr[i] <= arr[i + 1]:
i += 1
while j - 1 >= 0 and arr[j - 1] <= arr[j]:
j -= 1
if i >= j:
return 0
ans = min(n - i - 1, j)
r = j
for l in range(i + 1):
while r < n and arr[r] < arr[l]:
r += 1
ans = min(ans, r - l - 1)
return ans
| Solution |
python | tiangolo__fastapi | fastapi/openapi/models.py | {
"start": 1876,
"end": 2040
} | class ____(BaseModel):
if PYDANTIC_V2:
model_config = {"extra": "allow"}
else:
class Config:
extra = "allow"
| BaseModelWithConfig |
python | bokeh__bokeh | tests/unit/bokeh/test_client_server.py | {
"start": 2089,
"end": 2152
} | class ____(Model):
bar = Int(1)
| AnotherModelInTestClientServer |
python | tensorflow__tensorflow | tensorflow/python/keras/metrics.py | {
"start": 115571,
"end": 118128
} | class ____(MeanMetricWrapper):
"""Computes the crossentropy metric between the labels and predictions.
Use this crossentropy metric when there are two or more label classes.
We expect labels to be provided as integers. If you want to provide labels
using `one-hot` representation, please use `CategoricalCrossentropy` metric.
There should be `# classes` floating point values per feature for `y_pred`
and a single floating point value per feature for `y_true`.
In the snippet below, there is a single floating point value per example for
`y_true` and `# classes` floating pointing values per example for `y_pred`.
The shape of `y_true` is `[batch_size]` and the shape of `y_pred` is
`[batch_size, num_classes]`.
Args:
name: (Optional) string name of the metric instance.
dtype: (Optional) data type of the metric result.
from_logits: (Optional) Whether output is expected to be a logits tensor.
By default, we consider that output encodes a probability distribution.
axis: (Optional) Defaults to -1. The dimension along which the metric is
computed.
Standalone usage:
>>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
>>> # logits = log(y_pred)
>>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
>>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
>>> # xent = -sum(y * log(softmax), 1)
>>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
>>> # [-2.3026, -0.2231, -2.3026]]
>>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
>>> # xent = [0.0513, 2.3026]
>>> # Reduced xent = (0.0513 + 2.3026) / 2
>>> m = tf.keras.metrics.SparseCategoricalCrossentropy()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
>>> m.result().numpy()
1.1769392
>>> m.reset_state()
>>> m.update_state([1, 2],
... [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
... sample_weight=tf.constant([0.3, 0.7]))
>>> m.result().numpy()
1.6271976
Usage with `compile()` API:
```python
model.compile(
optimizer='sgd',
loss='mse',
metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
```
"""
def __init__(self,
name='sparse_categorical_crossentropy',
dtype=None,
from_logits=False,
axis=-1):
super(SparseCategoricalCrossentropy, self).__init__(
sparse_categorical_crossentropy,
name,
dtype=dtype,
from_logits=from_logits,
axis=axis)
| SparseCategoricalCrossentropy |
python | huggingface__transformers | tests/models/sew_d/test_modeling_sew_d.py | {
"start": 11346,
"end": 15609
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (SEWDForCTC, SEWDModel, SEWDForSequenceClassification) if is_torch_available() else ()
pipeline_model_mapping = (
{
"audio-classification": SEWDForSequenceClassification,
"automatic-speech-recognition": SEWDForCTC,
"feature-extraction": SEWDModel,
}
if is_torch_available()
else {}
)
def setUp(self):
self.model_tester = SEWDModelTester(self)
self.config_tester = ConfigTester(self, config_class=SEWDConfig, hidden_size=37)
def test_config(self):
self.config_tester.run_common_tests()
def test_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*config_and_inputs)
def test_ctc_loss_inference(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_loss(*config_and_inputs)
def test_ctc_train(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_ctc_training(*config_and_inputs)
def test_labels_out_of_vocab(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_labels_out_of_vocab(*config_and_inputs)
@unittest.skip(reason="Model has no inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Model has input_values instead of input_ids")
def test_forward_signature(self):
pass
@unittest.skip(reason="Model has no tokens embeddings")
def test_resize_tokens_embeddings(self):
pass
@unittest.skip(reason="Model has no inputs_embeds")
def test_model_get_set_embeddings(self):
pass
def test_retain_grad_hidden_states_attentions(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
# set layer drop to 0
model.config.layerdrop = 0.0
input_values = inputs_dict["input_values"]
input_lengths = torch.tensor(
[input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device
)
output_lengths = model._get_feat_extract_output_lengths(input_lengths)
labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size)
inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"])
inputs_dict["labels"] = labels
outputs = model(**inputs_dict)
output = outputs[0]
# Encoder-/Decoder-only models
hidden_states = outputs.hidden_states[0]
attentions = outputs.attentions[0]
hidden_states.retain_grad()
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(hidden_states.grad)
self.assertIsNotNone(attentions.grad)
# overwrite from test_modeling_common
def _mock_init_weights(self, module):
if hasattr(module, "weight") and module.weight is not None:
module.weight.fill_(3)
if hasattr(module, "weight_g") and module.weight_g is not None:
module.weight_g.data.fill_(3)
if hasattr(module, "weight_v") and module.weight_v is not None:
module.weight_v.data.fill_(3)
if hasattr(module, "bias") and module.bias is not None:
module.bias.fill_(3)
if hasattr(module, "masked_spec_embed") and module.masked_spec_embed is not None:
module.masked_spec_embed.data.fill_(3)
@unittest.skip(reason="Feed forward chunking is not implemented")
def test_feed_forward_chunking(self):
pass
@slow
def test_model_from_pretrained(self):
model = SEWDModel.from_pretrained("asapp/sew-d-tiny-100k")
self.assertIsNotNone(model)
@require_torch
| SEWDModelTest |
python | numpy__numpy | tools/swig/test/testFlat.py | {
"start": 2622,
"end": 2883
} | class ____(FlatTestCase):
def __init__(self, methodName="runTest"):
FlatTestCase.__init__(self, methodName)
self.typeStr = "schar"
self.typeCode = "b"
######################################################################
| scharTestCase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/dataclassTransform4.py | {
"start": 1216,
"end": 1515
} | class ____:
id: int = field1(resolver=lambda: 0)
name: str = field1(default="Voldemort")
CustomerModel1()
CustomerModel1(name="hi")
# This should generate an error because "id" is not
# supposed to be part of the init function.
CustomerModel1(id=1, name="hi")
@create_model()
| CustomerModel1 |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 44341,
"end": 46599
} | class ____:
grid: Sequence[int] = ()
grid_names: Sequence[str] = ()
cluster: Sequence[int] = ()
cluster_names: Sequence[str] = ()
# Those are NOT CUDA threads. On Hopper they correspond to warpgroups.
num_threads: int | None = None
thread_name: str | None = None
kernel_name: str | None = None
def __post_init__(self):
if len(self.cluster) > 3:
raise ValueError(f"cluster= must be at most 3D, got {self}.")
if len(self.grid_names) != len(self.grid):
raise ValueError(
f"grid_names must have the same length as grid, got {self}."
)
if len(self.cluster_names) != len(self.cluster):
raise ValueError(
f"cluster_names must have the same length as cluster, got {self}."
)
if (self.thread_name is None) != (self.num_threads is None):
raise ValueError(
"num_threads and thread_name must be either both set or both None,"
f" got {self}"
)
max_mosaic_threads = 2048 // 128
if self.num_threads is not None and self.num_threads > max_mosaic_threads:
raise ValueError(
"Requested too many CUDA threads per block. Each Mosaic thread"
f" corresponds to 128 CUDA threads. At most {max_mosaic_threads}"
f" are supported, got {self}"
)
object.__setattr__(self, "grid", tuple(self.grid))
object.__setattr__(self, "grid_names", tuple(self.grid_names))
object.__setattr__(self, "cluster", tuple(self.cluster))
object.__setattr__(self, "cluster_names", tuple(self.cluster_names))
@property
def backend(self) -> str:
return "mosaic_gpu"
@property
def shape(self) -> collections.OrderedDict[object, int]:
pairs: Iterable[tuple[object, int]]
if self.num_threads is not None:
pairs = zip(
(*self.grid_names, *self.cluster_names, self.thread_name),
(*self.grid, *self.cluster, self.num_threads),
)
else:
pairs = zip(
(*self.grid_names, *self.cluster_names),
(*self.grid, *self.cluster),
)
return collections.OrderedDict(pairs)
def discharges_effect(self, effect: jax_core.Effect):
return effect is _wgmma_pipeline_effect or effect is _memory_effect
@dataclasses.dataclass(frozen=True, kw_only=True)
| Mesh |
python | doocs__leetcode | solution/2400-2499/2427.Number of Common Factors/Solution2.py | {
"start": 0,
"end": 261
} | class ____:
def commonFactors(self, a: int, b: int) -> int:
g = gcd(a, b)
ans, x = 0, 1
while x * x <= g:
if g % x == 0:
ans += 1
ans += x * x < g
x += 1
return ans
| Solution |
python | keras-team__keras | keras/src/layers/rnn/lstm_test.py | {
"start": 169,
"end": 10089
} | class ____(testing.TestCase):
@pytest.mark.requires_trainable_backend
def test_basics(self):
self.run_layer_test(
layers.LSTM,
init_kwargs={"units": 3, "dropout": 0.5},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 3),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.LSTM,
init_kwargs={"units": 3, "dropout": 0.5, "recurrent_dropout": 0.5},
input_shape=(3, 2, 4),
call_kwargs={"training": True},
expected_output_shape=(3, 3),
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
self.run_layer_test(
layers.LSTM,
init_kwargs={
"units": 3,
"return_sequences": True,
"bias_regularizer": "l1",
"kernel_regularizer": "l2",
"recurrent_regularizer": "l2",
},
input_shape=(3, 2, 4),
expected_output_shape=(3, 2, 3),
expected_num_losses=3,
expected_num_trainable_weights=3,
expected_num_non_trainable_weights=0,
supports_masking=True,
)
@parameterized.parameters([1, 2])
def test_correctness(self, implementation):
sequence = np.arange(72).reshape((3, 6, 4)).astype("float32")
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.6288687, 0.6288687, 0.6288687],
[0.86899155, 0.86899155, 0.86899155],
[0.9460773, 0.9460773, 0.9460773],
]
),
output,
)
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.35622165, 0.35622165, 0.35622165],
[0.74789524, 0.74789524, 0.74789524],
[0.8872726, 0.8872726, 0.8872726],
]
),
output,
)
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unroll=True,
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.6288687, 0.6288687, 0.6288687],
[0.86899155, 0.86899155, 0.86899155],
[0.9460773, 0.9460773, 0.9460773],
]
),
output,
)
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unit_forget_bias=False,
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.57019705, 0.57019705, 0.57019705],
[0.8661914, 0.8661914, 0.8661914],
[0.9459622, 0.9459622, 0.9459622],
]
),
output,
)
layer = layers.LSTM(
3,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
use_bias=False,
implementation=implementation,
)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.54986924, 0.54986924, 0.54986924],
[0.86226785, 0.86226785, 0.86226785],
[0.9443936, 0.9443936, 0.9443936],
]
),
output,
)
def test_statefulness(self):
sequence = np.arange(24).reshape((2, 3, 4)).astype("float32")
layer = layers.LSTM(
4,
stateful=True,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.3124785, 0.3124785, 0.3124785, 0.3124785],
[0.6863672, 0.6863672, 0.6863672, 0.6863672],
]
),
output,
)
layer.reset_state()
layer(sequence)
output = layer(sequence)
self.assertAllClose(
np.array(
[
[0.3124785, 0.3124785, 0.3124785, 0.3124785],
[0.6863672, 0.6863672, 0.6863672, 0.6863672],
]
),
output,
)
def test_pass_initial_state(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
initial_state = [
np.arange(4).reshape((2, 2)).astype("float32"),
np.arange(4).reshape((2, 2)).astype("float32"),
]
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array([[0.20574439, 0.3558822], [0.64930826, 0.66276]]),
output,
)
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence, initial_state=initial_state)
self.assertAllClose(
np.array([[0.13281618, 0.2790356], [0.5839337, 0.5992567]]),
output,
)
def test_masking(self):
sequence = np.arange(24).reshape((2, 4, 3)).astype("float32")
mask = np.array([[True, True, False, True], [True, False, False, True]])
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
unroll=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array([[0.1524914, 0.1524914], [0.35969394, 0.35969394]]),
output,
)
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.0158891, 0.0158891],
[0.05552047, 0.05552047],
[0.05552047, 0.05552047],
[0.1524914, 0.1524914],
],
),
output[0],
)
self.assertAllClose(
np.array(
[
[0.14185596, 0.14185596],
[0.14185596, 0.14185596],
[0.14185596, 0.14185596],
[0.35969394, 0.35969394],
],
),
output[1],
)
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
return_sequences=True,
zero_output_for_mask=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array(
[
[0.0158891, 0.0158891],
[0.05552047, 0.05552047],
[0.0, 0.0],
[0.1524914, 0.1524914],
],
),
output[0],
)
self.assertAllClose(
np.array(
[
[0.14185596, 0.14185596],
[0.0, 0.0],
[0.0, 0.0],
[0.35969394, 0.35969394],
],
),
output[1],
)
layer = layers.LSTM(
2,
kernel_initializer=initializers.Constant(0.01),
recurrent_initializer=initializers.Constant(0.02),
bias_initializer=initializers.Constant(0.03),
go_backwards=True,
)
output = layer(sequence, mask=mask)
self.assertAllClose(
np.array([[0.10056866, 0.10056866], [0.31006062, 0.31006062]]),
output,
)
| LSTMTest |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/hooks/test_athena_sql.py | {
"start": 1361,
"end": 6125
} | class ____:
def setup_method(self):
conn = Connection(
conn_type="athena",
schema=SCHEMA_NAME,
extra={"work_group": WORK_GROUP, "region_name": REGION_NAME},
)
self.conn_athena = AwsConnectionWrapper(conn)
self.db_hook = AthenaSQLHook()
self.db_hook.get_connection = mock.Mock()
self.db_hook.get_connection.return_value = conn
@mock.patch("airflow.providers.amazon.aws.hooks.athena_sql.AthenaSQLHook.get_credentials")
def test_get_uri(self, mock_get_credentials):
mock_get_credentials.return_value = mock.Mock(
access_key=AWS_ACCESS_KEY_ID, secret_key=AWS_SECRET_ACCESS_KEY, token=AWS_SESSION_TOKEN
)
expected_athena_uri = "awsathena+rest://aws_access_key_id:aws_secret_access_key@athena.us-east-1.amazonaws.com:443/athena_sql_schema?aws_session_token=aws_session_token®ion_name=us-east-1&work_group=test-work-group"
athena_uri = self.db_hook.get_uri()
mock_get_credentials.assert_called_once_with(region_name=REGION_NAME)
assert athena_uri == expected_athena_uri
@mock.patch("airflow.providers.amazon.aws.hooks.athena_sql.AthenaSQLHook._get_conn_params")
def test_get_uri_change_driver(self, mock_get_conn_params):
mock_get_conn_params.return_value = dict(
driver="arrow", schema_name=SCHEMA_NAME, region_name=REGION_NAME, aws_domain="amazonaws.com"
)
athena_uri = self.db_hook.get_uri()
assert athena_uri.startswith("awsathena+arrow://")
@mock.patch("airflow.providers.amazon.aws.hooks.athena_sql.pyathena.connect")
@mock.patch("airflow.providers.amazon.aws.hooks.athena_sql.AthenaSQLHook.get_session")
def test_get_conn(self, mock_get_session, mock_connect):
self.db_hook.get_conn()
mock_get_session.assert_called_once_with(region_name=REGION_NAME)
mock_connect.assert_called_once_with(
schema_name=SCHEMA_NAME,
region_name=REGION_NAME,
session=mock_get_session.return_value,
work_group=WORK_GROUP,
)
@mock.patch("airflow.providers.amazon.aws.hooks.athena_sql.pyathena.connect")
@mock.patch("airflow.providers.amazon.aws.hooks.athena_sql.AthenaSQLHook.get_session")
def test_get_conn_with_aws_conn(self, mock_get_session, mock_connect):
self.db_hook.get_conn()
mock_get_session.assert_called_once_with(region_name=REGION_NAME)
mock_connect.assert_called_once_with(
schema_name=SCHEMA_NAME,
region_name=REGION_NAME,
session=mock_get_session.return_value,
work_group=WORK_GROUP,
)
@pytest.mark.parametrize(
("conn_params", "conn_extra", "expected_call_args"),
[
(
{"schema": "athena_sql_schema1"},
{"region_name": "us-east-2"},
{"region_name": "us-east-2", "schema_name": "athena_sql_schema1", "session": mock.ANY},
),
(
{"schema": "athena_sql_schema2"},
{"work_group": "test-work-group", "region_name": "us-east-2"},
{
"region_name": "us-east-2",
"schema_name": "athena_sql_schema2",
"work_group": "test-work-group",
"session": mock.ANY,
},
),
(
{"schema": "athena_sql_schema3"},
{"s3_staging_dir": "s3://test-bucket/", "region_name": "us-east-3"},
{
"region_name": "us-east-3",
"schema_name": "athena_sql_schema3",
"s3_staging_dir": "s3://test-bucket/",
"session": mock.ANY,
},
),
],
)
@mock.patch("airflow.providers.amazon.aws.hooks.athena_sql.pyathena.connect")
def test_get_conn_passing_args(self, mock_connect, conn_params, conn_extra, expected_call_args):
with mock.patch(
"airflow.providers.amazon.aws.hooks.athena_sql.AthenaSQLHook.conn",
AwsConnectionWrapper(Connection(conn_type="athena", extra=conn_extra, **conn_params)),
):
self.db_hook.get_conn()
mock_connect.assert_called_once_with(**expected_call_args)
def test_conn_id_default_setter(self):
assert self.db_hook.athena_conn_id == "athena_default"
assert self.db_hook.aws_conn_id == "aws_default"
def test_conn_id_override_setter(self):
hook = AthenaSQLHook(athena_conn_id=AWS_ATHENA_CONN_ID, aws_conn_id=AWS_CONN_ID)
assert hook.athena_conn_id == AWS_ATHENA_CONN_ID
assert hook.aws_conn_id == AWS_CONN_ID
| TestAthenaSQLHookConn |
python | pola-rs__polars | py-polars/src/polars/lazyframe/group_by.py | {
"start": 658,
"end": 24023
} | class ____:
"""
Utility class for performing a group by operation over a lazy DataFrame.
Generated by calling `df.lazy().group_by(...)`.
"""
def __init__(self, lgb: PyLazyGroupBy) -> None:
self.lgb = lgb
def having(self, *predicates: IntoExpr | Iterable[IntoExpr]) -> LazyGroupBy:
"""
Filter groups with a list of predicates after aggregation.
Using this method is equivalent to adding the predicates to the aggregation and
filtering afterwards.
This method can be chained and all conditions will be combined using `&`.
Parameters
----------
*predicates
Expressions that evaluate to a boolean value for each group. Typically, this
requires the use of an aggregation function. Multiple predicates are
combined using `&`.
Examples
--------
Only keep groups that contain more than one element.
>>> ldf = pl.DataFrame(
... {
... "a": ["a", "b", "a", "b", "c"],
... }
... ).lazy()
>>> ldf.group_by("a").having(
... pl.len() > 1
... ).agg().collect() # doctest: +IGNORE_RESULT
shape: (2, 1)
┌─────┐
│ a │
│ --- │
│ str │
╞═════╡
│ b │
│ a │
└─────┘
"""
pyexprs = parse_into_list_of_expressions(*predicates)
self.lgb = self.lgb.having(pyexprs)
return self
def agg(
self,
*aggs: IntoExpr | Iterable[IntoExpr],
**named_aggs: IntoExpr,
) -> LazyFrame:
"""
Compute aggregations for each group of a group by operation.
Parameters
----------
*aggs
Aggregations to compute for each group of the group by operation,
specified as positional arguments.
Accepts expression input. Strings are parsed as column names.
**named_aggs
Additional aggregations, specified as keyword arguments.
The resulting columns will be renamed to the keyword used.
Examples
--------
Compute the aggregation of the columns for each group.
>>> ldf = pl.DataFrame(
... {
... "a": ["a", "b", "a", "b", "c"],
... "b": [1, 2, 1, 3, 3],
... "c": [5, 4, 3, 2, 1],
... }
... ).lazy()
>>> ldf.group_by("a").agg(
... [pl.col("b"), pl.col("c")]
... ).collect() # doctest: +IGNORE_RESULT
shape: (3, 3)
┌─────┬───────────┬───────────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ list[i64] ┆ list[i64] │
╞═════╪═══════════╪═══════════╡
│ a ┆ [1, 1] ┆ [5, 3] │
│ b ┆ [2, 3] ┆ [4, 2] │
│ c ┆ [3] ┆ [1] │
└─────┴───────────┴───────────┘
Compute the sum of a column for each group.
>>> ldf.group_by("a").agg(
... pl.col("b").sum()
... ).collect() # doctest: +IGNORE_RESULT
shape: (3, 2)
┌─────┬─────┐
│ a ┆ b │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════╪═════╡
│ a ┆ 2 │
│ b ┆ 5 │
│ c ┆ 3 │
└─────┴─────┘
Compute multiple aggregates at once by passing a list of expressions.
>>> ldf.group_by("a").agg(
... [pl.sum("b"), pl.mean("c")]
... ).collect() # doctest: +IGNORE_RESULT
shape: (3, 3)
┌─────┬─────┬─────┐
│ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 │
╞═════╪═════╪═════╡
│ c ┆ 3 ┆ 1.0 │
│ a ┆ 2 ┆ 4.0 │
│ b ┆ 5 ┆ 3.0 │
└─────┴─────┴─────┘
Or use positional arguments to compute multiple aggregations in the same way.
>>> ldf.group_by("a").agg(
... pl.sum("b").name.suffix("_sum"),
... (pl.col("c") ** 2).mean().name.suffix("_mean_squared"),
... ).collect() # doctest: +IGNORE_RESULT
shape: (3, 3)
┌─────┬───────┬────────────────┐
│ a ┆ b_sum ┆ c_mean_squared │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 │
╞═════╪═══════╪════════════════╡
│ a ┆ 2 ┆ 17.0 │
│ c ┆ 3 ┆ 1.0 │
│ b ┆ 5 ┆ 10.0 │
└─────┴───────┴────────────────┘
Use keyword arguments to easily name your expression inputs.
>>> ldf.group_by("a").agg(
... b_sum=pl.sum("b"),
... c_mean_squared=(pl.col("c") ** 2).mean(),
... ).collect() # doctest: +IGNORE_RESULT
shape: (3, 3)
┌─────┬───────┬────────────────┐
│ a ┆ b_sum ┆ c_mean_squared │
│ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 │
╞═════╪═══════╪════════════════╡
│ a ┆ 2 ┆ 17.0 │
│ c ┆ 3 ┆ 1.0 │
│ b ┆ 5 ┆ 10.0 │
└─────┴───────┴────────────────┘
"""
if aggs and isinstance(aggs[0], dict):
msg = (
"specifying aggregations as a dictionary is not supported"
"\n\nTry unpacking the dictionary to take advantage of the keyword syntax"
" of the `agg` method."
)
raise TypeError(msg)
pyexprs = parse_into_list_of_expressions(*aggs, **named_aggs)
return wrap_ldf(self.lgb.agg(pyexprs))
def map_groups(
self,
function: Callable[[DataFrame], DataFrame],
schema: SchemaDict | None,
) -> LazyFrame:
"""
Apply a custom/user-defined function (UDF) over the groups as a new DataFrame.
.. warning::
This method is much slower than the native expressions API.
Only use it if you cannot implement your logic otherwise.
Using this is considered an anti-pattern as it will be very slow because:
- it forces the engine to materialize the whole `DataFrames` for the groups.
- it is not parallelized
- it blocks optimizations as the passed python function is opaque to the
optimizer
The idiomatic way to apply custom functions over multiple columns is using:
`pl.struct([my_columns]).apply(lambda struct_series: ..)`
Parameters
----------
function
Function to apply over each group of the `LazyFrame`.
schema
Schema of the output function. This has to be known statically. If the
given schema is incorrect, this is a bug in the caller's query and may
lead to errors. If set to None, polars assumes the schema is unchanged.
Examples
--------
For each color group sample two rows:
>>> df = pl.DataFrame(
... {
... "id": [0, 1, 2, 3, 4],
... "color": ["red", "green", "green", "red", "red"],
... "shape": ["square", "triangle", "square", "triangle", "square"],
... }
... )
>>> (
... df.lazy()
... .group_by("color")
... .map_groups(lambda group_df: group_df.sample(2), schema=None)
... .collect()
... ) # doctest: +IGNORE_RESULT
shape: (4, 3)
┌─────┬───────┬──────────┐
│ id ┆ color ┆ shape │
│ --- ┆ --- ┆ --- │
│ i64 ┆ str ┆ str │
╞═════╪═══════╪══════════╡
│ 1 ┆ green ┆ triangle │
│ 2 ┆ green ┆ square │
│ 4 ┆ red ┆ square │
│ 3 ┆ red ┆ triangle │
└─────┴───────┴──────────┘
It is better to implement this with an expression:
>>> df.lazy().filter(
... pl.int_range(pl.len()).shuffle().over("color") < 2
... ).collect() # doctest: +IGNORE_RESULT
"""
return wrap_ldf(
self.lgb.map_groups(lambda df: function(wrap_df(df))._df, schema)
)
def head(self, n: int = 5) -> LazyFrame:
"""
Get the first `n` rows of each group.
Parameters
----------
n
Number of rows to return.
Examples
--------
>>> df = pl.DataFrame(
... {
... "letters": ["c", "c", "a", "c", "a", "b"],
... "nrs": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df
shape: (6, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ c ┆ 1 │
│ c ┆ 2 │
│ a ┆ 3 │
│ c ┆ 4 │
│ a ┆ 5 │
│ b ┆ 6 │
└─────────┴─────┘
>>> df.group_by("letters").head(2).sort("letters")
shape: (5, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ a ┆ 3 │
│ a ┆ 5 │
│ b ┆ 6 │
│ c ┆ 1 │
│ c ┆ 2 │
└─────────┴─────┘
"""
return wrap_ldf(self.lgb.head(n))
def tail(self, n: int = 5) -> LazyFrame:
"""
Get the last `n` rows of each group.
Parameters
----------
n
Number of rows to return.
Examples
--------
>>> df = pl.DataFrame(
... {
... "letters": ["c", "c", "a", "c", "a", "b"],
... "nrs": [1, 2, 3, 4, 5, 6],
... }
... )
>>> df
shape: (6, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ c ┆ 1 │
│ c ┆ 2 │
│ a ┆ 3 │
│ c ┆ 4 │
│ a ┆ 5 │
│ b ┆ 6 │
└─────────┴─────┘
>>> df.group_by("letters").tail(2).sort("letters")
shape: (5, 2)
┌─────────┬─────┐
│ letters ┆ nrs │
│ --- ┆ --- │
│ str ┆ i64 │
╞═════════╪═════╡
│ a ┆ 3 │
│ a ┆ 5 │
│ b ┆ 6 │
│ c ┆ 2 │
│ c ┆ 4 │
└─────────┴─────┘
"""
return wrap_ldf(self.lgb.tail(n))
def all(self) -> LazyFrame:
"""
Aggregate the groups into Series.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": ["one", "two", "one", "two"],
... "b": [1, 2, 3, 4],
... }
... ).lazy()
>>> ldf.group_by("a", maintain_order=True).all().collect()
shape: (2, 2)
┌─────┬───────────┐
│ a ┆ b │
│ --- ┆ --- │
│ str ┆ list[i64] │
╞═════╪═══════════╡
│ one ┆ [1, 3] │
│ two ┆ [2, 4] │
└─────┴───────────┘
"""
return self.agg(F.all())
def len(self, name: str | None = None) -> LazyFrame:
"""
Return the number of rows in each group.
Parameters
----------
name
Assign a name to the resulting column; if unset, defaults to "len".
Examples
--------
>>> lf = pl.LazyFrame({"a": ["Apple", "Apple", "Orange"], "b": [1, None, 2]})
>>> lf.group_by("a").len().collect() # doctest: +IGNORE_RESULT
shape: (2, 2)
┌────────┬─────┐
│ a ┆ len │
│ --- ┆ --- │
│ str ┆ u32 │
╞════════╪═════╡
│ Apple ┆ 2 │
│ Orange ┆ 1 │
└────────┴─────┘
>>> lf.group_by("a").len(name="n").collect() # doctest: +IGNORE_RESULT
shape: (2, 2)
┌────────┬─────┐
│ a ┆ n │
│ --- ┆ --- │
│ str ┆ u32 │
╞════════╪═════╡
│ Apple ┆ 2 │
│ Orange ┆ 1 │
└────────┴─────┘
"""
len_expr = F.len()
if name is not None:
len_expr = len_expr.alias(name)
return self.agg(len_expr)
@deprecated("`count` was renamed; use `len` instead")
def count(self) -> LazyFrame:
"""
Return the number of rows in each group.
.. deprecated:: 0.20.5
This method has been renamed to :func:`LazyGroupBy.len`.
Rows containing null values count towards the total.
Examples
--------
>>> lf = pl.LazyFrame(
... {
... "a": ["Apple", "Apple", "Orange"],
... "b": [1, None, 2],
... }
... )
>>> lf.group_by("a").count().collect() # doctest: +SKIP
shape: (2, 2)
┌────────┬───────┐
│ a ┆ count │
│ --- ┆ --- │
│ str ┆ u32 │
╞════════╪═══════╡
│ Apple ┆ 2 │
│ Orange ┆ 1 │
└────────┴───────┘
"""
return self.agg(F.len().alias("count"))
def first(self, *, ignore_nulls: bool = False) -> LazyFrame:
"""
Aggregate the first values in the group.
Parameters
----------
ignore_nulls
Ignore null values (default `False`).
If set to `True`, the first non-null value for each aggregation is returned,
otherwise `None` is returned if no non-null value exists.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [None, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... ).lazy()
>>> ldf.group_by("d", maintain_order=True).first().collect()
shape: (3, 4)
┌────────┬─────┬──────┬───────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪═══════╡
│ Apple ┆ 1 ┆ 0.5 ┆ null │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 4 ┆ 13.0 ┆ false │
└────────┴─────┴──────┴───────┘
>>> ldf.group_by("d", maintain_order=True).first(ignore_nulls=True).collect()
shape: (3, 4)
┌────────┬─────┬──────┬───────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪═══════╡
│ Apple ┆ 1 ┆ 0.5 ┆ true │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 4 ┆ 13.0 ┆ false │
└────────┴─────┴──────┴───────┘
"""
return self.agg(F.all().first(ignore_nulls=ignore_nulls))
def last(self, *, ignore_nulls: bool = False) -> LazyFrame:
"""
Aggregate the last values in the group.
Parameters
----------
ignore_nulls
Ignore null values (default `False`).
If set to `True`, the last non-null value for each aggregation is returned,
otherwise `None` is returned if no non-null value exists.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 14, 13],
... "c": [True, True, False, None, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... ).lazy()
>>> ldf.group_by("d", maintain_order=True).last().collect()
shape: (3, 4)
┌────────┬─────┬──────┬──────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪══════╡
│ Apple ┆ 3 ┆ 10.0 ┆ null │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 5 ┆ 13.0 ┆ true │
└────────┴─────┴──────┴──────┘
>>> ldf.group_by("d", maintain_order=True).last(ignore_nulls=True).collect()
shape: (3, 4)
┌────────┬─────┬──────┬───────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪═══════╡
│ Apple ┆ 3 ┆ 10.0 ┆ false │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 5 ┆ 13.0 ┆ true │
└────────┴─────┴──────┴───────┘
"""
return self.agg(F.all().last(ignore_nulls=ignore_nulls))
def max(self) -> LazyFrame:
"""
Reduce the groups to the maximal value.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [True, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... ).lazy()
>>> ldf.group_by("d", maintain_order=True).max().collect()
shape: (3, 4)
┌────────┬─────┬──────┬──────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪══════╡
│ Apple ┆ 3 ┆ 10.0 ┆ true │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 5 ┆ 14.0 ┆ true │
└────────┴─────┴──────┴──────┘
"""
return self.agg(F.all().max())
def mean(self) -> LazyFrame:
"""
Reduce the groups to the mean values.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [True, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... ).lazy()
>>> ldf.group_by("d", maintain_order=True).mean().collect()
shape: (3, 4)
┌────────┬─────┬──────────┬──────────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 ┆ f64 │
╞════════╪═════╪══════════╪══════════╡
│ Apple ┆ 2.0 ┆ 4.833333 ┆ 0.666667 │
│ Orange ┆ 2.0 ┆ 0.5 ┆ 1.0 │
│ Banana ┆ 4.5 ┆ 13.5 ┆ 0.5 │
└────────┴─────┴──────────┴──────────┘
"""
return self.agg(F.all().mean())
def median(self) -> LazyFrame:
"""
Return the median per group.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "d": ["Apple", "Banana", "Apple", "Apple", "Banana", "Banana"],
... }
... ).lazy()
>>> ldf.group_by("d", maintain_order=True).median().collect()
shape: (2, 3)
┌────────┬─────┬──────┐
│ d ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 │
╞════════╪═════╪══════╡
│ Apple ┆ 2.0 ┆ 4.0 │
│ Banana ┆ 4.0 ┆ 13.0 │
└────────┴─────┴──────┘
"""
return self.agg(F.all().median())
def min(self) -> LazyFrame:
"""
Reduce the groups to the minimal value.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [True, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... ).lazy()
>>> ldf.group_by("d", maintain_order=True).min().collect()
shape: (3, 4)
┌────────┬─────┬──────┬───────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ bool │
╞════════╪═════╪══════╪═══════╡
│ Apple ┆ 1 ┆ 0.5 ┆ false │
│ Orange ┆ 2 ┆ 0.5 ┆ true │
│ Banana ┆ 4 ┆ 13.0 ┆ false │
└────────┴─────┴──────┴───────┘
"""
return self.agg(F.all().min())
def n_unique(self) -> LazyFrame:
"""
Count the unique values per group.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": [1, 2, 1, 3, 4, 5],
... "b": [0.5, 0.5, 0.5, 10, 13, 14],
... "d": ["Apple", "Banana", "Apple", "Apple", "Banana", "Banana"],
... }
... ).lazy()
>>> ldf.group_by("d", maintain_order=True).n_unique().collect()
shape: (2, 3)
┌────────┬─────┬─────┐
│ d ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ str ┆ u32 ┆ u32 │
╞════════╪═════╪═════╡
│ Apple ┆ 2 ┆ 2 │
│ Banana ┆ 3 ┆ 3 │
└────────┴─────┴─────┘
"""
return self.agg(F.all().n_unique())
def quantile(
self, quantile: float, interpolation: QuantileMethod = "nearest"
) -> LazyFrame:
"""
Compute the quantile per group.
Parameters
----------
quantile
Quantile between 0.0 and 1.0.
interpolation : {'nearest', 'higher', 'lower', 'midpoint', 'linear', 'equiprobable'}
Interpolation method.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... ).lazy()
>>> ldf.group_by("d", maintain_order=True).quantile(1).collect()
shape: (3, 3)
┌────────┬─────┬──────┐
│ d ┆ a ┆ b │
│ --- ┆ --- ┆ --- │
│ str ┆ f64 ┆ f64 │
╞════════╪═════╪══════╡
│ Apple ┆ 3.0 ┆ 10.0 │
│ Orange ┆ 2.0 ┆ 0.5 │
│ Banana ┆ 5.0 ┆ 14.0 │
└────────┴─────┴──────┘
""" # noqa: W505
return self.agg(F.all().quantile(quantile, interpolation=interpolation))
def sum(self) -> LazyFrame:
"""
Reduce the groups to the sum.
Examples
--------
>>> ldf = pl.DataFrame(
... {
... "a": [1, 2, 2, 3, 4, 5],
... "b": [0.5, 0.5, 4, 10, 13, 14],
... "c": [True, True, True, False, False, True],
... "d": ["Apple", "Orange", "Apple", "Apple", "Banana", "Banana"],
... }
... ).lazy()
>>> ldf.group_by("d", maintain_order=True).sum().collect()
shape: (3, 4)
┌────────┬─────┬──────┬─────┐
│ d ┆ a ┆ b ┆ c │
│ --- ┆ --- ┆ --- ┆ --- │
│ str ┆ i64 ┆ f64 ┆ u32 │
╞════════╪═════╪══════╪═════╡
│ Apple ┆ 6 ┆ 14.5 ┆ 2 │
│ Orange ┆ 2 ┆ 0.5 ┆ 1 │
│ Banana ┆ 9 ┆ 27.0 ┆ 1 │
└────────┴─────┴──────┴─────┘
"""
return self.agg(F.all().sum())
| LazyGroupBy |
python | realpython__materials | python-serialize/http-payload/pydantic-demo/main.py | {
"start": 123,
"end": 212
} | class ____(BaseModel):
uuid: UUID = Field(alias="id")
created_at: datetime
| Metadata |
python | dagster-io__dagster | python_modules/dagster/dagster/components/core/defs_module.py | {
"start": 3663,
"end": 5377
} | class ____(Component):
def __init__(
self,
components: Sequence[Component],
source_positions: Sequence[SourcePosition],
asset_post_processor_lists: Sequence[Sequence[AssetPostProcessor]],
):
self.components = components
self.source_positions = source_positions
check.invariant(
len(components) == len(asset_post_processor_lists),
"Number of components and post processors must match",
)
self.asset_post_processors_list = asset_post_processor_lists
def build_defs(self, context: ComponentLoadContext) -> Definitions:
component_yaml = check.not_none(find_defs_or_component_yaml(context.path))
defs_list = []
for component_decl, component, source_position, asset_post_processors in zip(
context.component_decl.iterate_child_component_decls(),
self.components,
self.source_positions,
self.asset_post_processors_list,
):
defs_list.append(
post_process_defs(
context.build_defs_at_path(component_decl.path).with_definition_metadata_update(
lambda metadata: _add_defs_yaml_metadata(
component_yaml_path=component_yaml,
load_context=context,
component=component,
source_position=source_position,
metadata=metadata,
)
),
list(asset_post_processors),
)
)
return Definitions.merge(*defs_list)
@record
| CompositeYamlComponent |
python | pandas-dev__pandas | pandas/tests/series/methods/test_quantile.py | {
"start": 233,
"end": 8284
} | class ____:
def test_quantile(self, datetime_series):
q = datetime_series.quantile(0.1)
assert q == np.percentile(datetime_series.dropna(), 10)
q = datetime_series.quantile(0.9)
assert q == np.percentile(datetime_series.dropna(), 90)
# object dtype
q = Series(datetime_series, dtype=object).quantile(0.9)
assert q == np.percentile(datetime_series.dropna(), 90)
# datetime64[ns] dtype
dts = datetime_series.index.to_series()
q = dts.quantile(0.2)
assert q == Timestamp("2000-01-10 19:12:00")
# timedelta64[ns] dtype
tds = dts.diff()
q = tds.quantile(0.25)
assert q == pd.to_timedelta("24:00:00")
# GH7661
result = Series([np.timedelta64("NaT")]).sum()
assert result == pd.Timedelta(0)
msg = "percentiles should all be in the interval \\[0, 1\\]"
for invalid in [-1, 2, [0.5, -1], [0.5, 2]]:
with pytest.raises(ValueError, match=msg):
datetime_series.quantile(invalid)
s = Series(np.random.default_rng(2).standard_normal(100))
percentile_array = [-0.5, 0.25, 1.5]
with pytest.raises(ValueError, match=msg):
s.quantile(percentile_array)
def test_quantile_multi(self, datetime_series, unit):
datetime_series.index = datetime_series.index.as_unit(unit)
qs = [0.1, 0.9]
result = datetime_series.quantile(qs)
expected = Series(
[
np.percentile(datetime_series.dropna(), 10),
np.percentile(datetime_series.dropna(), 90),
],
index=qs,
name=datetime_series.name,
)
tm.assert_series_equal(result, expected)
dts = datetime_series.index.to_series()
dts.name = "xxx"
result = dts.quantile((0.2, 0.2))
expected = Series(
[Timestamp("2000-01-10 19:12:00"), Timestamp("2000-01-10 19:12:00")],
index=[0.2, 0.2],
name="xxx",
dtype=f"M8[{unit}]",
)
tm.assert_series_equal(result, expected)
result = datetime_series.quantile([])
expected = Series(
[], name=datetime_series.name, index=Index([], dtype=float), dtype="float64"
)
tm.assert_series_equal(result, expected)
def test_quantile_interpolation(self, datetime_series):
# see gh-10174
# interpolation = linear (default case)
q = datetime_series.quantile(0.1, interpolation="linear")
assert q == np.percentile(datetime_series.dropna(), 10)
q1 = datetime_series.quantile(0.1)
assert q1 == np.percentile(datetime_series.dropna(), 10)
# test with and without interpolation keyword
assert q == q1
def test_quantile_interpolation_dtype(self):
# GH #10174
# interpolation = linear (default case)
q = Series([1, 3, 4]).quantile(0.5, interpolation="lower")
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
q = Series([1, 3, 4]).quantile(0.5, interpolation="higher")
assert q == np.percentile(np.array([1, 3, 4]), 50)
assert is_integer(q)
def test_quantile_nan(self):
# GH 13098
ser = Series([1, 2, 3, 4, np.nan])
result = ser.quantile(0.5)
expected = 2.5
assert result == expected
# all nan/empty
s1 = Series([], dtype=object)
cases = [s1, Series([np.nan, np.nan])]
for ser in cases:
res = ser.quantile(0.5)
assert np.isnan(res)
res = ser.quantile([0.5])
tm.assert_series_equal(res, Series([np.nan], index=[0.5]))
res = ser.quantile([0.2, 0.3])
tm.assert_series_equal(res, Series([np.nan, np.nan], index=[0.2, 0.3]))
@pytest.mark.parametrize(
"case",
[
[
Timestamp("2011-01-01"),
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
],
[
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-03", tz="US/Eastern"),
],
[pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")],
# NaT
[
Timestamp("2011-01-01"),
Timestamp("2011-01-02"),
Timestamp("2011-01-03"),
pd.NaT,
],
[
Timestamp("2011-01-01", tz="US/Eastern"),
Timestamp("2011-01-02", tz="US/Eastern"),
Timestamp("2011-01-03", tz="US/Eastern"),
pd.NaT,
],
[
pd.Timedelta("1 days"),
pd.Timedelta("2 days"),
pd.Timedelta("3 days"),
pd.NaT,
],
],
)
def test_quantile_box(self, case):
ser = Series(case, name="XXX")
res = ser.quantile(0.5)
assert res == case[1]
res = ser.quantile([0.5])
exp = Series([case[1]], index=[0.5], name="XXX")
tm.assert_series_equal(res, exp)
def test_datetime_timedelta_quantiles(self):
# covers #9694
assert pd.isna(Series([], dtype="M8[ns]").quantile(0.5))
assert pd.isna(Series([], dtype="m8[ns]").quantile(0.5))
def test_quantile_nat(self):
res = Series([pd.NaT, pd.NaT]).quantile(0.5)
assert res is pd.NaT
res = Series([pd.NaT, pd.NaT]).quantile([0.5])
tm.assert_series_equal(res, Series([pd.NaT], index=[0.5]))
@pytest.mark.parametrize(
"values, dtype",
[([0, 0, 0, 1, 2, 3], "Sparse[int]"), ([0.0, None, 1.0, 2.0], "Sparse[float]")],
)
def test_quantile_sparse(self, values, dtype):
ser = Series(values, dtype=dtype)
result = ser.quantile([0.5])
expected = Series(np.asarray(ser)).quantile([0.5]).astype("Sparse[float]")
tm.assert_series_equal(result, expected)
def test_quantile_empty_float64(self):
# floats
ser = Series([], dtype="float64")
res = ser.quantile(0.5)
assert np.isnan(res)
res = ser.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
def test_quantile_empty_int64(self):
# int
ser = Series([], dtype="int64")
res = ser.quantile(0.5)
assert np.isnan(res)
res = ser.quantile([0.5])
exp = Series([np.nan], index=[0.5])
tm.assert_series_equal(res, exp)
def test_quantile_empty_dt64(self):
# datetime
ser = Series([], dtype="datetime64[ns]")
res = ser.quantile(0.5)
assert res is pd.NaT
res = ser.quantile([0.5])
exp = Series([pd.NaT], index=[0.5], dtype=ser.dtype)
tm.assert_series_equal(res, exp)
@pytest.mark.parametrize("dtype", [int, float, "Int64"])
def test_quantile_dtypes(self, dtype):
result = Series([1, 2, 3], dtype=dtype).quantile(np.arange(0, 1, 0.25))
expected = Series(np.arange(1, 3, 0.5), index=np.arange(0, 1, 0.25))
if dtype == "Int64":
expected = expected.astype("Float64")
tm.assert_series_equal(result, expected)
def test_quantile_all_na(self, any_int_ea_dtype):
# GH#50681
ser = Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
with tm.assert_produces_warning(None):
result = ser.quantile([0.1, 0.5])
expected = Series([pd.NA, pd.NA], dtype=any_int_ea_dtype, index=[0.1, 0.5])
tm.assert_series_equal(result, expected)
def test_quantile_dtype_size(self, any_int_ea_dtype):
# GH#50681
ser = Series([pd.NA, pd.NA, 1], dtype=any_int_ea_dtype)
result = ser.quantile([0.1, 0.5])
expected = Series([1, 1], dtype=any_int_ea_dtype, index=[0.1, 0.5])
tm.assert_series_equal(result, expected)
| TestSeriesQuantile |
python | keras-team__keras | keras/src/backend/common/variables_test.py | {
"start": 5426,
"end": 15307
} | class ____(test_case.TestCase):
"""Tests for Variable._deferred_initialize Variable._maybe_autocast"""
@skip_if_backend(
"openvino", "Can not constant fold eltwise node by CPU plugin"
)
def test_deferred_assignment(self):
"""Tests deferred assignment to variables."""
with backend.StatelessScope() as scope:
v = backend.Variable(
initializer=initializers.RandomNormal(), shape=(2, 2)
)
self.assertEqual(v._value, None)
v.assign(np.zeros((2, 2)))
v.assign_add(2 * np.ones((2, 2)))
v.assign_sub(np.ones((2, 2)))
out = scope.get_current_value(v)
self.assertAllClose(out, np.ones((2, 2)))
def test_trainable_setter(self):
"""Tests the trainable setter."""
v = backend.Variable(
initializer=initializers.RandomNormal(),
shape=(2, 2),
)
self.assertTrue(v.trainable)
v.trainable = False
self.assertFalse(v.trainable)
if backend.backend() == "torch":
v.trainable = True
self.assertTrue(v._value.requires_grad)
v.trainable = False
self.assertFalse(v._value.requires_grad)
def test_autocasting_float(self):
# Tests autocasting of float variables
v = backend.Variable(
initializer=initializers.RandomNormal(),
shape=(2, 2),
dtype="float32",
)
self.assertEqual(v.dtype, "float32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
with AutocastScope("float16"):
self.assertEqual(
backend.standardize_dtype(v.value.dtype), "float16"
)
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
def test_autocasting_float_assign(self):
# Tests assigning value to variable within an autocast scope
v = backend.Variable(
initializer=initializers.RandomNormal(),
shape=(2, 2),
dtype="float32",
)
self.assertEqual(v.dtype, "float32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
# Assign float16 value within float16 scope
with AutocastScope("float16"):
self.assertEqual(
backend.standardize_dtype(v.value.dtype), "float16"
)
v.assign(ops.ones((2, 2), "float16"))
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
# Assign float32 value within float16 scope
with AutocastScope("float16"):
self.assertEqual(
backend.standardize_dtype(v.value.dtype), "float16"
)
v.assign(ops.zeros((2, 2), "float32"))
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
def test_autocasting_int(self):
# Test non-float variables are not affected
v = backend.Variable(
initializer=initializers.Ones(),
shape=(2, 2),
dtype="int32",
trainable=False,
)
self.assertEqual(v.dtype, "int32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")
with AutocastScope("float16"):
self.assertEqual(backend.standardize_dtype(v.value.dtype), "int32")
def test_autocasting_float_with_autocast_off(self):
# Test autocast argument
v = backend.Variable(
initializer=initializers.RandomNormal(),
shape=(2, 2),
dtype="float32",
autocast=False,
)
self.assertEqual(v.dtype, "float32")
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
with AutocastScope("float16"):
self.assertEqual(
backend.standardize_dtype(v.value.dtype),
"float32", # ignore AutocastScope
)
self.assertEqual(backend.standardize_dtype(v.value.dtype), "float32")
@parameterized.parameters(
*(
(
dtype
for dtype in dtypes.ALLOWED_DTYPES
if dtype not in ["string", "complex64", "complex28"]
)
)
)
def test_standardize_dtype(self, dtype):
"""Tests standardize_dtype for all ALLOWED_DTYPES except string."""
if backend.backend() == "torch" and dtype in (
"uint16",
"uint32",
"uint64",
"complex64",
"complex128",
):
self.skipTest(f"torch backend does not support dtype {dtype}")
if backend.backend() == "jax":
if dtype in ("complex128",):
self.skipTest(f"jax backend does not support dtype {dtype}")
import jax
if not jax.config.x64_enabled and "64" in dtype:
self.skipTest(
f"jax backend does not support {dtype} without x64 enabled"
)
if backend.backend() == "openvino" and dtype in (
"complex64",
"complex128",
):
self.skipTest(f"openvino backend does not support dtype {dtype}")
x = backend.convert_to_tensor(np.zeros(()), dtype)
actual = standardize_dtype(x.dtype)
self.assertEqual(actual, dtype)
def test_standardize_dtype_with_torch_dtype(self):
"""Tests dtype standardization with PyTorch dtypes."""
import torch
x = torch.randn(4, 4)
backend.standardize_dtype(x.dtype)
def test_name_validation(self):
"""Tests validation of variable names."""
with self.assertRaisesRegex(
ValueError, "Argument `name` must be a string"
):
backend.Variable(
initializer=initializers.RandomNormal(), name=12345
)
with self.assertRaisesRegex(ValueError, "cannot contain character `/`"):
backend.Variable(
initializer=initializers.RandomNormal(), name="invalid/name"
)
def test_standardize_shape_with_none(self):
with self.assertRaisesRegex(
ValueError, "Undefined shapes are not supported."
):
standardize_shape(None)
def test_standardize_shape_with_non_iterable(self):
with self.assertRaisesRegex(
ValueError, "Cannot convert '42' to a shape."
):
standardize_shape(42)
def test_standardize_shape_with_valid_input(self):
shape = (3, 4, 5)
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, (3, 4, 5))
def test_standardize_shape_with_valid_input_with_none(self):
shape = (3, None, 5)
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, (3, None, 5))
def test_standardize_shape_with_valid_not_tuple_input(self):
shape = [3, 4, 5]
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, (3, 4, 5))
def test_standardize_shape_with_numpy(self):
shape = [3, np.int32(4), np.int64(5)]
standardized_shape = standardize_shape(shape)
self.assertEqual(standardized_shape, (3, 4, 5))
for d in standardized_shape:
self.assertIsInstance(d, int)
def test_standardize_shape_with_string(self):
shape_with_string = (3, 4, "5")
with self.assertRaisesRegex(
ValueError,
"Cannot convert .* to a shape. Found invalid dimension '5'.",
):
standardize_shape(shape_with_string)
def test_standardize_shape_with_float(self):
shape_with_float = (3, 4, 5.0)
with self.assertRaisesRegex(
ValueError,
"Cannot convert .* to a shape. Found invalid dimension '5.0'.",
):
standardize_shape(shape_with_float)
def test_standardize_shape_with_object(self):
shape_with_object = (3, 4, object())
with self.assertRaisesRegex(
ValueError,
"Cannot convert .* to a shape. Found invalid dimension .*object",
):
standardize_shape(shape_with_object)
def test_standardize_shape_with_negative_dimension(self):
with self.assertRaisesRegex(
ValueError,
"Cannot convert .* to a shape. Negative dimensions",
):
standardize_shape((3, 4, -5))
def test_shape_equal_length_mismatch(self):
"""Test mismatch in lengths of shapes."""
self.assertFalse(shape_equal((3, 2), (3, 2, 4)))
self.assertFalse(shape_equal((), (3,)))
self.assertFalse(shape_equal((3, 2, 4, 5), (3, 2, 4)))
def test_autocast_scope_with_non_float_dtype(self):
"""Tests autocast scope with non-float dtype."""
with self.assertRaisesRegex(
ValueError,
"`AutocastScope` can only be used with a floating-point",
):
_ = AutocastScope("int32")
def test_variable_path_creation(self):
"""Test path creation for a variable."""
v = backend.Variable(initializer=np.ones((2, 2)), name="test_var")
self.assertEqual(v.path, "test_var")
with backend.name_scope("test_scope"):
v = backend.Variable(initializer=np.ones((2, 2)), name="test_var")
self.assertEqual(v.path, "test_scope/test_var")
def test_overwrite_with_gradient_setter(self):
v = backend.Variable(
initializer=initializers.RandomNormal(),
shape=(2, 2),
)
self.assertFalse(v.overwrite_with_gradient)
v.overwrite_with_gradient = True
self.assertTrue(v.overwrite_with_gradient)
with self.assertRaisesRegex(TypeError, "must be a boolean."):
v.overwrite_with_gradient = "true"
| VariablePropertiesTest |
python | dask__distributed | distributed/worker_memory.py | {
"start": 21214,
"end": 21577
} | class ____:
def __get__(self, instance: Nanny | Worker | None, owner: type) -> Any:
if instance is None:
# This is triggered by Sphinx
return None # pragma: nocover
_warn_deprecated(instance, "memory_monitor")
return partial(instance.memory_manager.memory_monitor, instance) # type: ignore
| DeprecatedMemoryMonitor |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_bugbear/class_as_data_structure.py | {
"start": 495,
"end": 578
} | class ____: # OK
class B:
...
def __init__(self):
...
| A |
python | getsentry__sentry | src/sentry/seer/endpoints/organization_seer_explorer_chat.py | {
"start": 1335,
"end": 1512
} | class ____(OrganizationPermission):
scope_map = {
"GET": ["org:read"],
"POST": ["org:read"],
}
@region_silo_endpoint
| OrganizationSeerExplorerChatPermission |
python | pytorch__pytorch | test/test_proxy_tensor.py | {
"start": 34630,
"end": 81292
} | class ____(TestCase):
def _test_dynamic(self, fn, trace_inputs, test_inputs, assert_eq=True):
"""
Tests fn traced with trace_inputs against test_inputs
Also returns shape env
"""
trace_inputs = [torch.randn(shape) for shape in trace_inputs]
traced_f = make_fx(fn, tracing_mode="symbolic")(*trace_inputs)
for input in test_inputs:
input = [torch.randn(shape) for shape in input]
rx, ry = traced_f(*input), fn(*input)
if assert_eq:
self.assertEqual(rx, ry)
return traced_f
def test_debug_interpreter(self):
import torch.library
from torch.library import Library
foo = Library("foo", "DEF") # noqa: TOR901
foo.define("foo(Tensor self) -> Tensor")
# Operator where meta and cpu disagree on strides
@torch.library.impl(foo, "foo", "CPU")
def foo_cpu(x):
return x.clone().T
@torch.library.impl(foo, "foo", "Meta")
def foo_meta(x):
return x.clone()
def f(x):
return torch.ops.foo.foo.default(x)
gm = make_fx(f, tracing_mode="symbolic")(torch.randn(2, 2))
from torch._functorch.compilers import DebugInterpreter
interp = DebugInterpreter(gm)
# input mismatch is caught (indicates guard problem)
self.assertRaisesRegex(
AssertionError, r"3 != 1",
lambda: interp.run(torch.randn(3, 3).T),
)
# Catch the incorrect meta
self.assertRaisesRegex(
AssertionError, r"\(3, 1\) != \(1, 3\)",
lambda: interp.run(torch.randn(3, 3))
)
def test_int_input(self):
def f(x, y):
return x.view(y)
r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(3, 4), 12).code).strip()
self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
view = torch.ops.aten.view.default(x_1, [y_1]); x_1 = y_1 = None
return view""")
def test_resize_from_zero(self):
def f(x, y):
x.resize_(y.size(0))
r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(0), torch.empty(2)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
sym_size_int = torch.ops.aten.sym_size.int(y_1, 0); y_1 = None
resize_ = torch.ops.aten.resize_.default(x_1, [sym_size_int]); x_1 = sym_size_int = resize_ = None
return None""")
def test_broadcast_shapes(self):
def f(x, y):
return torch.functional.broadcast_shapes(x.size(), y.size()[0])
r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(3, 1), torch.empty(5)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
sym_size_int = torch.ops.aten.sym_size.int(x_1, 0); x_1 = None
sym_size_int_1 = torch.ops.aten.sym_size.int(y_1, 0); y_1 = None
return (sym_size_int, sym_size_int_1)""")
def test_deduped_shape(self):
def f(s0, s1, x, y):
return torch.functional.broadcast_shapes(x.size(), y.size()[0]), torch.empty(x.shape[0])
x = torch.empty(3, 1)
y = torch.empty(5)
from torch.fx.experimental.symbolic_shapes import ShapeEnv
shape_env = ShapeEnv()
with FakeTensorMode(shape_env=shape_env, static_shapes=False) as fake_mode:
x = fake_mode.from_tensor(x)
y = fake_mode.from_tensor(y)
r = str(make_fx(f, tracing_mode="real")(x.shape[0], y.shape[0], x, y).code).strip()
self.assertExpectedInline(r, """\
def forward(self, s0_1, s1_1, x_1, y_1):
empty = torch.ops.aten.empty.memory_format([s0_1], device = device(type='cpu'), pin_memory = False)
return ((s0_1, s1_1), empty)""")
def test_non_deduped_shape(self):
def f(x, y):
return torch.functional.broadcast_shapes(x.size(), y.size()[0]), torch.empty(x.shape[0])
x = torch.empty(3, 1)
y = torch.empty(5)
from torch.fx.experimental.symbolic_shapes import ShapeEnv
shape_env = ShapeEnv()
with FakeTensorMode(shape_env=shape_env, static_shapes=False) as fake_mode:
x = fake_mode.from_tensor(x)
y = fake_mode.from_tensor(y)
r = str(make_fx(f, tracing_mode="real")(x, y).code).strip()
self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
sym_size_int = torch.ops.aten.sym_size.int(x_1, 0); x_1 = None
sym_size_int_1 = torch.ops.aten.sym_size.int(y_1, 0); y_1 = None
empty = torch.ops.aten.empty.memory_format([sym_size_int], device = device(type='cpu'), pin_memory = False)
return ((sym_size_int, sym_size_int_1), empty)""")
def test_unary(self):
def f(x):
assert x.shape[0] < 20
return x.cos()
test_inputs = []
test_inputs.append([(2, 5)])
test_inputs.append([(6, 8)])
gm = self._test_dynamic(f, [(3, 4)], test_inputs)
self.assertTrue(eval_guards(gm, torch.randn(4, 5)))
self.assertEqual(repr(bind_symbols(gm, torch.randn(4, 5))), "{s75: 4, s96: 5}")
self.assertFalse(eval_guards(gm, torch.randn(25, 5)))
self.assertExpectedInline(show_guards(gm), """L['x'].size()[0] <= 19""")
def test_repeat_interleave(self):
def f(src_tokens, beam_size_src):
return src_tokens.repeat_interleave(beam_size_src.size(0), 0)
prompt_size = 64
vocab_size = 64
batch_size = 4
src_tokens = torch.randint(1, vocab_size, (batch_size, prompt_size))
gm = make_fx(f, tracing_mode="symbolic")(src_tokens, torch.randn(5))
self.assertEqual(len(gm.shape_env.guards), 0)
def test_non_symint_size_spec(self):
# this isn't really a proxy tensor test, but it's the most convenient
# way to get a fake tensor with symbolic sizes
def f(x):
torch._C._non_sym_sizes(x)
return x + 1
x = torch.randn(2, 3)
make_fx(f, tracing_mode="symbolic")(x)
# https://github.com/pytorch/pytorch/issues/108195
def test_symbolic_repeat_interleave(self):
def f(y, x):
return y.repeat_interleave(x, dim=1)
y = torch.tensor([[1, 2], [3, 4]])
x = torch.tensor([2, 3])
r = str(make_fx(f, tracing_mode="symbolic")(y, x).code).strip()
self.assertExpectedInline(r, """\
def forward(self, y_1, x_1):
repeat_interleave = torch.ops.aten.repeat_interleave.Tensor(x_1); x_1 = None
index_select = torch.ops.aten.index_select.default(y_1, 1, repeat_interleave); y_1 = repeat_interleave = None
return index_select""")
def test_mod_gcd_unbacked(self):
def f(_a, _b, _stride):
a = _a.item()
b = _b.item()
stride = _stride.item()
ta = torch.randn(a * stride)
tb = torch.randn(b * stride)
r = torch.cat([ta, tb])
return r.view(a + b, stride)
_a = torch.tensor(30)
_b = torch.tensor(20)
_stride = torch.tensor(10)
r = str(make_fx(f, tracing_mode="symbolic")(_a, _b, _stride).code).strip()
self.assertExpectedInline(r, """\
def forward(self, _a_1, _b_1, _stride_1):
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(_a_1); _a_1 = None
_local_scalar_dense_1 = torch.ops.aten._local_scalar_dense.default(_b_1); _b_1 = None
_local_scalar_dense_2 = torch.ops.aten._local_scalar_dense.default(_stride_1); _stride_1 = None
mul = _local_scalar_dense * _local_scalar_dense_2
randn = torch.ops.aten.randn.default([mul], device = device(type='cpu'), pin_memory = False); mul = None
mul_1 = _local_scalar_dense_1 * _local_scalar_dense_2
randn_1 = torch.ops.aten.randn.default([mul_1], device = device(type='cpu'), pin_memory = False); mul_1 = None
cat = torch.ops.aten.cat.default([randn, randn_1]); randn = randn_1 = None
add = _local_scalar_dense + _local_scalar_dense_1; _local_scalar_dense = _local_scalar_dense_1 = None
view = torch.ops.aten.view.default(cat, [add, _local_scalar_dense_2]); cat = add = _local_scalar_dense_2 = None
return view""")
def test_cumsum_unbacked(self):
def f(x):
y = x.item()
z = torch.randn((3, y, 3))
return z.cumsum(0)
r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor([5])).code).strip()
self.assertExpectedInline(
r, """\
def forward(self, x_1):
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
randn = torch.ops.aten.randn.default([3, _local_scalar_dense, 3], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
cumsum = torch.ops.aten.cumsum.default(randn, 0); randn = None
return cumsum""" # noqa: B950
)
def test_repeat_interleave_unbacked_output_size(self):
def f(x, y):
s = x.sum().item()
return y.repeat_interleave(x, dim=0, output_size=s)
r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor([2, 3]), torch.randn(2)).code).strip()
self.assertExpectedInline(
r, """\
def forward(self, x_1, y_1):
sum_1 = torch.ops.aten.sum.default(x_1)
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(sum_1); sum_1 = None
repeat_interleave = torch.ops.aten.repeat_interleave.Tensor(x_1, output_size = _local_scalar_dense); x_1 = _local_scalar_dense = None
index_select = torch.ops.aten.index_select.default(y_1, 0, repeat_interleave); y_1 = repeat_interleave = None
return index_select""" # noqa: B950
)
def test_arange_unbacked_output_size(self):
def f(x):
return torch.arange(0, x)
r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor(10)).code).strip()
self.assertExpectedInline(
r, """\
def forward(self, x_1):
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
arange = torch.ops.aten.arange.start(0, _local_scalar_dense, device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
return arange""" # noqa: B950
)
def test_adv_index_batch(self):
def f(src_tokens):
bsz, src_len = src_tokens.size()[:2]
start_step = src_tokens.shape[1]
beam_size = 1
generate_size = 64
max_len = src_len + generate_size
tokens = torch.zeros(bsz * beam_size, max_len).to(src_tokens).long().fill_(0)
tokens[:, :start_step] = src_tokens.repeat_interleave(beam_size, 0)
return tokens
prompt_size = 64
vocab_size = 64
batch_size = 4
src_tokens = torch.randint(1, vocab_size, (batch_size, prompt_size))
gm = make_fx(f, tracing_mode="symbolic")(src_tokens)
# Guards to rule out batch_size == sys.maxsize (wobbling between 2 and
# 1 ok)
self.assertEqual(len(gm.shape_env.guards), 0)
@unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
def test_cpu_scalar_cuda(self):
# Extracted from wave2vec2
def f(a, b):
return (a * b) @ b
r = str(
make_fx(f, tracing_mode="symbolic")(
torch.tensor(1.0), torch.randn(2, 2, device='cuda')
).code
).strip()
self.assertExpectedInline(r, """\
def forward(self, a_1, b_1):
mul = torch.ops.aten.mul.Tensor(a_1, b_1); a_1 = None
mm = torch.ops.aten.mm.default(mul, b_1); mul = b_1 = None
return mm""")
def test_binary_broadcast(self):
def f(a, b):
c = a * b
return c
test_inputs = []
test_inputs.append([(1, 5), (3, 1)])
test_inputs.append([(1, 4), (4, 1)])
shape_env = self._test_dynamic(f, [(1, 2), (3, 1)], test_inputs).shape_env
assert len(shape_env.guards) == 0
def test_multiply_shape(self):
def f(a):
return torch.empty(a.shape[0] * 2)
r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(4)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, a_1):
sym_size_int = torch.ops.aten.sym_size.int(a_1, 0); a_1 = None
mul = sym_size_int * 2; sym_size_int = None
empty = torch.ops.aten.empty.memory_format([mul], device = device(type='cpu'), pin_memory = False); mul = None
return empty""")
def test_item(self):
def f(a):
r = a.item()
return r * a
r = str(make_fx(f, tracing_mode="symbolic")(torch.randn(1)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, a_1):
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(a_1)
mul = torch.ops.aten.mul.Tensor(a_1, _local_scalar_dense); a_1 = _local_scalar_dense = None
return mul""")
def test_tensor_symfloat(self):
def f(a):
r = torch.tensor(a.size(0) ** 2.0)
assert r.dtype is torch.float
return r
gm = make_fx(f, tracing_mode="symbolic")(torch.randn(2))
r = str(gm.code).strip()
# NB: this specializes, which is fine, the point is to make sure the
# dtype inference is correct
self.assertExpectedInline(r, """\
def forward(self, a_1):
_tensor_constant0 = self._tensor_constant0
lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None
return lift_fresh_copy""")
self.assertEqual(gm._tensor_constant0, torch.tensor(4.0))
def test_item_to_constructor(self):
def f(a):
r = a.item()
return torch.empty(r)
r = str(make_fx(f, tracing_mode="symbolic")(torch.randint(5, (1,))).code).strip()
self.assertExpectedInline(
r, """\
def forward(self, a_1):
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(a_1); a_1 = None
empty = torch.ops.aten.empty.memory_format([_local_scalar_dense], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
return empty""" # noqa: B950
)
def test_setitem_symint(self):
# from moco
# https://github.com/pytorch/pytorch/issues/101939
def f(x):
x[0] = x.size(0)
return x
r = str(make_fx(f, tracing_mode="symbolic")(torch.randn(10)).code).strip()
self.assertExpectedInline(
r, """\
def forward(self, x_1):
sym_size_int = torch.ops.aten.sym_size.int(x_1, 0)
scalar_tensor = torch.ops.aten.scalar_tensor.default(sym_size_int, dtype = torch.float32, layout = torch.strided, device = device(type='cpu')); sym_size_int = None
select = torch.ops.aten.select.int(x_1, 0, 0)
copy_ = torch.ops.aten.copy_.default(select, scalar_tensor); select = scalar_tensor = copy_ = None
return x_1""" # noqa: B950
)
def test_dynamic_pointwise_scalar(self):
def f(gravity, mask):
gravity[mask, 0] = gravity[mask, 0] * -1
r = str(make_fx(f, tracing_mode="symbolic")(
torch.randn((12, 4)),
torch.randint(0, 2, (12,), dtype=torch.bool)
).code).strip()
self.assertExpectedInline(r, """\
def forward(self, gravity_1, mask_1):
select = torch.ops.aten.select.int(gravity_1, 1, 0)
index = torch.ops.aten.index.Tensor(select, [mask_1]); select = None
mul = torch.ops.aten.mul.Tensor(index, -1); index = None
select_1 = torch.ops.aten.select.int(gravity_1, 1, 0); gravity_1 = None
index_put_ = torch.ops.aten.index_put_.default(select_1, [mask_1], mul); select_1 = mask_1 = mul = index_put_ = None
return None""")
def test_reflect_r_over_x(self):
def reflect_R_over_x(R):
reflect = torch.eye(3, device=R.device)
reflect[0, 0] = -1
return reflect @ R @ reflect
def f(crop_camera, mask):
crop_camera[mask] = reflect_R_over_x(crop_camera[mask])
r = str(make_fx(f, tracing_mode="symbolic")(
torch.randn((12, 3, 3)),
torch.randint(0, 2, (12,), dtype=torch.bool)
).code).strip()
self.assertExpectedInline(r, """\
def forward(self, crop_camera_1, mask_1):
index = torch.ops.aten.index.Tensor(crop_camera_1, [mask_1])
eye = torch.ops.aten.eye.default(3, device = device(type='cpu'), pin_memory = False)
_tensor_constant0 = self._tensor_constant0
lift_fresh_copy = torch.ops.aten.lift_fresh_copy.default(_tensor_constant0); _tensor_constant0 = None
select = torch.ops.aten.select.int(eye, 0, 0)
select_1 = torch.ops.aten.select.int(select, 0, 0); select = None
copy_ = torch.ops.aten.copy_.default(select_1, lift_fresh_copy); select_1 = lift_fresh_copy = copy_ = None
sym_size_int = torch.ops.aten.sym_size.int(index, 0)
expand = torch.ops.aten.expand.default(eye, [sym_size_int, 3, 3])
view = torch.ops.aten.view.default(expand, [sym_size_int, 3, 3]); expand = None
sym_size_int_1 = torch.ops.aten.sym_size.int(crop_camera_1, 1)
sym_size_int_2 = torch.ops.aten.sym_size.int(crop_camera_1, 2)
expand_1 = torch.ops.aten.expand.default(index, [sym_size_int, sym_size_int_1, sym_size_int_2]); index = None
view_1 = torch.ops.aten.view.default(expand_1, [sym_size_int, sym_size_int_1, sym_size_int_2]); expand_1 = sym_size_int_1 = sym_size_int_2 = None
bmm = torch.ops.aten.bmm.default(view, view_1); view = view_1 = None
view_2 = torch.ops.aten.view.default(bmm, [sym_size_int, 3, 3]); bmm = None
mul_9 = sym_size_int * 3
view_3 = torch.ops.aten.view.default(view_2, [mul_9, 3]); view_2 = mul_9 = None
mm = torch.ops.aten.mm.default(view_3, eye); view_3 = eye = None
_unsafe_view = torch.ops.aten._unsafe_view.default(mm, [sym_size_int, 3, 3]); mm = sym_size_int = None
index_put_ = torch.ops.aten.index_put_.default(crop_camera_1, [mask_1], _unsafe_view); crop_camera_1 = mask_1 = _unsafe_view = index_put_ = None
return None""") # noqa: B950
def test_unbacked_slice(self):
def f(x, m):
x = x[m]
return x[slice(None, None, None), slice(None, None, None), slice(None, 2, None)]
make_fx(f, tracing_mode="symbolic")(
torch.randn((12, 3, 3)),
torch.randint(0, 2, (12,), dtype=torch.bool)
)
@unittest.skipIf(not USE_TORCHVISION, "test requires torchvision")
def test_unbacked_batch_resnet(self):
mod = torchvision.models.resnet18()
def f(x, mask, params, buffers):
for p in itertools.chain([x, mask], params.values(), buffers.values()):
for s in p.shape:
guard_int(s)
x = x[mask]
torch._check(x.shape[0] >= 1)
for p in params.values():
p.grad = None
return torch.func.functional_call(mod, {**params, **buffers}, (x,)).sum()
make_fx(f, tracing_mode="symbolic")(
torch.randn(3, 3, 250, 250),
torch.randint(0, 2, (3,), dtype=torch.bool),
dict(mod.named_parameters()),
dict(mod.named_buffers()),
)
def test_boolean_index(self):
def f(images, handedness, valid):
images = images[valid]
handedness = handedness[valid]
right_hand_mask = handedness == 1
images[right_hand_mask] = images[right_hand_mask].flip(-1)
r = str(make_fx(f, tracing_mode="symbolic")(
torch.randint(0, 256, (512, 1, 96, 96)),
torch.randint(0, 1, (512,)),
torch.randint(0, 2, (512,), dtype=torch.bool)
).code).strip()
self.assertExpectedInline(r, """\
def forward(self, images_1, handedness_1, valid_1):
index = torch.ops.aten.index.Tensor(images_1, [valid_1]); images_1 = None
index_1 = torch.ops.aten.index.Tensor(handedness_1, [valid_1]); handedness_1 = valid_1 = None
eq = torch.ops.aten.eq.Scalar(index_1, 1); index_1 = None
index_2 = torch.ops.aten.index.Tensor(index, [eq])
flip = torch.ops.aten.flip.default(index_2, [-1]); index_2 = None
index_put_ = torch.ops.aten.index_put_.default(index, [eq], flip); index = eq = flip = index_put_ = None
return None""")
def test_neg_shape(self):
def f(a):
return torch.empty(-a.shape[0] + 10)
r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(2)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, a_1):
sym_size_int = torch.ops.aten.sym_size.int(a_1, 0); a_1 = None
neg = -sym_size_int; sym_size_int = None
add = neg + 10; neg = None
empty = torch.ops.aten.empty.memory_format([add], device = device(type='cpu'), pin_memory = False); add = None
return empty""")
def test_unbacked_unification(self):
def f(x, y):
z = torch.zeros(x.item())
return z + y
r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor(10), torch.randn(10)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
zeros = torch.ops.aten.zeros.default([_local_scalar_dense], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
add = torch.ops.aten.add.Tensor(zeros, y_1); zeros = y_1 = None
return add""") # noqa: B950
def test_reshape_divisibility_unbacked(self):
def f(x):
i0 = x.item()
r = torch.zeros(i0, 4, 20)
r = r.transpose(2, 1)
return r.reshape(-1, 80)
make_fx(f, tracing_mode="symbolic")(torch.tensor(24))
def test_view_divisibility_unbacked(self):
def f(x):
i0 = x.item()
r = torch.zeros(i0, 192)
return r.view(12, -1, 192)
make_fx(f, tracing_mode="symbolic")(torch.tensor(24))
@unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
def test_view_divisibility_unbacked_relatively_prime(self):
# See https://github.com/pytorch/pytorch/issues/123651
def f(x):
i0 = x.item()
# To trigger the original issue, the max bound has to
# be chosen such that 448 / 447 < 2 (which it is.)
torch._check(i0 > 0)
torch._check(i0 <= 448)
return torch.zeros(256 * i0).view(-1, 447)
make_fx(f, tracing_mode="symbolic")(torch.tensor(256 * 447, device="cuda"))
def test_unbacked_unify_guard(self):
def f(x, y):
z = torch.zeros(x.item())
torch._check(z.size(0) == y.size(0)) # refines i0 = s0
if z.size(0) == 4:
return y * 2
else:
return y + 2
r = str(make_fx(f, tracing_mode="symbolic")(torch.tensor(10), torch.randn(10)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, x_1, y_1):
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
zeros = torch.ops.aten.zeros.default([_local_scalar_dense], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = zeros = None
add = torch.ops.aten.add.Tensor(y_1, 2); y_1 = None
return add""") # noqa: B950
@unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
@unittest.expectedFailure
def test_unbacked_unify_guard_transitivity(self):
def f(x1, x2, y):
z1 = torch.zeros(x1.item())
z2 = torch.zeros(x2.item())
torch._check(z1.size(0) == z2.size(0)) # refines i0 = i1
torch._check(z2.size(0) == y.size(0)) # refines i0 = s0
if z1.size(0) == 4:
return y * 2
else:
return y + 2
gm = make_fx(f, tracing_mode="symbolic")(
torch.tensor(10, device="cuda"),
torch.tensor(10, device="cuda"),
torch.randn(10, device="cuda")
)
insert_deferred_runtime_asserts(gm, gm.shape_env, "test")
gm.recompile()
r = str(gm.code).strip()
# self.assertExpectedInline(
# r, """""" # noqa: B950
# )
@unittest.skipIf(not HAS_CUDA, 'CUDA-only test')
def test_unbacked_unify_dependency_violation(self):
def f(x1, x2, x3, y):
z1 = x1.item()
torch._check(z1 // 9 == 1)
z2 = x2.item()
z3 = x3.item()
torch._check(z1 == z2 + z3)
return y * 2
# NB: inputs are done as CUDA to ensure they aren't queried to be
# backed
gm = make_fx(f, tracing_mode="symbolic")(
torch.tensor(10, device="cuda"), torch.tensor(5, device="cuda"),
torch.tensor(5, device="cuda"), torch.randn(1, device="cuda")
)
insert_deferred_runtime_asserts(gm, gm.shape_env, "test")
gm.recompile()
self.assertEqual(gm(
torch.tensor(12, device="cuda"), torch.tensor(6, device="cuda"),
torch.tensor(6, device="cuda"), torch.tensor([1.0], device="cuda")),
torch.tensor([2.0], device="cuda")
)
with self.assertRaises(RuntimeError):
gm(
torch.tensor(20, device="cuda"), torch.tensor(10, device="cuda"),
torch.tensor(10, device="cuda"), torch.tensor([1.0], device="cuda")
)
def test_split_unbacked_sizes(self):
def f(lengths, values):
# tolist not directly supported atm
sizes = [lengths[i].item() for i in range(lengths.size(0))]
return torch.split(values, sizes)
r = str(make_fx(f, tracing_mode="symbolic")(
torch.tensor([2, 3, 4]),
torch.randn(9)
).code).strip()
self.assertExpectedInline(r, """\
def forward(self, lengths_1, values_1):
select = torch.ops.aten.select.int(lengths_1, 0, 0)
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(select); select = None
select_1 = torch.ops.aten.select.int(lengths_1, 0, 1)
_local_scalar_dense_1 = torch.ops.aten._local_scalar_dense.default(select_1); select_1 = None
select_2 = torch.ops.aten.select.int(lengths_1, 0, 2); lengths_1 = None
_local_scalar_dense_2 = torch.ops.aten._local_scalar_dense.default(select_2); select_2 = None
split_with_sizes = torch.ops.aten.split_with_sizes.default(values_1, [_local_scalar_dense, _local_scalar_dense_1, _local_scalar_dense_2]); values_1 = _local_scalar_dense = _local_scalar_dense_1 = _local_scalar_dense_2 = None
getitem = split_with_sizes[0]
getitem_1 = split_with_sizes[1]
getitem_2 = split_with_sizes[2]; split_with_sizes = None
return (getitem, getitem_1, getitem_2)""") # noqa: B950
def test_invalidate_nonzero(self):
ok = False
def f(a):
nonlocal ok
b = a.clone()
x = b.nonzero()
x1 = b.nonzero()
x2 = b.nonzero()
assert x1.shape[0] == x2.shape[0]
ok = True
b.normal_()
y = b.nonzero()
try:
bool(x1.shape[0] == y.shape[0])
self.fail("didn't raise exception")
except GuardOnDataDependentSymNode:
pass
make_fx(f, tracing_mode="symbolic")(torch.randn(4))
@torch._functorch.config.patch(fake_tensor_propagate_real_tensors=True)
def test_invalidate_nonzero_propagate_real_tensors(self):
def f(a):
b = a.clone()
x = b.nonzero()
x1 = b.nonzero()
x2 = b.nonzero()
assert x1.shape[0] == x2.shape[0]
b.normal_()
y = b.nonzero()
# Because you're not actually going to generate exactly zero with
# normal_ lol
assert x1.shape[0] == y.shape[0]
make_fx(f, tracing_mode="symbolic")(torch.randn(4))
def test_sqrt_size(self):
def f(a):
return a / a.size(-1) ** 0.5
r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(4)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, a_1):
sym_size_int = torch.ops.aten.sym_size.int(a_1, 0)
sym_float = torch.sym_float(sym_size_int); sym_size_int = None
pow_1 = sym_float ** 0.5; sym_float = None
div = torch.ops.aten.div.Tensor(a_1, pow_1); a_1 = pow_1 = None
return div""")
def test_make_fx_with_custom_tracer_preserving_nn_module_stack(self):
class Bar(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
def forward(self, x):
return x + 1
class Foo(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.bar = Bar()
def forward(self, x):
return x + self.bar(x)
gm = make_fx(Foo())(torch.randn(4, 4))
for node in gm.graph.nodes:
self.assertTrue("nn_module_stack" not in node.meta)
foo = Foo()
def functional_call(*args, **kwargs):
with stateless._reparametrize_module(foo, {}):
return foo(*args, **kwargs)
functional_call._orig_mod = foo
gm_with_stack = make_fx(functional_call, record_module_stack=True)(torch.randn(4, 4))
found = False
for node in gm_with_stack.graph.nodes:
if "nn_module_stack" in node.meta:
if len(node.meta["nn_module_stack"]) == 1:
self.assertTrue("custom_tracer_preserving_nn_module_stack.<locals>.Foo" in str(node.meta["nn_module_stack"]))
found = True
elif len(node.meta["nn_module_stack"]) == 2:
self.assertTrue("preserving_nn_module_stack.<locals>.Bar" in str(node.meta["nn_module_stack"]))
found = True
else:
# there can be at most 2 level
self.assertTrue(False)
self.assertTrue(found)
gm_without_stack = make_fx(functional_call)(torch.randn(4, 4))
for node in gm_without_stack.graph.nodes:
self.assertTrue("nn_module_stack" not in node.meta)
def test_symint_to_tensor(self):
def f(a):
return a / a.shape[0]
r = str(make_fx(f, tracing_mode="symbolic")(torch.empty(4)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, a_1):
sym_size_int = torch.ops.aten.sym_size.int(a_1, 0)
div = torch.ops.aten.div.Tensor(a_1, sym_size_int); a_1 = sym_size_int = None
return div""")
r = str(make_fx(f, tracing_mode="symbolic", decomposition_table=decomposition_table)(torch.empty(4)).code).strip()
self.assertExpectedInline(r, """\
def forward(self, a_1):
sym_size_int = torch.ops.aten.sym_size.int(a_1, 0)
sym_float = torch.sym_float(sym_size_int); sym_size_int = None
div = torch.ops.prims.div.default(a_1, sym_float); a_1 = sym_float = None
return div""")
def test_cat(self):
def f(a, b):
val = torch.mul(a, b)
out = torch.cat([val, val])
if out.shape[0] * out.shape[1] > 20:
out = out.cos()
return out
test_inputs = []
test_inputs.append([(1, 5), (6, 1)])
test_inputs.append([(1, 4), (3, 1)])
gm = self._test_dynamic(f, [(1, 6), (8, 1)], test_inputs)
self.assertTrue(eval_guards(gm, torch.randn(1, 10), torch.randn(6, 1)))
self.assertFalse(eval_guards(gm, torch.randn(1, 2), torch.randn(4, 1)))
self.assertExpectedInline(show_guards(gm), """2*L['b'].size()[0]*L['a'].size()[1] > 20""")
def test_new_empty(self):
def f(a, b):
return a.new_empty(b.shape[0], b.shape[1] * 2)
self._test_dynamic(f, [(2, 4), (4, 5)], [[(2, 3), (5, 7)], [(3, 7), (9, 3)]], assert_eq=False).shape_env
def test_size_with_tensor(self):
# I think I messed up writing this test case originally, I think
# I'm supposed to hit an error case, but the code here works in both
# eager and tracing
def f(tensor):
max_size = torch.tensor([800, 1216], dtype=torch.int64)
batch_shape = [2] + list(tensor.shape[:-2]) + list(max_size)
return tensor.new_empty(batch_shape)
a = torch.randn(3, 800, 1199)
f(a)
make_fx(f, tracing_mode="symbolic")(a)
def test_fake_tensor_as_size(self):
def f(x):
r = torch.zeros([x])
return r
fx_g = make_fx(f, tracing_mode="symbolic")(torch.tensor(4))
self.assertExpectedInline(fx_g.code.strip(), """\
def forward(self, x_1):
_local_scalar_dense = torch.ops.aten._local_scalar_dense.default(x_1); x_1 = None
zeros = torch.ops.aten.zeros.default([_local_scalar_dense], device = device(type='cpu'), pin_memory = False); _local_scalar_dense = None
return zeros""") # noqa: B950
def test_expand(self):
def f(a):
b = torch.mul(a, a)
c = b.expand(a.shape)
return c
self._test_dynamic(f, [(3,)], [[(3,)], [(4,)], [(2,)]])
self._test_dynamic(f, [(5, 1)], [[(4, 1)], [(3, 1)], [(6, 1)]])
def test_metadata(self):
def f(a, b):
d = a.new_empty(a.shape[0] + b.shape[0])
return d
fx_g = make_fx(f, tracing_mode="symbolic")(torch.randn(5), torch.randn(4))
meta_c = _get_node(fx_g, lambda x: x.target == aten.new_empty.default)
meta_d = _get_node(fx_g, lambda x: x.target == operator.add)
self.assertTrue(meta_c.meta['val'].shape[0].node.expr == meta_d.meta['val'].node.expr)
def test_metadata_fresh(self):
def f(x):
assert x.shape[0] == 3
return x.cos()
fx_g = make_fx(f, tracing_mode="symbolic")(torch.randn(3))
meta_cos = _get_node(fx_g, lambda x: x.target == aten.cos.default)
meta_inp = _get_node(fx_g, lambda x: x.op == 'placeholder')
self.assertTrue(meta_cos.meta['val'].shape[0] == 3)
# Checks if the input expr has been updated even though the constraint
# happened afterwards
self.assertTrue(meta_inp.meta['val'].shape[0] == 3)
def test_elementwise_meta_with_sym_numbers(self):
def f(x, offset, as_sym_float=False):
x0 = x.size()[0]
if as_sym_float:
x0 = torch.sym_float(x0)
return torch.add(x0, offset)
fx_g = make_fx(f, tracing_mode="symbolic")(torch.rand(2, 3), 2.0, False)
meta_add = _get_node(fx_g, lambda x: x.target == aten.add.Tensor)
self.assertEqual(meta_add.meta['val'].shape, ())
self.assertEqual(meta_add.meta['val'].dtype, torch.float32)
fx_g = make_fx(f, tracing_mode="symbolic")(torch.rand(2, 3), 2, False)
meta_add = _get_node(fx_g, lambda x: x.target == aten.add.Tensor)
self.assertEqual(meta_add.meta['val'].shape, ())
self.assertEqual(meta_add.meta['val'].dtype, torch.int64)
fx_g = make_fx(f, tracing_mode="symbolic")(torch.rand(2, 3), 2, True)
meta_add = _get_node(fx_g, lambda x: x.target == aten.add.Tensor)
self.assertEqual(meta_add.meta['val'].shape, ())
self.assertEqual(meta_add.meta['val'].dtype, torch.float32)
def test_return_symint(self):
def f(x):
return x.shape[0], x.cos(), x.shape[0] / 5
self._test_dynamic(f, [(5,)], [[(4,)], [(12,)]])
def f(x):
return x.shape
self._test_dynamic(f, [(5, 3)], [[(4, 6)]])
def test_rmethod(self):
def f(x):
return x.size(0) + x
self._test_dynamic(f, [(5,)], [[(4,)], [(12,)]])
def test_mega_guard(self):
def f(a, b):
assert a.shape[0] == b.shape[0] * 2
return a.cos()
fx_g = make_fx(f, tracing_mode="symbolic")(torch.randn(16), torch.randn(8))
from torch._dynamo.source import LocalSource
self.assertExpectedInline(
str(fx_g.shape_env.produce_guards(fx_placeholder_vals(fx_g), [LocalSource("a"), LocalSource("b")], ignore_static=False)), # noqa: B950
"""["L['a'].size()[0] == 2*L['b'].size()[0]", "L['a'].stride()[0] == 1", "L['a'].storage_offset() == 0", "L['b'].stride()[0] == 1", "L['b'].storage_offset() == 0", "2 <= L['b'].size()[0]"]""" # noqa: B950
)
self.assertExpectedInline(
str(fx_g.shape_env.produce_guards(fx_placeholder_vals(fx_g), [LocalSource("a"), LocalSource("b")], ignore_static=True)), # noqa: B950
"""["L['a'].size()[0] == 2*L['b'].size()[0]", "2 <= L['b'].size()[0]"]""" # noqa: B950
)
def test_guard_upperbound_range_refinement(self):
def f(a):
assert a.shape[0] > 5 and a.shape[0] > 12
return a.cos()
tensor = make_fx(f, tracing_mode="symbolic")(torch.randn(15))
self.assertExpectedInline(show_guards(tensor), """13 <= L['a'].size()[0]""")
def test_guard_lowerbound_range_refinement(self):
def f(a):
assert a.shape[0] < 20 and a.shape[0] < 30
return a.cos()
tensor = make_fx(f, tracing_mode="symbolic")(torch.randn(15))
self.assertExpectedInline(show_guards(tensor), """L['a'].size()[0] <= 19""")
def test_guard_upperbound_range_refinement_multivariate(self):
def f(a):
assert a.shape[0] > 5 and a.shape[0] > 12
assert a.shape[1] > 5 and a.shape[1] > a.shape[0]
return a.cos()
tensor = make_fx(f, tracing_mode="symbolic")(torch.randn((15, 20)))
self.assertExpectedInline(show_guards(tensor), """\
L['a'].size()[1] > L['a'].size()[0]
13 <= L['a'].size()[0]
14 <= L['a'].size()[1]""")
def test_guard_lowerbound_range_refinement_multivariate(self):
def f(a):
assert a.shape[0] < 20 and a.shape[0] < 30
assert a.shape[1] < 30 and a.shape[1] < a.shape[0]
return a.cos()
tensor = make_fx(f, tracing_mode="symbolic")(torch.randn((15, 5)))
self.assertExpectedInline(
show_guards(tensor),
"""\
L['a'].size()[1] < L['a'].size()[0]
3 <= L['a'].size()[0] and L['a'].size()[0] <= 19
L['a'].size()[1] <= 18""")
def test_sym_storage_offset(self):
def f(x, y):
return x + y
inp = (torch.randn(8)[3:], torch.randn(5))
fx_g = make_fx(f, tracing_mode="symbolic")(*inp)
inp = (torch.randn(8)[3:], torch.randn(5))
self.assertEqual(fx_g(*inp), f(*inp))
def _assert_no_guards(self, fx_g, free_symbols):
assert _get_free_symbols(fx_g.shape_env) == free_symbols, fx_g.shape_env.var_to_val
assert len(fx_g.shape_env.get_nontrivial_guards()) == 0, fx_g.shape_env.format_guards()
def test_guards_equal(self):
def f(a, b):
return a * b
# NB: Numbers are carefully chosen to avoid duck shaping from applying
fx_g = _trace(f, (5, 6), (5, 6))
self._assert_no_guards(fx_g, 2)
fx_g = _trace(f, (5, 6, 7), (5, 6, 7))
self._assert_no_guards(fx_g, 3)
fx_g = _trace(f, (5, 1), (1, 6))
self._assert_no_guards(fx_g, 2)
def f(a, b, c, d):
a = a + b
cat = torch.cat([c, d])
return a + cat
fx_g = _trace(f, 7, 7, 4, 3)
self._assert_no_guards(fx_g, 2)
def f(a, b, c, d, e):
vals = [a, b, c, d, e]
x = a
for idx in range(len(vals) - 1):
x = torch.cat([x, vals[idx]]) + vals[idx + 1]
return x
fx_g = _trace(f, 2, 4, 8, 16, 32)
self._assert_no_guards(fx_g, 1)
def f(a, b):
a = a.view(b.shape[0])
return a + b.sum()
fx_g = _trace(f, (4, 2), 8)
self._assert_no_guards(fx_g, 2)
fx_g = _trace(f, (4, 2), (8, 5))
self._assert_no_guards(fx_g, 3)
fx_g = _trace(f, (2, 3, 4), 24)
self._assert_no_guards(fx_g, 3)
def test_nonidentity_transitive_guards(self):
def f(a, b, c, d, e):
vals = [a, b, c, d, e]
cat_vals = []
for idx in range(len(vals) - 1):
cat_vals.append(torch.cat([vals[idx], vals[idx]]))
final_vals = []
for a, b in reversed(list(zip(cat_vals, vals[1:]))):
final_vals.append(a + b)
return final_vals
fx_g = _trace(f, 2, 4, 8, 16, 32)
self.assertExpectedInline(show_guards(fx_g), """""")
@torch.fx.experimental._config.patch(translation_validation=True)
def test_constant_specialization(self):
def f(t):
assert t.shape[0] == 10
return t
tensor = make_fx(f, tracing_mode="symbolic")(torch.randn(10))
self.assertExpectedInline(show_guards(tensor), """""")
make_fx_failures = {
# unknown
xfail('allclose'),
xfail('equal'),
# empty
skip('new_empty'),
skip('empty_like'),
skip('empty'),
skip('empty_permuted'),
# flaky
skip('linalg.lstsq', 'grad_oriented'),
skip('nn.functional.max_unpool1d', '', device_type='cpu'),
skip('nn.functional.max_unpool2d', '', device_type='cpu'),
skip('nn.functional.max_unpool3d', '', device_type='cpu'),
skip('linalg.lstsq'), # flaky, probably just a precision issue
# data-dependent control flow
skip('item'),
xfail('cov'),
xfail('nn.functional.gaussian_nll_loss'),
xfail('corrcoef'),
xfail('quantile'),
xfail('nanquantile'),
# Seems like it's creating a sparse tensor that isn't captured by tensor.is_sparse
xfail('sparse.sampled_addmm'),
xfail('sparse.mm', 'reduce'),
# proxy tensor doesn't support sparse correctly right now
skip('to_sparse'),
# segfaults
skip('block_diag'),
# AssertionError: Tensor-likes are not close!
skip('empty_strided', '', device_type='cpu'),
}
only_real_tensor_failures = {
xfail('narrow'),
xfail('tensor_split'),
}
only_fake_tensor_failures = {
xfail('tensor_split'),
}
fake_tensor_failures = set()
symbolic_tensor_failures = {
xfail('combinations', ''),
xfail('geqrf', ''), # aten.geqrf.default - couldn't find symbolic meta function/decomposition
xfail('histogram', ''), # Could not run 'aten::histogram.bin_ct' with arguments from the 'Meta' backend. This c...
xfail('histogramdd', ''), # aten._histogramdd_bin_edges.default - couldn't find symbolic meta function/decomposition
xfail('nanquantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend.
xfail('nn.functional.binary_cross_entropy', ''), # aten.new_empty.default - couldn't find symbolic meta function/decom...
xfail('nn.functional.cross_entropy', ''), # aten.size.default - couldn't find symbolic meta function/decomposition
xfail('nn.functional.ctc_loss'), # aten._ctc_loss.Tensor - couldn't find symbolic meta function/decomposition
xfail('quantile', ''), # Could not run 'aten::equal' with arguments from the 'Meta' backend.
xfail('max_pool2d_with_indices_backward', ''), # Expected a value of type 'List[int]' for argument 'kernel_size' but...
}
symbolic_tensor_segfaults = {
skip('nn.functional.batch_norm') # Segfault??
}
symbolic_tensor_failures.update(symbolic_tensor_segfaults)
inplace_symbolic_tensor_failures = {
# bugs
xfail('float_power', ''), # base given to float_power_ has dtype Float but the operation's result requires dtype Double
}
out_symbolic_tensor_failures = {
# Cast error details: Unable to cast (...) to Tensor
#
# This happens because the test is set up to call the out variant using the `out` kwarg:
# torch._some_op(arg1, arg2, out=(out1, out2, out3))
#
# However, this only works on torch ops, not aten ops. For `_batch_norm_with_update`,
# this fails because the op has no python bindings, so it doesn't support the `out` kwarg
# way of calling its out variant.
xfail('_batch_norm_with_update', ''),
xfail('_native_batch_norm_legit', ''),
xfail('angle', ''),
xfail('argmax', ''),
xfail('argmin', ''),
xfail('gather', ''),
xfail('linalg.pinv', ''),
xfail('linalg.pinv', 'hermitian'),
xfail('scatter_add', ''),
xfail('scatter', ''),
xfail('take_along_dim', ''),
# SymIntArrayRef expected to contain only concrete
xfail('randn', ''),
# RuntimeError: Cannot call numel() on tensor with symbolic sizes/strides
xfail('index_reduce', 'prod'),
xfail('index_reduce', 'mean'),
xfail('index_reduce', 'amax'),
xfail('index_reduce', 'amin'),
}
out_symbolic_tensor_segfaults = {
skip('nanmean', ''),
}
out_symbolic_tensor_failures.update(out_symbolic_tensor_segfaults)
# Copies inputs to inplace operations to avoid inplace modifications
# to leaves requiring gradient
def _get_safe_inplace(inplace_variant):
@functools.wraps(inplace_variant)
def _fn(t, *args, **kwargs):
return inplace_variant(t.clone(), *args, **kwargs)
return _fn
def _test_make_fx_helper(self, device, dtype, op, tracing_mode, inplace=False, out=False):
fn = _get_safe_inplace(op.get_inplace()) if inplace else op.op
sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
# Limit ourselves to first 100 inputs so symbolic tracing tests don't take too long
count = 100
if out:
count = 5
for sample_input in itertools.islice(sample_inputs_itr, count):
if inplace and sample_input.broadcasts_input:
continue
args = [sample_input.input] + list(sample_input.args)
kwargs = sample_input.kwargs
if out:
expected = fn(*args, **kwargs)
kwargs['out'] = expected
try:
optests.make_fx_check(fn, args, kwargs, tracing_mode, self.assertEqual,
randomize_data=True)
except DynamicOutputShapeException:
self.skipTest("Dynamic output shape operation in trace")
def skipIfNameMatches(pattern):
"""
Decorator to skip a test if its name matches the given pattern.
"""
def decorator(test_func):
def wrapper(*args, **kwargs):
if re.match(pattern, test_func.__name__):
raise unittest.SkipTest(f"Test '{test_func.__name__}' skipped because its name matches the pattern '{pattern}'")
return test_func(*args, **kwargs)
return wrapper
return decorator
# Auto functionalize shouldn't work with make_fx directly
filtered_hop_db = [op for op in hop_db if op.name != "auto_functionalize"]
@unittest.skipIf(not torch._dynamo.is_dynamo_supported(), "Cond requires dynamo")
| TestSymbolicTracing |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_legend04.py | {
"start": 315,
"end": 1420
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_legend04.xlsx")
def test_create_file(self):
"""Test the creation of an XlsxWriter file with legend options."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "line"})
chart.axis_ids = [93548928, 93550464]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_legend({"position": "overlay_top_right"})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | astropy__astropy | astropy/io/votable/converters.py | {
"start": 28156,
"end": 28925
} | class ____(VarArray):
"""
Handles an array of variable-length arrays of complex numbers.
"""
def parse(self, value, config=None, pos=None):
if value.strip() == "":
return ma.array([]), True
parts = self._splitter(value, config, pos)
items = self._base._items
parse_parts = self._base.parse_parts
if len(parts) % items != 0:
vo_raise(E02, (items, len(parts)), config, pos)
result = []
result_mask = []
for i in range(0, len(parts), items):
value, mask = parse_parts(parts[i : i + items], config, pos)
result.append(value)
result_mask.append(mask)
return _make_masked_array(result, result_mask), False
| ComplexArrayVarArray |
python | facebook__pyre-check | client/language_server/protocol.py | {
"start": 10730,
"end": 10900
} | class ____(json_mixins.CamlCaseAndExcludeJsonMixin):
text_document_sync: Optional[TextDocumentSyncOptions] = None
@dataclasses.dataclass(frozen=True)
| ServerCapabilities |
python | tensorflow__tensorflow | tensorflow/python/framework/extension_type.py | {
"start": 29674,
"end": 30532
} | class ____(
ExtensionTypeSpec, type_spec.BatchableTypeSpec
):
"""Base class for TypeSpecs for BatchableExtensionTypes."""
__batch_encoder__ = ExtensionTypeBatchEncoder()
def _batch(self, batch_size):
return self.__batch_encoder__.batch(self, batch_size)
def _unbatch(self):
return self.__batch_encoder__.unbatch(self)
def _to_tensor_list(self, value):
return type_spec.batchable_to_tensor_list(self, value)
def _to_batched_tensor_list(self, value):
return type_spec.batchable_to_tensor_list(self, value, minimum_rank=1)
def _from_compatible_tensor_list(self, tensor_list):
return type_spec.batchable_from_tensor_list(self, tensor_list)
@property
def _flat_tensor_specs(self):
return type_spec.get_batchable_flat_tensor_specs(self)
@tf_export('experimental.BatchableExtensionType')
| BatchableExtensionTypeSpec |
python | PrefectHQ__prefect | tests/cli/test_deploy.py | {
"start": 89821,
"end": 121528
} | class ____:
@pytest.mark.usefixtures("project_dir")
async def test_passing_cron_schedules_to_deploy(
self, work_pool: WorkPool, prefect_client: PrefectClient
):
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --cron '0 4 * * *'"
f" --timezone 'Europe/Berlin' --pool {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule = deployment.schedules[0].schedule
assert schedule.cron == "0 4 * * *"
assert schedule.timezone == "Europe/Berlin"
@pytest.mark.usefixtures("project_dir")
async def test_deployment_yaml_cron_schedule(
self, work_pool: WorkPool, prefect_client: PrefectClient
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["cron"] = "0 4 * * *"
deploy_config["deployments"][0]["schedule"]["timezone"] = "America/Chicago"
deploy_config["deployments"][0]["schedule"]["parameters"] = {
"number": 42,
}
deploy_config["deployments"][0]["schedule"]["slug"] = "test-slug"
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule = deployment.schedules[0].schedule
assert schedule.cron == "0 4 * * *"
assert schedule.timezone == "America/Chicago"
assert deployment.schedules[0].parameters == {"number": 42}
assert deployment.schedules[0].slug == "test-slug"
@pytest.mark.usefixtures("project_dir")
async def test_deployment_yaml_cron_schedule_timezone_cli(
self, work_pool: WorkPool, prefect_client: PrefectClient
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["cron"] = "0 4 * * *"
deploy_config["deployments"][0]["schedule"]["timezone"] = "America/Chicago"
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name "
f"--timezone 'Europe/Berlin' --pool {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert len(deployment.schedules) == 1
schedule = deployment.schedules[0].schedule
assert schedule.cron == "0 4 * * *"
assert schedule.timezone == "Europe/Berlin"
@pytest.mark.usefixtures("project_dir")
async def test_passing_interval_schedules_to_deploy(
self, work_pool: WorkPool, prefect_client: PrefectClient
):
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --interval 42"
" --anchor-date 2040-02-02 --timezone 'America/New_York' --pool"
f" {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert len(deployment.schedules) == 1
schedule = deployment.schedules[0].schedule
assert schedule.interval == timedelta(seconds=42)
assert schedule.anchor_date == parse_datetime("2040-02-02")
assert schedule.timezone == "America/New_York"
@pytest.mark.usefixtures("project_dir")
async def test_interval_schedule_deployment_yaml(
self,
prefect_client: PrefectClient,
work_pool: WorkPool,
):
prefect_yaml = Path("prefect.yaml")
with prefect_yaml.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["interval"] = 42
deploy_config["deployments"][0]["schedule"]["anchor_date"] = "2040-02-02"
deploy_config["deployments"][0]["schedule"]["timezone"] = "America/Chicago"
deploy_config["deployments"][0]["schedule"]["parameters"] = {
"number": 42,
}
deploy_config["deployments"][0]["schedule"]["slug"] = "test-slug"
with prefect_yaml.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert len(deployment.schedules) == 1
schedule = deployment.schedules[0].schedule
assert schedule.interval == timedelta(seconds=42)
assert schedule.anchor_date == parse_datetime("2040-02-02")
assert schedule.timezone == "America/Chicago"
assert deployment.schedules[0].parameters == {"number": 42}
assert deployment.schedules[0].slug == "test-slug"
@pytest.mark.usefixtures("project_dir")
async def test_parsing_rrule_schedule_string_literal(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
"deploy ./flows/hello.py:my_flow -n test-name --rrule"
" 'DTSTART:20220910T110000\nRRULE:FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR,SA;BYHOUR=9,10,11,12,13,14,15,16,17'"
f" --pool {work_pool.name}"
),
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule = deployment.schedules[0].schedule
assert (
schedule.rrule
== "DTSTART:20220910T110000\nRRULE:FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR,SA;BYHOUR=9,10,11,12,13,14,15,16,17"
)
@pytest.mark.usefixtures("project_dir")
async def test_rrule_deployment_yaml(
self, work_pool: WorkPool, prefect_client: PrefectClient
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["schedule"]["rrule"] = (
"DTSTART:20220910T110000\nRRULE:FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR,SA;BYHOUR=9,10,11,12,13,14,15,16,17"
)
deploy_config["deployments"][0]["schedule"]["parameters"] = {
"number": 42,
}
deploy_config["deployments"][0]["schedule"]["slug"] = "test-slug"
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule = deployment.schedules[0].schedule
assert (
schedule.rrule
== "DTSTART:20220910T110000\nRRULE:FREQ=HOURLY;BYDAY=MO,TU,WE,TH,FR,SA;BYHOUR=9,10,11,12,13,14,15,16,17"
)
assert deployment.schedules[0].parameters == {"number": 42}
assert deployment.schedules[0].slug == "test-slug"
@pytest.mark.usefixtures("project_dir")
async def test_can_provide_multiple_schedules_via_command(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --cron '* * * * *' --interval 42 --rrule 'FREQ=HOURLY' --pool {work_pool.name}",
expected_code=0,
expected_output_contains=[
"Deployment 'An important name/test-name' successfully created"
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule_config = {}
for deployment_schedule in deployment.schedules:
schedule = deployment_schedule.schedule
if isinstance(schedule, IntervalSchedule):
schedule_config["interval"] = schedule.interval
elif isinstance(schedule, CronSchedule):
schedule_config["cron"] = schedule.cron
elif isinstance(schedule, RRuleSchedule):
schedule_config["rrule"] = schedule.rrule
else:
raise AssertionError("Unknown schedule type received")
assert schedule_config == {
"interval": timedelta(seconds=42),
"cron": "* * * * *",
"rrule": "FREQ=HOURLY",
}
@pytest.mark.usefixtures("project_dir")
async def test_can_provide_multiple_schedules_via_yaml(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_yaml = Path("prefect.yaml")
with prefect_yaml.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedules"] = [
{"interval": 42},
{"cron": "* * * * *"},
{"rrule": "FREQ=HOURLY"},
]
with prefect_yaml.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}",
expected_code=0,
expected_output_contains=[
"Deployment 'An important name/test-name' successfully created"
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedule_config = {}
for deployment_schedule in deployment.schedules:
schedule = deployment_schedule.schedule
if isinstance(schedule, IntervalSchedule):
schedule_config["interval"] = schedule.interval
elif isinstance(schedule, CronSchedule):
schedule_config["cron"] = schedule.cron
elif isinstance(schedule, RRuleSchedule):
schedule_config["rrule"] = schedule.rrule
else:
raise AssertionError("Unknown schedule type received")
assert schedule_config == {
"interval": timedelta(seconds=42),
"cron": "* * * * *",
"rrule": "FREQ=HOURLY",
}
@pytest.mark.usefixtures("project_dir")
async def test_yaml_with_schedule_and_schedules_raises_error(
self, work_pool: WorkPool
):
prefect_yaml = Path("prefect.yaml")
with prefect_yaml.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["interval"] = 42
deploy_config["deployments"][0]["schedules"] = [{"interval": 42}]
with prefect_yaml.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
expected_code=1,
expected_output_contains="Both 'schedule' and 'schedules' keys are present in the deployment configuration. Please use only use `schedules`.",
)
@pytest.mark.usefixtures("project_dir")
async def test_can_provide_multiple_schedules_of_the_same_type_via_command(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --cron '* * * * *' --cron '0 * * * *' --pool {work_pool.name}",
expected_code=0,
expected_output_contains=[
"Deployment 'An important name/test-name' successfully created"
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
schedules: set[str] = set()
for deployment_schedule in deployment.schedules:
schedule = deployment_schedule.schedule
assert isinstance(schedule, CronSchedule)
schedules.add(schedule.cron)
assert schedules == {
"* * * * *",
"0 * * * *",
}
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_interval_schedule_interactive(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Confirm schedule creation
readchar.key.ENTER
# Select interval schedule
+ readchar.key.ENTER
# Enter invalid interval
+ "bad interval"
+ readchar.key.ENTER
# Enter another invalid interval
+ "0"
+ readchar.key.ENTER
# Enter valid interval
+ "42"
+ readchar.key.ENTER
# accept schedule being active
+ readchar.key.ENTER
# decline adding another schedule
+ readchar.key.ENTER
# decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
"? Seconds between scheduled runs",
"Please enter a valid interval denoted in seconds",
"Interval must be greater than 0",
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules[0].schedule.interval == timedelta(seconds=42)
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_default_interval_schedule_interactive(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Confirm schedule creation
readchar.key.ENTER
# Select interval schedule
+ readchar.key.ENTER
# Enter default interval
+ readchar.key.ENTER
# accept schedule being active
+ readchar.key.ENTER
# decline adding another schedule
+ readchar.key.ENTER
# decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
"Seconds between scheduled runs (3600)",
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules[0].schedule.interval == timedelta(seconds=3600)
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_cron_schedule_interactive(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Confirm schedule creation
readchar.key.ENTER
# Select cron schedule
+ readchar.key.DOWN
+ readchar.key.ENTER
# Enter invalid cron string
+ "bad cron string"
+ readchar.key.ENTER
# Enter cron
+ "* * * * *"
+ readchar.key.ENTER
# Enter invalid timezone
+ "bad timezone"
+ readchar.key.ENTER
# Select default timezone
+ readchar.key.ENTER
# accept schedule being active
+ readchar.key.ENTER
# decline adding another schedule
+ readchar.key.ENTER
# decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
expected_output_contains=[
"? Cron string",
"Please enter a valid cron string",
"? Timezone",
"Please enter a valid timezone",
],
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules[0].schedule.cron == "* * * * *"
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_rrule_schedule_interactive(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Confirm schedule creation
readchar.key.ENTER
# Select rrule schedule
+ readchar.key.DOWN
+ readchar.key.DOWN
+ readchar.key.ENTER
# Enter invalid rrule string
+ "bad rrule string"
+ readchar.key.ENTER
# Enter valid rrule string
+ "FREQ=WEEKLY;BYDAY=MO,WE,FR;UNTIL=20240730T040000Z"
+ readchar.key.ENTER
# Enter invalid timezone
+ "bad timezone"
+ readchar.key.ENTER
# Select default timezone
+ readchar.key.ENTER
# accept schedule being active
+ readchar.key.ENTER
# decline adding another schedule
+ readchar.key.ENTER
# decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert (
deployment.schedules[0].schedule.rrule
== "FREQ=WEEKLY;BYDAY=MO,WE,FR;UNTIL=20240730T040000Z"
)
@pytest.mark.usefixtures("interactive_console", "project_dir")
async def test_deploy_no_schedule_interactive(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
user_input=(
# Decline schedule creation
"n"
+ readchar.key.ENTER
# Decline remote storage
+ "n"
+ readchar.key.ENTER
# Decline save
+ "n"
+ readchar.key.ENTER
),
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert len(deployment.schedules) == 0
@pytest.mark.usefixtures("project_dir")
async def test_deploy_with_inactive_schedule(
self, work_pool: WorkPool, prefect_client: PrefectClient
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedule"]["cron"] = "0 4 * * *"
deploy_config["deployments"][0]["schedule"]["timezone"] = "America/Chicago"
deploy_config["deployments"][0]["schedule"]["active"] = False
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=(
f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}"
),
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
deployment_schedule = deployment.schedules[0]
assert deployment_schedule.active is False
assert deployment_schedule.schedule.cron == "0 4 * * *"
assert deployment_schedule.schedule.timezone == "America/Chicago"
@pytest.mark.usefixtures("project_dir")
async def test_deploy_does_not_activate_schedule_outside_of_yaml(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
# Create a deployment with a schedule that is not active
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedules"] = [
{
"cron": "0 4 * * *",
"timezone": "America/Chicago",
"active": False,
"slug": "test-yaml-slug",
}
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}",
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
deployment_schedule = deployment.schedules[0]
assert deployment_schedule.active is False
assert deployment_schedule.schedule.cron == "0 4 * * *"
assert deployment_schedule.schedule.timezone == "America/Chicago"
# Create another schedule outside of the yaml
# Using the https client directly because the PrefectClient does not support
# creating schedules with slugs
await prefect_client._client.post(
f"/deployments/{deployment.id}/schedules",
json=[
DeploymentScheduleCreate(
schedule=CronSchedule(cron="0 4 * * *"),
active=False,
slug="test-client-slug",
).model_dump(mode="json"),
],
)
deploy_config["deployments"][0]["schedules"][0]["active"] = True
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}",
)
assert result.exit_code == 0
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert len(deployment.schedules) == 2
expected_slug_active = {("test-yaml-slug", True), ("test-client-slug", False)}
actual_slug_active = {
(schedule.slug, schedule.active) for schedule in deployment.schedules
}
assert actual_slug_active == expected_slug_active
@pytest.mark.usefixtures("project_dir")
async def test_yaml_null_schedules(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_yaml_content = f"""
deployments:
- name: test-name
entrypoint: flows/hello.py:my_flow
work_pool:
name: {work_pool.name}
schedules: null
"""
with open("prefect.yaml", "w") as f:
f.write(prefect_yaml_content)
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules == []
@pytest.mark.usefixtures("project_dir")
async def test_yaml_with_shell_script_step_to_determine_schedule_is_active(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_yaml = Path("prefect.yaml")
with prefect_yaml.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name",
"work_pool": {"name": work_pool.name},
"build": [
{
"prefect.deployments.steps.run_shell_script": {
"id": "get-schedule-isactive",
"script": "echo 'false'",
}
}
],
"schedules": [
{
"active": "{{ get-schedule-isactive.stdout }}",
"cron": "0 * * * *",
"timezone": "America/Chicago",
}
],
}
]
with prefect_yaml.open(mode="w") as f:
yaml.safe_dump(contents, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --all",
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules[0].active is False
@pytest.mark.parametrize("schedule_is_active", [True, False])
@pytest.mark.usefixtures("project_dir")
async def test_yaml_with_env_var_to_determine_schedule_is_active(
self,
prefect_client: PrefectClient,
work_pool: WorkPool,
monkeypatch: pytest.MonkeyPatch,
schedule_is_active: bool,
):
monkeypatch.setenv(
"SCHEDULE_IS_ACTIVE", "true" if schedule_is_active else "false"
)
prefect_yaml = Path("prefect.yaml")
with prefect_yaml.open(mode="r") as f:
contents = yaml.safe_load(f)
contents["deployments"] = [
{
"entrypoint": "./flows/hello.py:my_flow",
"name": "test-name",
"work_pool": {"name": work_pool.name},
"schedules": [
{
"active": "{{ $SCHEDULE_IS_ACTIVE }}",
"cron": "0 * * * *",
"timezone": "America/Chicago",
}
],
}
]
with prefect_yaml.open(mode="w") as f:
yaml.safe_dump(contents, f)
await run_sync_in_worker_thread(
invoke_and_assert,
command="deploy --name test-name",
expected_code=0,
)
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
assert deployment.schedules[0].active is schedule_is_active
@pytest.mark.usefixtures("project_dir")
async def test_redeploy_does_not_update_active_when_active_unset_in_yaml(
self, prefect_client: PrefectClient, work_pool: WorkPool
):
prefect_file = Path("prefect.yaml")
with prefect_file.open(mode="r") as f:
deploy_config = yaml.safe_load(f)
# Create a deployment with a schedule that is not active
deploy_config["deployments"][0]["name"] = "test-name"
deploy_config["deployments"][0]["schedules"] = [
{
"cron": "0 4 * * *",
"timezone": "America/Chicago",
"slug": "test-yaml-slug",
}
]
with prefect_file.open(mode="w") as f:
yaml.safe_dump(deploy_config, f)
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}",
)
assert result.exit_code == 0
# Check that the schedule is active
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
deployment_schedule = deployment.schedules[0]
assert deployment_schedule.active is True
assert deployment_schedule.schedule.cron == "0 4 * * *"
assert deployment_schedule.schedule.timezone == "America/Chicago"
# Update the schedule via the client and set the schedule to inactive
await prefect_client._client.patch(
f"/deployments/{deployment.id}/schedules/{deployment.schedules[0].id}",
json=DeploymentScheduleUpdate(
active=False,
).model_dump(mode="json", exclude_unset=True),
)
# Check that the schedule is inactive
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
deployment_schedule = deployment.schedules[0]
assert deployment_schedule.active is False
assert deployment_schedule.schedule.cron == "0 4 * * *"
assert deployment_schedule.schedule.timezone == "America/Chicago"
# Redeploy the deployment
result = await run_sync_in_worker_thread(
invoke_and_assert,
command=f"deploy ./flows/hello.py:my_flow -n test-name --pool {work_pool.name}",
)
assert result.exit_code == 0
# Check that the schedule is still inactive
deployment = await prefect_client.read_deployment_by_name(
"An important name/test-name"
)
deployment_schedule = deployment.schedules[0]
assert deployment_schedule.active is False
assert deployment_schedule.schedule.cron == "0 4 * * *"
assert deployment_schedule.schedule.timezone == "America/Chicago"
| TestSchedules |
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 177231,
"end": 187645
} | class ____:
def test_send_removed_project_release_email_to_maintainer(
self, pyramid_request, pyramid_config, monkeypatch
):
stub_user = pretend.stub(
id="id_1",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
stub_submitter_user = pretend.stub(
id="id_2",
username="submitterusername",
name="",
email="submiteremail@example.com",
primary_email=pretend.stub(
email="submiteremail@example.com", verified=True
),
)
subject_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
ids = [stub_submitter_user.id, stub_user.id]
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=ids.pop())
)
),
)
pyramid_request.user = stub_submitter_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
release = pretend.stub(
version="0.0.0",
project=pretend.stub(name="test_project"),
created=datetime.datetime(2017, 2, 5, 0, 0, 0, 0),
yanked_reason="",
)
result = email.send_removed_project_release_email(
pyramid_request,
[stub_user, stub_submitter_user],
release=release,
submitter_name=stub_submitter_user.username,
submitter_role="Owner",
recipient_role="Maintainer",
)
assert result == {
"project_name": release.project.name,
"release_version": release.version,
"release_date": release.created.strftime("%Y-%m-%d"),
"submitter_name": stub_submitter_user.username,
"submitter_role": "owner",
"recipient_role_descr": "a maintainer",
}
subject_renderer.assert_(project_name="test_project")
subject_renderer.assert_(release_version="0.0.0")
body_renderer.assert_(project_name="test_project")
body_renderer.assert_(release_version="0.0.0")
body_renderer.assert_(release_date=release.created.strftime("%Y-%m-%d"))
body_renderer.assert_(submitter_name=stub_submitter_user.username)
body_renderer.assert_(submitter_role="owner")
body_renderer.assert_(recipient_role_descr="a maintainer")
assert pyramid_request.task.calls == [
pretend.call(send_email),
pretend.call(send_email),
]
assert send_email.delay.calls == [
pretend.call(
"username <email@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "email@example.com",
"subject": "Email Subject",
"redact_ip": True,
},
},
),
pretend.call(
"submitterusername <submiteremail@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_submitter_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "submiteremail@example.com",
"subject": "Email Subject",
"redact_ip": False,
},
},
),
]
def test_send_removed_project_release_email_to_owner(
self, pyramid_request, pyramid_config, monkeypatch
):
stub_user = pretend.stub(
id="id_1",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
stub_submitter_user = pretend.stub(
id="id_2",
username="submitterusername",
name="",
email="submiteremail@example.com",
primary_email=pretend.stub(
email="submiteremail@example.com", verified=True
),
)
subject_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
"email/removed-project-release/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
ids = [stub_submitter_user.id, stub_user.id]
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=ids.pop())
)
),
)
pyramid_request.user = stub_submitter_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
release = pretend.stub(
version="0.0.0",
project=pretend.stub(name="test_project"),
created=datetime.datetime(2017, 2, 5, 0, 0, 0, 0),
yanked_reason="",
)
result = email.send_removed_project_release_email(
pyramid_request,
[stub_user, stub_submitter_user],
release=release,
submitter_name=stub_submitter_user.username,
submitter_role="Owner",
recipient_role="Owner",
)
assert result == {
"project_name": release.project.name,
"release_version": release.version,
"release_date": release.created.strftime("%Y-%m-%d"),
"submitter_name": stub_submitter_user.username,
"submitter_role": "owner",
"recipient_role_descr": "an owner",
}
subject_renderer.assert_(project_name="test_project")
subject_renderer.assert_(release_version="0.0.0")
body_renderer.assert_(project_name="test_project")
body_renderer.assert_(release_version="0.0.0")
body_renderer.assert_(release_date=release.created.strftime("%Y-%m-%d"))
body_renderer.assert_(submitter_name=stub_submitter_user.username)
body_renderer.assert_(submitter_role="owner")
body_renderer.assert_(recipient_role_descr="an owner")
assert pyramid_request.task.calls == [
pretend.call(send_email),
pretend.call(send_email),
]
assert send_email.delay.calls == [
pretend.call(
"username <email@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "email@example.com",
"subject": "Email Subject",
"redact_ip": True,
},
},
),
pretend.call(
"submitterusername <submiteremail@example.com>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_submitter_user.id,
"additional": {
"from_": "noreply@example.com",
"to": "submiteremail@example.com",
"subject": "Email Subject",
"redact_ip": False,
},
},
),
]
| TestRemovedReleaseEmail |
python | pypa__packaging | src/packaging/metadata.py | {
"start": 20022,
"end": 29304
} | class ____(Generic[T]):
"""Validate a metadata field.
All _process_*() methods correspond to a core metadata field. The method is
called with the field's raw value. If the raw value is valid it is returned
in its "enriched" form (e.g. ``version.Version`` for the ``Version`` field).
If the raw value is invalid, :exc:`InvalidMetadata` is raised (with a cause
as appropriate).
"""
name: str
raw_name: str
added: _MetadataVersion
def __init__(
self,
*,
added: _MetadataVersion = "1.0",
) -> None:
self.added = added
def __set_name__(self, _owner: Metadata, name: str) -> None:
self.name = name
self.raw_name = _RAW_TO_EMAIL_MAPPING[name]
def __get__(self, instance: Metadata, _owner: type[Metadata]) -> T:
# With Python 3.8, the caching can be replaced with functools.cached_property().
# No need to check the cache as attribute lookup will resolve into the
# instance's __dict__ before __get__ is called.
cache = instance.__dict__
value = instance._raw.get(self.name)
# To make the _process_* methods easier, we'll check if the value is None
# and if this field is NOT a required attribute, and if both of those
# things are true, we'll skip the the converter. This will mean that the
# converters never have to deal with the None union.
if self.name in _REQUIRED_ATTRS or value is not None:
try:
converter: Callable[[Any], T] = getattr(self, f"_process_{self.name}")
except AttributeError:
pass
else:
value = converter(value)
cache[self.name] = value
try:
del instance._raw[self.name] # type: ignore[misc]
except KeyError:
pass
return cast("T", value)
def _invalid_metadata(
self, msg: str, cause: Exception | None = None
) -> InvalidMetadata:
exc = InvalidMetadata(
self.raw_name, msg.format_map({"field": repr(self.raw_name)})
)
exc.__cause__ = cause
return exc
def _process_metadata_version(self, value: str) -> _MetadataVersion:
# Implicitly makes Metadata-Version required.
if value not in _VALID_METADATA_VERSIONS:
raise self._invalid_metadata(f"{value!r} is not a valid metadata version")
return cast("_MetadataVersion", value)
def _process_name(self, value: str) -> str:
if not value:
raise self._invalid_metadata("{field} is a required field")
# Validate the name as a side-effect.
try:
utils.canonicalize_name(value, validate=True)
except utils.InvalidName as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
) from exc
else:
return value
def _process_version(self, value: str) -> version_module.Version:
if not value:
raise self._invalid_metadata("{field} is a required field")
try:
return version_module.parse(value)
except version_module.InvalidVersion as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
) from exc
def _process_summary(self, value: str) -> str:
"""Check the field contains no newlines."""
if "\n" in value:
raise self._invalid_metadata("{field} must be a single line")
return value
def _process_description_content_type(self, value: str) -> str:
content_types = {"text/plain", "text/x-rst", "text/markdown"}
message = email.message.EmailMessage()
message["content-type"] = value
content_type, parameters = (
# Defaults to `text/plain` if parsing failed.
message.get_content_type().lower(),
message["content-type"].params,
)
# Check if content-type is valid or defaulted to `text/plain` and thus was
# not parseable.
if content_type not in content_types or content_type not in value.lower():
raise self._invalid_metadata(
f"{{field}} must be one of {list(content_types)}, not {value!r}"
)
charset = parameters.get("charset", "UTF-8")
if charset != "UTF-8":
raise self._invalid_metadata(
f"{{field}} can only specify the UTF-8 charset, not {list(charset)}"
)
markdown_variants = {"GFM", "CommonMark"}
variant = parameters.get("variant", "GFM") # Use an acceptable default.
if content_type == "text/markdown" and variant not in markdown_variants:
raise self._invalid_metadata(
f"valid Markdown variants for {{field}} are {list(markdown_variants)}, "
f"not {variant!r}",
)
return value
def _process_dynamic(self, value: list[str]) -> list[str]:
for dynamic_field in map(str.lower, value):
if dynamic_field in {"name", "version", "metadata-version"}:
raise self._invalid_metadata(
f"{dynamic_field!r} is not allowed as a dynamic field"
)
elif dynamic_field not in _EMAIL_TO_RAW_MAPPING:
raise self._invalid_metadata(
f"{dynamic_field!r} is not a valid dynamic field"
)
return list(map(str.lower, value))
def _process_provides_extra(
self,
value: list[str],
) -> list[utils.NormalizedName]:
normalized_names = []
try:
for name in value:
normalized_names.append(utils.canonicalize_name(name, validate=True))
except utils.InvalidName as exc:
raise self._invalid_metadata(
f"{name!r} is invalid for {{field}}", cause=exc
) from exc
else:
return normalized_names
def _process_requires_python(self, value: str) -> specifiers.SpecifierSet:
try:
return specifiers.SpecifierSet(value)
except specifiers.InvalidSpecifier as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
) from exc
def _process_requires_dist(
self,
value: list[str],
) -> list[requirements.Requirement]:
reqs = []
try:
for req in value:
reqs.append(requirements.Requirement(req))
except requirements.InvalidRequirement as exc:
raise self._invalid_metadata(
f"{req!r} is invalid for {{field}}", cause=exc
) from exc
else:
return reqs
def _process_license_expression(self, value: str) -> NormalizedLicenseExpression:
try:
return licenses.canonicalize_license_expression(value)
except ValueError as exc:
raise self._invalid_metadata(
f"{value!r} is invalid for {{field}}", cause=exc
) from exc
def _process_license_files(self, value: list[str]) -> list[str]:
paths = []
for path in value:
if ".." in path:
raise self._invalid_metadata(
f"{path!r} is invalid for {{field}}, "
"parent directory indicators are not allowed"
)
if "*" in path:
raise self._invalid_metadata(
f"{path!r} is invalid for {{field}}, paths must be resolved"
)
if (
pathlib.PurePosixPath(path).is_absolute()
or pathlib.PureWindowsPath(path).is_absolute()
):
raise self._invalid_metadata(
f"{path!r} is invalid for {{field}}, paths must be relative"
)
if pathlib.PureWindowsPath(path).as_posix() != path:
raise self._invalid_metadata(
f"{path!r} is invalid for {{field}}, paths must use '/' delimiter"
)
paths.append(path)
return paths
def _process_import_names(self, value: list[str]) -> list[str]:
for import_name in value:
name, semicolon, private = import_name.partition(";")
name = name.rstrip()
for identifier in name.split("."):
if not identifier.isidentifier():
raise self._invalid_metadata(
f"{name!r} is invalid for {{field}}; "
f"{identifier!r} is not a valid identifier"
)
elif keyword.iskeyword(identifier):
raise self._invalid_metadata(
f"{name!r} is invalid for {{field}}; "
f"{identifier!r} is a keyword"
)
if semicolon and private.lstrip() != "private":
raise self._invalid_metadata(
f"{import_name!r} is invalid for {{field}}; "
"the only valid option is 'private'"
)
return value
_process_import_namespaces = _process_import_names
| _Validator |
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_docs_tests/test_changed_validator.py | {
"start": 479,
"end": 1262
} | class ____:
def test_default_file_filter(self):
config = ValidationConfig(
root_path=Path("/test"),
path_converter=generic_path_converter,
)
assert config.file_filter(Path("test.py")) is True
assert config.file_filter(Path("test.txt")) is False
assert config.file_filter(Path("test")) is False
def test_custom_file_filter(self):
custom_filter = lambda p: p.name.startswith("test_")
config = ValidationConfig(
root_path=Path("/test"),
path_converter=generic_path_converter,
file_filter=custom_filter,
)
assert config.file_filter(Path("test_module.py")) is True
assert config.file_filter(Path("module.py")) is False
| TestValidationConfig |
python | pypa__warehouse | warehouse/views.py | {
"start": 17533,
"end": 19430
} | class ____:
def __init__(self, request):
self.request = request
@property
def default_response(self):
return {}
@view_config(request_method="GET")
def security_key_giveaway(self):
return self.default_response
@view_config(
route_name="includes.current-user-indicator",
renderer="warehouse:templates/includes/current-user-indicator.html",
uses_session=True,
has_translations=True,
)
def current_user_indicator(request):
return {}
@view_config(
route_name="includes.flash-messages",
renderer="warehouse:templates/includes/flash-messages.html",
uses_session=True,
has_translations=True,
)
def flash_messages(request):
return {}
@view_config(
route_name="includes.session-notifications",
renderer="warehouse:templates/includes/session-notifications.html",
uses_session=True,
has_translations=True,
)
def session_notifications(request):
return {}
@view_config(
route_name="includes.sidebar-sponsor-logo",
renderer="warehouse:templates/includes/sidebar-sponsor-logo.html",
uses_session=False,
has_translations=False,
decorator=[
cache_control(30), # 30 seconds
],
)
def sidebar_sponsor_logo(request):
return {}
@view_config(route_name="health", renderer="string")
def health(request):
# This will ensure that we can access the database and run queries against
# it without doing anything that will take a lock or block other queries.
request.db.execute(text("SELECT 1"))
# Nothing will actually check this, but it's a little nicer to have
# something to return besides an empty body.
return "OK"
@view_config(route_name="force-status")
def force_status(request):
try:
raise exception_response(int(request.matchdict["status"]))
except KeyError:
raise exception_response(404) from None
| SecurityKeyGiveaway |
python | h5py__h5py | h5py/_hl/filters.py | {
"start": 3912,
"end": 4900
} | class ____(Mapping):
"""Base class for referring to an HDF5 and describing its options
Your subclass must define filter_id, and may define a filter_options tuple.
"""
filter_id = None
filter_options = ()
# Mapping interface supports using instances as **kwargs for compatibility
# with older versions of h5py
@property
def _kwargs(self):
return {
'compression': self.filter_id,
'compression_opts': self.filter_options
}
def __hash__(self):
return hash((self.filter_id, self.filter_options))
def __eq__(self, other):
return (
isinstance(other, FilterRefBase)
and self.filter_id == other.filter_id
and self.filter_options == other.filter_options
)
def __len__(self):
return len(self._kwargs)
def __iter__(self):
return iter(self._kwargs)
def __getitem__(self, item):
return self._kwargs[item]
| FilterRefBase |
python | gevent__gevent | src/gevent/tests/known_failures.py | {
"start": 5185,
"end": 5466
} | class ____(type):
# a metaclass on Python 3 that makes sure we only set attributes once. pylint doesn't
# warn about that.
@classmethod
def __prepare__(mcs, name, bases): # pylint:disable=unused-argument,bad-dunder-name
return SetOnceMapping()
| DefinitionsMeta |
python | plotly__plotly.py | plotly/graph_objs/waterfall/connector/_line.py | {
"start": 233,
"end": 4178
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "waterfall.connector"
_path_str = "waterfall.connector.line"
_valid_props = {"color", "dash", "width"}
@property
def color(self):
"""
Sets the line color.
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def dash(self):
"""
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
The 'dash' property is an enumeration that may be specified as:
- One of the following dash styles:
['solid', 'dot', 'dash', 'longdash', 'dashdot', 'longdashdot']
- A string containing a dash length list in pixels or percentages
(e.g. '5px 10px 2px 2px', '5, 10, 2, 2', '10% 20% 40%', etc.)
Returns
-------
str
"""
return self["dash"]
@dash.setter
def dash(self, val):
self["dash"] = val
@property
def width(self):
"""
Sets the line width (in px).
The 'width' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["width"]
@width.setter
def width(self, val):
self["width"] = val
@property
def _prop_descriptions(self):
return """\
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
"""
def __init__(self, arg=None, color=None, dash=None, width=None, **kwargs):
"""
Construct a new Line object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.waterfall.connector.Line`
color
Sets the line color.
dash
Sets the dash style of lines. Set to a dash type string
("solid", "dot", "dash", "longdash", "dashdot", or
"longdashdot") or a dash length list in px (eg
"5px,10px,2px,2px").
width
Sets the line width (in px).
Returns
-------
Line
"""
super().__init__("line")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.waterfall.connector.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.connector.Line`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("dash", arg, dash)
self._set_property("width", arg, width)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Line |
python | tiangolo__fastapi | docs_src/security/tutorial003.py | {
"start": 772,
"end": 928
} | class ____(BaseModel):
username: str
email: Union[str, None] = None
full_name: Union[str, None] = None
disabled: Union[bool, None] = None
| User |
python | sqlalchemy__sqlalchemy | test/orm/test_unitofwork.py | {
"start": 13534,
"end": 17825
} | class ____(fixtures.MappedTest):
__sparse_driver_backend__ = True
@classmethod
def define_tables(cls, metadata):
Table(
"users_t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("name", String(30)),
Column("counter", Integer, default=1),
)
Table(
"boolean_t",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("value", Boolean),
)
Table(
"pk_t",
metadata,
Column(
"p_id", Integer, key="id", autoincrement=True, primary_key=True
),
Column("data", String(30)),
)
@classmethod
def setup_classes(cls):
class User(cls.Comparable):
pass
class HasBoolean(cls.Comparable):
pass
class PkDefault(cls.Comparable):
pass
@classmethod
def setup_mappers(cls):
User, users_t = cls.classes.User, cls.tables.users_t
HasBoolean, boolean_t = cls.classes.HasBoolean, cls.tables.boolean_t
PkDefault, pk_t = cls.classes.PkDefault, cls.tables.pk_t
cls.mapper_registry.map_imperatively(User, users_t)
cls.mapper_registry.map_imperatively(HasBoolean, boolean_t)
cls.mapper_registry.map_imperatively(PkDefault, pk_t)
def test_update(self):
User = self.classes.User
u = User(name="test")
session = fixture_session()
session.add(u)
session.flush()
eq_(u.counter, 1)
u.counter = User.counter + 1
session.flush()
def go():
assert (u.counter == 2) is True # ensure its not a ClauseElement
self.sql_count_(1, go)
def test_multi_update(self):
User = self.classes.User
u = User(name="test")
session = fixture_session()
session.add(u)
session.flush()
eq_(u.counter, 1)
u.name = "test2"
u.counter = User.counter + 1
session.flush()
def go():
eq_(u.name, "test2")
assert (u.counter == 2) is True
self.sql_count_(1, go)
session.expunge_all()
u = session.get(User, u.id)
eq_(u.name, "test2")
eq_(u.counter, 2)
def test_insert(self):
User = self.classes.User
u = User(name="test", counter=sa.select(5).scalar_subquery())
session = fixture_session()
session.add(u)
session.flush()
assert (u.counter == 5) is True
@testing.requires.sql_expressions_inserted_as_primary_key
def test_insert_pk_expression(self):
PkDefault = self.classes.PkDefault
pk = PkDefault(id=literal(5) + 10, data="some data")
session = fixture_session()
session.add(pk)
session.flush()
eq_(pk.id, 15)
session.commit()
eq_(pk.id, 15)
def test_update_special_comparator(self):
HasBoolean = self.classes.HasBoolean
# make sure the comparison we're shooting
# for is invalid, otherwise we need to
# test something else here
assert_raises_message(
TypeError,
"Boolean value of this clause is not defined",
bool,
None == sa.false(), # noqa
)
s = fixture_session()
hb = HasBoolean(value=None)
s.add(hb)
s.flush()
hb.value = sa.false()
s.flush()
# needs to be refreshed
assert "value" not in hb.__dict__
eq_(hb.value, False)
def test_clauseelement_accessor(self):
class Thing:
def __init__(self, value):
self.value = value
def __clause_element__(self):
return literal_column(str(self.value))
User = self.classes.User
u = User(id=5, name="test", counter=Thing(3))
session = fixture_session()
session.add(u)
session.flush()
u.counter = Thing(5)
session.flush()
def go():
eq_(u.counter, 5)
self.sql_count_(1, go)
| ClauseAttributesTest |
python | numba__numba | numba/tests/test_fancy_indexing.py | {
"start": 467,
"end": 12745
} | class ____(MemoryLeakMixin, TestCase):
def generate_advanced_indices(self, N, many=True):
choices = [np.int16([0, N - 1, -2])]
if many:
choices += [np.uint16([0, 1, N - 1]),
np.bool_([0, 1, 1, 0])]
return choices
def generate_basic_index_tuples(self, N, maxdim, many=True):
"""
Generate basic index tuples with 0 to *maxdim* items.
"""
# Note integers can be considered advanced indices in certain
# cases, so we avoid them here.
# See "Combining advanced and basic indexing"
# in http://docs.scipy.org/doc/numpy/reference/arrays.indexing.html
if many:
choices = [slice(None, None, None),
slice(1, N - 1, None),
slice(0, None, 2),
slice(N - 1, None, -2),
slice(-N + 1, -1, None),
slice(-1, -N, -2),
]
else:
choices = [slice(0, N - 1, None),
slice(-1, -N, -2)]
for ndim in range(maxdim + 1):
for tup in itertools.product(choices, repeat=ndim):
yield tup
def generate_advanced_index_tuples(self, N, maxdim, many=True):
"""
Generate advanced index tuples by generating basic index tuples
and adding a single advanced index item.
"""
# (Note Numba doesn't support advanced indices with more than
# one advanced index array at the moment)
choices = list(self.generate_advanced_indices(N, many=many))
for i in range(maxdim + 1):
for tup in self.generate_basic_index_tuples(N, maxdim - 1, many):
for adv in choices:
yield tup[:i] + (adv,) + tup[i:]
def generate_advanced_index_tuples_with_ellipsis(self, N, maxdim, many=True):
"""
Same as generate_advanced_index_tuples(), but also insert an
ellipsis at various points.
"""
for tup in self.generate_advanced_index_tuples(N, maxdim, many):
for i in range(len(tup) + 1):
yield tup[:i] + (Ellipsis,) + tup[i:]
def check_getitem_indices(self, arr, indices):
pyfunc = getitem_usecase
cfunc = jit(nopython=True)(pyfunc)
orig = arr.copy()
orig_base = arr.base or arr
for index in indices:
expected = pyfunc(arr, index)
# Sanity check: if a copy wasn't made, this wasn't advanced
# but basic indexing, and shouldn't be tested here.
assert expected.base is not orig_base
got = cfunc(arr, index)
# Note Numba may not return the same array strides and
# contiguity as Numpy
self.assertEqual(got.shape, expected.shape)
self.assertEqual(got.dtype, expected.dtype)
np.testing.assert_equal(got, expected)
# Check a copy was *really* returned by Numba
if got.size:
got.fill(42)
np.testing.assert_equal(arr, orig)
def test_getitem_tuple(self):
# Test many variations of advanced indexing with a tuple index
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_index_tuples(N, ndim)
self.check_getitem_indices(arr, indices)
def test_getitem_tuple_and_ellipsis(self):
# Same, but also insert an ellipsis at a random point
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_index_tuples_with_ellipsis(N, ndim,
many=False)
self.check_getitem_indices(arr, indices)
def test_ellipsis_getsetitem(self):
# See https://github.com/numba/numba/issues/3225
@jit(nopython=True)
def foo(arr, v):
arr[..., 0] = arr[..., 1]
arr = np.arange(2)
foo(arr, 1)
self.assertEqual(arr[0], arr[1])
def test_getitem_array(self):
# Test advanced indexing with a single array index
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_indices(N)
self.check_getitem_indices(arr, indices)
def check_setitem_indices(self, arr, indices):
pyfunc = setitem_usecase
cfunc = jit(nopython=True)(pyfunc)
for index in indices:
src = arr[index]
expected = np.zeros_like(arr)
got = np.zeros_like(arr)
pyfunc(expected, index, src)
cfunc(got, index, src)
# Note Numba may not return the same array strides and
# contiguity as Numpy
self.assertEqual(got.shape, expected.shape)
self.assertEqual(got.dtype, expected.dtype)
np.testing.assert_equal(got, expected)
def test_setitem_tuple(self):
# Test many variations of advanced indexing with a tuple index
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_index_tuples(N, ndim)
self.check_setitem_indices(arr, indices)
def test_setitem_tuple_and_ellipsis(self):
# Same, but also insert an ellipsis at a random point
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32)
indices = self.generate_advanced_index_tuples_with_ellipsis(N, ndim,
many=False)
self.check_setitem_indices(arr, indices)
def test_setitem_array(self):
# Test advanced indexing with a single array index
N = 4
ndim = 3
arr = np.arange(N ** ndim).reshape((N,) * ndim).astype(np.int32) + 10
indices = self.generate_advanced_indices(N)
self.check_setitem_indices(arr, indices)
def test_setitem_0d(self):
# Test setitem with a 0d-array
pyfunc = setitem_usecase
cfunc = jit(nopython=True)(pyfunc)
inps = [
(np.zeros(3), np.array(3.14)),
(np.zeros(2), np.array(2)),
(np.zeros(3, dtype=np.int64), np.array(3, dtype=np.int64)),
(np.zeros(3, dtype=np.float64), np.array(1, dtype=np.int64)),
(np.zeros(5, dtype='<U3'), np.array('abc')),
(np.zeros((3,), dtype='<U3'), np.array('a')),
(np.array(['abc','def','ghi'], dtype='<U3'),
np.array('WXYZ', dtype='<U4')),
(np.zeros(3, dtype=complex), np.array(2+3j, dtype=complex)),
]
for x1, v in inps:
x2 = x1.copy()
pyfunc(x1, 0, v)
cfunc(x2, 0, v)
self.assertPreciseEqual(x1, x2)
def test_np_take(self):
# shorter version of array.take test in test_array_methods
pyfunc = np_take
cfunc = jit(nopython=True)(pyfunc)
def check(arr, ind):
expected = pyfunc(arr, ind)
got = cfunc(arr, ind)
self.assertPreciseEqual(expected, got)
if hasattr(expected, 'order'):
self.assertEqual(expected.order == got.order)
# need to check:
# 1. scalar index
# 2. 1d array index
# 3. nd array index
# 4. reflected list
# 5. tuples
test_indices = []
test_indices.append(1)
test_indices.append(np.array([1, 5, 1, 11, 3]))
test_indices.append(np.array([[[1], [5]], [[1], [11]]]))
test_indices.append([1, 5, 1, 11, 3])
test_indices.append((1, 5, 1))
test_indices.append(((1, 5, 1), (11, 3, 2)))
for dt in [np.int64, np.complex128]:
A = np.arange(12, dtype=dt).reshape((4, 3))
for ind in test_indices:
check(A, ind)
# https://github.com/numpy/numpy/blob/main/numpy/_core/tests/test_numeric.py#L319-L325
indices = [1, 2, 4]
a = np.array([1, 2, 3, 4, 5])
check(a, indices)
#check illegal access raises
szA = A.size
illegal_indices = [szA, -szA - 1, np.array(szA), np.array(-szA - 1),
[szA], [-szA - 1]]
for x in illegal_indices:
with self.assertRaises(IndexError):
cfunc(A, x) # oob raises
# check float indexing raises
with self.assertRaises(TypingError):
cfunc(A, [1.7])
#exceptions leak refs
self.disable_leak_check()
def test_np_take_axis(self):
pyfunc = np_take_kws
cfunc = jit(nopython=True)(pyfunc)
nt = collections.namedtuple('inputs', ['arrays', 'indices', 'axis'])
triples = (
nt(
arrays=(
np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]),
),
indices=(
np.array([0, 2, 1]),
np.array([1, 2, 1, 2, 1]),
np.array([0]),
1,
(0,),
(0, 1),
),
axis=(0, 1, -1),
),
nt(
arrays=(
np.arange(5),
np.array([123]),
),
indices=(
0,
(0,),
np.asarray([0])
),
axis=(0,)
),
nt(
arrays=(
np.ones((10, 1, 11, 1, 12, 1, 13)),
),
indices=(
0,
),
axis=(1, 3, 5)
),
)
for arrays, indices, axis in triples:
for array in arrays:
for indice in indices:
for ax in axis:
expected = np.take(array, indice, axis=ax)
got = cfunc(array, indice, axis=ax)
self.assertPreciseEqual(expected, got)
def test_np_take_axis_exception(self):
cfunc = jit(nopython=True)(np_take_kws)
arr = np.arange(9).reshape(3, 3)
msg = 'axis 2 is out of bounds for array of dimension 2'
indices = np.array([0, 1, 2])
with self.assertRaisesRegex(ValueError, msg):
cfunc(arr, indices, axis=2)
self.disable_leak_check()
def test_newaxis(self):
@njit
def np_new_axis_getitem(a, idx):
return a[idx]
@njit
def np_new_axis_setitem(a, idx, item):
a[idx] = item
return a
a = np.arange(4 * 5 * 6 * 7).reshape((4, 5, 6, 7))
idx_cases = [
(slice(None), np.newaxis),
(np.newaxis, slice(None)),
(slice(1), np.newaxis, np.array([1, 2, 1])),
(np.newaxis, np.array([1, 2, 1]), slice(None)),
(slice(1), Ellipsis, np.newaxis, np.array([1, 2, 1])),
(np.array([1, 2, 1]), np.newaxis, Ellipsis),
(np.newaxis, slice(1), np.newaxis, np.array([1, 2, 1])),
(np.array([1, 2, 1]), Ellipsis, None, np.newaxis),
(np.newaxis, slice(1), Ellipsis, np.newaxis, np.array([1, 2, 1])),
(np.array([1, 2, 1]), np.newaxis, np.newaxis, Ellipsis),
(np.newaxis, np.array([1, 2, 1]), np.newaxis, Ellipsis),
(slice(3), np.array([1, 2, 1]), np.newaxis, None),
(np.newaxis, np.array([1, 2, 1]), Ellipsis, None),
]
pyfunc_getitem = np_new_axis_getitem.py_func
cfunc_getitem = np_new_axis_getitem
pyfunc_setitem = np_new_axis_setitem.py_func
cfunc_setitem = np_new_axis_setitem
for idx in idx_cases:
expected = pyfunc_getitem(a, idx)
got = cfunc_getitem(a, idx)
np.testing.assert_equal(expected, got)
a_empty = np.zeros_like(a)
item = a[idx]
expected = pyfunc_setitem(a_empty.copy(), idx, item)
got = cfunc_setitem(a_empty.copy(), idx, item)
np.testing.assert_equal(expected, got)
| TestFancyIndexing |
python | pypa__pipenv | pipenv/patched/pip/_internal/cache.py | {
"start": 3825,
"end": 6317
} | class ____(Cache):
"""A cache of wheels for future installs."""
def __init__(self, cache_dir: str) -> None:
super().__init__(cache_dir)
def get_path_for_link(self, link: Link) -> str:
"""Return a directory to store cached wheels for link
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were
not unique. E.g. ./package might have dozens of installs done for it
and build a version of 0.0...and if we built and cached a wheel, we'd
end up using the same wheel even if the source has been edited.
:param link: The link of the sdist for which this will cache wheels.
"""
parts = self._get_cache_path_parts(link)
assert self.cache_dir
# Store wheels within the root cache_dir
return os.path.join(self.cache_dir, "wheels", *parts)
def get(
self,
link: Link,
package_name: Optional[str],
supported_tags: List[Tag],
) -> Link:
candidates = []
if not package_name:
return link
canonical_package_name = canonicalize_name(package_name)
for wheel_name, wheel_dir in self._get_candidates(link, canonical_package_name):
try:
wheel = Wheel(wheel_name)
except InvalidWheelFilename:
continue
if canonicalize_name(wheel.name) != canonical_package_name:
logger.debug(
"Ignoring cached wheel %s for %s as it "
"does not match the expected distribution name %s.",
wheel_name,
link,
package_name,
)
continue
if not wheel.supported(supported_tags):
# Built for a different python/arch/etc
continue
candidates.append(
(
wheel.support_index_min(supported_tags),
wheel_name,
wheel_dir,
)
)
if not candidates:
return link
_, wheel_name, wheel_dir = min(candidates)
return Link(path_to_url(os.path.join(wheel_dir, wheel_name)))
| SimpleWheelCache |
python | getsentry__sentry | src/sentry/flags/models.py | {
"start": 3494,
"end": 4050
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
created_by = HybridCloudForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete="SET_NULL")
date_added = models.DateTimeField(default=timezone.now)
organization = FlexibleForeignKey("sentry.Organization")
provider = models.CharField(db_index=True)
secret = models.CharField()
class Meta:
app_label = "flags"
db_table = "flags_webhooksigningsecret"
unique_together = (("organization", "provider", "secret"),)
| FlagWebHookSigningSecretModel |
python | ray-project__ray | python/ray/util/collective/examples/nccl_p2p_example_multigpu.py | {
"start": 126,
"end": 1460
} | class ____:
def __init__(self):
with Device(0):
self.send1 = cp.ones((4,), dtype=cp.float32)
with Device(1):
self.send2 = cp.ones((4,), dtype=cp.float32) * 2
with Device(0):
self.recv1 = cp.zeros((4,), dtype=cp.float32)
with Device(1):
self.recv2 = cp.zeros((4,), dtype=cp.float32)
self.rank = -1
def setup(self, world_size, rank):
self.rank = rank
collective.init_collective_group(world_size, rank, "nccl", "8")
return True
def compute(self):
if self.rank == 0:
with Device(0):
collective.send_multigpu(self.send1 * 2, 1, 1, "8")
else:
# with Device(1):
collective.recv_multigpu(self.recv2, 0, 0, "8")
return self.recv2
def destroy(self):
collective.destroy_collective_group("8")
if __name__ == "__main__":
ray.init(address="auto")
num_workers = 2
workers = []
init_rets = []
for i in range(num_workers):
w = Worker.remote()
workers.append(w)
init_rets.append(w.setup.remote(num_workers, i))
a = ray.get(init_rets)
results = ray.get([w.compute.remote() for w in workers])
print(results)
ray.get([w.destroy.remote() for w in workers])
ray.shutdown()
| Worker |
python | pennersr__django-allauth | tests/apps/socialaccount/providers/github/tests.py | {
"start": 375,
"end": 4379
} | class ____(OAuth2TestsMixin, TestCase):
provider_id = GitHubProvider.id
def get_mocked_response(self):
return [
MockedResponse(
HTTPStatus.OK,
"""
{
"type":"User",
"organizations_url":"https://api.github.com/users/pennersr/orgs",
"gists_url":"https://api.github.com/users/pennersr/gists{/gist_id}",
"received_events_url":"https://api.github.com/users/pennersr/received_events",
"gravatar_id":"8639768262b8484f6a3380f8db2efa5b",
"followers":16,
"blog":"http://www.intenct.info",
"avatar_url":"https://secure.gravatar.com/avatar/8639768262b8484f6a3380f8db2efa5b?d=https://a248.e.akamai.net/assets.github.com%2Fimages%2Fgravatars%2Fgravatar-user-420.png",
"login":"pennersr",
"created_at":"2010-02-10T12:50:51Z",
"company":"IntenCT",
"subscriptions_url":"https://api.github.com/users/pennersr/subscriptions",
"public_repos":14,
"hireable":false,
"url":"https://api.github.com/users/pennersr",
"public_gists":0,
"starred_url":"https://api.github.com/users/pennersr/starred{/owner}{/repo}",
"html_url":"https://github.com/pennersr",
"location":"The Netherlands",
"bio":null,
"name":"Raymond Penners",
"repos_url":"https://api.github.com/users/pennersr/repos",
"followers_url":"https://api.github.com/users/pennersr/followers",
"id":201022,
"following":0,
"email":"raymond.penners@intenct.nl",
"events_url":"https://api.github.com/users/pennersr/events{/privacy}",
"following_url":"https://api.github.com/users/pennersr/following"
}""",
),
MockedResponse(
HTTPStatus.OK,
"""
[{
"email": "octocat@github.com",
"verified": true,
"primary": true,
"visibility": "public"
}]
""",
),
]
def get_expected_to_str(self):
return "pennersr"
def test_account_name_null(self):
"""String conversion when GitHub responds with empty name"""
mocks = [
MockedResponse(
HTTPStatus.OK,
"""
{
"type": "User",
"id": 201022,
"login": "pennersr",
"name": null
}
""",
),
MockedResponse(
HTTPStatus.OK,
"""
[
{
"email": "octocat@github.com",
"verified": true,
"primary": true,
"visibility": "public"
},
{
"email": "secONDary@GitHub.COM",
"verified": true,
"primary": false,
"visibility": "public"
}
]
""",
),
]
with patch(
"allauth.socialaccount.adapter.DefaultSocialAccountAdapter.populate_user"
) as populate_mock:
self.login(mocks)
populate_data = populate_mock.call_args[0][2]
assert populate_data["email"] == "octocat@github.com"
socialaccount = SocialAccount.objects.get(uid="201022")
self.assertIsNone(socialaccount.extra_data.get("name"))
account = socialaccount.get_provider_account()
self.assertIsNotNone(account.to_str())
self.assertEqual(account.to_str(), "pennersr")
self.assertEqual(socialaccount.user.email, "octocat@github.com")
self.assertTrue(
EmailAddress.objects.filter(
primary=False,
verified=True,
email="secondary@github.com",
user=socialaccount.user,
).exists()
)
self.assertTrue("emails" not in socialaccount.extra_data)
| GitHubTests |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/openlineage/mixins.py | {
"start": 1707,
"end": 26676
} | class ____:
"""Mixin for BigQueryInsertJobOperator to extract OpenLineage metadata."""
def get_openlineage_facets_on_complete(self, _):
"""
Retrieve OpenLineage data for a completed BigQuery job.
This method calls BigQuery API, retrieving input and output dataset info from it,
as well as run-level statistics.
Run facets may contain:
- ExternalQueryRunFacet (for QUERY job type)
- BigQueryJobRunFacet
- ErrorMessageRunFacet (if an error occurred)
Job facets should contain:
- SqlJobFacet (for QUERY job type)
Input datasets should contain:
- SchemaDatasetFacet
Output datasets should contain:
- SchemaDatasetFacet
- OutputStatisticsOutputDatasetFacet (for QUERY job type)
- ColumnLineageDatasetFacet (for QUERY job type)
"""
from airflow.providers.openlineage.extractors import OperatorLineage
from airflow.providers.openlineage.sqlparser import SQLParser
if not self.job_id:
self.log.warning("No BigQuery job_id was found by OpenLineage.")
return OperatorLineage()
if not self.hook:
# This can occur when in deferrable mode
from airflow.providers.google.cloud.hooks.bigquery import BigQueryHook
self.hook = BigQueryHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.log.debug("Extracting data from bigquery job: `%s`", self.job_id)
inputs, outputs = [], []
run_facets: dict[str, RunFacet] = {
"externalQuery": ExternalQueryRunFacet(externalQueryId=self.job_id, source="bigquery")
}
self._client = self.hook.get_client(
project_id=self.project_id or self.hook.project_id, location=self.location
)
try:
job_properties = self._client.get_job(job_id=self.job_id)._properties
if get_from_nullable_chain(job_properties, ["status", "state"]) != "DONE":
raise ValueError(f"Trying to extract data from running bigquery job: `{self.job_id}`")
run_facets["bigQueryJob"] = self._get_bigquery_job_run_facet(job_properties)
if get_from_nullable_chain(job_properties, ["statistics", "numChildJobs"]):
self.log.debug("Found SCRIPT job. Extracting lineage from child jobs instead.")
# SCRIPT job type has no input / output information but spawns child jobs that have one
# https://cloud.google.com/bigquery/docs/information-schema-jobs#multi-statement_query_job
for child_job_id in self._client.list_jobs(parent_job=self.job_id):
child_job_properties = self._client.get_job(job_id=child_job_id)._properties
child_inputs, child_outputs = self._get_inputs_and_outputs(child_job_properties)
inputs.extend(child_inputs)
outputs.extend(child_outputs)
else:
inputs, outputs = self._get_inputs_and_outputs(job_properties)
except Exception as e:
self.log.warning("Cannot retrieve job details from BigQuery.Client. %s", e, exc_info=True)
exception_msg = traceback.format_exc()
run_facets.update(
{
"errorMessage": ErrorMessageRunFacet(
message=f"{e}: {exception_msg}",
programmingLanguage="python",
)
}
)
return OperatorLineage(
inputs=list(inputs),
outputs=self._deduplicate_outputs(outputs),
run_facets=run_facets,
job_facets={"sql": SQLJobFacet(query=SQLParser.normalize_sql(self.sql))} if self.sql else {},
)
def _get_inputs_and_outputs(self, properties: dict) -> tuple[list[InputDataset], list[OutputDataset]]:
job_type = get_from_nullable_chain(properties, ["configuration", "jobType"])
if job_type == "QUERY":
inputs, outputs = self._get_inputs_and_outputs_for_query_job(properties)
elif job_type == "LOAD":
inputs, outputs = self._get_inputs_and_outputs_for_load_job(properties)
elif job_type == "COPY":
inputs, outputs = self._get_inputs_and_outputs_for_copy_job(properties)
elif job_type == "EXTRACT":
inputs, outputs = self._get_inputs_and_outputs_for_extract_job(properties)
else:
self.log.debug("Unsupported job type for input/output extraction: `%s`.", job_type) # type: ignore[attr-defined]
inputs, outputs = [], []
return inputs, outputs
def _deduplicate_outputs(self, outputs: Iterable[OutputDataset | None]) -> list[OutputDataset]:
final_outputs = {}
for single_output in outputs:
if not single_output:
continue
key = f"{single_output.namespace}.{single_output.name}"
if key not in final_outputs:
final_outputs[key] = single_output
continue
# No OutputStatisticsOutputDatasetFacet is added to duplicated outputs as we can not determine
# if the rowCount or size can be summed together.
if single_output.outputFacets:
single_output.outputFacets.pop("outputStatistics", None)
# If multiple outputs contain Column Level Lineage Facet - merge the facets
if (
single_output.facets
and final_outputs[key].facets
and "columnLineage" in single_output.facets
and "columnLineage" in final_outputs[key].facets # type: ignore
):
single_output.facets["columnLineage"] = merge_column_lineage_facets(
[
single_output.facets["columnLineage"], # type: ignore
final_outputs[key].facets["columnLineage"], # type: ignore
]
)
final_outputs[key] = single_output
return list(final_outputs.values())
def _get_input_dataset(self, table: dict) -> InputDataset:
return cast("InputDataset", self._get_dataset(table, "input"))
def _get_output_dataset(self, table: dict) -> OutputDataset:
return cast("OutputDataset", self._get_dataset(table, "output"))
def _get_dataset(self, table: dict, dataset_type: str) -> Dataset:
project = table.get("projectId")
dataset = table.get("datasetId")
table_name = table.get("tableId")
dataset_name = f"{project}.{dataset}.{table_name}"
dataset_facets = self._get_table_facets_safely(dataset_name)
if dataset_type == "input":
# Logic specific to creating InputDataset (if needed)
return InputDataset(
namespace=BIGQUERY_NAMESPACE,
name=dataset_name,
facets=dataset_facets,
)
if dataset_type == "output":
# Logic specific to creating OutputDataset (if needed)
return OutputDataset(
namespace=BIGQUERY_NAMESPACE,
name=dataset_name,
facets=dataset_facets,
)
raise ValueError("Invalid dataset_type. Must be 'input' or 'output'")
def _get_table_facets_safely(self, table_name: str) -> dict[str, DatasetFacet]:
try:
bq_table = self._client.get_table(table_name)
return get_facets_from_bq_table(bq_table)
except Exception as e:
self.log.warning("Could not extract facets from bigquery table: `%s`. %s", table_name, e) # type: ignore[attr-defined]
return {}
def _get_inputs_and_outputs_for_query_job(
self, properties: dict
) -> tuple[list[InputDataset], list[OutputDataset]]:
input_tables = get_from_nullable_chain(properties, ["statistics", "query", "referencedTables"]) or []
output_table = get_from_nullable_chain(properties, ["configuration", "query", "destinationTable"])
inputs = [
self._get_input_dataset(input_table)
for input_table in input_tables
if input_table != output_table # Output table is in `referencedTables` and needs to be removed
]
if not output_table:
return inputs, []
output = self._get_output_dataset(output_table)
if dataset_stat_facet := self._get_output_statistics_dataset_facet(properties):
output.outputFacets = output.outputFacets or {}
output.outputFacets["outputStatistics"] = dataset_stat_facet
if cll_facet := self._get_column_level_lineage_facet_for_query_job(properties, output, inputs):
output.facets = output.facets or {}
output.facets["columnLineage"] = cll_facet
return inputs, [output]
def _get_inputs_and_outputs_for_load_job(
self, properties: dict
) -> tuple[list[InputDataset], list[OutputDataset]]:
output = self._get_output_dataset(properties["configuration"]["load"]["destinationTable"])
output_table_schema_facet = output.facets.get("schema") if output.facets else None
source_uris = properties["configuration"]["load"]["sourceUris"]
inputs = [
InputDataset(
namespace=namespace,
name=name,
facets={"schema": output_table_schema_facet} if output_table_schema_facet else {},
)
for namespace, name in get_namespace_name_from_source_uris(source_uris)
]
if dataset_stat_facet := self._get_output_statistics_dataset_facet(properties):
output.outputFacets = output.outputFacets or {}
output.outputFacets["outputStatistics"] = dataset_stat_facet
if cll_facet := get_identity_column_lineage_facet(self._extract_column_names(output), inputs):
output.facets = {**output.facets, **cll_facet} if output.facets else cll_facet
return inputs, [output]
def _get_inputs_and_outputs_for_copy_job(
self, properties: dict
) -> tuple[list[InputDataset], list[OutputDataset]]:
input_tables = get_from_nullable_chain(properties, ["configuration", "copy", "sourceTables"]) or [
get_from_nullable_chain(properties, ["configuration", "copy", "sourceTable"])
]
inputs = [self._get_input_dataset(input_table) for input_table in input_tables]
output = self._get_output_dataset(properties["configuration"]["copy"]["destinationTable"])
if dataset_stat_facet := self._get_output_statistics_dataset_facet(properties):
output.outputFacets = output.outputFacets or {}
output.outputFacets["outputStatistics"] = dataset_stat_facet
if cll_facet := get_identity_column_lineage_facet(self._extract_column_names(output), inputs):
output.facets = {**output.facets, **cll_facet} if output.facets else cll_facet
return inputs, [output]
def _get_inputs_and_outputs_for_extract_job(
self, properties: dict
) -> tuple[list[InputDataset], list[OutputDataset]]:
source_table = get_from_nullable_chain(properties, ["configuration", "extract", "sourceTable"])
input_dataset = self._get_input_dataset(source_table) if source_table else None
destination_uris = get_from_nullable_chain(
properties, ["configuration", "extract", "destinationUris"]
) or [get_from_nullable_chain(properties, ["configuration", "extract", "destinationUri"])]
outputs = []
for namespace, name in get_namespace_name_from_source_uris(destination_uris):
output_facets = {}
if input_dataset:
input_schema = input_dataset.facets.get("schema") if input_dataset.facets else None
if input_schema:
output_facets["schema"] = input_schema
if cll_facet := get_identity_column_lineage_facet(
self._extract_column_names(input_dataset), [input_dataset]
):
output_facets = {**output_facets, **cll_facet}
outputs.append(OutputDataset(namespace=namespace, name=name, facets=output_facets))
inputs = [input_dataset] if input_dataset else []
return inputs, outputs
@staticmethod
def _get_bigquery_job_run_facet(properties: dict) -> BigQueryJobRunFacet:
from airflow.providers.google.cloud.openlineage.facets import BigQueryJobRunFacet
job_type = get_from_nullable_chain(properties, ["configuration", "jobType"])
cache_hit, billed_bytes = None, None
if job_type == "QUERY":
if get_from_nullable_chain(properties, ["configuration", "query", "query"]):
# Exclude the query to avoid event size issues and duplicating SqlJobFacet information.
properties = copy.deepcopy(properties)
properties["configuration"]["query"].pop("query")
cache_hit = get_from_nullable_chain(properties, ["statistics", "query", "cacheHit"])
billed_bytes = get_from_nullable_chain(properties, ["statistics", "query", "totalBytesBilled"])
return BigQueryJobRunFacet(
cached=str(cache_hit).lower() == "true",
billedBytes=int(billed_bytes) if billed_bytes else None,
properties=json.dumps(properties),
)
@staticmethod
def _get_output_statistics_dataset_facet(
properties,
) -> OutputStatisticsOutputDatasetFacet | None:
job_type = get_from_nullable_chain(properties, ["configuration", "jobType"])
out_rows, out_bytes = None, None
if job_type == "QUERY":
query_plan = get_from_nullable_chain(properties, chain=["statistics", "query", "queryPlan"])
if not query_plan: # Without query plan there is no statistics
return None
out_stage = query_plan[-1] # Last stage of query plan writes the data and has all the statistics
out_rows = out_stage.get("recordsWritten", None)
out_bytes = out_stage.get("shuffleOutputBytes", None)
elif job_type == "LOAD":
out_rows = get_from_nullable_chain(properties, ["statistics", "load", "outputRows"])
out_bytes = get_from_nullable_chain(properties, ["statistics", "load", "outputBytes"])
elif job_type == "COPY":
out_rows = get_from_nullable_chain(properties, ["statistics", "copy", "copiedRows"])
out_bytes = get_from_nullable_chain(properties, ["statistics", "copy", "copiedLogicalBytes"])
# No statistics available for EXTRACT job type
if out_bytes and out_rows:
return OutputStatisticsOutputDatasetFacet(rowCount=int(out_rows), size=int(out_bytes))
return None
def _get_column_level_lineage_facet_for_query_job(
self, properties: dict, output: OutputDataset, inputs: Iterable[InputDataset]
) -> ColumnLineageDatasetFacet | None:
"""
Extract column-level lineage information from a BigQuery job and return it as a facet.
The Column Level Lineage Facet will NOT be returned if any of the following condition is met:
- The parsed result does not contain column lineage information.
- The parsed result does not contain exactly one output table.
- The parsed result has a different output table than the output table from the BQ job.
- The parsed result has at least one input table not present in the input tables from the BQ job.
- The parsed result has a column not present in the schema of given dataset from the BQ job.
Args:
properties: The properties of the BigQuery job.
output: The output dataset for which the column lineage is being extracted.
Returns:
The extracted Column Lineage Dataset Facet, or None if conditions are not met.
"""
from airflow.providers.openlineage.sqlparser import SQLParser
# Extract SQL query and parse it
self.log.debug("Extracting column-level lineage facet from BigQuery query.") # type: ignore[attr-defined]
query = get_from_nullable_chain(properties, ["configuration", "query", "query"])
if query is None:
self.log.debug("No query found in BQ job configuration. Facet generation skipped.") # type: ignore[attr-defined]
return None
parse_result = SQLParser("bigquery").parse(SQLParser.split_sql_string(SQLParser.normalize_sql(query)))
if parse_result is None or parse_result.column_lineage == []:
self.log.debug("No column-level lineage found in the SQL query. Facet generation skipped.") # type: ignore[attr-defined]
return None
default_dataset, default_project = self._extract_default_dataset_and_project(
properties,
self.project_id, # type: ignore[attr-defined]
)
# Verify if the output table id from the parse result matches the BQ job output table
if not self._validate_output_table_id(
parse_result,
output,
default_project,
default_dataset,
):
return None
# Verify if all columns from parse results are present in the output dataset schema
if not self._validate_output_columns(parse_result, output):
return None
input_tables_from_parse_result = self._extract_parsed_input_tables(
parse_result, default_project, default_dataset
)
input_tables_from_bq = {input_ds.name: self._extract_column_names(input_ds) for input_ds in inputs}
# Verify if all datasets from parse results are present in bq job input datasets
if not self._validate_input_tables(input_tables_from_parse_result, input_tables_from_bq):
return None
# Verify if all columns from parse results are present in their respective bq job input datasets
if not self._validate_input_columns(input_tables_from_parse_result, input_tables_from_bq):
return None
return self._generate_column_lineage_facet(parse_result, default_project, default_dataset)
@staticmethod
def _get_qualified_name_from_parse_result(table, default_project: str, default_dataset: str) -> str:
"""Get the qualified name of a table from the parse result."""
return ".".join(
(
table.database or default_project,
table.schema or default_dataset,
table.name,
)
)
@staticmethod
def _extract_default_dataset_and_project(properties: dict, default_project: str) -> tuple[str, str]:
"""Extract the default dataset and project from the BigQuery job properties."""
default_dataset_obj = get_from_nullable_chain(
properties, ["configuration", "query", "defaultDataset"]
)
default_dataset = default_dataset_obj.get("datasetId", "") if default_dataset_obj else ""
default_project = (
default_dataset_obj.get("projectId", default_project) if default_dataset_obj else default_project
)
return default_dataset, default_project
def _validate_output_table_id(
self, parse_result, output: OutputDataset, default_project: str, default_dataset: str
) -> bool:
"""Check if the output table id from the parse result matches the BQ job output table."""
if len(parse_result.out_tables) != 1:
self.log.debug( # type: ignore[attr-defined]
"Invalid output tables in the parse result: `%s`. Expected exactly one output table.",
parse_result.out_tables,
)
return False
parsed_output_table = self._get_qualified_name_from_parse_result(
parse_result.out_tables[0], default_project, default_dataset
)
if parsed_output_table != output.name:
self.log.debug( # type: ignore[attr-defined]
"Mismatch between parsed output table `%s` and BQ job output table `%s`.",
parsed_output_table,
output.name,
)
return False
return True
@staticmethod
def _extract_column_names(dataset: Dataset) -> list[str]:
"""Extract column names from a dataset's schema."""
return [
f.name
for f in dataset.facets.get("schema", SchemaDatasetFacet(fields=[])).fields # type: ignore[union-attr]
if dataset.facets
]
def _validate_output_columns(self, parse_result, output: OutputDataset) -> bool:
"""Validate if all descendant columns in parse result exist in output dataset schema."""
output_column_names = self._extract_column_names(output)
missing_columns = [
lineage.descendant.name
for lineage in parse_result.column_lineage
if lineage.descendant.name not in output_column_names
]
if missing_columns:
self.log.debug( # type: ignore[attr-defined]
"Output dataset schema is missing columns from the parse result: `%s`.", missing_columns
)
return False
return True
def _extract_parsed_input_tables(
self, parse_result, default_project: str, default_dataset: str
) -> dict[str, list[str]]:
"""Extract input tables and their columns from the parse result."""
input_tables: dict[str, list[str]] = {}
for lineage in parse_result.column_lineage:
for column_meta in lineage.lineage:
if not column_meta.origin:
self.log.debug( # type: ignore[attr-defined]
"Column `%s` lacks origin information. Skipping facet generation.", column_meta.name
)
return {}
input_table_id = self._get_qualified_name_from_parse_result(
column_meta.origin, default_project, default_dataset
)
input_tables.setdefault(input_table_id, []).append(column_meta.name)
return input_tables
def _validate_input_tables(
self, parsed_input_tables: dict[str, list[str]], input_tables_from_bq: dict[str, list[str]]
) -> bool:
"""Validate if all parsed input tables exist in the BQ job's input datasets."""
if not parsed_input_tables:
self.log.debug("No input tables found in the parse result. Facet generation skipped.") # type: ignore[attr-defined]
return False
if missing_tables := set(parsed_input_tables) - set(input_tables_from_bq):
self.log.debug( # type: ignore[attr-defined]
"Parsed input tables not found in the BQ job's input datasets: `%s`.", missing_tables
)
return False
return True
def _validate_input_columns(
self, parsed_input_tables: dict[str, list[str]], input_tables_from_bq: dict[str, list[str]]
) -> bool:
"""Validate if all parsed input columns exist in their respective BQ job input table schemas."""
if not parsed_input_tables:
self.log.debug("No input tables found in the parse result. Facet generation skipped.") # type: ignore[attr-defined]
return False
for table, columns in parsed_input_tables.items():
if missing_columns := set(columns) - set(input_tables_from_bq.get(table, [])):
self.log.debug( # type: ignore[attr-defined]
"Input table `%s` is missing columns from the parse result: `%s`.", table, missing_columns
)
return False
return True
def _generate_column_lineage_facet(
self, parse_result, default_project: str, default_dataset: str
) -> ColumnLineageDatasetFacet:
"""Generate the ColumnLineageDatasetFacet based on the parsed result."""
return ColumnLineageDatasetFacet(
fields={
lineage.descendant.name: Fields(
inputFields=[
InputField(
namespace=BIGQUERY_NAMESPACE,
name=self._get_qualified_name_from_parse_result(
column_meta.origin, default_project, default_dataset
),
field=column_meta.name,
)
for column_meta in lineage.lineage
],
transformationType="",
transformationDescription="",
)
for lineage in parse_result.column_lineage
}
)
| _BigQueryInsertJobOperatorOpenLineageMixin |
python | django__django | django/db/models/functions/text.py | {
"start": 8869,
"end": 9105
} | class ____(Left):
function = "RIGHT"
def get_substr(self):
return Substr(
self.source_expressions[0],
self.source_expressions[1] * Value(-1),
self.source_expressions[1],
)
| Right |
python | cython__cython | Cython/Compiler/ParseTreeTransforms.py | {
"start": 31400,
"end": 61047
} | class ____(CythonTransform):
"""
After parsing, directives can be stored in a number of places:
- #cython-comments at the top of the file (stored in ModuleNode)
- Command-line arguments overriding these
- @cython.directivename decorators
- with cython.directivename: statements
- replaces "cython.compiled" with BoolNode(value=True)
allowing unreachable blocks to be removed at a fairly early stage
before cython typing rules are forced on applied
This transform is responsible for interpreting these various sources
and store the directive in two ways:
- Set the directives attribute of the ModuleNode for global directives.
- Use a CompilerDirectivesNode to override directives for a subtree.
(The first one is primarily to not have to modify with the tree
structure, so that ModuleNode stay on top.)
The directives are stored in dictionaries from name to value in effect.
Each such dictionary is always filled in for all possible directives,
using default values where no value is given by the user.
The available directives are controlled in Options.py.
Note that we have to run this prior to analysis, and so some minor
duplication of functionality has to occur: We manually track cimports
and which names the "cython" module may have been imported to.
"""
unop_method_nodes = {
'typeof': ExprNodes.TypeofNode,
'operator.address': ExprNodes.AmpersandNode,
'operator.dereference': ExprNodes.DereferenceNode,
'operator.preincrement' : ExprNodes.inc_dec_constructor(True, '++'),
'operator.predecrement' : ExprNodes.inc_dec_constructor(True, '--'),
'operator.postincrement': ExprNodes.inc_dec_constructor(False, '++'),
'operator.postdecrement': ExprNodes.inc_dec_constructor(False, '--'),
'operator.typeid' : ExprNodes.TypeidNode,
# For backwards compatibility.
'address': ExprNodes.AmpersandNode,
}
binop_method_nodes = {
'operator.comma' : ExprNodes.c_binop_constructor(','),
}
special_methods = {
'declare', 'union', 'struct', 'typedef',
'sizeof', 'cast', 'pointer', 'compiled',
'NULL', 'fused_type', 'parallel',
}
special_methods.update(unop_method_nodes)
valid_cython_submodules = {
'cimports',
'dataclasses',
'operator',
'parallel',
'view',
}
valid_parallel_directives = {
"parallel",
"prange",
"threadid",
#"threadsavailable",
}
def __init__(self, context, compilation_directive_defaults):
super().__init__(context)
self.cython_module_names = set()
self.directive_names = {'staticmethod': 'staticmethod'}
self.parallel_directives = {}
directives = copy.deepcopy(Options.get_directive_defaults())
for key, value in compilation_directive_defaults.items():
directives[str(key)] = copy.deepcopy(value)
self.directives = directives
def check_directive_scope(self, pos, directive, scope):
legal_scopes = Options.directive_scopes.get(directive, None)
if legal_scopes and scope not in legal_scopes:
self.context.nonfatal_error(PostParseError(pos, 'The %s compiler directive '
'is not allowed in %s scope' % (directive, scope)))
return False
else:
if directive not in Options.directive_types:
error(pos, "Invalid directive: '%s'." % (directive,))
return True
def _check_valid_cython_module(self, pos, module_name):
if not module_name.startswith("cython."):
return
submodule = module_name.split('.', 2)[1]
if submodule in self.valid_cython_submodules:
return
extra = ""
# This is very rarely used, so don't waste space on static tuples.
hints = [
line.split() for line in """\
imp cimports
cimp cimports
para parallel
parra parallel
dataclass dataclasses
""".splitlines()[:-1]
]
for wrong, correct in hints:
if module_name.startswith("cython." + wrong):
extra = "Did you mean 'cython.%s' ?" % correct
break
if not extra:
is_simple_cython_name = submodule in Options.directive_types
if not is_simple_cython_name and not submodule.startswith("_"):
# Try to find it in the Shadow module (i.e. the pure Python namespace of cython.*).
# FIXME: use an internal reference of "cython.*" names instead of Shadow.py
from .. import Shadow
is_simple_cython_name = hasattr(Shadow, submodule)
if is_simple_cython_name:
extra = "Instead, use 'import cython' and then 'cython.%s'." % submodule
error(pos, "'%s' is not a valid cython.* module%s%s" % (
module_name,
". " if extra else "",
extra,
))
# Set up processing and handle the cython: comments.
def visit_ModuleNode(self, node):
for key in sorted(node.directive_comments):
if not self.check_directive_scope(node.pos, key, 'module'):
self.wrong_scope_error(node.pos, key, 'module')
del node.directive_comments[key]
self.module_scope = node.scope
self.directives.update(node.directive_comments)
node.directives = self.directives
node.parallel_directives = self.parallel_directives
self.visitchildren(node)
node.cython_module_names = self.cython_module_names
return node
def visit_CompilerDirectivesNode(self, node):
old_directives, self.directives = self.directives, node.directives
self.visitchildren(node)
self.directives = old_directives
return node
# The following four functions track imports and cimports that
# begin with "cython"
def is_cython_directive(self, name):
return (name in Options.directive_types or
name in self.special_methods or
PyrexTypes.parse_basic_type(name))
def is_parallel_directive(self, full_name, pos):
"""
Checks to see if fullname (e.g. cython.parallel.prange) is a valid
parallel directive. If it is a star import it also updates the
parallel_directives.
"""
result = (full_name + ".").startswith("cython.parallel.")
if result:
directive = full_name.split('.')
if full_name == "cython.parallel":
self.parallel_directives["parallel"] = "cython.parallel"
elif full_name == "cython.parallel.*":
for name in self.valid_parallel_directives:
self.parallel_directives[name] = "cython.parallel.%s" % name
elif (len(directive) != 3 or
directive[-1] not in self.valid_parallel_directives):
error(pos, "No such directive: %s" % full_name)
return result
def visit_CImportStatNode(self, node):
module_name = node.module_name
if module_name == "cython.cimports":
error(node.pos, "Cannot cimport the 'cython.cimports' package directly, only submodules.")
if module_name.startswith("cython.cimports."):
if node.as_name and node.as_name != 'cython':
node.module_name = module_name[len("cython.cimports."):]
return node
error(node.pos,
"Python cimports must use 'from cython.cimports... import ...'"
" or 'import ... as ...', not just 'import ...'")
if module_name == "cython":
self.cython_module_names.add(node.as_name or "cython")
elif module_name.startswith("cython."):
if module_name.startswith("cython.parallel."):
error(node.pos, node.module_name + " is not a module")
else:
self._check_valid_cython_module(node.pos, module_name)
if module_name == "cython.parallel":
if node.as_name and node.as_name != "cython":
self.parallel_directives[node.as_name] = module_name
else:
self.cython_module_names.add("cython")
self.parallel_directives[
"cython.parallel"] = module_name
elif node.as_name:
self.directive_names[node.as_name] = module_name[7:]
else:
self.cython_module_names.add("cython")
# if this cimport was a compiler directive, we don't
# want to leave the cimport node sitting in the tree
return None
return node
    def visit_FromCImportStatNode(self, node):
        """
        Handle ``from cython... cimport ...``: register parallel directives and
        compiler directives that were imported by name, keeping only genuine
        module imports in the node.  Returns None if nothing real remains.
        """
        module_name = node.module_name
        if module_name == "cython.cimports" or module_name.startswith("cython.cimports."):
            # only supported for convenience
            return self._create_cimport_from_import(
                node.pos, module_name, node.relative_level, node.imported_names)
        elif not node.relative_level and (
                module_name == "cython" or module_name.startswith("cython.")):
            self._check_valid_cython_module(node.pos, module_name)
            submodule = (module_name + ".")[7:]  # part after "cython.", possibly empty
            newimp = []
            for pos, name, as_name in node.imported_names:
                full_name = submodule + name
                qualified_name = "cython." + full_name
                if self.is_parallel_directive(qualified_name, node.pos):
                    # from cython cimport parallel, or
                    # from cython.parallel cimport parallel, prange, ...
                    self.parallel_directives[as_name or name] = qualified_name
                elif self.is_cython_directive(full_name):
                    self.directive_names[as_name or name] = full_name
                elif full_name in ['dataclasses', 'typing']:
                    self.directive_names[as_name or name] = full_name
                    # unlike many directives, still treat it as a regular module
                    newimp.append((pos, name, as_name))
                else:
                    newimp.append((pos, name, as_name))

            if not newimp:
                return None

            node.imported_names = newimp
        return node
    def visit_FromImportStatNode(self, node):
        """
        Handle Python-level ``from cython... import ...``: rewrite
        ``cython.cimports`` imports into cimports and record compiler /
        parallel directives imported by name.
        """
        import_node = node.module
        module_name = import_node.module_name.value
        if module_name == "cython.cimports" or module_name.startswith("cython.cimports."):
            imported_names = []
            for name, name_node in node.items:
                # Third tuple entry is the "as" name, or None if unchanged.
                imported_names.append(
                    (name_node.pos, name, None if name == name_node.name else name_node.name))
            return self._create_cimport_from_import(
                node.pos, module_name, import_node.level, imported_names)
        elif module_name == "cython" or module_name.startswith("cython."):
            self._check_valid_cython_module(import_node.module_name.pos, module_name)
            submodule = (module_name + ".")[7:]  # part after "cython.", possibly empty
            newimp = []
            for name, name_node in node.items:
                full_name = submodule + name
                qualified_name = "cython." + full_name
                if self.is_parallel_directive(qualified_name, node.pos):
                    self.parallel_directives[name_node.name] = qualified_name
                elif self.is_cython_directive(full_name):
                    self.directive_names[name_node.name] = full_name
                else:
                    # Not a directive - keep it as a regular import.
                    newimp.append((name, name_node))
            if not newimp:
                return None
            node.items = newimp
        return node
    def _create_cimport_from_import(self, node_pos, module_name, level, imported_names):
        """
        Translate a Python-level import of the ``cython.cimports`` package
        into the equivalent cimport node(s).
        """
        if module_name == "cython.cimports" or module_name.startswith("cython.cimports."):
            module_name = EncodedString(module_name[len("cython.cimports."):])  # may be empty

        if module_name:
            # from cython.cimports.a.b import x, y, z  => from a.b cimport x, y, z
            return Nodes.FromCImportStatNode(
                node_pos, module_name=module_name,
                relative_level=level,
                imported_names=imported_names)
        else:
            # from cython.cimports import x, y, z  => cimport x; cimport y; cimport z
            return [
                Nodes.CImportStatNode(
                    pos,
                    module_name=dotted_name,
                    as_name=as_name,
                    is_absolute=level == 0)
                for pos, dotted_name, as_name in imported_names
            ]
    def visit_SingleAssignmentNode(self, node):
        """
        Rewrite ``name = __import__('cython...')``-style assignments (i.e.
        ``import cython... as name`` after parsing) into cimport handling.
        """
        if isinstance(node.rhs, ExprNodes.ImportNode):
            module_name = node.rhs.module_name.value
            if module_name != "cython" and not module_name.startswith("cython."):
                return node
            # Re-dispatch as a cimport so directives get registered.
            node = Nodes.CImportStatNode(node.pos, module_name=module_name, as_name=node.lhs.name)
            node = self.visit_CImportStatNode(node)
        else:
            self.visitchildren(node)
        return node

    def visit_NameNode(self, node):
        """
        Mark names that refer to the cython module or to a directive, and
        fold ``cython.compiled`` (imported by name) to True.
        """
        if node.annotation:
            self.visitchild(node, 'annotation')
        if node.name in self.cython_module_names:
            node.is_cython_module = True
        else:
            directive = self.directive_names.get(node.name)
            if directive is not None:
                node.cython_attribute = directive
        if node.as_cython_attribute() == "compiled":
            return ExprNodes.BoolNode(node.pos, value=True)  # replace early so unused branches can be dropped
                # before they have a chance to cause compile-errors
        return node
    def visit_AttributeNode(self, node):
        """Fold the ``cython.compiled`` attribute to the constant True."""
        self.visitchildren(node)
        if node.as_cython_attribute() == "compiled":
            return ExprNodes.BoolNode(node.pos, value=True)  # replace early so unused branches can be dropped
                # before they have a chance to cause compile-errors
        return node

    def visit_AnnotationNode(self, node):
        # for most transforms annotations are left unvisited (because they're unevaluated)
        # however, it is important to pick up compiler directives from them
        if node.expr:
            self.visit(node.expr)
        return node

    def visit_NewExprNode(self, node):
        """Visit a C++ ``new`` expression, including its cppclass operand."""
        self.visitchild(node, 'cppclass')
        self.visitchildren(node)
        return node
    def try_to_parse_directives(self, node):
        # If node is the contents of an directive (in a with statement or
        # decorator), returns a list of (directivename, value) pairs.
        # Otherwise, returns None
        if isinstance(node, ExprNodes.CallNode):
            self.visitchild(node, 'function')
            optname = node.function.as_cython_attribute()
            if optname:
                directivetype = Options.directive_types.get(optname)
                if directivetype:
                    args, kwds = node.explicit_args_kwds()
                    directives = []
                    key_value_pairs = []
                    if kwds is not None and directivetype is not dict:
                        # Keyword args like "sub=..." may themselves be
                        # sub-directives named "optname.sub"; split those out.
                        for keyvalue in kwds.key_value_pairs:
                            key, value = keyvalue
                            sub_optname = "%s.%s" % (optname, key.value)
                            if Options.directive_types.get(sub_optname):
                                directives.append(self.try_to_parse_directive(sub_optname, [value], None, keyvalue.pos))
                            else:
                                key_value_pairs.append(keyvalue)
                        if not key_value_pairs:
                            kwds = None
                        else:
                            kwds.key_value_pairs = key_value_pairs
                        if directives and not kwds and not args:
                            # Everything was consumed as sub-directives.
                            return directives
                    directives.append(self.try_to_parse_directive(optname, args, kwds, node.function.pos))
                    return directives
        elif isinstance(node, (ExprNodes.AttributeNode, ExprNodes.NameNode)):
            self.visit(node)
            optname = node.as_cython_attribute()
            if optname:
                directivetype = Options.directive_types.get(optname)
                if directivetype is bool:
                    # A bare "@cython.foo" for a boolean directive means foo=True.
                    arg = ExprNodes.BoolNode(node.pos, value=True)
                    return [self.try_to_parse_directive(optname, [arg], None, node.pos)]
                elif directivetype is None or directivetype is Options.DEFER_ANALYSIS_OF_ARGUMENTS:
                    return [(optname, None)]
                else:
                    raise PostParseError(
                        node.pos, "The '%s' directive should be used as a function call." % optname)
        return None
    def try_to_parse_directive(self, optname, args, kwds, pos):
        """
        Parse one directive call into an ``(optname, value)`` pair, validating
        the argument count and types against the directive's declared type.
        Raises PostParseError for malformed directive usage.
        """
        if optname == 'np_pythran' and not self.context.cpp:
            raise PostParseError(pos, 'The %s directive can only be used in C++ mode.' % optname)
        elif optname == 'exceptval':
            # default: exceptval(None, check=True)
            arg_error = len(args) > 1
            check = True
            if kwds and kwds.key_value_pairs:
                kw = kwds.key_value_pairs[0]
                if (len(kwds.key_value_pairs) == 1 and
                        kw.key.is_string_literal and kw.key.value == 'check' and
                        isinstance(kw.value, ExprNodes.BoolNode)):
                    check = kw.value.value
                else:
                    arg_error = True
            if arg_error:
                raise PostParseError(
                    pos, 'The exceptval directive takes 0 or 1 positional arguments and the boolean keyword "check"')
            return ('exceptval', (args[0] if args else None, check))

        directivetype = Options.directive_types.get(optname)
        if len(args) == 1 and isinstance(args[0], ExprNodes.NoneNode):
            # "directive(None)" resets the directive to its default value.
            return optname, Options.get_directive_defaults()[optname]
        elif directivetype is bool:
            if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time boolean argument' % optname)
            return (optname, args[0].value)
        elif directivetype is int:
            if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.IntNode):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time integer argument' % optname)
            return (optname, int(args[0].value))
        elif directivetype is str:
            if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.UnicodeNode):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time string argument' % optname)
            return (optname, str(args[0].value))
        elif directivetype is type:
            if kwds is not None or len(args) != 1:
                raise PostParseError(pos,
                    'The %s directive takes one type argument' % optname)
            return (optname, args[0])
        elif directivetype is dict:
            if len(args) != 0:
                raise PostParseError(pos,
                    'The %s directive takes no prepositional arguments' % optname)
            return optname, kwds.as_python_dict()
        elif directivetype is list:
            if kwds and len(kwds.key_value_pairs) != 0:
                raise PostParseError(pos,
                    'The %s directive takes no keyword arguments' % optname)
            return optname, [ str(arg.value) for arg in args ]
        elif callable(directivetype):
            # Custom parser: called with the directive name and its string argument.
            if kwds is not None or len(args) != 1 or not isinstance(args[0], ExprNodes.UnicodeNode):
                raise PostParseError(pos,
                    'The %s directive takes one compile-time string argument' % optname)
            return (optname, directivetype(optname, str(args[0].value)))
        elif directivetype is Options.DEFER_ANALYSIS_OF_ARGUMENTS:
            # signal to pass things on without processing
            return (optname, (args, kwds.as_python_dict() if kwds else {}))
        else:
            assert False
    def visit_with_directives(self, node, directives, contents_directives):
        """
        Visit *node* with ``directives`` in effect and wrap the result in a
        CompilerDirectivesNode.  ``contents_directives`` are the directives
        that apply only to the node's body.
        """
        # contents_directives may be None
        if not directives:
            assert not contents_directives
            return self.visit_Node(node)

        old_directives = self.directives
        new_directives = Options.copy_inherited_directives(old_directives, **directives)
        if contents_directives is not None:
            new_contents_directives = Options.copy_inherited_directives(
                old_directives, **contents_directives)
        else:
            new_contents_directives = new_directives

        if new_directives == old_directives:
            # Nothing actually changed - no wrapping needed.
            return self.visit_Node(node)

        self.directives = new_directives
        if (contents_directives is not None and
                new_contents_directives != new_directives):
            # we need to wrap the node body in a compiler directives node
            node.body = Nodes.StatListNode(
                node.body.pos,
                stats=[
                    Nodes.CompilerDirectivesNode(
                        node.body.pos,
                        directives=new_contents_directives,
                        body=node.body)
                ]
            )
        retbody = self.visit_Node(node)
        self.directives = old_directives

        if isinstance(retbody, Nodes.CompilerDirectivesNode):
            # Merge nested directive nodes instead of stacking two of them.
            new_directives.update(retbody.directives)
            retbody = retbody.body
        if not isinstance(retbody, Nodes.StatListNode):
            retbody = Nodes.StatListNode(node.pos, stats=[retbody])
        return Nodes.CompilerDirectivesNode(
            retbody.pos, body=retbody, directives=new_directives, is_terminator=retbody.is_terminator)
    # Handle decorators
    def visit_FuncDefNode(self, node):
        """Apply decorator-derived compiler directives to a function definition."""
        directives, contents_directives = self._extract_directives(node, 'function')
        return self.visit_with_directives(node, directives, contents_directives)

    def visit_CVarDefNode(self, node):
        """Apply directive decorators to a cdef declaration; only a few are allowed."""
        directives, _ = self._extract_directives(node, 'function')
        for name, value in directives.items():
            if name == 'locals':
                node.directive_locals = value
            elif name not in ('final', 'staticmethod'):
                self.context.nonfatal_error(PostParseError(
                    node.pos,
                    "Cdef functions can only take cython.locals(), "
                    "staticmethod, or final decorators, got %s." % name))
        return self.visit_with_directives(node, directives, contents_directives=None)

    def visit_CClassDefNode(self, node):
        """Apply decorator-derived compiler directives to a cdef class."""
        directives, contents_directives = self._extract_directives(node, 'cclass')
        return self.visit_with_directives(node, directives, contents_directives)

    def visit_CppClassNode(self, node):
        """Apply decorator-derived compiler directives to a C++ class."""
        directives, contents_directives = self._extract_directives(node, 'cppclass')
        return self.visit_with_directives(node, directives, contents_directives)

    def visit_PyClassDefNode(self, node):
        """Apply decorator-derived compiler directives to a Python class."""
        directives, contents_directives = self._extract_directives(node, 'class')
        return self.visit_with_directives(node, directives, contents_directives)
    def _extract_directives(self, node, scope_name):
        """
        Returns two dicts - directives applied to this function/class
        and directives applied to its contents. They aren't always the
        same (since e.g. cfunc should not be applied to inner functions)
        """
        if not node.decorators:
            return {}, {}
        # Split the decorators into two lists -- real decorators and directives
        directives = []
        realdecs = []
        both = []
        current_opt_dict = dict(self.directives)
        missing = object()  # sentinel: distinguishes "not set" from any real value
        # Decorators coming first take precedence.
        for dec in node.decorators[::-1]:
            new_directives = self.try_to_parse_directives(dec.decorator)
            if new_directives is not None:
                for directive in new_directives:
                    if self.check_directive_scope(node.pos, directive[0], scope_name):
                        name, value = directive
                        if name in ('nogil', 'with_gil'):
                            # Bare "@cython.nogil" means True; otherwise expect
                            # exactly one compile-time boolean argument.
                            if value is None:
                                value = True
                            else:
                                args, kwds = value
                                if kwds or len(args) != 1 or not isinstance(args[0], ExprNodes.BoolNode):
                                    raise PostParseError(dec.pos, 'The %s directive takes one compile-time boolean argument' % name)
                                value = args[0].value
                            directive = (name, value)
                        if current_opt_dict.get(name, missing) != value:
                            if name == 'cfunc' and 'ufunc' in current_opt_dict:
                                error(dec.pos, "Cannot apply @cfunc to @ufunc, please reverse the decorators.")
                            directives.append(directive)
                            current_opt_dict[name] = value
                        else:
                            warning(dec.pos, "Directive does not change previous value (%s%s)" % (
                                name, '=%r' % value if value is not None else ''))
                        if directive[0] == 'staticmethod':
                            # staticmethod acts as both a directive and a real decorator.
                            both.append(dec)
                        # Adapt scope type based on decorators that change it.
                        if directive[0] == 'cclass' and scope_name == 'class':
                            scope_name = 'cclass'
            else:
                realdecs.append(dec)
        node.decorators = realdecs[::-1] + both[::-1]
        # merge or override repeated directives
        optdict = {}
        contents_optdict = {}
        for name, value in directives:
            if name in optdict:
                old_value = optdict[name]
                # keywords and arg lists can be merged, everything
                # else overrides completely
                if isinstance(old_value, dict):
                    old_value.update(value)
                elif isinstance(old_value, list):
                    old_value.extend(value)
                else:
                    optdict[name] = value
            else:
                optdict[name] = value
            if name not in Options.immediate_decorator_directives:
                contents_optdict[name] = value
        return optdict, contents_optdict
    # Handle with-statements
    def visit_WithStatNode(self, node):
        """Interpret ``with cython.<directive>(...):`` blocks as directives."""
        directive_dict = {}
        for directive in self.try_to_parse_directives(node.manager) or []:
            if directive is None:
                continue
            if node.target is not None:
                self.context.nonfatal_error(
                    PostParseError(node.pos, "Compiler directive with statements cannot contain 'as'"))
                continue
            name, value = directive
            if name in ('nogil', 'gil'):
                # special case: in pure mode, "with nogil" spells "with cython.nogil"
                return self._transform_with_gil(node, name)
            elif name == "critical_section":
                args, kwds = value
                return self._transform_critical_section(node, args, kwds)
            elif self.check_directive_scope(node.pos, name, 'with statement'):
                directive_dict[name] = value
        if directive_dict:
            return self.visit_with_directives(node.body, directive_dict, contents_directives=None)
        return self.visit_Node(node)
    def _transform_with_gil(self, node, state):
        """Rewrite ``with cython.(no)gil[(condition)]:`` into a GILStatNode."""
        assert state in ('gil', 'nogil')
        manager = node.manager
        condition = None
        if isinstance(manager, ExprNodes.SimpleCallNode) and manager.args:
            if len(manager.args) > 1:
                self.context.nonfatal_error(
                    PostParseError(node.pos, "Compiler directive %s accepts one positional argument." % state))
            condition = manager.args[0]
        elif isinstance(manager, ExprNodes.GeneralCallNode):
            # Keyword arguments are not supported here.
            self.context.nonfatal_error(
                PostParseError(node.pos, "Compiler directive %s accepts one positional argument." % state))

        node = Nodes.GILStatNode(node.pos, state=state, body=node.body, condition=condition)
        return self.visit_Node(node)

    def _transform_critical_section(self, node, args, kwds):
        """Rewrite ``with cython.critical_section(obj[, obj2]):`` into a CriticalSectionStatNode."""
        if len(args) < 1 or len(args) > 2 or kwds:
            self.context.nonfatal_error(
                PostParseError(node.pos, "critical_section directive accepts one or two positional arguments")
            )
        node = Nodes.CriticalSectionStatNode(
            node.pos, args=args, body=node.body
        )
        return self.visit_Node(node)
| InterpretCompilerDirectives |
python | doocs__leetcode | solution/0400-0499/0426.Convert Binary Search Tree to Sorted Doubly Linked List/Solution.py | {
"start": 174,
"end": 752
} | class ____:
def treeToDoublyList(self, root: 'Optional[Node]') -> 'Optional[Node]':
def dfs(root):
if root is None:
return
nonlocal prev, head
dfs(root.left)
if prev:
prev.right = root
root.left = prev
else:
head = root
prev = root
dfs(root.right)
if root is None:
return None
head = prev = None
dfs(root)
prev.right = head
head.left = prev
return head
| Solution |
python | apache__airflow | providers/edge3/tests/unit/edge3/cli/test_worker.py | {
"start": 2858,
"end": 18569
} | class ____:
    # CLI parser shared by all tests; built once in setup_class.
    parser: argparse.ArgumentParser

    @classmethod
    def setup_class(cls):
        """Build the Airflow CLI parser with the EdgeExecutor configured."""
        with conf_vars(
            {("core", "executor"): "airflow.providers.edge3.executors.edge_executor.EdgeExecutor"}
        ):
            # Reload so the CLI picks up executor-specific commands.
            importlib.reload(executor_loader)
            importlib.reload(cli_parser)
            cls.parser = cli_parser.get_parser()
    @pytest.fixture
    def mock_joblist(self, tmp_path: Path) -> list[Job]:
        """Return one queued Job backed by a mock process and an empty logfile."""
        logfile = tmp_path / "file.log"
        logfile.touch()

        return [
            Job(
                edge_job=EdgeJobFetched(
                    dag_id="test",
                    task_id="test1",
                    run_id="test",
                    map_index=-1,
                    try_number=1,
                    concurrency_slots=1,
                    command=MOCK_COMMAND,  # type: ignore[arg-type]
                ),
                process=_MockPopen(),
                logfile=logfile,
                logsize=0,
            ),
        ]

    @pytest.fixture
    def worker_with_job(self, tmp_path: Path, mock_joblist: list[Job]) -> EdgeWorker:
        """Return an EdgeWorker whose class-level job list contains the mock job."""
        test_worker = EdgeWorker(str(tmp_path / "mock.pid"), "mock", None, 8, 5, 5)
        # EdgeWorker.jobs is class-level state shared by all instances.
        EdgeWorker.jobs = mock_joblist
        return test_worker

    @pytest.fixture
    def mock_edgeworker(self) -> EdgeWorkerModel:
        """Return an idle EdgeWorkerModel listening on the default queue."""
        test_edgeworker = EdgeWorkerModel(
            worker_name="test_edge_worker",
            state="idle",
            queues=["default"],
        )
        return test_edgeworker
    # Note: @patch decorators apply bottom-up, so mock args arrive in reverse order.
    @patch("airflow.providers.edge3.cli.worker.Process")
    @patch("airflow.providers.edge3.cli.worker.logs_logfile_path")
    @patch("airflow.providers.edge3.cli.worker.Popen")
    def test_launch_job(self, mock_popen, mock_logfile_path, mock_process, worker_with_job: EdgeWorker):
        """_launch_job spawns a Process (AF3) or Popen (AF2) and keeps tracking the job."""
        mock_popen.side_effect = [MagicMock()]
        mock_process_instance = MagicMock()
        mock_process.side_effect = [mock_process_instance]
        edge_job = EdgeWorker.jobs.pop().edge_job
        with conf_vars({("edge", "api_url"): "https://invalid-api-test-endpoint"}):
            worker_with_job._launch_job(edge_job)
        if AIRFLOW_V_3_0_PLUS:
            # Airflow 3 launches the task via a separate Process.
            assert mock_process.call_count == 1
            assert mock_process_instance.start.call_count == 1
        else:
            # Airflow 2 shells out via Popen and resolves the logfile path itself.
            assert mock_popen.call_count == 1
            assert mock_logfile_path.call_count == 1

        assert len(EdgeWorker.jobs) == 1
        assert EdgeWorker.jobs[0].edge_job == edge_job
    @pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Test requires Airflow 3+")
    @pytest.mark.parametrize(
        ("configs", "expected_url"),
        [
            # Derived from the edge API URL by replacing the rpcapi suffix.
            (
                {("edge", "api_url"): "https://api-host/edge_worker/v1/rpcapi"},
                "https://api-host/execution",
            ),
            # Port and sub-path are preserved in the derivation.
            (
                {("edge", "api_url"): "https://api:1234/subpath/edge_worker/v1/rpcapi"},
                "https://api:1234/subpath/execution",
            ),
            # An explicit core setting overrides the derived URL.
            (
                {
                    ("edge", "api_url"): "https://api-endpoint",
                    ("core", "execution_api_server_url"): "https://other-endpoint",
                },
                "https://other-endpoint",
            ),
        ],
    )
    def test_execution_api_server_url(
        self,
        configs,
        expected_url,
    ):
        """The execution API URL is derived from the edge API URL unless overridden."""
        with conf_vars(configs):
            # Clear the cached value so each parametrization re-reads the config.
            EdgeWorker._execution_api_server_url.cache_clear()
            url = EdgeWorker._execution_api_server_url()
            assert url == expected_url
    @pytest.mark.skipif(not AIRFLOW_V_3_0_PLUS, reason="Test requires Airflow 3+")
    @patch("airflow.sdk.execution_time.supervisor.supervise")
    @patch("airflow.providers.edge3.cli.worker.Process")
    @patch("airflow.providers.edge3.cli.worker.Popen")
    def test_supervise_launch(
        self,
        mock_popen,
        mock_process,
        mock_supervise,
        worker_with_job: EdgeWorker,
    ):
        """The Process target forwards the execution API server URL to supervise()."""
        mock_popen.side_effect = [MagicMock()]
        mock_process_instance = MagicMock()
        mock_process.side_effect = [mock_process_instance]
        edge_job = EdgeWorker.jobs.pop().edge_job
        worker_with_job._launch_job(edge_job)
        # Call the captured Process target directly to inspect the supervise() call.
        mock_process_callback = mock_process.call_args.kwargs["target"]
        mock_process_callback(workload=MagicMock(), execution_api_server_url="http://mock-url")
        assert mock_supervise.call_args.kwargs["server"] == "http://mock-url"
    @pytest.mark.parametrize(
        ("reserve_result", "fetch_result", "expected_calls"),
        [
            # API hands out no job: fetch_job returns False, nothing is launched.
            pytest.param(None, False, (0, 0), id="no_job"),
            # API hands out a job: fetch_job returns True and launches it.
            pytest.param(
                EdgeJobFetched(
                    dag_id="test",
                    task_id="test",
                    run_id="test",
                    map_index=-1,
                    try_number=1,
                    concurrency_slots=1,
                    command=MOCK_COMMAND,  # type: ignore[arg-type]
                ),
                True,
                (1, 1),
                id="new_job",
            ),
        ],
    )
    @patch("airflow.providers.edge3.cli.worker.jobs_fetch")
    @patch("airflow.providers.edge3.cli.worker.logs_logfile_path")
    @patch("airflow.providers.edge3.cli.worker.jobs_set_state")
    @patch("subprocess.Popen")
    def test_fetch_job(
        self,
        mock_popen,
        mock_set_state,
        mock_logfile_path,
        mock_reserve_task,
        reserve_result,
        fetch_result,
        expected_calls,
        worker_with_job: EdgeWorker,
    ):
        """fetch_job only registers and launches a job when the API hands one out."""
        logfile_path_call_count, set_state_call_count = expected_calls
        mock_reserve_task.side_effect = [reserve_result]
        mock_popen.side_effect = ["mock"]
        with conf_vars({("edge", "api_url"): "https://invalid-api-test-endpoint"}):
            got_job = worker_with_job.fetch_job()
        mock_reserve_task.assert_called_once()
        assert got_job == fetch_result
        if AIRFLOW_V_3_0_PLUS:
            # this is only called on Airflow 2.10, AIP-72 includes it
            assert mock_logfile_path.call_count == 0
        else:
            assert mock_logfile_path.call_count == logfile_path_call_count
        assert mock_set_state.call_count == set_state_call_count
    def test_check_running_jobs_running(self, worker_with_job: EdgeWorker):
        """A still-running job stays tracked and keeps its concurrency slots occupied."""
        assert worker_with_job.free_concurrency == worker_with_job.concurrency
        with conf_vars({("edge", "api_url"): "https://invalid-api-test-endpoint"}):
            worker_with_job.check_running_jobs()
        assert len(EdgeWorker.jobs) == 1
        assert (
            worker_with_job.free_concurrency
            == worker_with_job.concurrency - EdgeWorker.jobs[0].edge_job.concurrency_slots
        )

    @patch("airflow.providers.edge3.cli.worker.jobs_set_state")
    def test_check_running_jobs_success(self, mock_set_state, worker_with_job: EdgeWorker):
        """Exit code 0 reports SUCCESS and frees the job's concurrency slots."""
        job = EdgeWorker.jobs[0]
        job.process.generated_returncode = 0  # type: ignore[union-attr]
        with conf_vars({("edge", "api_url"): "https://invalid-api-test-endpoint"}):
            worker_with_job.check_running_jobs()
        assert len(EdgeWorker.jobs) == 0
        mock_set_state.assert_called_once_with(job.edge_job.key, TaskInstanceState.SUCCESS)
        assert worker_with_job.free_concurrency == worker_with_job.concurrency

    @patch("airflow.providers.edge3.cli.worker.jobs_set_state")
    def test_check_running_jobs_failed(self, mock_set_state, worker_with_job: EdgeWorker):
        """A non-zero exit code reports FAILED and frees the job's concurrency slots."""
        job = EdgeWorker.jobs[0]
        job.process.generated_returncode = 42  # type: ignore[union-attr]
        with conf_vars({("edge", "api_url"): "https://invalid-api-test-endpoint"}):
            worker_with_job.check_running_jobs()
        assert len(EdgeWorker.jobs) == 0
        mock_set_state.assert_called_once_with(job.edge_job.key, TaskInstanceState.FAILED)
        assert worker_with_job.free_concurrency == worker_with_job.concurrency
    @time_machine.travel(datetime.now(), tick=False)
    @patch("airflow.providers.edge3.cli.worker.logs_push")
    def test_check_running_jobs_log_push(self, mock_logs_push, worker_with_job: EdgeWorker):
        """New logfile content is pushed in one call when it fits in a single chunk."""
        job = EdgeWorker.jobs[0]
        job.logfile.write_text("some log content")
        with conf_vars(
            {
                ("edge", "api_url"): "https://invalid-api-test-endpoint",
                ("edge", "push_log_chunk_size"): "524288",
            }
        ):
            worker_with_job.check_running_jobs()
        assert len(EdgeWorker.jobs) == 1
        # time_machine freezes the clock, so utcnow() here matches the push time.
        mock_logs_push.assert_called_once_with(
            task=job.edge_job.key, log_chunk_time=timezone.utcnow(), log_chunk_data="some log content"
        )

    @time_machine.travel(datetime.now(), tick=False)
    @patch("airflow.providers.edge3.cli.worker.logs_push")
    def test_check_running_jobs_log_push_increment(self, mock_logs_push, worker_with_job: EdgeWorker):
        """Only the bytes appended beyond the recorded logsize are pushed."""
        job = EdgeWorker.jobs[0]
        job.logfile.write_text("hello ")
        # Record the size of the already-pushed prefix.
        job.logsize = job.logfile.stat().st_size
        job.logfile.write_text("hello world")
        with conf_vars(
            {
                ("edge", "api_url"): "https://invalid-api-test-endpoint",
                ("edge", "push_log_chunk_size"): "524288",
            }
        ):
            worker_with_job.check_running_jobs()
        assert len(EdgeWorker.jobs) == 1
        mock_logs_push.assert_called_once_with(
            task=job.edge_job.key, log_chunk_time=timezone.utcnow(), log_chunk_data="world"
        )

    @time_machine.travel(datetime.now(), tick=False)
    @patch("airflow.providers.edge3.cli.worker.logs_push")
    def test_check_running_jobs_log_push_chunks(self, mock_logs_push, worker_with_job: EdgeWorker):
        """Log content is split into chunk_size pieces; undecodable bytes are escaped."""
        job = EdgeWorker.jobs[0]
        # Latin-1 "ü" is not valid UTF-8, so it is pushed as the escape "\\xfc".
        job.logfile.write_bytes("log1log2ülog3".encode("latin-1"))
        with conf_vars(
            {("edge", "api_url"): "https://invalid-api-test-endpoint", ("edge", "push_log_chunk_size"): "4"}
        ):
            worker_with_job.check_running_jobs()
        assert len(EdgeWorker.jobs) == 1
        calls = mock_logs_push.call_args_list
        assert len(calls) == 4
        assert calls[0] == call(
            task=job.edge_job.key, log_chunk_time=timezone.utcnow(), log_chunk_data="log1"
        )
        assert calls[1] == call(
            task=job.edge_job.key, log_chunk_time=timezone.utcnow(), log_chunk_data="log2"
        )
        assert calls[2] == call(
            task=job.edge_job.key, log_chunk_time=timezone.utcnow(), log_chunk_data="\\xfc"
        )
        assert calls[3] == call(
            task=job.edge_job.key, log_chunk_time=timezone.utcnow(), log_chunk_data="log3"
        )
    @pytest.mark.parametrize(
        ("drain", "maintenance_mode", "jobs", "expected_state"),
        [
            pytest.param(False, False, False, EdgeWorkerState.IDLE, id="idle"),
            pytest.param(False, False, True, EdgeWorkerState.RUNNING, id="running_jobs"),
            pytest.param(False, True, False, EdgeWorkerState.MAINTENANCE_MODE, id="maintenance_no_job"),
            pytest.param(
                False, True, True, EdgeWorkerState.MAINTENANCE_PENDING, id="maintenance_running_jobs"
            ),
            pytest.param(True, False, False, EdgeWorkerState.OFFLINE, id="shut_down"),
            pytest.param(True, False, True, EdgeWorkerState.TERMINATING, id="terminating"),
            pytest.param(True, True, False, EdgeWorkerState.OFFLINE_MAINTENANCE, id="offline_maintenance"),
            pytest.param(True, True, True, EdgeWorkerState.TERMINATING, id="maintenance_shut_down"),
        ],
    )
    @patch("airflow.providers.edge3.cli.worker.worker_set_state")
    def test_heartbeat(
        self, mock_set_state, drain, maintenance_mode, jobs, expected_state, worker_with_job: EdgeWorker
    ):
        """heartbeat() reports the state derived from the drain/maintenance/job flags."""
        if not jobs:
            EdgeWorker.jobs = []
        EdgeWorker.drain = drain
        EdgeWorker.maintenance_mode = maintenance_mode
        mock_set_state.return_value = WorkerSetStateReturn(
            state=EdgeWorkerState.RUNNING, queues=["queue1", "queue2"]
        )
        with conf_vars({("edge", "api_url"): "https://invalid-api-test-endpoint"}):
            worker_with_job.heartbeat()
        # Second positional argument of worker_set_state is the reported state.
        assert mock_set_state.call_args.args[1] == expected_state
        # The queues returned by the API are adopted by the worker.
        queue_list = worker_with_job.queues or []
        assert len(queue_list) == 2
        assert "queue1" in (queue_list)
        assert "queue2" in (queue_list)

    @patch("airflow.providers.edge3.cli.worker.worker_set_state")
    def test_version_mismatch(self, mock_set_state, worker_with_job):
        """A version mismatch reported by the API makes the worker drain."""
        mock_set_state.side_effect = EdgeWorkerVersionException("")
        worker_with_job.heartbeat()
        assert worker_with_job.drain
    @patch("airflow.providers.edge3.cli.worker.worker_register")
    def test_start_missing_apiserver(self, mock_register_worker, worker_with_job: EdgeWorker):
        """A 404 on registration exits with a hint that the API endpoint is not ready."""
        mock_response = Response()
        mock_response.status_code = 404
        mock_register_worker.side_effect = HTTPError(
            "Something with 404:NOT FOUND means API is not active", response=mock_response
        )
        with pytest.raises(SystemExit, match=r"API endpoint is not ready"):
            worker_with_job.start()

    @patch("airflow.providers.edge3.cli.worker.worker_register")
    def test_start_server_error(self, mock_register_worker, worker_with_job: EdgeWorker):
        """Non-404 HTTP errors on registration surface their own message in SystemExit."""
        mock_response = Response()
        mock_response.status_code = 500
        mock_register_worker.side_effect = HTTPError(
            "Something other error not FourhundretFour", response=mock_response
        )
        with pytest.raises(SystemExit, match=r"Something other"):
            worker_with_job.start()
    @patch("airflow.providers.edge3.cli.worker.worker_register")
    @patch("airflow.providers.edge3.cli.worker.EdgeWorker.loop")
    @patch("airflow.providers.edge3.cli.worker.worker_set_state")
    def test_start_and_run_one(self, mock_set_state, mock_loop, mock_register, worker_with_job: EdgeWorker):
        """start() registers once, runs one loop iteration, and reports state twice."""
        def stop_running():
            # Make the first loop iteration also the last one.
            EdgeWorker.drain = True
            EdgeWorker.jobs = []

        mock_loop.side_effect = stop_running
        mock_register.side_effect = [WorkerRegistrationReturn(last_update=datetime.now())]

        worker_with_job.start()

        mock_register.assert_called_once()
        mock_loop.assert_called_once()
        # One state update at startup plus one on shutdown.
        assert mock_set_state.call_count == 2

    def test_get_sysinfo(self, worker_with_job: EdgeWorker):
        """_get_sysinfo reports version info and the configured concurrency."""
        concurrency = 8
        worker_with_job.concurrency = concurrency
        sysinfo = worker_with_job._get_sysinfo()
        assert "airflow_version" in sysinfo
        assert "edge_provider_version" in sysinfo
        assert "concurrency" in sysinfo
        assert sysinfo["concurrency"] == concurrency
    @pytest.mark.db_test
    def test_list_edge_workers(self, mock_edgeworker: EdgeWorkerModel):
        """The list-workers CLI command emits JSON containing the expected fields."""
        args = self.parser.parse_args(["edge", "list-workers", "--output", "json"])
        with contextlib.redirect_stdout(StringIO()) as temp_stdout:
            with patch(
                "airflow.providers.edge3.models.edge_worker.get_registered_edge_hosts",
                return_value=[mock_edgeworker],
            ):
                edge_command.list_edge_workers(args)
        out = temp_stdout.getvalue()
        edge_workers = json.loads(out)
        for key in [
            "worker_name",
            "state",
            "queues",
            "jobs_active",
            "concurrency",
            "free_concurrency",
            "maintenance_comment",
        ]:
            assert key in edge_workers[0]
        assert any("test_edge_worker" in h["worker_name"] for h in edge_workers)
| TestEdgeWorker |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/daemon_sensor_tests/test_run_status_sensors.py | {
"start": 2979,
"end": 68456
} | class ____(NamedTuple):
    # Instance and workspace context the code location runs against.
    instance: dg.DagsterInstance
    context: WorkspaceProcessContext
    # Repositories in the code location, keyed by repository name.
    repositories: dict[str, RemoteRepository]
    code_location: CodeLocation

    def get_single_repository(self) -> RemoteRepository:
        """Return the only repository; asserts the location has exactly one."""
        assert len(self.repositories) == 1
        return next(iter(self.repositories.values()))
@contextmanager
def instance_with_single_code_location_multiple_repos_with_sensors(
    overrides: Optional[Mapping[str, Any]] = None,
    workspace_load_target: Optional[WorkspaceLoadTarget] = None,
    synchronous_run_coordinator=False,
) -> Iterator[tuple[dg.DagsterInstance, WorkspaceProcessContext, dict[str, RemoteRepository]]]:
    """Yield (instance, workspace context, repositories) for a workspace with one code location."""
    with instance_with_multiple_code_locations(
        overrides, workspace_load_target, synchronous_run_coordinator=synchronous_run_coordinator
    ) as many_tuples:
        # The workspace is expected to contain exactly one code location.
        assert len(many_tuples) == 1
        location_info = next(iter(many_tuples.values()))
        yield (
            location_info.instance,
            location_info.context,
            location_info.repositories,
        )
@contextmanager
def instance_with_multiple_code_locations(
    overrides: Optional[Mapping[str, Any]] = None,
    workspace_load_target=None,
    synchronous_run_coordinator=False,
) -> Iterator[dict[str, CodeLocationInfoForSensorTest]]:
    """Yield a mapping of code-location name -> CodeLocationInfoForSensorTest."""
    with dg.instance_for_test(
        overrides, synchronous_run_coordinator=synchronous_run_coordinator
    ) as instance:
        with create_test_daemon_workspace_context(
            workspace_load_target or create_workspace_load_target(None), instance=instance
        ) as workspace_context:
            location_infos: dict[str, CodeLocationInfoForSensorTest] = {}
            for code_location_entry in (
                workspace_context.create_request_context().get_code_location_entries().values()
            ):
                code_location: CodeLocation = check.not_none(code_location_entry.code_location)
                location_infos[code_location.name] = CodeLocationInfoForSensorTest(
                    instance=instance,
                    context=workspace_context,
                    # Copy so later mutations of the location don't leak into the snapshot.
                    repositories={**code_location.get_repositories()},
                    code_location=code_location,
                )
            yield location_infos
def test_run_status_sensor(
    caplog,
    executor: Optional[ThreadPoolExecutor],
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """SUCCESS- and STARTED-status sensors fire only for their own status.

    Timeline: both sensors skip with no runs; a failed run fires only the
    STARTED sensor; a successful run fires both. Also checks the sensors'
    log output via ``caplog``.
    """
    freeze_datetime = get_current_datetime()
    with freeze_time(freeze_datetime):
        success_sensor = remote_repo.get_sensor("my_job_success_sensor")
        instance.start_sensor(success_sensor)

        started_sensor = remote_repo.get_sensor("my_job_started_sensor")
        instance.start_sensor(started_sensor)

        state = instance.get_instigator_state(
            started_sensor.get_remote_origin_id(), started_sensor.selector_id
        )
        # Run-status sensors should be persisted with the RUN_STATUS sensor type.
        assert (
            cast("SensorInstigatorData", check.not_none(state).instigator_data).sensor_type
            == SensorType.RUN_STATUS
        )

        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            success_sensor.get_remote_origin_id(), success_sensor.selector_id
        )
        assert len(ticks) == 1
        validate_tick(
            ticks[0],
            success_sensor,
            freeze_datetime,
            TickStatus.SKIPPED,
        )

    freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    time.sleep(1)

    with freeze_time(freeze_datetime):
        remote_job = remote_repo.get_full_job("failure_job")
        run = instance.create_run_for_job(
            failure_job,
            remote_job_origin=remote_job.get_remote_origin(),
            job_code_origin=remote_job.get_python_origin(),
        )
        instance.submit_run(run.run_id, workspace_context.create_request_context())
        wait_for_all_runs_to_finish(instance)
        run = instance.get_runs()[0]
        assert run.status == DagsterRunStatus.FAILURE
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)

    with freeze_time(freeze_datetime):
        # should not fire the success sensor, should fire the started sensor
        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            success_sensor.get_remote_origin_id(), success_sensor.selector_id
        )
        assert len(ticks) == 2
        validate_tick(
            ticks[0],
            success_sensor,
            freeze_datetime,
            TickStatus.SKIPPED,
        )

        ticks = instance.get_ticks(
            started_sensor.get_remote_origin_id(), started_sensor.selector_id
        )
        assert len(ticks) == 2
        validate_tick(
            ticks[0],
            started_sensor,
            freeze_datetime,
            TickStatus.SUCCESS,
        )

    with freeze_time(freeze_datetime):
        remote_job = remote_repo.get_full_job("foo_job")
        run = instance.create_run_for_job(
            foo_job,
            remote_job_origin=remote_job.get_remote_origin(),
            job_code_origin=remote_job.get_python_origin(),
        )
        instance.submit_run(run.run_id, workspace_context.create_request_context())
        wait_for_all_runs_to_finish(instance)
        run = instance.get_runs()[0]
        assert run.status == DagsterRunStatus.SUCCESS
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)

    # Clear earlier sensor log output so the caplog assertions below only see
    # messages from the final evaluation.
    caplog.clear()

    with freeze_time(freeze_datetime):
        # should fire the success sensor and the started sensor
        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            success_sensor.get_remote_origin_id(), success_sensor.selector_id
        )
        assert len(ticks) == 3
        validate_tick(
            ticks[0],
            success_sensor,
            freeze_datetime,
            TickStatus.SUCCESS,
        )

        ticks = instance.get_ticks(
            started_sensor.get_remote_origin_id(), started_sensor.selector_id
        )
        assert len(ticks) == 3
        validate_tick(
            ticks[0],
            started_sensor,
            freeze_datetime,
            TickStatus.SUCCESS,
        )

        assert 'Sensor "my_job_started_sensor" acted on run status STARTED of run' in caplog.text
        assert 'Sensor "my_job_success_sensor" acted on run status SUCCESS of run' in caplog.text
def test_run_failure_sensor(
    executor: Optional[ThreadPoolExecutor],
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """Run-failure sensor skips while no runs exist, then fires after a run fails."""
    freeze_datetime = get_current_datetime()
    with freeze_time(freeze_datetime):
        failure_sensor = remote_repo.get_sensor("my_run_failure_sensor")
        instance.start_sensor(failure_sensor)

        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
        )
        assert len(ticks) == 1
        validate_tick(
            ticks[0],
            failure_sensor,
            freeze_datetime,
            TickStatus.SKIPPED,
        )

    freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    time.sleep(1)

    with freeze_time(freeze_datetime):
        remote_job = remote_repo.get_full_job("failure_job")
        run = instance.create_run_for_job(
            failure_job,
            remote_job_origin=remote_job.get_remote_origin(),
            job_code_origin=remote_job.get_python_origin(),
        )
        instance.submit_run(run.run_id, workspace_context.create_request_context())
        wait_for_all_runs_to_finish(instance)
        run = instance.get_runs()[0]
        assert run.status == DagsterRunStatus.FAILURE
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)

    with freeze_time(freeze_datetime):
        # should fire the failure sensor
        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
        )
        assert len(ticks) == 2
        validate_tick(
            ticks[0],
            failure_sensor,
            freeze_datetime,
            TickStatus.SUCCESS,
        )
def test_run_failure_sensor_that_fails(
    executor: Optional[ThreadPoolExecutor],
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A failure sensor that itself raises produces a FAILURE tick, then recovers.

    The tick after the sensor's own error must be SKIPPED (the failed run was
    already consumed by the cursor), not a repeated failure.
    """
    freeze_datetime = get_current_datetime()
    with freeze_time(freeze_datetime):
        failure_sensor = remote_repo.get_sensor("my_run_failure_sensor_that_itself_fails")
        instance.start_sensor(failure_sensor)

        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
        )
        assert len(ticks) == 1
        validate_tick(
            ticks[0],
            failure_sensor,
            freeze_datetime,
            TickStatus.SKIPPED,
        )

    freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    time.sleep(1)

    with freeze_time(freeze_datetime):
        remote_job = remote_repo.get_full_job("failure_job")
        run = instance.create_run_for_job(
            failure_job,
            remote_job_origin=remote_job.get_remote_origin(),
            job_code_origin=remote_job.get_python_origin(),
        )
        instance.submit_run(run.run_id, workspace_context.create_request_context())
        wait_for_all_runs_to_finish(instance)
        run = instance.get_runs()[0]
        assert run.status == DagsterRunStatus.FAILURE
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)

    with freeze_time(freeze_datetime):
        # should fire the failure sensor and fail
        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
        )
        assert len(ticks) == 2
        validate_tick(
            ticks[0],
            failure_sensor,
            freeze_datetime,
            TickStatus.FAILURE,
            expected_error="How meta",
        )

    # Next tick skips again
    freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    with freeze_time(freeze_datetime):
        # no new failed runs since the last tick, so the sensor skips
        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
        )
        assert len(ticks) == 3
        validate_tick(
            ticks[0],
            failure_sensor,
            freeze_datetime,
            TickStatus.SKIPPED,
        )
def test_run_failure_sensor_filtered(
    executor: Optional[ThreadPoolExecutor],
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A job-filtered failure sensor ignores failures of other jobs.

    A failure of ``failure_job_2`` yields a SKIPPED tick; a subsequent failure
    of ``failure_job`` (the filtered target) yields a SUCCESS tick.
    """
    freeze_datetime = get_current_datetime()
    with freeze_time(freeze_datetime):
        failure_sensor = remote_repo.get_sensor("my_run_failure_sensor_filtered")
        instance.start_sensor(failure_sensor)

        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
        )
        assert len(ticks) == 1
        validate_tick(
            ticks[0],
            failure_sensor,
            freeze_datetime,
            TickStatus.SKIPPED,
        )

    freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    time.sleep(1)

    with freeze_time(freeze_datetime):
        remote_job = remote_repo.get_full_job("failure_job_2")
        run = instance.create_run_for_job(
            failure_job_2,
            remote_job_origin=remote_job.get_remote_origin(),
            job_code_origin=remote_job.get_python_origin(),
        )
        instance.submit_run(run.run_id, workspace_context.create_request_context())
        wait_for_all_runs_to_finish(instance)
        run = instance.get_runs()[0]
        assert run.status == DagsterRunStatus.FAILURE
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)

    with freeze_time(freeze_datetime):
        # should not fire the failure sensor (filtered to failure job)
        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
        )
        assert len(ticks) == 2
        validate_tick(
            ticks[0],
            failure_sensor,
            freeze_datetime,
            TickStatus.SKIPPED,
        )

    freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    time.sleep(1)

    with freeze_time(freeze_datetime):
        remote_job = remote_repo.get_full_job("failure_job")
        run = instance.create_run_for_job(
            failure_job,
            remote_job_origin=remote_job.get_remote_origin(),
            job_code_origin=remote_job.get_python_origin(),
        )
        instance.submit_run(run.run_id, workspace_context.create_request_context())
        wait_for_all_runs_to_finish(instance)
        run = instance.get_runs()[0]
        assert run.status == DagsterRunStatus.FAILURE
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)

    with freeze_time(freeze_datetime):
        # should fire the failure sensor: failure_job matches the filter
        evaluate_sensors(workspace_context, executor)

        ticks = instance.get_ticks(
            failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
        )
        assert len(ticks) == 3
        validate_tick(
            ticks[0],
            failure_sensor,
            freeze_datetime,
            TickStatus.SUCCESS,
        )
def test_run_failure_sensor_overfetch(
    executor: Optional[ThreadPoolExecutor],
    instance: DagsterInstance,
    remote_repo: RemoteRepository,
):
    """Fetch/process limits: the sensor fetches more events than it processes.

    With FETCH_LIMIT=6 and PROCESS_LIMIT=2, eight interleaved failures (four
    matching the filter, four not) are consumed over two ticks of two matching
    runs each, and the cursor's record_id advances past trailing non-matching
    events that were fetched but not processed.
    """
    with environ(
        {
            "DAGSTER_RUN_STATUS_SENSOR_FETCH_LIMIT": "6",
            "DAGSTER_RUN_STATUS_SENSOR_PROCESS_LIMIT": "2",
        },
    ):
        with create_test_daemon_workspace_context(
            workspace_load_target=create_workspace_load_target(), instance=instance
        ) as workspace_context:
            freeze_datetime = get_current_datetime()
            with freeze_time(freeze_datetime):
                failure_sensor = remote_repo.get_sensor("my_run_failure_sensor_filtered")
                instance.start_sensor(failure_sensor)

                evaluate_sensors(workspace_context, executor)

                ticks = instance.get_ticks(
                    failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
                )
                assert len(ticks) == 1
                validate_tick(
                    ticks[0],
                    failure_sensor,
                    freeze_datetime,
                    TickStatus.SKIPPED,
                )

            freeze_datetime = freeze_datetime + relativedelta(seconds=60)
            time.sleep(1)

            with freeze_time(freeze_datetime):
                matching_runs = []
                non_matching_runs = []
                # interleave matching jobs and jobs that do not match
                for _i in range(4):
                    remote_job = remote_repo.get_full_job("failure_job")
                    remote_job_2 = remote_repo.get_full_job("failure_job_2")
                    run = instance.create_run_for_job(
                        failure_job_2,
                        remote_job_origin=remote_job_2.get_remote_origin(),
                        job_code_origin=remote_job_2.get_python_origin(),
                    )
                    instance.report_run_failed(run)
                    non_matching_runs.append(run)
                    run = instance.create_run_for_job(
                        failure_job,
                        remote_job_origin=remote_job.get_remote_origin(),
                        job_code_origin=remote_job.get_python_origin(),
                    )
                    instance.report_run_failed(run)
                    matching_runs.append(run)

                freeze_datetime = freeze_datetime + relativedelta(seconds=60)

            with freeze_time(freeze_datetime):
                # should fire the failure sensor (filtered to failure job)
                evaluate_sensors(workspace_context, executor)

                ticks = instance.get_ticks(
                    failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
                )
                assert len(ticks) == 2
                validate_tick(
                    ticks[0],
                    failure_sensor,
                    freeze_datetime,
                    TickStatus.SUCCESS,
                )
                # Only PROCESS_LIMIT (2) matching runs are acted on this tick.
                assert set(ticks[0].origin_run_ids or []) == {
                    matching_runs[0].run_id,
                    matching_runs[1].run_id,
                }

                # Additional non-matching run was incorporated into the cursor
                run_status_changes = instance.event_log_storage.fetch_run_status_changes(
                    records_filter=DagsterEventType.RUN_FAILURE, ascending=True, limit=1000
                )
                assert len(run_status_changes.records) == 8
                last_non_matching_run_storage_records = [
                    record
                    for record in run_status_changes.records
                    if record.run_id == non_matching_runs[2].run_id
                ]
                assert len(last_non_matching_run_storage_records) == 1
                assert (
                    dg.deserialize_value(
                        check.not_none(ticks[0].cursor), RunStatusSensorCursor
                    ).record_id
                    == last_non_matching_run_storage_records[0].storage_id
                )

                freeze_datetime = freeze_datetime + relativedelta(seconds=60)

            with freeze_time(freeze_datetime):
                # Second tick consumes the remaining two matching runs.
                evaluate_sensors(workspace_context, executor)

                ticks = instance.get_ticks(
                    failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
                )
                assert len(ticks) == 3
                validate_tick(
                    ticks[0],
                    failure_sensor,
                    freeze_datetime,
                    TickStatus.SUCCESS,
                )
                assert set(ticks[0].origin_run_ids or []) == {
                    matching_runs[2].run_id,
                    matching_runs[3].run_id,
                }
                last_matching_run_storage_records = [
                    record
                    for record in run_status_changes.records
                    if record.run_id == matching_runs[3].run_id
                ]
                assert len(last_matching_run_storage_records) == 1
                assert (
                    dg.deserialize_value(
                        check.not_none(ticks[0].cursor), RunStatusSensorCursor
                    ).record_id
                    == last_matching_run_storage_records[0].storage_id
                )
def sqlite_storage_config_fn(temp_dir: str) -> dict[str, Any]:
    """Instance overrides selecting non-run-sharded SQLite run and event-log storage.

    Both storages are rooted at ``temp_dir``.
    """

    def _sqlite_entry(module_suffix: str, class_name: str) -> dict[str, Any]:
        # Each storage override names a dagster storage class plus its base_dir config.
        return {
            "module": f"dagster._core.storage.{module_suffix}",
            "class": class_name,
            "config": {"base_dir": temp_dir},
        }

    return {
        "run_storage": _sqlite_entry("runs", "SqliteRunStorage"),
        "event_log_storage": _sqlite_entry("event_log", "SqliteEventLogStorage"),
    }
def default_storage_config_fn(_):
    """Return empty instance overrides, leaving the default (run-sharded) storage in place."""
    overrides: dict = {}
    return overrides
def sql_event_log_storage_config_fn(temp_dir: str):
    """Instance overrides selecting the consolidated (single-DB) SQLite event log storage."""
    event_log_settings = {
        "module": "dagster._core.storage.event_log",
        "class": "ConsolidatedSqliteEventLogStorage",
        "config": {"base_dir": temp_dir},
    }
    return {"event_log_storage": event_log_settings}
@pytest.mark.parametrize(
    "storage_config_fn",
    [default_storage_config_fn, sqlite_storage_config_fn],
)
def test_run_status_sensor_interleave(storage_config_fn, executor: Optional[ThreadPoolExecutor]):
    """Failures reported out of run-start order are each picked up exactly once.

    Two hanging runs are started; run 2 fails first and fires the sensor, then
    run 1 fails and fires it on a later tick. Parametrized over run-sharded
    (default) and non-run-sharded (SQLite) storage.
    """
    freeze_datetime = get_current_datetime()
    with tempfile.TemporaryDirectory() as temp_dir:
        with instance_with_sensors(overrides=storage_config_fn(temp_dir)) as (
            instance,
            workspace_context,
            remote_repo,
        ):
            # start sensor
            with freeze_time(freeze_datetime):
                failure_sensor = remote_repo.get_sensor("my_run_failure_sensor")
                instance.start_sensor(failure_sensor)

                evaluate_sensors(workspace_context, executor)

                ticks = instance.get_ticks(
                    failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
                )
                assert len(ticks) == 1
                validate_tick(
                    ticks[0],
                    failure_sensor,
                    freeze_datetime,
                    TickStatus.SKIPPED,
                )

            freeze_datetime = freeze_datetime + relativedelta(seconds=60)
            time.sleep(1)

            with freeze_time(freeze_datetime):
                remote_job = remote_repo.get_full_job("hanging_job")
                # start run 1
                run1 = instance.create_run_for_job(
                    hanging_job,
                    remote_job_origin=remote_job.get_remote_origin(),
                    job_code_origin=remote_job.get_python_origin(),
                )
                instance.submit_run(run1.run_id, workspace_context.create_request_context())
                freeze_datetime = freeze_datetime + relativedelta(seconds=60)
                # start run 2
                run2 = instance.create_run_for_job(
                    hanging_job,
                    remote_job_origin=remote_job.get_remote_origin(),
                    job_code_origin=remote_job.get_python_origin(),
                )
                instance.submit_run(run2.run_id, workspace_context.create_request_context())
                freeze_datetime = freeze_datetime + relativedelta(seconds=60)
                # fail run 2
                instance.report_run_failed(run2)
                freeze_datetime = freeze_datetime + relativedelta(seconds=60)
                run = instance.get_runs()[0]
                assert run.status == DagsterRunStatus.FAILURE
                assert run.run_id == run2.run_id

            # check sensor
            with freeze_time(freeze_datetime):
                # should fire for run 2
                evaluate_sensors(workspace_context, executor)

                ticks = instance.get_ticks(
                    failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
                )
                assert len(ticks) == 2
                validate_tick(
                    ticks[0],
                    failure_sensor,
                    freeze_datetime,
                    TickStatus.SUCCESS,
                )
                assert len(ticks[0].origin_run_ids) == 1  # pyright: ignore[reportArgumentType]
                assert ticks[0].origin_run_ids[0] == run2.run_id  # pyright: ignore[reportOptionalSubscript]

            # fail run 1
            with freeze_time(freeze_datetime):
                # fail run 1 (run 2 already failed above)
                instance.report_run_failed(run1)
                freeze_datetime = freeze_datetime + relativedelta(seconds=60)

            time.sleep(1)

            # check sensor
            with freeze_time(freeze_datetime):
                # should fire for run 1
                evaluate_sensors(workspace_context, executor)

                ticks = instance.get_ticks(
                    failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
                )
                assert len(ticks) == 3
                validate_tick(
                    ticks[0],
                    failure_sensor,
                    freeze_datetime,
                    TickStatus.SUCCESS,
                )
                assert len(ticks[0].origin_run_ids) == 1  # pyright: ignore[reportArgumentType]
                assert ticks[0].origin_run_ids[0] == run1.run_id  # pyright: ignore[reportOptionalSubscript]
@pytest.mark.parametrize("storage_config_fn", [sql_event_log_storage_config_fn])
def test_run_failure_sensor_empty_run_records(
    storage_config_fn, executor: Optional[ThreadPoolExecutor]
):
    """A failure event whose run is missing from run storage must not fire the sensor.

    Stores a PIPELINE_FAILURE event for a nonexistent run id directly in event
    log storage and asserts the sensor tick is SKIPPED rather than erroring.
    """
    freeze_datetime = get_current_datetime()
    with tempfile.TemporaryDirectory() as temp_dir:
        with instance_with_sensors(overrides=storage_config_fn(temp_dir)) as (
            instance,
            workspace_context,
            remote_repo,
        ):
            with freeze_time(freeze_datetime):
                failure_sensor = remote_repo.get_sensor("my_run_failure_sensor")
                instance.start_sensor(failure_sensor)

                evaluate_sensors(workspace_context, executor)

                ticks = instance.get_ticks(
                    failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
                )
                assert len(ticks) == 1
                validate_tick(
                    ticks[0],
                    failure_sensor,
                    freeze_datetime,
                    TickStatus.SKIPPED,
                )

            freeze_datetime = freeze_datetime + relativedelta(seconds=60)
            time.sleep(1)

            with freeze_time(freeze_datetime):
                # create a mismatch between event storage and run storage
                instance.event_log_storage.store_event(
                    dg.EventLogEntry(
                        error_info=None,
                        level="debug",
                        user_message="",
                        run_id="fake_run_id",
                        timestamp=time.time(),
                        dagster_event=dg.DagsterEvent(
                            DagsterEventType.PIPELINE_FAILURE.value,
                            "foo",
                        ),
                    )
                )
                runs = instance.get_runs()
                assert len(runs) == 0
                failure_events = instance.fetch_run_status_changes(
                    DagsterEventType.PIPELINE_FAILURE, limit=5000
                ).records
                assert len(failure_events) == 1
                freeze_datetime = freeze_datetime + relativedelta(seconds=60)

            with freeze_time(freeze_datetime):
                # shouldn't fire the failure sensor due to the mismatch
                evaluate_sensors(workspace_context, executor)

                ticks = instance.get_ticks(
                    failure_sensor.get_remote_origin_id(), failure_sensor.selector_id
                )
                assert len(ticks) == 2
                validate_tick(
                    ticks[0],
                    failure_sensor,
                    freeze_datetime,
                    TickStatus.SKIPPED,
                )
def test_all_code_locations_run_status_sensor(executor: Optional[ThreadPoolExecutor]):
    """A ``monitor_all_code_locations`` status sensor fires for a run in another location."""
    freeze_datetime = get_current_datetime()

    # we have no good api for compositing load targets so forced to use a workspace file
    workspace_load_target = WorkspaceFileTarget(
        [dg.file_relative_path(__file__, "daemon_sensor_defs_test_workspace.yaml")]
    )

    # the name of the location by default is the fully-qualified module name
    daemon_sensor_defs_name = (
        "dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.daemon_sensor_defs"
    )
    job_defs_name = "dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs"

    with instance_with_multiple_code_locations(
        workspace_load_target=workspace_load_target,
        synchronous_run_coordinator=True,
    ) as location_infos:
        assert len(location_infos) == 2

        daemon_sensor_defs_location_info = location_infos[daemon_sensor_defs_name]
        job_defs_location_info = location_infos[job_defs_name]

        sensor_repo = daemon_sensor_defs_location_info.get_single_repository()
        job_repo = job_defs_location_info.get_single_repository()

        # verify assumption that the instances are the same
        assert daemon_sensor_defs_location_info.instance == job_defs_location_info.instance
        instance = daemon_sensor_defs_location_info.instance

        # verify assumption that the contexts are the same
        assert daemon_sensor_defs_location_info.context == job_defs_location_info.context
        workspace_context = daemon_sensor_defs_location_info.context

        # This remainder is largely copied from test_cross_repo_run_status_sensor
        with freeze_time(freeze_datetime):
            my_sensor = sensor_repo.get_sensor("all_code_locations_run_status_sensor")
            instance.start_sensor(my_sensor)

            evaluate_sensors(workspace_context, executor)

            ticks = [*instance.get_ticks(my_sensor.get_remote_origin_id(), my_sensor.selector_id)]
            assert len(ticks) == 1
            validate_tick(
                ticks[0],
                my_sensor,
                freeze_datetime,
                TickStatus.SKIPPED,
            )

        freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        time.sleep(1)

        with freeze_time(freeze_datetime):
            external_another_job = job_repo.get_full_job("another_success_job")

            # this unfortunate API (create_run_for_job) requires the importation
            # of the in-memory job object even though it is dealing mostly with
            # "external" objects
            from dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs import (
                another_success_job,
            )

            dagster_run = instance.create_run_for_job(
                another_success_job,
                remote_job_origin=external_another_job.get_remote_origin(),
                job_code_origin=external_another_job.get_python_origin(),
            )
            instance.submit_run(dagster_run.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            dagster_run = next(iter(instance.get_runs()))
            assert dagster_run.status == DagsterRunStatus.SUCCESS
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)

        with freeze_time(freeze_datetime):
            evaluate_sensors(workspace_context, executor)
            ticks = [*instance.get_ticks(my_sensor.get_remote_origin_id(), my_sensor.selector_id)]
            assert len(ticks) == 2
            validate_tick(
                ticks[0],
                my_sensor,
                freeze_datetime,
                TickStatus.SUCCESS,
            )
def test_all_code_location_run_failure_sensor(executor: Optional[ThreadPoolExecutor]):
    """A ``monitor_all_code_locations`` failure sensor fires for a failed run in another location."""
    freeze_datetime = get_current_datetime()

    # we have no good api for compositing load targets so forced to use a workspace file
    workspace_load_target = WorkspaceFileTarget(
        [dg.file_relative_path(__file__, "daemon_sensor_defs_test_workspace.yaml")]
    )

    # the name of the location by default is the fully-qualified module name
    daemon_sensor_defs_name = (
        "dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.daemon_sensor_defs"
    )
    job_defs_name = "dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs"

    with instance_with_multiple_code_locations(
        workspace_load_target=workspace_load_target,
        synchronous_run_coordinator=True,
    ) as location_infos:
        assert len(location_infos) == 2

        daemon_sensor_defs_location_info = location_infos[daemon_sensor_defs_name]
        job_defs_location_info = location_infos[job_defs_name]

        sensor_repo = daemon_sensor_defs_location_info.get_single_repository()
        job_repo = job_defs_location_info.get_single_repository()

        # verify assumption that the instances are the same
        assert daemon_sensor_defs_location_info.instance == job_defs_location_info.instance
        instance = daemon_sensor_defs_location_info.instance

        # verify assumption that the contexts are the same
        assert daemon_sensor_defs_location_info.context == job_defs_location_info.context
        workspace_context = daemon_sensor_defs_location_info.context

        # This remainder is largely copied from test_cross_repo_run_status_sensor
        with freeze_time(freeze_datetime):
            my_sensor = sensor_repo.get_sensor("all_code_locations_run_failure_sensor")
            instance.start_sensor(my_sensor)

            evaluate_sensors(workspace_context, executor)

            ticks = [*instance.get_ticks(my_sensor.get_remote_origin_id(), my_sensor.selector_id)]
            assert len(ticks) == 1
            validate_tick(
                ticks[0],
                my_sensor,
                freeze_datetime,
                TickStatus.SKIPPED,
            )

        freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        time.sleep(1)

        with freeze_time(freeze_datetime):
            external_another_job = job_repo.get_full_job("another_failure_job")

            # this unfortunate API (create_run_for_job) requires the importation
            # of the in-memory job object even though it is dealing mostly with
            # "external" objects
            from dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs import (
                another_failure_job,
            )

            dagster_run = instance.create_run_for_job(
                another_failure_job,
                remote_job_origin=external_another_job.get_remote_origin(),
                job_code_origin=external_another_job.get_python_origin(),
            )
            instance.submit_run(dagster_run.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            dagster_run = next(iter(instance.get_runs()))
            assert dagster_run.status == DagsterRunStatus.FAILURE
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)

        with freeze_time(freeze_datetime):
            evaluate_sensors(workspace_context, executor)
            ticks = [*instance.get_ticks(my_sensor.get_remote_origin_id(), my_sensor.selector_id)]
            assert len(ticks) == 2
            validate_tick(
                ticks[0],
                my_sensor,
                freeze_datetime,
                TickStatus.SUCCESS,
            )
def test_cross_code_location_run_status_sensor(executor: Optional[ThreadPoolExecutor]):
    """A status sensor in one code location fires for a job defined in another location."""
    freeze_datetime = get_current_datetime()

    # we have no good api for compositing load targets so forced to use a workspace file
    workspace_load_target = WorkspaceFileTarget(
        [dg.file_relative_path(__file__, "daemon_sensor_defs_test_workspace.yaml")]
    )

    # the name of the location by default is the fully-qualified module name
    daemon_sensor_defs_name = (
        "dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.daemon_sensor_defs"
    )
    success_job_defs_name = (
        "dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs"
    )

    with instance_with_multiple_code_locations(
        synchronous_run_coordinator=True,
        workspace_load_target=workspace_load_target,
    ) as location_infos:
        assert len(location_infos) == 2

        daemon_sensor_defs_location_info = location_infos[daemon_sensor_defs_name]
        success_job_def_location_info = location_infos[success_job_defs_name]

        sensor_repo = daemon_sensor_defs_location_info.get_single_repository()
        job_repo = success_job_def_location_info.get_single_repository()

        # verify assumption that the instances are the same
        assert daemon_sensor_defs_location_info.instance == success_job_def_location_info.instance
        instance = daemon_sensor_defs_location_info.instance

        # verify assumption that the contexts are the same
        assert daemon_sensor_defs_location_info.context == success_job_def_location_info.context
        workspace_context = daemon_sensor_defs_location_info.context

        # This remainder is largely copied from test_cross_repo_run_status_sensor
        with freeze_time(freeze_datetime):
            success_sensor = sensor_repo.get_sensor("success_sensor")
            instance.start_sensor(success_sensor)

            evaluate_sensors(workspace_context, executor)

            ticks = [
                *instance.get_ticks(
                    success_sensor.get_remote_origin_id(), success_sensor.selector_id
                )
            ]
            assert len(ticks) == 1
            validate_tick(
                ticks[0],
                success_sensor,
                freeze_datetime,
                TickStatus.SKIPPED,
            )

        freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        time.sleep(1)

        with freeze_time(freeze_datetime):
            external_success_job = job_repo.get_full_job("success_job")

            # this unfortunate API (create_run_for_job) requires the importation
            # of the in-memory job object even though it is dealing mostly with
            # "external" objects
            from dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs import (
                success_job,
            )

            dagster_run = instance.create_run_for_job(
                success_job,
                remote_job_origin=external_success_job.get_remote_origin(),
                job_code_origin=external_success_job.get_python_origin(),
            )
            instance.submit_run(dagster_run.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            dagster_run = next(iter(instance.get_runs()))
            assert dagster_run.status == DagsterRunStatus.SUCCESS
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)

        with freeze_time(freeze_datetime):
            evaluate_sensors(workspace_context, executor)
            ticks = [
                *instance.get_ticks(
                    success_sensor.get_remote_origin_id(), success_sensor.selector_id
                )
            ]
            assert len(ticks) == 2
            validate_tick(
                ticks[0],
                success_sensor,
                freeze_datetime,
                TickStatus.SUCCESS,
            )
def test_cross_code_location_job_selector_on_defs_run_status_sensor(
    executor: Optional[ThreadPoolExecutor],
):
    """A job-selector status sensor only reacts to the selected cross-location job.

    A success of the non-selected ``success_job`` yields a SKIPPED tick; a later
    success of the selected ``another_success_job`` yields a SUCCESS tick.
    """
    freeze_datetime = get_current_datetime()

    # we have no good api for compositing load targets so forced to use a workspace file
    workspace_load_target = WorkspaceFileTarget(
        [dg.file_relative_path(__file__, "daemon_sensor_defs_test_workspace.yaml")]
    )

    # the name of the location by default is the fully-qualified module name
    daemon_sensor_defs_name = (
        "dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.daemon_sensor_defs"
    )
    success_job_defs_name = (
        "dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs"
    )

    with instance_with_multiple_code_locations(
        synchronous_run_coordinator=True,
        workspace_load_target=workspace_load_target,
    ) as location_infos:
        assert len(location_infos) == 2

        daemon_sensor_defs_location_info = location_infos[daemon_sensor_defs_name]
        success_job_def_location_info = location_infos[success_job_defs_name]

        sensor_repo = daemon_sensor_defs_location_info.get_single_repository()
        job_repo = success_job_def_location_info.get_single_repository()

        # verify assumption that the instances are the same
        assert daemon_sensor_defs_location_info.instance == success_job_def_location_info.instance
        instance = daemon_sensor_defs_location_info.instance

        # verify assumption that the contexts are the same
        assert daemon_sensor_defs_location_info.context == success_job_def_location_info.context
        workspace_context = daemon_sensor_defs_location_info.context

        # This remainder is largely copied from test_cross_repo_run_status_sensor
        with freeze_time(freeze_datetime):
            success_sensor = sensor_repo.get_sensor("success_of_another_job_sensor")
            instance.start_sensor(success_sensor)

            evaluate_sensors(workspace_context, executor)

            ticks = [
                *instance.get_ticks(
                    success_sensor.get_remote_origin_id(), success_sensor.selector_id
                )
            ]
            assert len(ticks) == 1
            validate_tick(
                ticks[0],
                success_sensor,
                freeze_datetime,
                TickStatus.SKIPPED,
            )

        freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        time.sleep(1)

        with freeze_time(freeze_datetime):
            external_success_job = job_repo.get_full_job("success_job")

            # this unfortunate API (create_run_for_job) requires the importation
            # of the in-memory job object even though it is dealing mostly with
            # "external" objects
            from dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs import (
                success_job,
            )

            dagster_run = instance.create_run_for_job(
                success_job,
                remote_job_origin=external_success_job.get_remote_origin(),
                job_code_origin=external_success_job.get_python_origin(),
            )
            instance.submit_run(dagster_run.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            dagster_run = next(iter(instance.get_runs()))
            assert dagster_run.status == DagsterRunStatus.SUCCESS
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)

        with freeze_time(freeze_datetime):
            evaluate_sensors(workspace_context, executor)
            ticks = [
                *instance.get_ticks(
                    success_sensor.get_remote_origin_id(), success_sensor.selector_id
                )
            ]
            # A successful job was launched but not the one we were listening to.
            # So the tick is skipped
            assert len(ticks) == 2
            validate_tick(
                ticks[0],
                success_sensor,
                freeze_datetime,
                TickStatus.SKIPPED,
            )

        freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        time.sleep(1)

        # now launch the run that is actually being listened to
        with freeze_time(freeze_datetime):
            external_another_success_job = job_repo.get_full_job("another_success_job")

            # this unfortunate API (create_run_for_job) requires the importation
            # of the in-memory job object even though it is dealing mostly with
            # "external" objects
            from dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs import (
                another_success_job,
            )

            dagster_run = instance.create_run_for_job(
                another_success_job,
                remote_job_origin=external_another_success_job.get_remote_origin(),
                job_code_origin=external_another_success_job.get_python_origin(),
            )
            instance.submit_run(dagster_run.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            dagster_run = next(iter(instance.get_runs()))
            assert dagster_run.status == DagsterRunStatus.SUCCESS
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)

        with freeze_time(freeze_datetime):
            evaluate_sensors(workspace_context, executor)
            ticks = [
                *instance.get_ticks(
                    success_sensor.get_remote_origin_id(), success_sensor.selector_id
                )
            ]
            # A successful job was launched and we are listening to it this time
            # so we check for success
            assert len(ticks) == 3
            validate_tick(
                ticks[0],
                success_sensor,
                freeze_datetime,
                TickStatus.SUCCESS,
            )
def test_code_location_scoped_run_status_sensor(executor: Optional[ThreadPoolExecutor]):
    """Run-status sensors only react to runs from their own code location.

    Two code locations each define a job named "success_job"; only one location
    defines "success_sensor". A run of the sensor's local "success_job" produces
    a SUCCESS tick; a run of the identically named job from the other location
    produces a SKIPPED tick.
    """
    freeze_datetime = get_current_datetime()
    # we have no good api for compositing load targets so forced to use a workspace file
    workspace_load_target = WorkspaceFileTarget(
        [dg.file_relative_path(__file__, "code_location_scoped_test_workspace.yaml")]
    )
    # the name of the location by default is the fully-qualified module name
    code_location_with_sensor_name = "dagster_tests.daemon_sensor_tests.locations_for_code_location_scoped_sensor_test.code_location_with_sensor"
    code_location_with_dupe_job_name = "dagster_tests.daemon_sensor_tests.locations_for_code_location_scoped_sensor_test.code_location_with_duplicate_job_name"
    with instance_with_multiple_code_locations(
        synchronous_run_coordinator=True,
        workspace_load_target=workspace_load_target,
    ) as location_infos:
        assert len(location_infos) == 2
        code_location_w_sensor_info = location_infos[code_location_with_sensor_name]
        code_location_w_dupe_job_info = location_infos[code_location_with_dupe_job_name]
        sensor_repo = code_location_w_sensor_info.get_single_repository()
        dupe_job_repo = code_location_w_dupe_job_info.get_single_repository()
        # verify assumption that the instances are the same
        assert code_location_w_sensor_info.instance == code_location_w_dupe_job_info.instance
        instance = code_location_w_sensor_info.instance
        # verify assumption that the contexts are the same
        assert code_location_w_sensor_info.context == code_location_w_dupe_job_info.context
        workspace_context = code_location_w_sensor_info.context
        # This remainder is largely copied from test_cross_repo_run_status_sensor
        with freeze_time(freeze_datetime):
            success_sensor = sensor_repo.get_sensor("success_sensor")
            instance.start_sensor(success_sensor)
            evaluate_sensors(workspace_context, executor)
            ticks = [
                *instance.get_ticks(
                    success_sensor.get_remote_origin_id(), success_sensor.selector_id
                )
            ]
            # first evaluation: no runs exist yet, so the sensor skips
            assert len(ticks) == 1
            validate_tick(
                ticks[0],
                success_sensor,
                freeze_datetime,
                TickStatus.SKIPPED,
            )
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        time.sleep(1)
        with freeze_time(freeze_datetime):
            external_success_job = sensor_repo.get_full_job("success_job")
            # this unfortunate API (create_run_for_job) requires the importation
            # of the in-memory job object even though it is dealing mostly with
            # "external" objects
            from dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs import (
                success_job,
            )

            dagster_run = instance.create_run_for_job(
                success_job,
                remote_job_origin=external_success_job.get_remote_origin(),
                job_code_origin=external_success_job.get_python_origin(),
            )
            instance.submit_run(dagster_run.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            dagster_run = next(iter(instance.get_runs()))
            assert dagster_run.status == DagsterRunStatus.SUCCESS
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        with freeze_time(freeze_datetime):
            evaluate_sensors(workspace_context, executor)
            ticks = [
                *instance.get_ticks(
                    success_sensor.get_remote_origin_id(), success_sensor.selector_id
                )
            ]
            # the run came from the sensor's own code location, so it fires
            assert len(ticks) == 2
            validate_tick(
                ticks[0],
                success_sensor,
                freeze_datetime,
                TickStatus.SUCCESS,
            )
        with freeze_time(freeze_datetime):
            external_success_job = dupe_job_repo.get_full_job("success_job")
            # this unfortunate API (create_run_for_job) requires the importation
            # of the in-memory job object even though it is dealing mostly with
            # "external" objects
            from dagster_tests.daemon_sensor_tests.locations_for_xlocation_sensor_test.job_defs import (
                success_job,
            )

            dagster_run = instance.create_run_for_job(
                success_job,
                remote_job_origin=external_success_job.get_remote_origin(),
                job_code_origin=external_success_job.get_python_origin(),
            )
            instance.submit_run(dagster_run.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            dagster_run = next(iter(instance.get_runs()))
            assert dagster_run.status == DagsterRunStatus.SUCCESS
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        with freeze_time(freeze_datetime):
            evaluate_sensors(workspace_context, executor)
            ticks = [
                *instance.get_ticks(
                    success_sensor.get_remote_origin_id(), success_sensor.selector_id
                )
            ]
            # same-named job but from the other code location: the sensor must not fire
            assert len(ticks) == 3
            validate_tick(
                ticks[0],
                success_sensor,
                freeze_datetime,
                TickStatus.SKIPPED,
            )
def test_cross_repo_run_status_sensor(executor: Optional[ThreadPoolExecutor]):
    """A run-status sensor in one repo reacts to a run of a job from a sibling repo."""
    now = get_current_datetime()
    with instance_with_single_code_location_multiple_repos_with_sensors(
        synchronous_run_coordinator=True,
    ) as (
        instance,
        workspace_context,
        repos,
    ):
        sensor_repo = repos["the_repo"]
        job_repo = repos["the_other_repo"]
        with freeze_time(now):
            watcher = sensor_repo.get_sensor("cross_repo_sensor")
            instance.start_sensor(watcher)
            evaluate_sensors(workspace_context, executor)
            ticks = instance.get_ticks(watcher.get_remote_origin_id(), watcher.selector_id)
            # nothing has run yet: the first tick must be a skip
            assert len(ticks) == 1
            validate_tick(ticks[0], watcher, now, TickStatus.SKIPPED)
        now = now + relativedelta(seconds=60)
        time.sleep(1)
        with freeze_time(now):
            external_the_job = job_repo.get_full_job("the_job")
            launched = instance.create_run_for_job(
                the_job,
                remote_job_origin=external_the_job.get_remote_origin(),
                job_code_origin=external_the_job.get_python_origin(),
            )
            instance.submit_run(launched.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            completed = instance.get_runs()[0]
            assert completed.status == DagsterRunStatus.SUCCESS
            now = now + relativedelta(seconds=60)
        with freeze_time(now):
            evaluate_sensors(workspace_context, executor)
            ticks = instance.get_ticks(watcher.get_remote_origin_id(), watcher.selector_id)
            # the successful run of the sibling repo's job makes the sensor fire
            assert len(ticks) == 2
            validate_tick(ticks[0], watcher, now, TickStatus.SUCCESS)
def test_cross_repo_job_run_status_sensor(executor: Optional[ThreadPoolExecutor]):
    """A run_status_sensor watching a job in a sibling repo can launch a run.

    After "the_job" (other repo) succeeds, the sensor launches "the_other_job";
    the launched run's own success must not re-trigger the sensor.
    """
    freeze_datetime = get_current_datetime()
    with instance_with_single_code_location_multiple_repos_with_sensors(
        synchronous_run_coordinator=True,
    ) as (
        instance,
        workspace_context,
        repos,
    ):
        the_repo = repos["the_repo"]
        the_other_repo = repos["the_other_repo"]
        with freeze_time(freeze_datetime):
            cross_repo_sensor = the_repo.get_sensor("cross_repo_job_sensor")
            instance.start_sensor(cross_repo_sensor)
            assert instance.get_runs_count() == 0
            evaluate_sensors(workspace_context, executor)
            wait_for_all_runs_to_finish(instance)
            # no monitored run yet, so the sensor must not launch anything
            assert instance.get_runs_count() == 0
            ticks = instance.get_ticks(
                cross_repo_sensor.get_remote_origin_id(), cross_repo_sensor.selector_id
            )
            assert len(ticks) == 1
            validate_tick(
                ticks[0],
                cross_repo_sensor,
                freeze_datetime,
                TickStatus.SKIPPED,
            )
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        time.sleep(1)
        with freeze_time(freeze_datetime):
            remote_job = the_other_repo.get_full_job("the_job")
            run = instance.create_run_for_job(
                the_job,
                remote_job_origin=remote_job.get_remote_origin(),
                job_code_origin=remote_job.get_python_origin(),
            )
            instance.submit_run(run.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            assert instance.get_runs_count() == 1
            run = instance.get_runs()[0]
            assert run.status == DagsterRunStatus.SUCCESS
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        with freeze_time(freeze_datetime):
            evaluate_sensors(workspace_context, executor)
            wait_for_all_runs_to_finish(instance)
            ticks = instance.get_ticks(
                cross_repo_sensor.get_remote_origin_id(), cross_repo_sensor.selector_id
            )
            assert len(ticks) == 2
            validate_tick(
                ticks[0],
                cross_repo_sensor,
                freeze_datetime,
                TickStatus.SUCCESS,
            )
            # the SUCCESS tick launched exactly one run of the_other_job
            run_request_runs = [r for r in instance.get_runs() if r.job_name == "the_other_job"]
            assert len(run_request_runs) == 1
            assert run_request_runs[0].status == DagsterRunStatus.SUCCESS
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)
        with freeze_time(freeze_datetime):
            # ensure that the success of the run launched by the sensor doesn't trigger the sensor
            evaluate_sensors(workspace_context, executor)
            wait_for_all_runs_to_finish(instance)
            run_request_runs = [r for r in instance.get_runs() if r.job_name == "the_other_job"]
            assert len(run_request_runs) == 1
            ticks = instance.get_ticks(
                cross_repo_sensor.get_remote_origin_id(), cross_repo_sensor.selector_id
            )
            assert len(ticks) == 3
            validate_tick(
                ticks[0],
                cross_repo_sensor,
                freeze_datetime,
                TickStatus.SKIPPED,
            )
def test_partitioned_job_run_status_sensor(
    caplog,
    executor: Optional[ThreadPoolExecutor],
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """The success sensor fires for a partitioned run and logs the acted-on run id."""
    freeze_datetime = get_current_datetime()
    with freeze_time(freeze_datetime):
        success_sensor = remote_repo.get_sensor("partitioned_pipeline_success_sensor")
        instance.start_sensor(success_sensor)
        assert instance.get_runs_count() == 0
        evaluate_sensors(workspace_context, executor)
        assert instance.get_runs_count() == 0
        ticks = instance.get_ticks(
            success_sensor.get_remote_origin_id(), success_sensor.selector_id
        )
        # no runs yet: the first tick is a skip
        assert len(ticks) == 1
        validate_tick(
            ticks[0],
            success_sensor,
            freeze_datetime,
            TickStatus.SKIPPED,
        )
    freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    time.sleep(1)
    with freeze_time(freeze_datetime):
        remote_job = remote_repo.get_full_job("daily_partitioned_job")
        run = instance.create_run_for_job(
            daily_partitioned_job,
            remote_job_origin=remote_job.get_remote_origin(),
            job_code_origin=remote_job.get_python_origin(),
            # tag the run with the partition it materializes
            tags={"dagster/partition": "2022-08-01"},
        )
        instance.submit_run(run.run_id, workspace_context.create_request_context())
        wait_for_all_runs_to_finish(instance)
        assert instance.get_runs_count() == 1
        run = instance.get_runs()[0]
        assert run.status == DagsterRunStatus.SUCCESS
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    caplog.clear()
    with freeze_time(freeze_datetime):
        # should fire the success sensor
        evaluate_sensors(workspace_context, executor)
        ticks = instance.get_ticks(
            success_sensor.get_remote_origin_id(), success_sensor.selector_id
        )
        assert len(ticks) == 2
        validate_tick(
            ticks[0],
            success_sensor,
            freeze_datetime,
            TickStatus.SUCCESS,
        )
        assert (
            'Sensor "partitioned_pipeline_success_sensor" acted on run status SUCCESS of run'
            in caplog.text
        )
def test_different_instance_run_status_sensor(executor: Optional[ThreadPoolExecutor]):
    """A sensor never reacts to runs recorded on a different DagsterInstance."""
    freeze_datetime = get_current_datetime()
    with instance_with_sensors(
        synchronous_run_coordinator=True,
    ) as (
        instance,
        workspace_context,
        the_repo,
    ):
        with instance_with_sensors(
            attribute="the_other_repo",
            synchronous_run_coordinator=True,
        ) as (
            the_other_instance,
            the_other_workspace_context,
            the_other_repo,
        ):
            with freeze_time(freeze_datetime):
                cross_repo_sensor = the_repo.get_sensor("cross_repo_sensor")
                instance.start_sensor(cross_repo_sensor)
                evaluate_sensors(workspace_context, executor)
                ticks = instance.get_ticks(
                    cross_repo_sensor.get_remote_origin_id(), cross_repo_sensor.selector_id
                )
                assert len(ticks) == 1
                validate_tick(
                    ticks[0],
                    cross_repo_sensor,
                    freeze_datetime,
                    TickStatus.SKIPPED,
                )
            freeze_datetime = freeze_datetime + relativedelta(seconds=60)
            time.sleep(1)
            with freeze_time(freeze_datetime):
                # launch and record the run on the *other* instance
                remote_job = the_other_repo.get_full_job("the_job")
                run = the_other_instance.create_run_for_job(
                    the_job,
                    remote_job_origin=remote_job.get_remote_origin(),
                    job_code_origin=remote_job.get_python_origin(),
                )
                the_other_instance.submit_run(
                    run.run_id, the_other_workspace_context.create_request_context()
                )
                wait_for_all_runs_to_finish(the_other_instance)
                run = the_other_instance.get_runs()[0]
                assert run.status == DagsterRunStatus.SUCCESS
                freeze_datetime = freeze_datetime + relativedelta(seconds=60)
            with freeze_time(freeze_datetime):
                evaluate_sensors(workspace_context, executor)
                ticks = instance.get_ticks(
                    cross_repo_sensor.get_remote_origin_id(), cross_repo_sensor.selector_id
                )
                assert len(ticks) == 2
                # the_pipeline was run in another instance, so the cross_repo_sensor should not trigger
                validate_tick(
                    ticks[0],
                    cross_repo_sensor,
                    freeze_datetime,
                    TickStatus.SKIPPED,
                )
def test_instance_run_status_sensor(executor: Optional[ThreadPoolExecutor]):
    """The sensor named "instance_sensor" fires for a successful run even though
    that run's job lives in a different repo of the same code location."""
    evaluation_time = get_current_datetime()
    with instance_with_single_code_location_multiple_repos_with_sensors(
        synchronous_run_coordinator=True,
    ) as (
        instance,
        workspace_context,
        repos,
    ):
        sensor_repo = repos["the_repo"]
        job_repo = repos["the_other_repo"]
        with freeze_time(evaluation_time):
            instance_sensor = sensor_repo.get_sensor("instance_sensor")
            instance.start_sensor(instance_sensor)
            evaluate_sensors(workspace_context, executor)
            observed_ticks = instance.get_ticks(
                instance_sensor.get_remote_origin_id(), instance_sensor.selector_id
            )
            # no runs have completed yet -> the sensor skips
            assert len(observed_ticks) == 1
            validate_tick(observed_ticks[0], instance_sensor, evaluation_time, TickStatus.SKIPPED)
        evaluation_time = evaluation_time + relativedelta(seconds=60)
        time.sleep(1)
        with freeze_time(evaluation_time):
            external_the_job = job_repo.get_full_job("the_job")
            submitted = instance.create_run_for_job(
                the_job,
                remote_job_origin=external_the_job.get_remote_origin(),
                job_code_origin=external_the_job.get_python_origin(),
            )
            instance.submit_run(submitted.run_id, workspace_context.create_request_context())
            wait_for_all_runs_to_finish(instance)
            finished_run = instance.get_runs()[0]
            assert finished_run.status == DagsterRunStatus.SUCCESS
            evaluation_time = evaluation_time + relativedelta(seconds=60)
        with freeze_time(evaluation_time):
            evaluate_sensors(workspace_context, executor)
            observed_ticks = instance.get_ticks(
                instance_sensor.get_remote_origin_id(), instance_sensor.selector_id
            )
            # the run from the other repo still produces a SUCCESS tick
            assert len(observed_ticks) == 2
            validate_tick(observed_ticks[0], instance_sensor, evaluation_time, TickStatus.SUCCESS)
def test_logging_run_status_sensor(
    executor: Optional[ThreadPoolExecutor],
    instance: DagsterInstance,
    workspace_context: WorkspaceProcessContext,
    remote_repo: RemoteRepository,
):
    """A run-status sensor that logs has its output captured under the tick's log key."""
    freeze_datetime = get_current_datetime()
    with freeze_time(freeze_datetime):
        success_sensor = remote_repo.get_sensor("logging_status_sensor")
        instance.start_sensor(success_sensor)
        evaluate_sensors(workspace_context, executor)
        ticks = instance.get_ticks(
            success_sensor.get_remote_origin_id(), success_sensor.selector_id
        )
        assert len(ticks) == 1
        validate_tick(
            ticks[0],
            success_sensor,
            freeze_datetime,
            TickStatus.SKIPPED,
        )
    freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    with freeze_time(freeze_datetime):
        remote_job = remote_repo.get_full_job("foo_job")
        run = instance.create_run_for_job(
            foo_job,
            remote_job_origin=remote_job.get_remote_origin(),
            job_code_origin=remote_job.get_python_origin(),
        )
        instance.submit_run(run.run_id, workspace_context.create_request_context())
        wait_for_all_runs_to_finish(instance)
        run = instance.get_runs()[0]
        assert run.status == DagsterRunStatus.SUCCESS
        freeze_datetime = freeze_datetime + relativedelta(seconds=60)
    with freeze_time(freeze_datetime):
        # should fire the success sensor and the started sensor
        evaluate_sensors(workspace_context, executor)
        ticks = instance.get_ticks(
            success_sensor.get_remote_origin_id(), success_sensor.selector_id
        )
        assert len(ticks) == 2
        validate_tick(
            ticks[0],
            success_sensor,
            freeze_datetime,
            TickStatus.SUCCESS,
        )
        tick = ticks[0]
        assert tick.log_key
        records = get_instigation_log_records(instance, tick.log_key)
        assert len(records) == 1
        assert records
        record = records[0]
        # the sensor body logged exactly one line naming the observed run
        assert record[LOG_RECORD_METADATA_ATTR]["orig_message"] == f"run succeeded: {run.run_id}"
        instance.compute_log_manager.delete_logs(log_key=tick.log_key)
| CodeLocationInfoForSensorTest |
python | pandas-dev__pandas | pandas/io/common.py | {
"start": 1988,
"end": 29929
} | class ____(Generic[AnyStr]):
"""
Return value of io/common.py:get_handle
Can be used as a context manager.
This is used to easily close created buffers and to handle corner cases when
TextIOWrapper is inserted.
handle: The file handle to be used.
created_handles: All file handles that are created by get_handle
is_wrapped: Whether a TextIOWrapper needs to be detached.
"""
# handle might not implement the IO-interface
handle: IO[AnyStr]
compression: CompressionDict
created_handles: list[IO[bytes] | IO[str]] = dataclasses.field(default_factory=list)
is_wrapped: bool = False
    def close(self) -> None:
        """
        Close all created buffers.

        Note: If a TextIOWrapper was inserted, it is flushed and detached to
        avoid closing the potentially user-created buffer.
        """
        if self.is_wrapped:
            assert isinstance(self.handle, TextIOWrapper)
            # flush+detach instead of close: the wrapped buffer may be user-owned
            self.handle.flush()
            self.handle.detach()
            self.created_handles.remove(self.handle)
        for handle in self.created_handles:
            handle.close()
        # reset state so close() is idempotent
        self.created_handles = []
        self.is_wrapped = False
    def __enter__(self) -> IOHandles[AnyStr]:
        # no setup needed: the handles are already open when the object is created
        return self
    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_value: BaseException | None,
        traceback: TracebackType | None,
    ) -> None:
        # always close created buffers, even when the with-body raised
        self.close()
def is_url(url: object) -> bool:
    """
    Check to see if a URL has a valid protocol.

    Parameters
    ----------
    url : str or unicode

    Returns
    -------
    isurl : bool
        If `url` has a valid protocol return True otherwise False.
    """
    # non-strings can never be URLs; otherwise the parsed scheme decides
    return isinstance(url, str) and parse_url(url).scheme in _VALID_URLS
@overload
def _expand_user(filepath_or_buffer: str) -> str: ...
@overload
def _expand_user(filepath_or_buffer: BaseBufferT) -> BaseBufferT: ...
def _expand_user(filepath_or_buffer: str | BaseBufferT) -> str | BaseBufferT:
"""
Return the argument with an initial component of ~ or ~user
replaced by that user's home directory.
Parameters
----------
filepath_or_buffer : object to be converted if possible
Returns
-------
expanded_filepath_or_buffer : an expanded filepath or the
input if not expandable
"""
if isinstance(filepath_or_buffer, str):
return os.path.expanduser(filepath_or_buffer)
return filepath_or_buffer
def validate_header_arg(header: object) -> None:
    """
    Validate the ``header`` argument of the read_* functions.

    ``header`` may be None, a non-negative integer, or a list-like of
    non-negative integers. Booleans raise TypeError; anything else raises
    ValueError.
    """
    if header is None:
        return
    if is_integer(header):
        row = cast(int, header)
        if row >= 0:
            return
        # GH 27779
        raise ValueError(
            "Passing negative integer to header is invalid. "
            "For no header, use header=None instead"
        )
    if is_list_like(header, allow_sets=False):
        rows = cast(Sequence, header)
        if not all(is_integer(r) for r in rows):
            raise ValueError("header must be integer or list of integers")
        if min(rows, default=0) < 0:
            raise ValueError("cannot specify multi-index header with negative integers")
        return
    if is_bool(header):
        raise TypeError(
            "Passing a bool to header is invalid. Use header=None for no header or "
            "header=int or list-like of ints to specify "
            "the row(s) making up the column names"
        )
    # GH 16338
    raise ValueError("header must be integer or list of integers")
@overload
def stringify_path(
    filepath_or_buffer: FilePath, convert_file_like: bool = ...
) -> str: ...


@overload
def stringify_path(
    filepath_or_buffer: BaseBufferT, convert_file_like: bool = ...
) -> BaseBufferT: ...


def stringify_path(
    filepath_or_buffer: FilePath | BaseBufferT,
    convert_file_like: bool = False,
) -> str | BaseBufferT:
    """
    Attempt to convert a path-like object to a string.

    Parameters
    ----------
    filepath_or_buffer : object to be converted

    Returns
    -------
    str_filepath_or_buffer : maybe a string version of the object

    Notes
    -----
    Objects supporting the fspath protocol are coerced
    according to its __fspath__ method.

    Any other object is passed through unchanged, which includes bytes,
    strings, buffers, or anything else that's not even path-like.
    """
    if is_file_like(filepath_or_buffer) and not convert_file_like:
        # GH 38125: some fsspec objects implement os.PathLike but have already opened a
        # file. This prevents opening the file a second time. infer_compression calls
        # this function with convert_file_like=True to infer the compression.
        return cast(BaseBufferT, filepath_or_buffer)

    path = filepath_or_buffer
    if isinstance(path, os.PathLike):
        path = path.__fspath__()
    return _expand_user(path)
def urlopen(*args: Any, **kwargs: Any) -> Any:
    """
    Lazy-import wrapper for stdlib urlopen, as that imports a big chunk of
    the stdlib.
    """
    from urllib.request import urlopen as _urlopen  # noqa: TID251

    return _urlopen(*args, **kwargs)
def is_fsspec_url(url: FilePath | BaseBuffer) -> bool:
    """
    Returns true if the given URL looks like
    something fsspec can handle
    """
    if not isinstance(url, str):
        return False
    # plain http(s) URLs are handled by urllib, not fsspec
    if url.startswith(("http://", "https://")):
        return False
    return _FSSPEC_URL_PATTERN.match(url) is not None
@doc(
    storage_options=_shared_docs["storage_options"],
    compression_options=_shared_docs["compression_options"] % "filepath_or_buffer",
)
def _get_filepath_or_buffer(
    filepath_or_buffer: FilePath | BaseBuffer,
    encoding: str = "utf-8",
    compression: CompressionOptions | None = None,
    mode: str = "r",
    storage_options: StorageOptions | None = None,
) -> IOArgs:
    """
    If the filepath_or_buffer is a url, translate and return the buffer.
    Otherwise passthrough.

    Parameters
    ----------
    filepath_or_buffer : a url, filepath (str or pathlib.Path),
        or buffer
    {compression_options}

    encoding : the encoding to use to decode bytes, default is 'utf-8'
    mode : str, optional

    {storage_options}


    Returns the dataclass IOArgs.
    """
    filepath_or_buffer = stringify_path(filepath_or_buffer)

    # handle compression dict
    compression_method, compression = get_compression_method(compression)
    compression_method = infer_compression(filepath_or_buffer, compression_method)

    # GH21227 internal compression is not used for non-binary handles.
    if compression_method and hasattr(filepath_or_buffer, "write") and "b" not in mode:
        warnings.warn(
            "compression has no effect when passing a non-binary object as input.",
            RuntimeWarning,
            stacklevel=find_stack_level(),
        )
        compression_method = None

    compression = dict(compression, method=compression_method)

    # bz2 and xz do not write the byte order mark for utf-16 and utf-32
    # print a warning when writing such files
    if (
        "w" in mode
        and compression_method in ["bz2", "xz"]
        and encoding in ["utf-16", "utf-32"]
    ):
        warnings.warn(
            f"{compression} will not write the byte order mark for {encoding}",
            UnicodeWarning,
            stacklevel=find_stack_level(),
        )

    if "a" in mode and compression_method in ["zip", "tar"]:
        # GH56778
        warnings.warn(
            "zip and tar do not support mode 'a' properly. "
            "This combination will result in multiple files with same name "
            "being added to the archive.",
            RuntimeWarning,
            stacklevel=find_stack_level(),
        )

    # Use binary mode when converting path-like objects to file-like objects (fsspec)
    # except when text mode is explicitly requested. The original mode is returned if
    # fsspec is not used.
    fsspec_mode = mode
    if "t" not in fsspec_mode and "b" not in fsspec_mode:
        fsspec_mode += "b"

    if isinstance(filepath_or_buffer, str) and is_url(filepath_or_buffer):
        # TODO: fsspec can also handle HTTP via requests, but leaving this
        # unchanged. using fsspec appears to break the ability to infer if the
        # server responded with gzipped data
        storage_options = storage_options or {}

        # waiting until now for importing to match intended lazy logic of
        # urlopen function defined elsewhere in this module
        import urllib.request

        # assuming storage_options is to be interpreted as headers
        req_info = urllib.request.Request(filepath_or_buffer, headers=storage_options)
        with urlopen(req_info) as req:
            content_encoding = req.headers.get("Content-Encoding", None)
            if content_encoding == "gzip":
                # Override compression based on Content-Encoding header
                compression = {"method": "gzip"}
            # read the whole response into memory so the socket can be closed
            reader = BytesIO(req.read())
        return IOArgs(
            filepath_or_buffer=reader,
            encoding=encoding,
            compression=compression,
            should_close=True,
            mode=fsspec_mode,
        )

    if is_fsspec_url(filepath_or_buffer):
        assert isinstance(
            filepath_or_buffer, str
        )  # just to appease mypy for this branch
        # two special-case s3-like protocols; these have special meaning in Hadoop,
        # but are equivalent to just "s3" from fsspec's point of view
        # cc #11071
        if filepath_or_buffer.startswith("s3a://"):
            filepath_or_buffer = filepath_or_buffer.replace("s3a://", "s3://")
        if filepath_or_buffer.startswith("s3n://"):
            filepath_or_buffer = filepath_or_buffer.replace("s3n://", "s3://")
        fsspec = import_optional_dependency("fsspec")

        # If botocore is installed we fallback to reading with anon=True
        # to allow reads from public buckets
        err_types_to_retry_with_anon: list[Any] = []
        try:
            import_optional_dependency("botocore")
            from botocore.exceptions import (
                ClientError,
                NoCredentialsError,
            )

            err_types_to_retry_with_anon = [
                ClientError,
                NoCredentialsError,
                PermissionError,
            ]
        except ImportError:
            pass

        try:
            file_obj = fsspec.open(
                filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
            ).open()
        # GH 34626 Reads from Public Buckets without Credentials needs anon=True
        except tuple(err_types_to_retry_with_anon):
            if storage_options is None:
                storage_options = {"anon": True}
            else:
                # don't mutate user input.
                storage_options = dict(storage_options)
                storage_options["anon"] = True
            file_obj = fsspec.open(
                filepath_or_buffer, mode=fsspec_mode, **(storage_options or {})
            ).open()

        return IOArgs(
            filepath_or_buffer=file_obj,
            encoding=encoding,
            compression=compression,
            should_close=True,
            mode=fsspec_mode,
        )
    elif storage_options:
        # storage_options only make sense for URLs / fsspec paths
        raise ValueError(
            "storage_options passed with file object or non-fsspec file path"
        )

    if isinstance(filepath_or_buffer, (str, bytes, mmap.mmap)):
        return IOArgs(
            filepath_or_buffer=_expand_user(filepath_or_buffer),
            encoding=encoding,
            compression=compression,
            should_close=False,
            mode=mode,
        )

    # is_file_like requires (read | write) & __iter__ but __iter__ is only
    # needed for read_csv(engine=python)
    if not (
        hasattr(filepath_or_buffer, "read") or hasattr(filepath_or_buffer, "write")
    ):
        msg = f"Invalid file path or buffer object type: {type(filepath_or_buffer)}"
        raise ValueError(msg)

    return IOArgs(
        filepath_or_buffer=filepath_or_buffer,
        encoding=encoding,
        compression=compression,
        should_close=False,
        mode=mode,
    )
def file_path_to_url(path: str) -> str:
    """
    converts an absolute native path to a FILE URL.

    Parameters
    ----------
    path : a path in native format

    Returns
    -------
    a valid FILE URL
    """
    # lazify expensive import (~30ms)
    from urllib.request import pathname2url

    url_path = pathname2url(path)
    return urljoin("file:", url_path)
# Filename extension -> compression method name; consumed by infer_compression
# to map a path/URL suffix to the method used to open the file.
extension_to_compression = {
    ".tar": "tar",
    ".tar.gz": "tar",
    ".tar.bz2": "tar",
    ".tar.xz": "tar",
    ".gz": "gzip",
    ".bz2": "bz2",
    ".zip": "zip",
    ".xz": "xz",
    ".zst": "zstd",
}
# The set of valid explicit compression method names (used for validation).
_supported_compressions = set(extension_to_compression.values())
def get_compression_method(
    compression: CompressionOptions,
) -> tuple[str | None, CompressionDict]:
    """
    Simplify a compression argument to a compression method string and
    a mapping containing additional arguments.

    Parameters
    ----------
    compression : str or mapping
        If string, specifies the compression method. If mapping, value at key
        'method' specifies compression method.

    Returns
    -------
    tuple of (Optional[str], Dict[str, Any])
        The compression method and a dict of additional compression arguments
        (empty when ``compression`` was not a mapping).

    Raises
    ------
    ValueError
        If ``compression`` is a mapping missing the key 'method'.
    """
    compression_method: str | None
    if isinstance(compression, Mapping):
        # copy so that popping 'method' does not mutate the caller's mapping
        compression_args = dict(compression)
        try:
            compression_method = compression_args.pop("method")
        except KeyError as err:
            raise ValueError("If mapping, compression must have key 'method'") from err
    else:
        compression_args = {}
        compression_method = compression
    return compression_method, compression_args
@doc(compression_options=_shared_docs["compression_options"] % "filepath_or_buffer")
def infer_compression(
    filepath_or_buffer: FilePath | BaseBuffer, compression: str | None
) -> str | None:
    """
    Get the compression method for filepath_or_buffer. If compression='infer',
    the inferred compression method is returned. Otherwise, the input
    compression method is returned unchanged, unless it's invalid, in which
    case an error is raised.

    Parameters
    ----------
    filepath_or_buffer : str or file handle
        File path or object.
    {compression_options}

    Returns
    -------
    string or None

    Raises
    ------
    ValueError on invalid compression specified.
    """
    if compression is None:
        return None

    if compression != "infer":
        # an explicit method was requested; validate it
        if compression in _supported_compressions:
            return compression
        valid = ["infer", None] + sorted(_supported_compressions)
        msg = (
            f"Unrecognized compression type: {compression}\n"
            f"Valid compression types are {valid}"
        )
        raise ValueError(msg)

    # Infer the compression from the filename/URL extension.
    if isinstance(filepath_or_buffer, str) and "::" in filepath_or_buffer:
        # chained URLs contain ::
        filepath_or_buffer = filepath_or_buffer.split("::")[0]
    # Convert all path types (e.g. pathlib.Path) to strings
    path = stringify_path(filepath_or_buffer, convert_file_like=True)
    if not isinstance(path, str):
        # Cannot infer compression of a buffer, assume no compression
        return None
    lowered = path.lower()
    for extension, method in extension_to_compression.items():
        if lowered.endswith(extension):
            return method
    return None
def check_parent_directory(path: Path | str) -> None:
    """
    Check if parent directory of a file exists, raise OSError if it does not

    Parameters
    ----------
    path: Path or str
        Path to check parent directory of
    """
    parent_dir = Path(path).parent
    if parent_dir.is_dir():
        return
    raise OSError(rf"Cannot save file into a non-existent directory: '{parent_dir}'")
# typing overloads for get_handle: is_text=False yields binary handles,
# is_text=True (the default) yields text handles.
@overload
def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = ...,
    compression: CompressionOptions = ...,
    memory_map: bool = ...,
    is_text: Literal[False],
    errors: str | None = ...,
    storage_options: StorageOptions = ...,
) -> IOHandles[bytes]: ...


@overload
def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = ...,
    compression: CompressionOptions = ...,
    memory_map: bool = ...,
    is_text: Literal[True] = ...,
    errors: str | None = ...,
    storage_options: StorageOptions = ...,
) -> IOHandles[str]: ...


# fallback overload when is_text is only known at runtime
@overload
def get_handle(
    path_or_buf: FilePath | BaseBuffer,
    mode: str,
    *,
    encoding: str | None = ...,
    compression: CompressionOptions = ...,
    memory_map: bool = ...,
    is_text: bool = ...,
    errors: str | None = ...,
    storage_options: StorageOptions = ...,
) -> IOHandles[str] | IOHandles[bytes]: ...
@doc(compression_options=_shared_docs["compression_options"] % "path_or_buf")
def get_handle(
path_or_buf: FilePath | BaseBuffer,
mode: str,
*,
encoding: str | None = None,
compression: CompressionOptions | None = None,
memory_map: bool = False,
is_text: bool = True,
errors: str | None = None,
storage_options: StorageOptions | None = None,
) -> IOHandles[str] | IOHandles[bytes]:
"""
Get file handle for given path/buffer and mode.
Parameters
----------
path_or_buf : str or file handle
File path or object.
mode : str
Mode to open path_or_buf with.
encoding : str or None
Encoding to use.
{compression_options}
May be a dict with key 'method' as compression mode
and other keys as compression options if compression
mode is 'zip'.
Passing compression options as keys in dict is
supported for compression modes 'gzip', 'bz2', 'zstd' and 'zip'.
memory_map : bool, default False
See parsers._parser_params for more information. Only used by read_csv.
is_text : bool, default True
Whether the type of the content passed to the file/buffer is string or
bytes. This is not the same as `"b" not in mode`. If a string content is
passed to a binary file/buffer, a wrapper is inserted.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
storage_options: StorageOptions = None
Passed to _get_filepath_or_buffer
Returns the dataclass IOHandles
"""
# Windows does not default to utf-8. Set to utf-8 for a consistent behavior
encoding = encoding or "utf-8"
errors = errors or "strict"
# read_csv does not know whether the buffer is opened in binary/text mode
if _is_binary_mode(path_or_buf, mode) and "b" not in mode:
mode += "b"
# validate encoding and errors
codecs.lookup(encoding)
if isinstance(errors, str):
codecs.lookup_error(errors)
# open URLs
ioargs = _get_filepath_or_buffer(
path_or_buf,
encoding=encoding,
compression=compression,
mode=mode,
storage_options=storage_options,
)
handle = ioargs.filepath_or_buffer
handles: list[BaseBuffer]
# memory mapping needs to be the first step
# only used for read_csv
handle, memory_map, handles = _maybe_memory_map(handle, memory_map)
is_path = isinstance(handle, str)
compression_args = dict(ioargs.compression)
compression = compression_args.pop("method")
# Only for write methods
if "r" not in mode and is_path:
check_parent_directory(str(handle))
if compression:
if compression != "zstd":
# compression libraries do not like an explicit text-mode
ioargs.mode = ioargs.mode.replace("t", "")
elif compression == "zstd" and "b" not in ioargs.mode:
# python-zstandard defaults to text mode, but we always expect
# compression libraries to use binary mode.
ioargs.mode += "b"
# GZ Compression
if compression == "gzip":
if isinstance(handle, str):
# error: Incompatible types in assignment (expression has type
# "GzipFile", variable has type "Union[str, BaseBuffer]")
handle = gzip.GzipFile( # type: ignore[assignment]
filename=handle,
mode=ioargs.mode,
**compression_args,
)
else:
handle = gzip.GzipFile(
# No overload variant of "GzipFile" matches argument types
# "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
fileobj=handle, # type: ignore[call-overload]
mode=ioargs.mode,
**compression_args,
)
# BZ Compression
elif compression == "bz2":
import bz2
# Overload of "BZ2File" to handle pickle protocol 5
# "Union[str, BaseBuffer]", "str", "Dict[str, Any]"
handle = bz2.BZ2File( # type: ignore[call-overload]
handle,
mode=ioargs.mode,
**compression_args,
)
# ZIP Compression
elif compression == "zip":
# error: Argument 1 to "_BytesZipFile" has incompatible type
# "Union[str, BaseBuffer]"; expected "Union[Union[str, PathLike[str]],
# ReadBuffer[bytes], WriteBuffer[bytes]]"
handle = _BytesZipFile(
handle, # type: ignore[arg-type]
ioargs.mode,
**compression_args,
)
if handle.buffer.mode == "r":
handles.append(handle)
zip_names = handle.buffer.namelist()
if len(zip_names) == 1:
handle = handle.buffer.open(zip_names.pop())
elif not zip_names:
raise ValueError(f"Zero files found in ZIP file {path_or_buf}")
else:
raise ValueError(
"Multiple files found in ZIP file. "
f"Only one file per ZIP: {zip_names}"
)
# TAR Encoding
elif compression == "tar":
compression_args.setdefault("mode", ioargs.mode)
if isinstance(handle, str):
handle = _BytesTarFile(name=handle, **compression_args)
else:
# error: Argument "fileobj" to "_BytesTarFile" has incompatible
# type "BaseBuffer"; expected "Union[ReadBuffer[bytes],
# WriteBuffer[bytes], None]"
handle = _BytesTarFile(
fileobj=handle, # type: ignore[arg-type]
**compression_args,
)
assert isinstance(handle, _BytesTarFile)
if "r" in handle.buffer.mode:
handles.append(handle)
files = handle.buffer.getnames()
if len(files) == 1:
file = handle.buffer.extractfile(files[0])
assert file is not None
handle = file
elif not files:
raise ValueError(f"Zero files found in TAR archive {path_or_buf}")
else:
raise ValueError(
"Multiple files found in TAR archive. "
f"Only one file per TAR archive: {files}"
)
# XZ Compression
elif compression == "xz":
# error: Argument 1 to "LZMAFile" has incompatible type "Union[str,
# BaseBuffer]"; expected "Optional[Union[Union[str, bytes, PathLike[str],
# PathLike[bytes]], IO[bytes]], None]"
import lzma
handle = lzma.LZMAFile(
handle, # type: ignore[arg-type]
ioargs.mode,
**compression_args,
)
# Zstd Compression
elif compression == "zstd":
zstd = import_optional_dependency("zstandard")
if "r" in ioargs.mode:
open_args = {"dctx": zstd.ZstdDecompressor(**compression_args)}
else:
open_args = {"cctx": zstd.ZstdCompressor(**compression_args)}
handle = zstd.open(
handle,
mode=ioargs.mode,
**open_args,
)
# Unrecognized Compression
else:
msg = f"Unrecognized compression type: {compression}"
raise ValueError(msg)
assert not isinstance(handle, str)
handles.append(handle)
elif isinstance(handle, str):
# Check whether the filename is to be opened in binary mode.
# Binary mode does not support 'encoding' and 'newline'.
if ioargs.encoding and "b" not in ioargs.mode:
# Encoding
handle = open(
handle,
ioargs.mode,
encoding=ioargs.encoding,
errors=errors,
newline="",
)
else:
# Binary mode
handle = open(handle, ioargs.mode)
handles.append(handle)
# Convert BytesIO or file objects passed with an encoding
is_wrapped = False
if not is_text and ioargs.mode == "rb" and isinstance(handle, TextIOBase):
# not added to handles as it does not open/buffer resources
handle = _BytesIOWrapper(
handle,
encoding=ioargs.encoding,
)
elif is_text and (
compression or memory_map or _is_binary_mode(handle, ioargs.mode)
):
if (
not hasattr(handle, "readable")
or not hasattr(handle, "writable")
or not hasattr(handle, "seekable")
):
handle = _IOWrapper(handle)
# error: Value of type variable "_BufferT_co" of "TextIOWrapper" cannot
# be "_IOWrapper | BaseBuffer" [type-var]
handle = TextIOWrapper(
handle, # type: ignore[type-var]
encoding=ioargs.encoding,
errors=errors,
newline="",
)
handles.append(handle)
# only marked as wrapped when the caller provided a handle
is_wrapped = not (
isinstance(ioargs.filepath_or_buffer, str) or ioargs.should_close
)
if "r" in ioargs.mode and not hasattr(handle, "read"):
raise TypeError(
"Expected file path name or file-like object, "
f"got {type(ioargs.filepath_or_buffer)} type"
)
handles.reverse() # close the most recently added buffer first
if ioargs.should_close:
assert not isinstance(ioargs.filepath_or_buffer, str)
handles.append(ioargs.filepath_or_buffer)
return IOHandles(
# error: Argument "handle" to "IOHandles" has incompatible type
# "Union[TextIOWrapper, GzipFile, BaseBuffer, typing.IO[bytes],
# typing.IO[Any]]"; expected "pandas._typing.IO[Any]"
handle=handle, # type: ignore[arg-type]
# error: Argument "created_handles" to "IOHandles" has incompatible type
# "List[BaseBuffer]"; expected "List[Union[IO[bytes], IO[str]]]"
created_handles=handles, # type: ignore[arg-type]
is_wrapped=is_wrapped,
compression=ioargs.compression,
)
| IOHandles |
python | gevent__gevent | src/greentest/3.10/test_signal.py | {
"start": 6238,
"end": 8914
} | class ____(unittest.TestCase):
def test_invalid_call(self):
# First parameter is positional-only
with self.assertRaises(TypeError):
signal.set_wakeup_fd(signum=signal.SIGINT)
# warn_on_full_buffer is a keyword-only parameter
with self.assertRaises(TypeError):
signal.set_wakeup_fd(signal.SIGINT, False)
def test_invalid_fd(self):
fd = os_helper.make_bad_fd()
self.assertRaises((ValueError, OSError),
signal.set_wakeup_fd, fd)
def test_invalid_socket(self):
sock = socket.socket()
fd = sock.fileno()
sock.close()
self.assertRaises((ValueError, OSError),
signal.set_wakeup_fd, fd)
def test_set_wakeup_fd_result(self):
r1, w1 = os.pipe()
self.addCleanup(os.close, r1)
self.addCleanup(os.close, w1)
r2, w2 = os.pipe()
self.addCleanup(os.close, r2)
self.addCleanup(os.close, w2)
if hasattr(os, 'set_blocking'):
os.set_blocking(w1, False)
os.set_blocking(w2, False)
signal.set_wakeup_fd(w1)
self.assertEqual(signal.set_wakeup_fd(w2), w1)
self.assertEqual(signal.set_wakeup_fd(-1), w2)
self.assertEqual(signal.set_wakeup_fd(-1), -1)
def test_set_wakeup_fd_socket_result(self):
sock1 = socket.socket()
self.addCleanup(sock1.close)
sock1.setblocking(False)
fd1 = sock1.fileno()
sock2 = socket.socket()
self.addCleanup(sock2.close)
sock2.setblocking(False)
fd2 = sock2.fileno()
signal.set_wakeup_fd(fd1)
self.assertEqual(signal.set_wakeup_fd(fd2), fd1)
self.assertEqual(signal.set_wakeup_fd(-1), fd2)
self.assertEqual(signal.set_wakeup_fd(-1), -1)
# On Windows, files are always blocking and Windows does not provide a
# function to test if a socket is in non-blocking mode.
@unittest.skipIf(sys.platform == "win32", "tests specific to POSIX")
def test_set_wakeup_fd_blocking(self):
rfd, wfd = os.pipe()
self.addCleanup(os.close, rfd)
self.addCleanup(os.close, wfd)
# fd must be non-blocking
os.set_blocking(wfd, True)
with self.assertRaises(ValueError) as cm:
signal.set_wakeup_fd(wfd)
self.assertEqual(str(cm.exception),
"the fd %s must be in non-blocking mode" % wfd)
# non-blocking is ok
os.set_blocking(wfd, False)
signal.set_wakeup_fd(wfd)
signal.set_wakeup_fd(-1)
@unittest.skipIf(sys.platform == "win32", "Not valid on Windows")
| WakeupFDTests |
python | jazzband__django-simple-history | simple_history/tests/models.py | {
"start": 23601,
"end": 23824
} | class ____(models.Model):
name = models.CharField(max_length=15, unique=True)
other = models.ManyToManyField(ManyToManyModelOther)
history = HistoricalRecords(excluded_fields=["other"])
| ModelWithExcludedManyToMany |
python | django-import-export__django-import-export | tests/core/tests/test_forms.py | {
"start": 408,
"end": 1455
} | class ____(TestCase):
def test_formbase_init_blank_resources(self):
with self.assertRaises(ValueError):
forms.ImportExportFormBase(["format1"], [])
def test_formbase_init_one_resource(self):
form = forms.ImportExportFormBase([CSV], [resources.ModelResource])
self.assertEqual(
form.fields["resource"].choices,
[(0, "ModelResource")],
)
self.assertEqual(form.initial["resource"], "0")
self.assertIsInstance(
form.fields["resource"].widget,
django.forms.HiddenInput,
)
def test_formbase_init_two_resources(self):
form = forms.ImportExportFormBase([CSV], [resources.ModelResource, MyResource])
self.assertEqual(
form.fields["resource"].choices,
[(0, "ModelResource"), (1, "My super resource")],
)
self.assertNotIn("resource", form.initial)
self.assertIsInstance(
form.fields["resource"].widget,
django.forms.Select,
)
| FormTest |
python | conda__conda | conda/models/package_info.py | {
"start": 452,
"end": 609
} | class ____(EnumField):
def box(self, instance, instance_type, val):
return super().box(instance, instance_type, NoarchType.coerce(val))
| NoarchField |
python | sympy__sympy | sympy/polys/rings.py | {
"start": 24916,
"end": 109319
} | class ____(
DomainElement, DefaultPrinting, CantSympify, dict[tuple[int, ...], Er], Generic[Er]
):
"""Element of multivariate distributed polynomial ring."""
def __init__(
self, ring: PolyRing[Er], init: dict[Mon, Er] | Iterable[tuple[Mon, Er]]
):
super().__init__(init)
self.ring = ring
# This check would be too slow to run every time:
# self._check()
def __getnewargs__(self):
return (self.ring, list(self.iterterms()))
_hash = None
def __hash__(self) -> int: # type: ignore
# XXX: This computes a hash of a dictionary, but currently we don't
# protect dictionary from being changed so any use site modifications
# will make hashing go wrong. Use this feature with caution until we
# figure out how to make a safe API without compromising speed of this
# low-level class.
_hash = self._hash
if _hash is None:
self._hash = _hash = hash((self.ring, frozenset(self.items())))
return _hash
def __ne__(self, other) -> bool:
return not self == other
def __pos__(self) -> PolyElement[Er]:
return self
def __lt__(self, other) -> bool:
return self._cmp(other, lt)
def __le__(self, other) -> bool:
return self._cmp(other, le)
def __gt__(self, other) -> bool:
return self._cmp(other, gt)
def __ge__(self, other) -> bool:
return self._cmp(other, ge)
def as_expr(self, *symbols: Expr) -> Expr:
if not symbols:
symbols = self.ring.symbols
elif len(symbols) != self.ring.ngens:
raise ValueError(
"Wrong number of symbols, expected %s got %s"
% (self.ring.ngens, len(symbols))
)
return expr_from_dict(self.as_expr_dict(), *symbols)
@overload
def __add__(self, other: PolyElement[Er] | Er | int, /) -> PolyElement[Er]: ...
@overload
def __add__(
self, other: PolyElement[PolyElement[Er]], /
) -> PolyElement[PolyElement[Er]]: ...
def __add__(
self, other: PolyElement[Er] | Er | int | PolyElement[PolyElement[Er]], /
) -> PolyElement[Er] | PolyElement[PolyElement[Er]]:
"""Add two polynomials.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> (x + y)**2 + (x - y)**2
2*x**2 + 2*y**2
"""
if self.ring.is_element(other):
return self._add(other)
if isinstance(other, PolyElement):
domain = other.ring.domain
if isinstance(domain, PolynomialRing) and domain.ring.is_element(self):
return cast("PolyElement[PolyElement[Er]]", other)._add_ground(self)
res = self._try_add_ground(other)
if res is not NotImplemented:
return res
if isinstance(other, PolyElement):
return other._try_add_ground(self)
return NotImplemented
def __radd__(self, other: Er | int) -> PolyElement[Er]:
return self._try_add_ground(other)
def _try_add_ground(self, other: object) -> PolyElement[Er] | NotImplementedType:
ring = self.ring
domain = ring.domain
if domain.of_type(other):
return self._add_ground(other)
try:
cp2 = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self._add_ground(cp2)
@overload
def __sub__(self, other: PolyElement[Er] | Er | int, /) -> PolyElement[Er]: ...
@overload
def __sub__(
self, other: PolyElement[PolyElement[Er]], /
) -> PolyElement[PolyElement[Er]]: ...
def __sub__(
self, other: PolyElement[Er] | Er | int | PolyElement[PolyElement[Er]], /
) -> PolyElement[Er] | PolyElement[PolyElement[Er]]:
"""Subtract polynomial p2 from p1.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> p1 = x + y**2
>>> p2 = x*y + y**2
>>> p1 - p2
-x*y + x
"""
if self.ring.is_element(other):
return self._sub(other)
if isinstance(other, PolyElement):
domain = other.ring.domain
if isinstance(domain, PolynomialRing) and domain.ring.is_element(self):
return cast("PolyElement[PolyElement[Er]]", other)._sub_ground(self)
res = self._try_sub_ground(other)
if res is not NotImplemented:
return res
if isinstance(other, PolyElement):
return other._try_rsub_ground(self)
return NotImplemented
def __rsub__(self, other: Er | int) -> PolyElement[Er]:
return self._try_rsub_ground(other)
def _try_sub_ground(self, other: object) -> PolyElement[Er] | NotImplementedType:
ring = self.ring
domain = ring.domain
if domain.of_type(other):
return self._sub_ground(other)
try:
cp2 = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self._sub_ground(cp2)
def _try_rsub_ground(self, other: object) -> PolyElement[Er] | NotImplementedType:
ring = self.ring
domain = ring.domain
if domain.of_type(other):
return self._rsub_ground(other)
try:
cp2 = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self._rsub_ground(cp2)
@overload
def __mul__(self, other: PolyElement[Er] | Er | int, /) -> PolyElement[Er]: ...
@overload
def __mul__(
self, other: PolyElement[PolyElement[Er]], /
) -> PolyElement[PolyElement[Er]]: ...
def __mul__(
self, other: PolyElement[Er] | Er | int | PolyElement[PolyElement[Er]], /
) -> PolyElement[Er] | PolyElement[PolyElement[Er]]:
"""Multiply two polynomials.
Examples
========
>>> from sympy.polys.domains import QQ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', QQ)
>>> p1 = x + y
>>> p2 = x - y
>>> p1*p2
x**2 - y**2
"""
if not self or not other:
return self.ring.zero
if self.ring.is_element(other):
return self._mul(other)
if isinstance(other, PolyElement):
domain = other.ring.domain
if isinstance(domain, PolynomialRing) and domain.ring.is_element(self):
return cast("PolyElement[PolyElement[Er]]", other).mul_ground(self)
res = self._try_mul_ground(other)
if res is not NotImplemented:
return res
if isinstance(other, PolyElement):
return other._try_mul_ground(self)
return NotImplemented
def __rmul__(self, other: Er | int) -> PolyElement[Er]:
"""p2 * p1 with p2 in the coefficient domain of p1.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> p = x + y
>>> 4 * p
4*x + 4*y
"""
return self._try_mul_ground(other)
def _try_mul_ground(self, other: object) -> PolyElement[Er] | NotImplementedType:
ring = self.ring
domain = ring.domain
if domain.of_type(other):
return self.mul_ground(other)
try:
cp2 = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self.mul_ground(cp2)
def __pow__(self, n: int) -> PolyElement[Er]:
"""raise polynomial to power `n`
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> _, x, y = ring('x, y', ZZ)
>>> p = x + y**2
>>> p**3
x**3 + 3*x**2*y**2 + 3*x*y**4 + y**6
"""
if not isinstance(n, int):
raise TypeError("exponent must be an integer, got %s" % n)
elif n < 0:
raise ValueError("exponent must be a non-negative integer, got %s" % n)
if not n:
if self:
return self.ring.one
else:
raise ValueError("0**0")
return self._pow_int(n)
def __divmod__(
self, other: PolyElement[Er] | Er | int
) -> tuple[PolyElement[Er], PolyElement[Er]]:
ring = self.ring
if not other:
raise ZeroDivisionError("polynomial division")
if isinstance(other, PolyElement):
if other.ring == ring:
return self._divmod(other)
elif (
isinstance(ring.domain, PolynomialRing)
and ring.domain.ring == other.ring
):
pass
elif (
isinstance(other.ring.domain, PolynomialRing)
and other.ring.domain.ring == ring
):
return other.__rdivmod__(self) # type: ignore
else:
return NotImplemented
try:
cp2 = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self._divmod_ground(cp2)
def __rdivmod__(self, other: Er | int) -> tuple[PolyElement[Er], PolyElement[Er]]:
ring = self.ring
try:
other_poly = ring.ground_new(other)
except CoercionFailed:
return NotImplemented
else:
return other_poly._divmod(self)
def __mod__(self, other: PolyElement[Er] | Er | int) -> PolyElement[Er]:
ring = self.ring
if not other:
raise ZeroDivisionError("polynomial division")
if isinstance(other, PolyElement):
if other.ring == ring:
return self._mod(other)
elif (
isinstance(ring.domain, PolynomialRing)
and ring.domain.ring == other.ring
):
pass
elif (
isinstance(other.ring.domain, PolynomialRing)
and other.ring.domain.ring == ring
):
return other.__rmod__(self) # type: ignore
else:
return NotImplemented
try:
cp2 = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self._mod_ground(cp2)
def __rmod__(self, other: Er | int) -> PolyElement[Er]:
ring = self.ring
try:
other_poly = ring.ground_new(other)
except CoercionFailed:
return NotImplemented
else:
return other_poly._mod(self)
def __floordiv__(self, other: PolyElement[Er] | Er | int) -> PolyElement[Er]:
ring = self.ring
if not other:
raise ZeroDivisionError("polynomial division")
elif ring.is_element(other):
return self._floordiv(other)
elif isinstance(other, PolyElement):
if (
isinstance(ring.domain, PolynomialRing)
and ring.domain.ring == other.ring
):
pass
elif (
isinstance(other.ring.domain, PolynomialRing)
and other.ring.domain.ring == ring
):
return other.__rtruediv__(self) # type: ignore
else:
return NotImplemented
try:
other = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self._floordiv_ground(other)
def __rfloordiv__(self, other: Er | int) -> PolyElement[Er]:
ring = self.ring
try:
other_poly = ring.ground_new(other)
except CoercionFailed:
return NotImplemented
else:
return other_poly._floordiv(self)
def __truediv__(self, other: PolyElement[Er] | Er | int) -> PolyElement[Er]:
ring = self.ring
if not other:
raise ZeroDivisionError("polynomial division")
elif ring.is_element(other):
return self._truediv(cast("Er", other))
elif isinstance(other, PolyElement):
if (
isinstance(ring.domain, PolynomialRing)
and ring.domain.ring == other.ring
):
pass
elif (
isinstance(other.ring.domain, PolynomialRing)
and other.ring.domain.ring == ring
):
return other.__rtruediv__(self) # type: ignore
else:
return NotImplemented
try:
other = ring.domain_new(other)
except CoercionFailed:
return NotImplemented
else:
return self._floordiv_ground(other)
def __rtruediv__(self, other: Er | int) -> PolyElement[Er]:
ring = self.ring
try:
other_poly = ring.ground_new(other)
except CoercionFailed:
return NotImplemented
else:
return other_poly._truediv(self)
@property
def is_generator(self) -> bool:
return self in self.ring._gens_set
@property
def is_monomial(self) -> bool:
return not self or (len(self) == 1 and self.LC == 1)
@property
def is_term(self) -> bool:
return len(self) <= 1
@property
def is_negative(self) -> bool:
return self.ring.domain.is_negative(self.LC)
@property
def is_positive(self) -> bool:
return self.ring.domain.is_positive(self.LC)
@property
def is_nonnegative(self) -> bool:
return self.ring.domain.is_nonnegative(self.LC)
@property
def is_nonpositive(self) -> bool:
return self.ring.domain.is_nonpositive(self.LC)
@property
def is_monic(self) -> bool:
return self.ring.domain.is_one(self.LC)
@property
def is_primitive(self) -> bool:
return self.ring.domain.is_one(self.content())
@property
def is_linear(self) -> bool:
return all(sum(monom) <= 1 for monom in self.itermonoms())
@property
def is_quadratic(self) -> bool:
return all(sum(monom) <= 2 for monom in self.itermonoms())
def _check(self) -> None:
"""Validate polynomial structure."""
assert isinstance(self, PolyElement)
assert isinstance(self.ring, PolyRing)
dom = self.ring.domain
assert isinstance(dom, Domain)
for monom, coeff in self.iterterms():
assert dom.of_type(coeff)
assert len(monom) == self.ring.ngens
assert all(isinstance(exp, int) and exp >= 0 for exp in monom)
def new(self, init) -> PolyElement[Er]:
"""Create a new polynomial element in the same ring."""
return self.__class__(self.ring, init)
def parent(self) -> PolynomialRing[Er]:
"""Return the parent domain of this polynomial."""
return self.ring.to_domain()
def copy(self) -> PolyElement[Er]:
"""Return a copy of polynomial self.
Polynomials are mutable; if one is interested in preserving
a polynomial, and one plans to use inplace operations, one
can copy the polynomial. This method makes a shallow copy.
Examples
========
>>> from sympy.polys.domains import ZZ
>>> from sympy.polys.rings import ring
>>> R, x, y = ring('x, y', ZZ)
>>> p = (x + y)**2
>>> p1 = p.copy()
>>> p2 = p
>>> p[R.zero_monom] = 3
>>> p
x**2 + 2*x*y + y**2 + 3
>>> p1
x**2 + 2*x*y + y**2
>>> p2
x**2 + 2*x*y + y**2 + 3
"""
return self.new(self)
def set_ring(self, new_ring: PolyRing[Es]) -> PolyElement[Es]:
"""Change the ring of this polynomial."""
if self.ring == new_ring:
return cast("PolyElement[Es]", self)
return self._change_ring(new_ring)
def strip_zero(self) -> None:
"""Eliminate monomials with zero coefficient."""
for monom, coeff in self.listterms():
if not coeff:
del self[monom]
def almosteq(
self, other: PolyElement[Er] | Er | int, tolerance: float | None = None
) -> bool:
"""Approximate equality test for polynomials."""
ring = self.ring
if ring.is_element(other):
if set(self.itermonoms()) != set(other.itermonoms()):
return False
almosteq = ring.domain.almosteq
for monom in self.itermonoms():
if not almosteq(self[monom], other[monom], tolerance):
return False
return True
elif len(self) > 1:
return False
else:
try:
other = ring.domain.convert(other)
except CoercionFailed:
return False
else:
return ring.domain.almosteq(self.const(), other, tolerance)
def sort_key(self) -> tuple[int, list[tuple[Mon, Er]]]:
"""Return a key for sorting polynomials."""
return len(self), self.terms()
def _drop(
self, gen: PolyElement[Er] | int | str
) -> tuple[int, PolyRing[Er] | Domain[Er]]:
ring = self.ring
i = ring.index(gen)
if ring.ngens == 1:
return i, ring.domain
else:
new_ring = ring.drop(gen)
return i, new_ring
def _drop_multi(self, i: int) -> PolyElement[Er]:
assert self.ring.ngens > 1
return cast("PolyElement[Er]", self.drop(i))
def drop(self, gen: PolyElement[Er] | int | str) -> PolyElement[Er] | Er:
i, ring = self._drop(gen)
if self.ring.ngens == 1:
if self.is_ground:
return self.coeff(1)
else:
raise ValueError(f"Cannot drop {gen}")
else:
if not isinstance(ring, PolyRing):
raise TypeError("Ring after drop must be a PolyRing")
poly = ring.zero
for k, v in self.iterterms():
if k[i] == 0:
K = list(k)
del K[i]
poly[tuple(K)] = v
else:
raise ValueError(f"Cannot drop {gen}")
return poly
def drop_to_ground(
self, gen: PolyElement[Er] | int | str | None
) -> PolyElement[PolyElement[Er]]:
ring = self.ring
if ring.ngens == 1:
raise ValueError("Cannot drop only generator to ground")
i = ring.index(gen)
new_syms = list(ring.symbols)
ground_sym = new_syms[i]
del new_syms[i]
new_ground = PolyRing([ground_sym], ring.domain, ring.order)
new_ring = PolyRing(new_syms, new_ground, ring.order)
poly = new_ring.zero
gen = new_ground.gens[0]
for monom, coeff in self.iterterms():
mon = monom[:i] + monom[i + 1 :]
term = (gen ** monom[i]).mul_ground(coeff)
if mon not in poly:
poly[mon] = term
else:
poly[mon] = poly[mon] + term
return poly
def square(self) -> PolyElement[Er]:
"""square of a polynomial
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring('x, y', ZZ)
>>> p = x + y**2
>>> p.square()
x**2 + 2*x*y**2 + y**4
"""
return self._square()
def degree(self, x: PolyElement[Er] | int | str | None = None) -> float:
"""
The leading degree in ``x`` or the main variable.
Note that the degree of 0 is negative infinity (``float('-inf')``)
"""
i = self.ring.index(x)
if not self:
return ninf
elif i < 0:
return 0
else:
return self._degree(i)
def degrees(self) -> tuple[float, ...]:
"""
A tuple containing leading degrees in all variables.
Note that the degree of 0 is negative infinity (``float('-inf')``)
"""
if not self:
return (ninf,) * self.ring.ngens
else:
return self._degrees()
def tail_degree(self, x: PolyElement[Er] | int | str | None = None) -> float:
"""
The tail degree in ``x`` or the main variable.
Note that the degree of 0 is negative infinity (``float('-inf')``)
"""
i = self.ring.index(x)
if not self:
return ninf
elif i < 0:
return 0
else:
return min(monom[i] for monom in self.itermonoms())
def tail_degrees(self) -> tuple[float, ...]:
"""
A tuple containing tail degrees in all variables.
Note that the degree of 0 is negative infinity (``float('-inf')``)
"""
if not self:
return (ninf,) * self.ring.ngens
else:
return tuple(map(min, list(zip(*self.itermonoms()))))
def monic(self) -> PolyElement[Er]:
"""Divides all coefficients by the leading coefficient."""
if not self:
return self
else:
return self.quo_ground(self.LC)
@overload
def div(self, fv: PolyElement[Er]) -> tuple[PolyElement[Er], PolyElement[Er]]: ...
@overload
def div(
self, fv: Iterable[PolyElement[Er]]
) -> tuple[list[PolyElement[Er]], PolyElement[Er]]: ...
def div(
self, fv: PolyElement[Er] | Iterable[PolyElement[Er]]
) -> (
tuple[PolyElement[Er], PolyElement[Er]]
| tuple[list[PolyElement[Er]], PolyElement[Er]]
):
"""Division algorithm, see [CLO] p64.
fv array of polynomials
return qv, r such that
self = sum(fv[i]*qv[i]) + r
All polynomials are required not to be Laurent polynomials.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring('x, y', ZZ)
>>> f = x**3
>>> f0 = x - y**2
>>> f1 = x - y
>>> qv, r = f.div((f0, f1))
>>> qv[0]
x**2 + x*y**2 + y**4
>>> qv[1]
0
>>> r
y**6
"""
ring = self.ring
if isinstance(fv, PolyElement):
if fv.ring != ring:
raise ValueError("self and f must have the same ring")
if not fv:
raise ZeroDivisionError("polynomial division")
if not self:
return (ring.zero, ring.zero)
return self._div(fv)
else:
fv_list = list(fv)
if not all(f.ring == ring for f in fv_list):
raise ValueError("self and f must have the same ring")
if not all(fv_list):
raise ZeroDivisionError("polynomial division")
if not self:
return ([], ring.zero)
return self._div_list(fv_list)
def quo_ground(self, x: Er) -> PolyElement[Er]:
domain = self.ring.domain
if not x:
raise ZeroDivisionError("polynomial division")
if not self or x == domain.one:
return self
return self._quo_ground(x)
def extract_ground(
self, g: PolyElement[Er]
) -> tuple[Er, PolyElement[Er], PolyElement[Er]]:
f = self
fc = f.content()
gc = g.content()
gcd = f.ring.domain.gcd(fc, gc)
f = f.quo_ground(gcd)
g = g.quo_ground(gcd)
return gcd, f, g
def quo_term(self, term: tuple[Mon, Er]) -> PolyElement[Er]:
monom, coeff = term
if not coeff:
raise ZeroDivisionError("polynomial division")
elif not self:
return self.ring.zero
elif monom == self.ring.zero_monom:
return self.quo_ground(coeff)
return self._quo_term(term)
def _norm(self, norm_func):
if not self:
return self.ring.domain.zero
else:
ground_abs = self.ring.domain.abs
return norm_func([ground_abs(coeff) for coeff in self.itercoeffs()])
def max_norm(self):
return self._norm(max)
def l1_norm(self):
return self._norm(sum)
def deflate(
self, *G: PolyElement[Er]
) -> tuple[tuple[int, ...], list[PolyElement[Er]]]:
ring = self.ring
polys = [self] + list(G)
J = [0] * ring.ngens
for p in polys:
for monom in p.itermonoms():
for i, m in enumerate(monom):
J[i] = igcd(J[i], m)
for i, b in enumerate(J):
if not b:
J[i] = 1
J2 = tuple(J)
if all(b == 1 for b in J2):
return J2, polys
return J2, self._deflate(J2, polys)
def canonical_unit(self):
domain = self.ring.domain
return domain.canonical_unit(self.LC)
def diff(self, x: int | str | PolyElement[Er]) -> PolyElement[Er]:
"""Computes partial derivative in ``x``.
Examples
========
>>> from sympy.polys.rings import ring
>>> from sympy.polys.domains import ZZ
>>> _, x, y = ring("x,y", ZZ)
>>> p = x + x**2*y**3
>>> p.diff(x)
2*x*y**3 + 1
"""
ring = self.ring
i = ring.index(x)
return self._diff(i)
def trunc_ground(self, p: Er) -> PolyElement[Er]:
# XXX: This is not valid for all domains (e.g. GF(p))
if self.ring.domain.is_ZZ:
terms = []
for monom, coeff in self.iterterms():
coeff = coeff % p # type: ignore
if coeff > p // 2: # type: ignore
coeff = coeff - p
terms.append((monom, coeff))
else:
terms = [(monom, coeff % p) for monom, coeff in self.iterterms()] # type: ignore
poly = self.new(terms)
poly.strip_zero()
return poly
rem_ground = trunc_ground
def lcm(self, g: PolyElement[Er]) -> PolyElement[Er]:
f = self
domain = f.ring.domain
if not domain.is_Field:
fc, f = f.primitive()
gc, g = g.primitive()
c = domain.lcm(fc, gc)
h = (f * g).quo(f.gcd(g))
return h.mul_ground(c)
else:
h = (f * g).quo(f.gcd(g))
return h.monic()
def coeff_wrt(self, x: int | str | PolyElement[Er], deg: int) -> PolyElement[Er]:
"""
Coefficient of ``self`` with respect to ``x**deg``.
Treating ``self`` as a univariate polynomial in ``x`` this finds the
coefficient of ``x**deg`` as a polynomial in the other generators.
Parameters
==========
x : generator or generator index
The generator or generator index to compute the expression for.
deg : int
The degree of the monomial to compute the expression for.
Returns
=======
:py:class:`~.PolyElement`
The coefficient of ``x**deg`` as a polynomial in the same ring.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x, y, z = ring("x, y, z", ZZ)
>>> p = 2*x**4 + 3*y**4 + 10*z**2 + 10*x*z**2
>>> deg = 2
>>> p.coeff_wrt(2, deg) # Using the generator index
10*x + 10
>>> p.coeff_wrt(z, deg) # Using the generator
10*x + 10
>>> p.coeff(z**2) # shows the difference between coeff and coeff_wrt
10
See Also
========
coeff, coeffs
"""
p = self
i = p.ring.index(x)
terms = [(m, c) for m, c in p.iterterms() if m[i] == deg]
if not terms:
return p.ring.zero
monoms, coeffs = zip(*terms)
monoms_list = [m[:i] + (0,) + m[i + 1 :] for m in monoms]
return p.ring.from_dict(dict(zip(monoms_list, coeffs)))
def compose(self, x, a=None):
ring = self.ring
poly = ring.zero
gens_map = dict(zip(ring.gens, range(ring.ngens)))
if a is not None:
replacements = [(x, a)]
else:
if isinstance(x, list):
replacements = list(x)
elif isinstance(x, dict):
replacements = sorted(x.items(), key=lambda k: gens_map[k[0]])
else:
raise ValueError(
"expected a generator, value pair a sequence of such pairs"
)
replacements = [(gens_map[x], ring.ring_new(g)) for x, g in replacements]
return self._compose(replacements, initial_poly=poly)
def __call__(self, *values):
if 0 < len(values) <= self.ring.ngens:
return self.evaluate(list(zip(self.ring.gens, values)))
else:
raise ValueError(
"expected at least 1 and at most %s values, got %s"
% (self.ring.ngens, len(values))
)
@overload
def evaluate(
self, values: list[tuple[PolyElement[Er], Er | int]]
) -> PolyElement[Er] | Er: ...
@overload
def evaluate(
self, x: PolyElement[Er] | int | str, a: Er | int
) -> PolyElement[Er] | Er: ...
def evaluate(self, *args, **kwargs) -> PolyElement[Er] | Er:
eval_dict = {}
ring = self.ring
if len(args) == 1 and isinstance(args[0], list) and not kwargs:
for gen, val in args[0]:
idx = ring.index(gen)
eval_dict[idx] = ring.domain.convert(val)
elif len(args) == 2 and not kwargs:
x, a = args
idx = ring.index(x)
eval_dict[idx] = ring.domain.convert(a)
else:
raise ValueError("Invalid arguments for evaluate()")
if not eval_dict:
return self
elif len(eval_dict) == ring.ngens:
return self._evaluate(eval_dict)
else:
temp_result = self._subs(eval_dict)
new_ring = ring.drop(*[ring.gens[i] for i in eval_dict.keys()])
return temp_result.set_ring(new_ring) # type: ignore
@overload
def subs(self, values: list[tuple[Expr, Er | int]]) -> PolyElement[Er]: ...
@overload
def subs(self, x: PolyElement[Er] | int | str, a: Er | int) -> PolyElement[Er]: ...
def subs(self, *args, **kwargs) -> PolyElement[Er]:
subs_dict = {}
ring = self.ring
if len(args) == 1 and isinstance(args[0], list) and not kwargs:
for gen, val in args[0]:
idx = ring.index(gen)
subs_dict[idx] = ring.domain.convert(val)
elif len(args) == 2 and not kwargs:
x, a = args
idx = ring.index(x)
subs_dict[idx] = ring.domain.convert(a)
else:
raise ValueError("Invalid arguments for subs()")
if not subs_dict:
return self
elif len(subs_dict) == ring.ngens:
result = self._evaluate(subs_dict)
return ring.ground_new(result)
else:
return self._subs(subs_dict)
def prem(
    self, g: PolyElement[Er], x: PolyElement[Er] | int | None = None
) -> PolyElement[Er]:
    """Pseudo-remainder of ``self`` with respect to ``g``.

    The pseudo-quotient ``q`` and pseudo-remainder ``r`` with respect
    to ``x`` satisfy ``m*f = g*q + r`` where ``deg(r, x) < deg(g, x)``
    and ``m = LC(g, x)**(deg(f, x) - deg(g, x) + 1)``.  See
    :meth:`pdiv` for an explanation of pseudo-division.

    Parameters
    ==========

    g : :py:class:`~.PolyElement`
        The polynomial to divide ``self`` by.
    x : generator or generator index, optional
        The main variable; defaults to the first generator.

    Raises
    ======

    ZeroDivisionError : If ``g`` is the zero polynomial.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x, y = ring("x, y", ZZ)
    >>> f = x**2 + x*y
    >>> g = 2*x + 2
    >>> f.prem(g)
    -4*y + 4
    >>> f.prem(g, y)
    0

    See Also
    ========

    pdiv, pquo, pexquo, sympy.polys.domains.ring.Ring.rem
    """
    return self._prem(g, self.ring.index(x))
def pdiv(
    self, g: PolyElement[Er], x: PolyElement[Er] | int | None = None
) -> tuple[PolyElement[Er], PolyElement[Er]]:
    """Pseudo-division of ``self`` by ``g``: returns ``(q, r)``.

    The pseudo-quotient ``q`` and pseudo-remainder ``r`` satisfy
    ``m*f = g*q + r`` with ``deg(r, x) < deg(g, x)``, where the
    multiplier is ``m = LC(g, x)**(deg(f, x) - deg(g, x) + 1)``.
    Multivariate polynomials in ``R[x,y,z]`` are treated as univariate
    in the main variable with polynomial coefficients, e.g.
    ``R[x,y][z]`` when dividing with respect to ``z``.

    Use :meth:`prem` for just the pseudo-remainder and :meth:`pquo`
    for just the pseudo-quotient.

    Parameters
    ==========

    g : :py:class:`~.PolyElement`
        The polynomial to divide ``self`` by.
    x : generator or generator index, optional
        The main variable; defaults to the first generator.

    Raises
    ======

    ZeroDivisionError : If ``g`` is the zero polynomial.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x, y = ring("x, y", ZZ)
    >>> f = x**2 + x*y
    >>> g = 2*x + 2
    >>> f.pdiv(g)
    (2*x + 2*y - 2, -4*y + 4)
    >>> f.pdiv(g, y)
    (2*x**3 + 2*x**2*y + 6*x**2 + 2*x*y + 8*x + 4, 0)

    See Also
    ========

    prem
        Computes only the pseudo-remainder more efficiently than
        `f.pdiv(g)[1]`.
    pquo
        Returns only the pseudo-quotient.
    pexquo
        Returns only an exact pseudo-quotient having no remainder.
    div
        Returns quotient and remainder of f and g polynomials.
    """
    return self._pdiv(g, self.ring.index(x))
def pquo(self, g: PolyElement[Er], x: PolyElement[Er] | int | None = None):
    """Polynomial pseudo-quotient in multivariate polynomial ring.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = x**2 + x*y
    >>> f.pquo(2*x + 2*y)
    2*x
    >>> f.pquo(2*x + 2)
    2*x + 2*y - 2

    See Also
    ========

    prem, pdiv, pexquo, sympy.polys.domains.ring.Ring.quo
    """
    return self._pquo(g, self.ring.index(x))
def pexquo(self, g: PolyElement[Er], x: PolyElement[Er] | int | None = None):
    """Polynomial exact pseudo-quotient in multivariate polynomial ring.

    Raises :class:`ExactQuotientFailed` when the pseudo-remainder is
    nonzero.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x,y = ring("x,y", ZZ)
    >>> f = x**2 + x*y
    >>> f.pexquo(2*x + 2*y)
    2*x
    >>> f.pexquo(2*x + 2)
    Traceback (most recent call last):
    ...
    ExactQuotientFailed: 2*x + 2 does not divide x**2 + x*y

    See Also
    ========

    prem, pdiv, pquo, sympy.polys.domains.ring.Ring.exquo
    """
    return self._pexquo(g, self.ring.index(x))
def subresultants(self, g: PolyElement[Er], x: PolyElement[Er] | int | None = None):
    """Computes the subresultant PRS of ``self`` and ``g``.

    Parameters
    ==========

    g : :py:class:`~.PolyElement`
        The second polynomial.
    x : generator or generator index, optional
        Variable of the subresultant sequence; defaults to the first
        generator.

    Returns
    =======

    R : list
        Polynomials forming the subresultant PRS.

    Examples
    ========

    >>> from sympy.polys import ring, ZZ
    >>> R, x, y = ring("x, y", ZZ)
    >>> f = x**2*y + x*y
    >>> g = x + y
    >>> f.subresultants(g)
    [x**2*y + x*y, x + y, y**3 - y**2]
    >>> f.subresultants(g, y)
    [x**2*y + x*y, x + y, x**3 + x**2]
    """
    return self._subresultants(g, self.ring.index(x))
def symmetrize(
    self,
) -> tuple[
    PolyElement[Er], PolyElement[Er], list[tuple[PolyElement[Er], PolyElement[Er]]]
]:
    r"""Rewrite *self* in terms of elementary symmetric polynomials.

    Returns a triple ``(p, r, m)`` satisfying
    ``p.compose(m) + r == self``: ``p`` expresses the symmetric part,
    with each variable of ``p`` standing for one elementary symmetric
    polynomial; ``r`` is the remainder (zero iff *self* is symmetric);
    ``m`` is the list of pairs mapping the variables of ``p`` to the
    elementary symmetric polynomials.

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> R, x, y = ring("x,y", ZZ)

    >>> (x**2 + y**2).symmetrize()
    (x**2 - 2*y, 0, [(x, x + y), (y, x*y)])

    >>> (x**2 - y**2).symmetrize()
    (x**2 - 2*y, -2*y**2, [(x, x + y), (y, x*y)])

    See Also
    ========

    sympy.polys.polyfuncs.symmetrize

    References
    ==========

    .. [1] Lauer, E. Algorithms for symmetrical polynomials, Proc. 1976
        ACM Symp. on Symbolic and Algebraic Computing, NY 242-247.
        https://dl.acm.org/doi/pdf/10.1145/800205.806342
    """
    return self._symmetrize()
def __eq__(self, other: object) -> bool:
    """Equality test for polynomials.

    Examples
    ========

    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.rings import ring
    >>> _, x, y = ring('x, y', ZZ)
    >>> p1 = (x + y)**2 + (x - y)**2
    >>> p1 == 4*x*y
    False
    >>> p1 == 2*(x**2 + y**2)
    True
    """
    if not other:
        # Comparing against a falsy value (0, None, empty poly):
        # equal iff self is the zero polynomial.
        return not self
    elif self.ring.is_element(other):
        # Same-ring polynomials compare as their underlying dicts.
        return dict.__eq__(self, other)
    elif len(self) > 1:
        # More than one term can never equal a ground value.
        return False
    else:
        # At most one term: equal to a ground value only if that term
        # is the constant term with a matching coefficient.
        return self.get(self.ring.zero_monom) == other
def __neg__(self) -> PolyElement[Er]:
    # Negate every coefficient term by term.  The python-flint version
    # returns (-1) * self instead.
    return self.new([(monom, -coeff) for monom, coeff in self.iterterms()])
def _add(self, p2: PolyElement[Er]) -> PolyElement[Er]:
    # Accumulate p2 term-wise into a copy of self, removing any term
    # whose coefficient cancels to zero.
    result = self.copy()
    zero = self.ring.domain.zero
    for monom, coeff in p2.items():
        total = result.get(monom, zero) + coeff
        if total:
            result[monom] = total
        else:
            del result[monom]
    return result
def _add_ground(self, cp2: Er) -> PolyElement[Er]:
    # Add a ground-domain element to the constant term of a copy.
    result = self.copy()
    if not cp2:
        return result
    zm = self.ring.zero_monom
    total = self.get(zm, self.ring.domain.zero) + cp2
    if total:
        result[zm] = total
    else:
        del result[zm]
    return result
def _sub(self, p2: PolyElement[Er]) -> PolyElement[Er]:
    # Subtract p2 term-wise from a copy of self, removing any term
    # whose coefficient cancels to zero.
    result = self.copy()
    zero = self.ring.domain.zero
    for monom, coeff in p2.items():
        total = result.get(monom, zero) - coeff
        if total:
            result[monom] = total
        else:
            del result[monom]
    return result
def _sub_ground(self, cp2: Er) -> PolyElement[Er]:
    # Subtract a ground-domain element from the constant term of a copy.
    result = self.copy()
    if not cp2:
        return result
    zm = self.ring.zero_monom
    total = self.get(zm, self.ring.domain.zero) - cp2
    if total:
        result[zm] = total
    else:
        del result[zm]
    return result
def _rsub_ground(self, cp2: Er) -> PolyElement[Er]:
    # cp2 - self == (-self) + cp2
    return (-self)._add_ground(cp2)
def _mul(self, other: PolyElement[Er]) -> PolyElement[Er]:
    # Schoolbook product: accumulate every pairwise term product, then
    # strip coefficients that cancelled to zero.
    ring = self.ring
    zero = ring.domain.zero
    monomial_mul = ring.monomial_mul
    prod = ring.zero
    for m1, c1 in self.iterterms():
        for m2, c2 in other.iterterms():
            m = monomial_mul(m1, m2)
            prod[m] = prod.get(m, zero) + c1 * c2
    prod.strip_zero()
    return prod
def mul_ground(self, x: Er) -> PolyElement[Er]:
    """Multiply every coefficient by the ground-domain element ``x``."""
    if not x:
        return self.ring.zero
    return self.new([(monom, coeff * x) for monom, coeff in self.iterterms()])
def _pow_int(self, n: int) -> PolyElement[Er]:
    # Dispatch on the exponent: small powers get dedicated paths,
    # sparse polynomials use the multinomial expansion, everything
    # else falls back to binary powering.
    if n == 1:
        return self.copy()
    if n == 2:
        return self.square()
    if n == 3:
        return self.square() * self
    if len(self) <= 5:  # TODO: use an actual density measure
        return self._pow_multinomial(n)
    return self._pow_generic(n)
def _pow_generic(self, n: int) -> PolyElement[Er]:
    # Binary (square-and-multiply) exponentiation.
    p = self.ring.one
    c = self
    while True:
        if n & 1:
            p = p * c
            n -= 1
        if not n:
            break
        c = c.square()
        n = n // 2
    return p
def _pow_multinomial(self, n: int) -> PolyElement[Er]:
    # Expand (t1 + ... + tk)**n directly via the multinomial theorem;
    # effective when self has only a few terms.
    multinomials = multinomial_coefficients(len(self), n).items()
    monomial_mulpow = self.ring.monomial_mulpow
    zero_monom = self.ring.zero_monom
    terms = self.items()
    zero = self.ring.domain.zero
    poly = self.ring.zero
    for multinomial, multinomial_coeff in multinomials:
        product_monom = zero_monom
        product_coeff = multinomial_coeff
        # Multiply in each term raised to its multinomial exponent.
        for exp, (monom, coeff) in zip(multinomial, terms):
            if exp:
                product_monom = monomial_mulpow(product_monom, monom, exp)
                product_coeff *= coeff**exp
        monom = tuple(product_monom)
        coeff = product_coeff
        coeff = poly.get(monom, zero) + coeff
        if coeff:
            poly[monom] = coeff
        elif monom in poly:
            # The accumulated coefficient cancelled to zero.
            del poly[monom]
    return poly
def _square(self) -> PolyElement[Er]:
    # Squaring via f**2 = 2*sum_{i<j} ti*tj + sum_i ti**2, which
    # roughly halves the number of term products versus f*f.
    ring = self.ring
    p = ring.zero
    get = p.get
    keys = list(self.keys())
    zero = ring.domain.zero
    monomial_mul = ring.monomial_mul
    for i in range(len(keys)):
        k1 = keys[i]
        pk = self[k1]
        for j in range(i):
            k2 = keys[j]
            exp = monomial_mul(k1, k2)
            p[exp] = get(exp, zero) + pk * self[k2]
    p = p.imul_num(2)
    get = p.get
    for k, v in self.items():
        k2 = monomial_mul(k, k)
        p[k2] = get(k2, zero) + v**2
    p.strip_zero()
    # p._check()
    return p
def _divmod(
    self, other: PolyElement[Er]
) -> tuple[PolyElement[Er], PolyElement[Er]]:
    # divmod(f, g) is division with remainder.
    return self.div(other)
def _divmod_ground(self, x: Er) -> tuple[PolyElement[Er], PolyElement[Er]]:
    # divmod by a ground element: coefficient-wise quotient/remainder.
    return self.quo_ground(x), self.rem_ground(x)
def _floordiv(self, p2: PolyElement[Er]) -> PolyElement[Er]:
    return self.quo(p2)
def _floordiv_ground(self, p2: Er) -> PolyElement[Er]:
    return self.quo_ground(p2)
@overload
def _truediv(self, p2: PolyElement[Er]) -> PolyElement[Er]: ...
@overload
def _truediv(self, p2: list[PolyElement[Er]]) -> list[PolyElement[Er]]: ...
@overload
def _truediv(self, p2: Er) -> PolyElement[Er]: ...
def _truediv(self, p2):
    # True division requires the quotient to be exact.
    return self.exquo(p2)
def _term_div(self):
    """Return a function dividing one ``(monom, coeff)`` term by another.

    Over a field only monomial divisibility matters; otherwise the
    coefficient quotient must also be exact.  The returned function
    yields ``None`` when the terms do not divide.
    """
    zm = self.ring.zero_monom
    domain = self.ring.domain
    domain_quo = domain.quo
    monomial_div = self.ring.monomial_div
    if domain.is_Field:
        def term_div(a_lm_a_lc, b_lm_b_lc):
            a_lm, a_lc = a_lm_a_lc
            b_lm, b_lc = b_lm_b_lc
            if b_lm == zm:  # apparently this is a very common case
                monom = a_lm
            else:
                monom = monomial_div(a_lm, b_lm)
            if monom is not None:
                return monom, domain_quo(a_lc, b_lc)
            else:
                return None
    else:
        def term_div(a_lm_a_lc, b_lm_b_lc):
            a_lm, a_lc = a_lm_a_lc
            b_lm, b_lc = b_lm_b_lc
            if b_lm == zm:  # apparently this is a very common case
                monom = a_lm
            else:
                monom = monomial_div(a_lm, b_lm)
            # Also require the coefficient quotient to be exact.
            if not (monom is None or a_lc % b_lc):
                return monom, domain_quo(a_lc, b_lc)
            else:
                return None
    return term_div
@overload
def rem(self, G: PolyElement[Er]) -> PolyElement[Er]: ...
@overload
def rem(self, G: list[PolyElement[Er]]) -> list[PolyElement[Er]]: ...
def rem(
    self, G: PolyElement[Er] | list[PolyElement[Er]]
) -> PolyElement[Er] | list[PolyElement[Er]]:
    """Remainder of division by a polynomial or a list of polynomials."""
    f = self
    if isinstance(G, PolyElement):
        return f._rem(G)
    else:
        if not all(G):
            raise ZeroDivisionError("polynomial division")
        return f._rem_list(G)
@overload
def quo(self, G: PolyElement[Er]) -> PolyElement[Er]: ...
@overload
def quo(self, G: list[PolyElement[Er]]) -> list[PolyElement[Er]]: ...
def quo(self, G):
    # Quotient part of div().
    return self.div(G)[0]
@overload
def exquo(self, G: list[PolyElement[Er]]) -> list[PolyElement[Er]]: ...
@overload
def exquo(self, G: PolyElement[Er]) -> PolyElement[Er]: ...
def exquo(
    self, G: PolyElement[Er] | list[PolyElement[Er]]
) -> PolyElement[Er] | list[PolyElement[Er]]:
    """Exact quotient; raises ExactQuotientFailed on a nonzero remainder."""
    q, r = self.div(G)
    if not r:
        return q
    else:
        raise ExactQuotientFailed(self, G)
def _iadd_monom(self, mc: tuple[Mon, Er]) -> PolyElement[Er]:
    """add to self the monomial coeff*x0**i0*x1**i1*...
    unless self is a generator -- then just return the sum of the two.

    mc is a tuple, (monom, coeff), where monomial is (i0, i1, ...)

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ

    >>> _, x, y = ring('x, y', ZZ)
    >>> p = x**4 + 2*y
    >>> m = (1, 2)
    >>> p1 = p._iadd_monom((m, 5))
    >>> p1
    x**4 + 5*x*y**2 + 2*y
    >>> p1 is p
    True
    >>> p = x
    >>> p1 = p._iadd_monom((m, 5))
    >>> p1
    5*x*y**2 + x
    >>> p1 is p
    False
    """
    # Generators must never be mutated in place, so copy them first.
    if self in self.ring._gens_set:
        cpself = self.copy()
    else:
        cpself = self
    expv, coeff = mc
    c = cpself.get(expv)
    if c is None:
        cpself[expv] = coeff
    else:
        c += coeff
        if c:
            cpself[expv] = c
        else:
            # Coefficient cancelled to zero: drop the term entirely.
            del cpself[expv]
    return cpself
def _iadd_poly_monom(
    self, p2: PolyElement[Er], mc: tuple[Mon, Er]
) -> PolyElement[Er]:
    """add to self the product of (p)*(coeff*x0**i0*x1**i1*...)
    unless self is a generator -- then just return the sum of the two.

    mc is a tuple, (monom, coeff), where monomial is (i0, i1, ...)

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ

    >>> _, x, y, z = ring('x, y, z', ZZ)
    >>> p1 = x**4 + 2*y
    >>> p2 = y + z
    >>> m = (1, 2, 3)
    >>> p1 = p1._iadd_poly_monom(p2, (m, 3))
    >>> p1
    x**4 + 3*x*y**3*z**3 + 3*x*y**2*z**4 + 2*y
    """
    p1 = self
    # Generators must never be mutated in place, so copy them first.
    if p1 in p1.ring._gens_set:
        p1 = p1.copy()
    (m, c) = mc
    get = p1.get
    zero = p1.ring.domain.zero
    monomial_mul = p1.ring.monomial_mul
    for k, v in p2.items():
        ka = monomial_mul(k, m)
        coeff = get(ka, zero) + v * c
        if coeff:
            p1[ka] = coeff
        else:
            del p1[ka]
    return p1
def imul_num(self, c: Er | int) -> PolyElement[Er]:
    """multiply inplace the polynomial p by an element in the
    coefficient ring, provided p is not one of the generators;
    else multiply not inplace

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ

    >>> _, x, y = ring('x, y', ZZ)
    >>> p = x + y**2
    >>> p1 = p.imul_num(3)
    >>> p1
    3*x + 3*y**2
    >>> p1 is p
    True
    >>> p = x
    >>> p1 = p.imul_num(3)
    >>> p1
    3*x
    >>> p1 is p
    False
    """
    if self in self.ring._gens_set:
        return self * c
    if not c:
        # Multiplying by zero empties the polynomial in place.
        self.clear()
        return self
    for exp in self:
        self[exp] *= c
    return self
def _rem(self, g: PolyElement[Er]) -> PolyElement[Er]:
    """Remainder when dividing by a single polynomial g"""
    return self._rem_list([g])
def _rem_list(self, G: list[PolyElement[Er]]) -> PolyElement[Er]:
    # Multivariate division remainder: repeatedly cancel the leading
    # term of f against the first divisor whose leading term divides
    # it; leading terms no divisor cancels are moved into r.
    ring = self.ring
    domain = ring.domain
    zero = domain.zero
    monomial_mul = ring.monomial_mul
    r = ring.zero
    term_div = self._term_div()
    ltf = self.LT
    f = self.copy()
    get = f.get
    while f:
        for g in G:
            tq = term_div(ltf, g.LT)
            if tq is not None:
                m, c = tq
                # Subtract (m, c) * g from f in place.
                for mg, cg in g.iterterms():
                    m1 = monomial_mul(mg, m)
                    c1 = get(m1, zero) - c * cg
                    if not c1:
                        del f[m1]
                    else:
                        f[m1] = c1
                ltm = f.leading_expv()
                if ltm is not None:
                    ltf = ltm, f[ltm]
                break
        else:
            # No divisor's leading term divides ltf: move it into r.
            ltm, ltc = ltf
            if ltm in r:
                r[ltm] += ltc
            else:
                r[ltm] = ltc
            del f[ltm]
            ltm = f.leading_expv()
            if ltm is not None:
                ltf = ltm, f[ltm]
    return r
def _mod(self, other: PolyElement[Er]) -> PolyElement[Er]:
    return self.rem(other)
def _mod_ground(self, x: Er) -> PolyElement[Er]:
    return self.rem_ground(x)
@property
def is_ground(self) -> bool:
    # Return self.flint_poly.is_constant() in case of python-flint
    # True for the zero polynomial or a lone constant term.
    return not self or (len(self) == 1 and self.ring.zero_monom in self)
@property
def is_zero(self) -> bool:
    # Return self.flint_poly.is_zero() in case of python-flint
    return not self
@property
def is_one(self) -> bool:
    # Return self.flint_poly.is_one() in case of python-flint
    return self == self.ring.one
@property
def is_squarefree(self) -> bool:
    # A polynomial over a ring with no generators is trivially
    # squarefree.
    if not self.ring.ngens:
        return True
    return self.ring.dmp_sqf_p(self)
@property
def is_irreducible(self) -> bool:
    # A polynomial over a ring with no generators is treated as
    # irreducible.
    if not self.ring.ngens:
        return True
    return self.ring.dmp_irreducible_p(self)
@property
def is_cyclotomic(self) -> bool:
    if self.ring.is_univariate:
        return self.ring.dup_cyclotomic_p(self)
    else:
        raise MultivariatePolynomialError("cyclotomic polynomial")
@property
def LC(self) -> Er:
    # Just use leading_coefficient() in case of python-flint
    return self._get_coeff(self.leading_expv())
@property
def LM(self) -> Mon:
    # Use monomial(0) in case of python-flint
    expv = self.leading_expv()
    if expv is None:
        return self.ring.zero_monom
    else:
        return expv
@property
def LT(self) -> tuple[Mon, Er]:
    # Use monomial(0) and leading_coefficient() in case of python-flint
    expv = self.leading_expv()
    if expv is None:
        # Zero polynomial: zero monomial with zero coefficient.
        return (self.ring.zero_monom, self.ring.domain.zero)
    else:
        return (expv, self._get_coeff(expv))
def clear_denoms(self) -> tuple[Er, PolyElement[Er]]:
    """Clear denominators from polynomial coefficients.

    Returns the common denominator together with ``self`` scaled by it.
    Domains without an associated ring are returned unchanged with a
    denominator of one.
    """
    domain = self.ring.domain
    if not (domain.is_Field and domain.has_assoc_Ring):
        return domain.one, self
    ground_ring = domain.get_ring()
    denom = domain.denom
    common = ground_ring.one
    for c in self.itercoeffs():
        common = ground_ring.lcm(common, denom(c))
    scaled = self.new([(monom, coeff * common) for monom, coeff in self.iterterms()])
    return common, scaled
def _change_ring(self, new_ring):
    # Use fmpz_mpoly.compose() or fmpq_mpoly.compose() in case of python-flint
    # Re-key the terms when the symbol tuples differ; otherwise only
    # the coefficients need converting to the new domain.
    if self.ring.symbols != new_ring.symbols:
        terms = list(zip(*_dict_reorder(self, self.ring.symbols, new_ring.symbols)))
        return new_ring.from_terms(terms, self.ring.domain)
    else:
        return new_ring.from_dict(self, self.ring.domain)
def as_expr_dict(self) -> dict[tuple[int, ...], Expr]:
    # Can just use self.flint_poly.to_dict() in case of python-flint
    # Or this can just directly go into the baseclass as is since iterterms
    # will be implemented separately for pure python and flint versions anyways
    to_sympy = self.ring.domain.to_sympy
    return {monom: to_sympy(coeff) for monom, coeff in self.iterterms()}
def _cmp(
    self,
    other: PolyElement[Er],
    op: Callable[
        [tuple[int, list[tuple[Mon, Er]]], tuple[int, list[tuple[Mon, Er]]]], bool
    ],
) -> bool:
    # We can override this for python-flint version
    # to use the native lt, le, gt, ge methods
    # Compare via the canonical sort key; non-elements defer to the
    # other operand's reflected comparison.
    if self.ring.is_element(other):
        return op(self.sort_key(), other.sort_key())
    else:
        return NotImplemented
def to_dense(self) -> dmp[Er]:
    # Dense recursive-list representation over all generators.
    return dmp_from_dict(self, self.ring.ngens - 1, self.ring.domain)
def to_dup(self) -> dup[Er]:
    # Dense univariate list representation; only valid for one generator.
    assert self.ring.ngens == 1
    return dup_from_dict(self, self.ring.domain)  # type: ignore
def to_dict(self) -> dict[Mon, Er]:
    # Return a self.flint_poly.to_dict() in case of python-flint
    return dict(self)
def str(self, printer, precedence, exp_pattern, mul_symbol) -> str:
    # Use str(self.flint_poly).replace("^", "**") in case of python-flint
    # Render the polynomial for the given printer: terms in monomial
    # order, each as sign, coefficient and generator powers.
    if not self:
        return printer._print(self.ring.domain.zero)
    prec_mul = precedence["Mul"]
    prec_atom = precedence["Atom"]
    ring = self.ring
    symbols = ring.symbols
    ngens = ring.ngens
    zm = ring.zero_monom
    sexpvs = []
    for expv, coeff in self.terms():
        negative = ring.domain.is_negative(coeff)
        sign = " - " if negative else " + "
        sexpvs.append(sign)
        if expv == zm:
            # Constant term: print the bare coefficient, folding its
            # sign into the separator already emitted.
            scoeff = printer._print(coeff)
            if negative and scoeff.startswith("-"):
                scoeff = scoeff[1:]
        else:
            if negative:
                coeff = -coeff
            # Suppress a coefficient of one in front of generators.
            if coeff != self.ring.domain.one:
                scoeff = printer.parenthesize(coeff, prec_mul, strict=True)
            else:
                scoeff = ""
            sexpv = []
            for i in range(ngens):
                exp = expv[i]
                if not exp:
                    continue
                symbol = printer.parenthesize(symbols[i], prec_atom, strict=True)
                if exp != 1:
                    # Non-integer or negative exponents need parens.
                    if exp != int(exp) or exp < 0:
                        sexp = printer.parenthesize(exp, prec_atom, strict=False)
                    else:
                        sexp = exp
                    sexpv.append(exp_pattern % (symbol, sexp))
                else:
                    sexpv.append("%s" % symbol)
            if scoeff:
                sexpv = [scoeff] + sexpv
            sexpvs.append(mul_symbol.join(sexpv))
    # Trim the leading separator; a leading " - " becomes a unary "-".
    if sexpvs[0] in [" + ", " - "]:
        head = sexpvs.pop(0)
        if head == " - ":
            sexpvs.insert(0, "-")
    return "".join(sexpvs)
def _degree(self, i: int) -> int:
    # Maximum exponent of the i-th generator; assumes self is nonzero.
    return max(monom[i] for monom in self.itermonoms())
def _degree_int(self, x: int | PolyElement[Er] | None = None) -> int:
    i = self.ring.index(x)
    if not self:
        # Degree of the zero polynomial is -1 by convention here.
        return -1
    elif i < 0:
        return 0
    else:
        return self._degree(i)
def _degrees(self) -> tuple[int, ...]:
    # Componentwise maximum over all monomials; assumes self is nonzero.
    return tuple(map(max, list(zip(*self.itermonoms()))))
def leading_expv(self) -> Mon | None:
    """Leading monomial tuple according to the monomial ordering.

    Returns ``None`` for the zero polynomial.

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> _, x, y, z = ring('x, y, z', ZZ)
    >>> p = x**4 + x**3*y + x**2*z**2 + z**7
    >>> p.leading_expv()
    (4, 0, 0)
    """
    # Use fmpz_mpoly.monomial(1) or fmpq_mpoly.monomial(1) in case of python-flint
    try:
        return self._leading_expv()
    except KeyError:
        return None
def _leading_expv(self) -> Mon:
    # Raises KeyError on the zero polynomial.
    if not self:
        raise KeyError
    else:
        return self.ring.leading_expv(self)
def _get_coeff(self, expv) -> Er:
    # Coefficient lookup defaulting to domain zero for absent monomials.
    return self.get(expv, self.ring.domain.zero)
def const(self) -> Er:
    """Returns the constant coefficient (coefficient of the zero monomial)."""
    return self._get_coeff(self.ring.zero_monom)
def coeff(self, element: PolyElement[Er] | int) -> Er:
    """
    Returns the coefficient that stands next to the given monomial.

    Parameters
    ==========

    element : PolyElement (with ``is_monomial = True``) or 1

    Raises
    ======

    ValueError
        If ``element`` is not ``1`` or a monomial with unit coefficient.

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> _, x, y, z = ring("x,y,z", ZZ)
    >>> f = 3*x**2*y - x*y*z + 7*z**3 + 23

    >>> f.coeff(x**2*y)
    3
    >>> f.coeff(x*y)
    0
    >>> f.coeff(1)
    23
    """
    if element == 1:
        return self._get_coeff(self.ring.zero_monom)
    elif self.ring.is_element(element):
        terms = list(cast("PolyElement[Er]", element).iterterms())
        if len(terms) == 1:
            monom, coeff = terms[0]
            if coeff == self.ring.domain.one:
                return self._get_coeff(monom)
    # Fall through for multi-term elements, non-unit coefficients and
    # arguments that are not ring elements at all.  Previously the
    # raise was nested inside the elif, so e.g. f.coeff(3) silently
    # returned None instead of raising.
    raise ValueError("expected a monomial, got %s" % element)
def leading_monom(self) -> PolyElement[Er]:
    """
    Leading monomial as a polynomial element.

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> _, x, y = ring('x, y', ZZ)
    >>> (3*x*y + y**2).leading_monom()
    x*y
    """
    p = self.ring.zero
    expv = self.leading_expv()
    # Explicit None check (consistent with leading_term): the zero
    # monomial of a 0-generator ring is the empty tuple, which is
    # falsy, so a bare `if expv:` would wrongly drop the constant
    # monomial of a nonzero constant there.
    if expv is not None:
        p[expv] = self.ring.domain.one
    return p
def leading_term(self) -> PolyElement[Er]:
    """Leading term as a polynomial element.

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> _, x, y = ring('x, y', ZZ)
    >>> (3*x*y + y**2).leading_term()
    3*x*y
    """
    result = self.ring.zero
    expv = self.leading_expv()
    if expv is not None:
        result[expv] = self[expv]
    return result
def coeffs(self, order: _str | None = None) -> list[Er]:
    """Ordered list of polynomial coefficients.

    Parameters
    ==========

    order : :class:`~.MonomialOrder` or coercible, optional

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.orderings import lex, grlex
    >>> _, x, y = ring("x, y", ZZ, lex)
    >>> f = x*y**7 + 2*x**2*y**3

    >>> f.coeffs()
    [2, 1]
    >>> f.coeffs(grlex)
    [1, 2]
    """
    return [c for _monom, c in self.terms(order)]
def monoms(self, order: _str | None = None) -> list[Mon]:
    """Ordered list of polynomial monomials.

    Parameters
    ==========

    order : :class:`~.MonomialOrder` or coercible, optional

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.orderings import lex, grlex
    >>> _, x, y = ring("x, y", ZZ, lex)
    >>> f = x*y**7 + 2*x**2*y**3

    >>> f.monoms()
    [(2, 3), (1, 7)]
    >>> f.monoms(grlex)
    [(1, 7), (2, 3)]
    """
    return [m for m, _coeff in self.terms(order)]
def terms(self, order: _str | None = None) -> list[tuple[Mon, Er]]:
    """Ordered list of polynomial terms.

    Parameters
    ==========

    order : :class:`~.MonomialOrder` or coercible, optional

    Examples
    ========

    >>> from sympy.polys.rings import ring
    >>> from sympy.polys.domains import ZZ
    >>> from sympy.polys.orderings import lex, grlex
    >>> _, x, y = ring("x, y", ZZ, lex)
    >>> f = x*y**7 + 2*x**2*y**3

    >>> f.terms()
    [((2, 3), 2), ((1, 7), 1)]
    >>> f.terms(grlex)
    [((1, 7), 1), ((2, 3), 2)]
    """
    all_terms = list(self.items())
    return self._sorted(all_terms, order)
def _sorted(
    self, seq: list[tuple[Mon, Er]], order: _str | None
) -> list[tuple[Mon, Er]]:
    # Sort terms in decreasing monomial order.  Lex order can compare
    # the exponent tuples directly; other orders need their key.
    ordering = self.ring.order if order is None else OrderOpt.preprocess(order)
    if ordering is lex:
        def key(term):
            return term[0]
    else:
        def key(term):
            return ordering(term[0])
    return sorted(seq, key=key, reverse=True)
def itercoeffs(self):
    """Iterator over coefficients of a polynomial."""
    return iter(self.values())
def itermonoms(self):
    """Iterator over monomials of a polynomial."""
    return iter(self.keys())
def iterterms(self) -> Iterator[tuple[Mon, Er]]:
    """Iterator over terms of a polynomial."""
    return iter(self.items())
def listcoeffs(self) -> list[Er]:
    """Unordered list of polynomial coefficients."""
    return list(self.values())
def listmonoms(self) -> list[Mon]:
    """Unordered list of polynomial monomials."""
    return list(self.keys())
def listterms(self) -> list[tuple[Mon, Er]]:
    """Unordered list of polynomial terms."""
    # Use coeffs()/monoms()/terms() for output sorted by monomial order.
    return list(self.items())
def content(self) -> Er:
    """Returns GCD of polynomial's coefficients."""
    # The flint version overrides this to use the native content()
    # method for ZZ and this pure-python technique for other domains.
    domain = self.ring.domain
    result = domain.zero
    for c in self.itercoeffs():
        result = domain.gcd(result, c)
    return result
def primitive(self) -> tuple[Er, PolyElement[Er]]:
    """Returns content and a primitive polynomial."""
    c = self.content()
    if c == self.ring.domain.zero:
        # Zero polynomial: nothing to divide out.
        return c, self
    return c, self.quo_ground(c)
def mul_monom(self, monom: Mon) -> PolyElement[Er]:
    """Multiply by a single monomial given as an exponent tuple."""
    mul = self.ring.monomial_mul
    return self.new([(mul(m, monom), c) for m, c in self.iterterms()])
def mul_term(self, term: tuple[Mon, Er]) -> PolyElement[Er]:
    """Multiply by a single ``(monom, coeff)`` term."""
    monom, coeff = term
    if not self or not coeff:
        return self.ring.zero
    if monom == self.ring.zero_monom:
        # Pure ground multiple.
        return self.mul_ground(coeff)
    mul = self.ring.monomial_mul
    return self.new(
        [(mul(m, monom), c * coeff) for m, c in self.iterterms()]
    )
def _quo_ground(self, x: Er) -> PolyElement[Er]:
    # Over a field every coefficient divides exactly; over a ring,
    # terms whose coefficient is not an exact multiple of x are
    # silently discarded.
    domain = self.ring.domain
    if domain.is_Field:
        quo = domain.quo
        terms = [(monom, quo(coeff, x)) for monom, coeff in self.iterterms()]
    else:
        # XXX: This is not valid for all domains (e.g. GF(p))
        terms = [
            (monom, coeff // x)  # type: ignore
            for monom, coeff in self.iterterms()
            if not (coeff % x)  # type: ignore
        ]
    return self.new(terms)
def _quo_term(self, term: tuple[Mon, Er]) -> PolyElement[Er]:
    # Keep only the terms exactly divisible by the given term.
    term_div = self._term_div()
    terms = [term_div(t, term) for t in self.iterterms()]
    return self.new([t for t in terms if t is not None])
def _deflate(
    self, J: tuple[int, ...], polys: list[PolyElement[Er]]
) -> list[PolyElement[Er]]:
    # Divide every exponent by the corresponding entry of J; this is
    # the inverse of inflate() for exponents that are multiples of J.
    ring = self.ring
    H = []
    for p in polys:
        h = ring.zero
        for I, coeff in p.iterterms():
            N = [i // j for i, j in zip(I, J)]
            h[tuple(N)] = coeff
        H.append(h)
    return H
def inflate(self, J: Sequence[int]) -> PolyElement[Er]:
    """Multiply each exponent by the corresponding entry of ``J``."""
    result = self.ring.zero
    for monom, coeff in self.iterterms():
        result[tuple(i * j for i, j in zip(monom, J))] = coeff
    return result
def gcd(self, other: PolyElement[Er]) -> PolyElement[Er]:
    """Polynomial GCD of ``self`` and ``other``."""
    return self.cofactors(other)[0]
def _diff(self, i: int) -> PolyElement[Er]:
    # Use the native derivative() method in case of python-flint
    # Partial derivative with respect to the i-th generator.
    ring = self.ring
    m = ring.monomial_basis(i)
    g = ring.zero
    for expv, coeff in self.iterterms():
        if expv[i]:
            e = ring.monomial_ldiv(expv, m)
            g[e] = ring.domain_new(coeff * expv[i])
    return g
def cofactors(
    self: PolyElement[Er], other: PolyElement[Er]
) -> tuple[PolyElement[Er], PolyElement[Er], PolyElement[Er]]:
    """Return ``(h, cff, cfg)`` with ``h = gcd(self, other)`` and
    ``self == h*cff``, ``other == h*cfg``."""
    if not self and not other:
        zero = self.ring.zero
        return zero, zero, zero
    elif not self:
        h, cff, cfg = self._gcd_zero(other)
        return h, cff, cfg
    elif not other:
        # Swap the roles and swap the cofactors back.
        h, cfg, cff = other._gcd_zero(self)
        return h, cff, cfg
    elif len(self) == 1:
        h, cff, cfg = self._gcd_monom(other)
        return h, cff, cfg
    elif len(other) == 1:
        h, cfg, cff = other._gcd_monom(self)
        return h, cff, cfg
    # Deflate common exponent multiples first to shrink the problem,
    # then inflate the results back.
    J, (self, other) = self.deflate(other)
    h, cff, cfg = self._gcd(other)
    return (h.inflate(J), cff.inflate(J), cfg.inflate(J))
def _gcd_zero(
    self, other: PolyElement[Er]
) -> tuple[PolyElement[Er], PolyElement[Er], PolyElement[Er]]:
    # gcd(0, g): the gcd is g normalized to a nonnegative leading sign.
    one, zero = self.ring.one, self.ring.zero
    if other.is_nonnegative:
        return other, zero, one
    else:
        return -other, zero, -one
def _gcd_monom(
    self, other: PolyElement[Er]
) -> tuple[PolyElement[Er], PolyElement[Er], PolyElement[Er]]:
    # self is a single term: the gcd is the gcd of that term with
    # every term of other, computed monomial- and coefficient-wise.
    ring = self.ring
    ground_gcd = ring.domain.gcd
    ground_quo = ring.domain.quo
    monomial_gcd = ring.monomial_gcd
    monomial_ldiv = ring.monomial_ldiv
    mf, cf = self.listterms()[0]
    _mgcd, _cgcd = mf, cf
    for mg, cg in other.iterterms():
        _mgcd = monomial_gcd(_mgcd, mg)
        _cgcd = ground_gcd(_cgcd, cg)
    h = self.new([(_mgcd, _cgcd)])
    cff = self.new([(monomial_ldiv(mf, _mgcd), ground_quo(cf, _cgcd))])
    cfg = self.new(
        [
            (monomial_ldiv(mg, _mgcd), ground_quo(cg, _cgcd))
            for mg, cg in other.iterterms()
        ]
    )
    return h, cff, cfg
def _gcd(
    self, other: PolyElement[Er]
) -> tuple[PolyElement[Er], PolyElement[Er], PolyElement[Er]]:
    # Dispatch on the coefficient domain.
    ring = self.ring
    if ring.domain.is_QQ:
        return self._gcd_QQ(other)
    elif ring.domain.is_ZZ:
        return self._gcd_ZZ(other)
    else:  # TODO: don't use dense representation (port PRS algorithms)
        return ring.dmp_inner_gcd(self, other)
def _gcd_ZZ(
    self, other: PolyElement[Er]
) -> tuple[PolyElement[Er], PolyElement[Er], PolyElement[Er]]:
    # Heuristic GCD over the integers.
    return heugcd(self, other)
def _gcd_QQ(
    self, g: PolyElement[Er]
) -> tuple[PolyElement[Er], PolyElement[Er], PolyElement[Er]]:
    # GCD over QQ: clear denominators, compute over ZZ, then move
    # back to QQ with a monic gcd and rescaled cofactors.
    f = self
    ring = f.ring
    new_ring = ring.clone(domain=ring.domain.get_ring())
    cf, f = f.clear_denoms()
    cg, g = g.clear_denoms()
    f = f.set_ring(new_ring)
    g = g.set_ring(new_ring)
    h, cff, cfg = f._gcd_ZZ(g)
    h = h.set_ring(ring)
    c, h = h.LC, h.monic()
    cff = cff.set_ring(ring).mul_ground(ring.domain.quo(c, cf))
    cfg = cfg.set_ring(ring).mul_ground(ring.domain.quo(c, cg))
    return h, cff, cfg
def cancel(self, g: PolyElement[Er]) -> tuple[PolyElement[Er], PolyElement[Er]]:
"""
Cancel common factors in a rational function ``f/g``.
Examples
========
>>> from sympy.polys import ring, ZZ
>>> R, x,y = ring("x,y", ZZ)
>>> (2*x**2 - 2).cancel(x**2 - 2*x + 1)
(2*x + 2, x - 1)
"""
f = self
ring = f.ring
if not f:
return f, ring.one
domain = ring.domain
if not (domain.is_Field and domain.has_assoc_Ring):
_, p, q = f.cofactors(g)
else:
new_ring = ring.clone(domain=domain.get_ring())
cq, f = f.clear_denoms()
cp, g = g.clear_denoms()
f = f.set_ring(new_ring)
g = g.set_ring(new_ring)
_, p, q = f.cofactors(g)
_, cp, cq = new_ring.domain.cofactors(cp, cq)
p = p.set_ring(ring)
q = q.set_ring(ring)
p = p.mul_ground(cp)
q = q.mul_ground(cq)
# Make canonical with respect to sign or quadrant in the case of ZZ_I
# or QQ_I. This ensures that the LC of the denominator is canonical by
# multiplying top and bottom by a unit of the ring.
u = q.canonical_unit()
if u == domain.one:
pass
elif u == -domain.one:
p, q = -p, -q
else:
p = p.mul_ground(u)
q = q.mul_ground(u)
return p, q
def _compose(self, replacements, initial_poly):
    """Substitute polynomials for generators.

    ``replacements`` is a sequence of ``(index, poly)`` pairs; for every
    term of ``self`` the powers of the listed generators are replaced by
    powers of the corresponding polynomials, and the results are summed
    onto ``initial_poly``.
    """
    ring = self.ring
    result = initial_poly
    for expv, coeff in self.iterterms():
        residual = list(expv)
        factor = ring.one
        for idx, repl in replacements:
            # Strip the generator's exponent from the monomial and fold it
            # into the replacement polynomial instead.
            power = residual[idx]
            residual[idx] = 0
            if power:
                factor = factor * repl**power
        term_part = factor.mul_term((tuple(residual), coeff))
        result = result + term_part
    return result
# XXX: implement the same algorithm for div from Cox, Little, O'Shea (CLO)
# for python-flint
def _div(self, fv: PolyElement[Er]) -> tuple[PolyElement[Er], PolyElement[Er]]:
    # Single-divisor division: (quotient, remainder) via the general
    # multi-divisor routine with a one-element divisor list.
    [q], r = self._div_list([fv])
    return q, r
def _div_list(
    self, fv: list[PolyElement[Er]]
) -> tuple[list[PolyElement[Er]], PolyElement[Er]]:
    """Divide ``self`` by the ordered list of divisors ``fv``.

    Returns ``(qv, r)`` such that
    ``self == sum(q*f for q, f in zip(qv, fv)) + r``, where no term of
    ``r`` is divisible by the leading term of any divisor (multivariate
    division algorithm, cf. Cox, Little, O'Shea).
    """
    ring = self.ring
    s = len(fv)
    qv = [ring.zero for i in range(s)]
    p = self.copy()  # mutated in place below; don't touch self
    r = ring.zero
    term_div = self._term_div()
    expvs = [fx._leading_expv() for fx in fv]
    while p:
        i = 0
        divoccurred = 0
        while i < s and divoccurred == 0:
            expv = p._leading_expv()
            # Try dividing p's leading term by fv[i]'s leading term.
            term = term_div((expv, p[expv]), (expvs[i], fv[i][expvs[i]]))
            if term is not None:
                expv1, c = term
                qv[i] = qv[i]._iadd_monom((expv1, c))
                # Subtract (quotient term) * fv[i] from p, in place.
                p = p._iadd_poly_monom(fv[i], (expv1, -c))
                divoccurred = 1
            else:
                i += 1
        if not divoccurred:
            # Leading term divisible by no divisor: move it to the remainder.
            expv = p._leading_expv()
            r = r._iadd_monom((expv, p[expv]))
            del p[expv]
    if expv == ring.zero_monom:
        r += p
    return qv, r
# The following _p* and _subresultants methods can simply be converted to
# pure Python methods in the python-flint case, since their speed does not
# matter much compared to the flint-backed versions.
def _prem(self, g: PolyElement[Er], x: int) -> PolyElement[Er]:
    """Pseudo-remainder of ``self`` by ``g`` w.r.t. the generator at
    index ``x``: the remainder of ``lc(g, x)**(df - dg + 1) * self``
    divided by ``g``.  Raises ZeroDivisionError when ``g`` has no terms
    in ``x``.
    """
    df = self._degree_int(x)
    dg = g._degree_int(x)
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    rem, drem = self, df
    if df < dg:
        return rem
    # Number of leading-coefficient multiplications still owed at the end.
    steps = df - dg + 1
    lc_g = g.coeff_wrt(x, dg)
    gen = self.ring.gens[x]
    while drem >= dg:
        lc_rem = rem.coeff_wrt(x, drem)
        shift = drem - dg
        steps -= 1
        # Classical pseudo-division step: scale by lc(g) and eliminate
        # the current leading term in x.
        rem = rem * lc_g - g * lc_rem * gen**shift
        drem = rem._degree_int(x)
    return rem * lc_g**steps
def _pdiv(
    self, g: PolyElement[Er], x: int
) -> tuple[PolyElement[Er], PolyElement[Er]]:
    """Pseudo-division of ``self`` by ``g`` w.r.t. the generator at index
    ``x``.

    Returns ``(q, r)`` such that
    ``lc(g, x)**(df - dg + 1) * self == q*g + r`` with
    ``deg(r, x) < deg(g, x)``.

    Raises ZeroDivisionError when ``g`` has no terms in ``x``.
    """
    f = self
    df = f._degree_int(x)
    dg = g._degree_int(x)
    if dg < 0:
        raise ZeroDivisionError("polynomial division")
    # BUG FIX: the quotient accumulator must start as the ZERO polynomial.
    # The previous ``self.ring(x)`` built the constant polynomial equal to
    # the integer generator index x, corrupting both the early-return
    # quotient and the accumulated quotient below.
    q, r, dr = self.ring.zero, f, df
    if df < dg:
        return q, r
    N = df - dg + 1
    lc_g = g.coeff_wrt(x, dg)
    xp = f.ring.gens[x]
    while True:
        lc_r = r.coeff_wrt(x, dr)
        j, N = dr - dg, N - 1
        # Scale the running quotient by lc(g) and add the new quotient term.
        q = q * lc_g + lc_r * xp**j
        # Scale the remainder and eliminate its leading term in x.
        r = r * lc_g - g * lc_r * xp**j
        dr = r._degree_int(x)
        if dr < dg:
            break
    # Apply the leftover leading-coefficient power so that the identity
    # lc(g)**(df - dg + 1) * f == q*g + r holds exactly.
    c = lc_g**N
    q = q * c
    r = r * c
    return q, r
def _pquo(self, g: PolyElement[Er], x: int) -> PolyElement[Er]:
    # Pseudo-quotient: the quotient part of pdiv (remainder discarded).
    f = self
    return f.pdiv(g, x)[0]
def _pexquo(self, g: PolyElement[Er], x: int):
    # Exact pseudo-quotient: pseudo-divide and demand a zero remainder,
    # otherwise signal that the division was not exact.
    quotient, remainder = self.pdiv(g, x)
    if not remainder.is_zero:
        raise ExactQuotientFailed(self, g)
    return quotient
def _subresultants(self, g: PolyElement[Er], x: int):
    """Subresultant polynomial remainder sequence (PRS) of ``self`` and
    ``g`` with respect to the generator at index ``x``.

    Returns the list ``R`` of polynomials in the sequence, starting with
    the two inputs (higher-degree first).  Uses repeated pseudo-remainders
    with the subresultant scaling to keep coefficients small.
    """
    f = self
    n = f._degree_int(x)
    m = g._degree_int(x)
    # Ensure deg(f) >= deg(g).
    if n < m:
        f, g = g, f
        n, m = m, n
    # NOTE(review): these degenerate branches return plain ints (0 / 1)
    # rather than ring elements — mirrors the upstream implementation;
    # confirm callers accept that.
    if f == 0:
        return [0, 0]
    if g == 0:
        return [f, 1]
    R = [f, g]
    d = n - m
    b = (-1) ** (d + 1)
    # Compute the pseudo-remainder for f and g
    h = f.prem(g, x)
    h = h * b
    # Compute the coefficient of g with respect to x**m
    lc = g.coeff_wrt(x, m)
    c = lc**d
    # S tracks the subresultant scaling factors (bookkeeping only).
    S = [1, c]
    c = -c
    while h:
        k = h.degree(x)
        R.append(h)
        f, g, m, d = g, h, k, m - k
        # Subresultant divisor: exact by the subresultant theorem.
        b = -lc * c**d
        h = f.prem(g, x)
        h = h.exquo(b)
        lc = g.coeff_wrt(x, k)
        if d > 1:
            p = (-lc) ** d
            q = c ** (d - 1)
            c = p.exquo(q)
        else:
            c = -lc
        S.append(-c)
    return R
def _subs(self, subs_dict: Mapping[int, Er]) -> PolyElement[Er]:
    """Return a new polynomial with the generators indexed by the keys of
    ``subs_dict`` replaced by the corresponding ground-domain values."""
    ring = self.ring
    out = ring.zero
    for expv, base_coeff in self.iterterms():
        coeff = base_coeff
        exponents = list(expv)
        # Fold each substituted generator's power into the coefficient and
        # zero out its exponent.
        for idx, value in subs_dict.items():
            power = expv[idx]
            if power > 0:
                coeff *= value**power
                exponents[idx] = 0
        if not coeff:
            # Substitution annihilated this term.
            continue
        key = tuple(exponents)
        if key in out:
            out[key] += coeff
            # Keep the sparse dict free of zero coefficients.
            if not out[key]:
                del out[key]
        else:
            out[key] = coeff
    return out
def _evaluate(self, eval_dict: Mapping[int, Er]) -> Er:
    """Fully evaluate the polynomial: ``eval_dict`` maps every generator
    index to a ground-domain value; returns the resulting ground value."""
    domain = self.ring.domain
    total = domain.zero
    for expv, coeff in self.iterterms():
        factor = domain.one
        for idx, power in enumerate(expv):
            if power > 0:
                factor *= eval_dict[idx] ** power
        total += coeff * factor
    return total
def _symmetrize(
    self,
) -> tuple[
    PolyElement[Er], PolyElement[Er], list[tuple[PolyElement[Er], PolyElement[Er]]]
]:
    """Rewrite ``self`` in terms of elementary symmetric polynomials.

    Returns ``(symmetric, remainder, mapping)`` where ``symmetric``,
    re-expanded through ``mapping`` (generator -> elementary symmetric
    polynomial), plus ``remainder`` reproduces ``self``.  The remainder is
    nonzero exactly when ``self`` is not symmetric.
    """
    # For the python-flint override this can just be converted back to
    # the pure python version until python-flint provides some
    # equivalent functionality.
    f = self.copy()
    ring = f.ring
    n = ring.ngens
    if not n:
        return f, ring.zero, []
    # polys[i] is the (i+1)-th elementary symmetric polynomial e_{i+1}.
    polys = [ring.symmetric_poly(i + 1) for i in range(n)]
    # Memoize powers of the elementary symmetric polynomials.
    poly_powers = {}

    def get_poly_power(i, n):
        if (i, n) not in poly_powers:
            poly_powers[(i, n)] = polys[i] ** n
        return poly_powers[(i, n)]

    indices = list(range(n - 1))
    weights = list(range(n, 0, -1))
    symmetric = ring.zero
    while f:
        # Greedily pick the highest-weight term whose exponent vector is
        # non-increasing (the leading term of a symmetric component).
        _height, _monom, _coeff = -1, None, None
        for i, (monom, coeff) in enumerate(f.terms()):
            if all(monom[i] >= monom[i + 1] for i in indices):
                height = max(n * m for n, m in zip(weights, monom))
                if height > _height:
                    _height, _monom, _coeff = height, monom, coeff
        if _height != -1:
            monom, coeff = cast("Mon", _monom), cast("Er", _coeff)
        else:
            # No candidate left: the residue f is not symmetric.
            break
        # Successive differences of the exponent vector give the powers of
        # the elementary symmetric polynomials in this component.
        exponents = []
        for m1, m2 in zip(monom, monom[1:] + (0,)):
            exponents.append(m1 - m2)
        symmetric += ring.term_new(tuple(exponents), coeff)
        # Subtract the expanded symmetric component from f.
        product = coeff
        for i, n in enumerate(exponents):
            product *= get_poly_power(i, n)
        f -= product
    mapping = list(zip(ring.gens, polys))
    return symmetric, f, mapping
# TODO: following methods should point to polynomial
# representation independent algorithm implementations.
def half_gcdex(
    self, other: PolyElement[Er]
) -> tuple[PolyElement[Er], PolyElement[Er]]:
    # Half extended Euclidean algorithm; delegates to the
    # dense-representation implementation (see ``dmp_half_gcdex``).
    return self.ring.dmp_half_gcdex(self, other)
def gcdex(
    self, other: PolyElement[Er]
) -> tuple[PolyElement[Er], PolyElement[Er], PolyElement[Er]]:
    # Extended Euclidean algorithm; delegates to the dense-representation
    # implementation (see ``dmp_gcdex``).
    return self.ring.dmp_gcdex(self, other)
def resultant(self, other: PolyElement[Er]) -> PolyElement[Er] | Er:
    # Resultant of the two polynomials; delegates to ``dmp_resultant``.
    return self.ring.dmp_resultant(self, other)
def discriminant(self) -> PolyElement[Er] | Er:
    # Discriminant of the polynomial; delegates to ``dmp_discriminant``.
    return self.ring.dmp_discriminant(self)
def decompose(self) -> list[PolyElement[Er]]:
    # Functional decomposition (univariate only); delegates to
    # ``dup_decompose``.
    if self.ring.is_univariate:
        return self.ring.dup_decompose(self)
    else:
        raise MultivariatePolynomialError("polynomial decomposition")
def shift(self, a: Er) -> PolyElement[Er]:
    # Taylor shift f(x) -> f(x + a) (univariate only); delegates to
    # ``dup_shift``.
    if self.ring.is_univariate:
        return self.ring.dup_shift(self, a)
    else:
        raise MultivariatePolynomialError("shift: use shift_list instead")
def shift_list(self, a: list[Er]) -> PolyElement[Er]:
    # Multivariate Taylor shift f(x1 + a1, ..., xn + an); delegates to
    # ``dmp_shift``.
    return self.ring.dmp_shift(self, a)
def sturm(self) -> list[PolyElement[Er]]:
    # Sturm sequence (univariate only); delegates to ``dup_sturm``.
    if self.ring.is_univariate:
        return self.ring.dup_sturm(self)
    else:
        raise MultivariatePolynomialError("sturm sequence")
def gff_list(self) -> list[tuple[PolyElement[Er], int]]:
    # Greatest factorial factorization; delegates to ``dmp_gff_list``.
    return self.ring.dmp_gff_list(self)
def norm(self) -> PolyElement[MPQ]:
    # XXX: Only defined for AlgebraicField
    # Delegates to ``dmp_norm`` (field norm over the rationals).
    return self.ring.dmp_norm(self)
def sqf_norm(self) -> tuple[list[int], PolyElement[Er], PolyElement[MPQ]]:
    # XXX: Only defined for AlgebraicField
    # Square-free norm; delegates to ``dmp_sqf_norm``.
    return self.ring.dmp_sqf_norm(self)
def sqf_part(self) -> PolyElement[Er]:
    # Square-free part of the polynomial; delegates to ``dmp_sqf_part``.
    return self.ring.dmp_sqf_part(self)
def sqf_list(
    self, all: bool = False
) -> tuple[Er, list[tuple[PolyElement[Er], int]]]:
    # Square-free decomposition: (content, [(factor, multiplicity), ...]);
    # delegates to ``dmp_sqf_list``.
    return self.ring.dmp_sqf_list(self, all=all)
def factor_list(self) -> tuple[Er, list[tuple[PolyElement[Er], int]]]:
    # Irreducible factorization: (content, [(factor, multiplicity), ...]);
    # delegates to ``dmp_factor_list``.
    return self.ring.dmp_factor_list(self)
| PolyElement |
python | huggingface__transformers | src/transformers/models/tvp/modeling_tvp.py | {
"start": 19066,
"end": 19983
} | class ____(GradientCheckpointingLayer):
def __init__(self, config):
    # One TVP encoder layer: self-attention, intermediate (feed-forward)
    # projection, and output projection, all built from the shared config.
    super().__init__()
    self.attention = TvpAttention(config)
    self.intermediate = TvpIntermediate(config)
    self.output = TvpOutputLayer(config)
def forward(
    self,
    hidden_states,
    attention_mask=None,
    output_attentions: Optional[bool] = None,
):
    """Run one encoder layer: self-attention followed by the
    intermediate/output feed-forward sub-block.  Returns a tuple whose
    first element is the layer output, followed by attention weights when
    ``output_attentions`` is requested."""
    attn_results = self.attention(
        hidden_states,
        attention_mask,
        output_attentions=output_attentions,
    )
    attn_output = attn_results[0]
    # Anything past the first element (attention probabilities, if
    # requested) is carried through to the layer's output tuple.
    extras = attn_results[1:]
    ff_hidden = self.intermediate(attn_output)
    layer_output = self.output(ff_hidden, attn_output)
    return (layer_output,) + extras
| TvpEncodeLayer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.