language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass8.py | {
"start": 251,
"end": 294
} | class ____(Generic[T], metaclass=A[T]): ...
| B |
python | sanic-org__sanic | sanic/worker/constants.py | {
"start": 72,
"end": 200
} | class ____(UpperStrEnum):
"""Available restart orders."""
SHUTDOWN_FIRST = auto()
STARTUP_FIRST = auto()
| RestartOrder |
python | altair-viz__altair | tests/utils/test_schemapi.py | {
"start": 1539,
"end": 1672
} | class ____(SchemaBase):
@classmethod
def _default_wrapper_classes(cls):
return _TestSchema.__subclasses__()
| _TestSchema |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/stackdriver.py | {
"start": 17073,
"end": 20068
} | class ____(GoogleCloudBaseOperator):
"""
Deletes an alerting policy.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:StackdriverDeleteAlertOperator`
:param name: The alerting policy to delete. The format is:
``projects/[PROJECT_ID]/alertPolicies/[ALERT_POLICY_ID]``.
:param retry: A retry object used to retry requests. If ``None`` is
specified, requests will be retried using a default configuration.
:param timeout: The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: (Optional) The connection ID used to connect to Google
Cloud Platform.
:param project_id: The project from which alert needs to be deleted.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"name",
"impersonation_chain",
)
ui_color = "#e5ffcc"
def __init__(
self,
*,
name: str,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
project_id: str = PROVIDE_PROJECT_ID,
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.name = name
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.project_id = project_id
self.impersonation_chain = impersonation_chain
self.hook: StackdriverHook | None = None
def execute(self, context: Context):
self.log.info("Delete Alert Policy: Project id: %s Name: %s", self.project_id, self.name)
if self.hook is None:
self.hook = StackdriverHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
self.hook.delete_alert_policy(
name=self.name,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
| StackdriverDeleteAlertOperator |
python | kamyu104__LeetCode-Solutions | Python/partition-string-into-substrings-with-values-at-most-k.py | {
"start": 38,
"end": 439
} | class ____(object):
def minimumPartition(self, s, k):
"""
:type s: str
:type k: int
:rtype: int
"""
result = 1
curr = 0
for c in s:
if int(c) > k:
return -1
if curr*10+int(c) > k:
result += 1
curr = 0
curr = curr*10+int(c)
return result
| Solution |
python | ray-project__ray | python/ray/serve/tests/unit/test_http_util.py | {
"start": 9840,
"end": 13962
} | class ____:
"""Test suite for configure_http_options_with_defaults function."""
def test_basic_configuration_with_mock_env(
self, base_http_options, mock_env_constants
):
"""Test basic configuration with mocked environment constants."""
result = configure_http_options_with_defaults(base_http_options)
# Should apply default request timeout from mock (30)
assert result.request_timeout_s == 30.0
# Keep alive timeout should remain original since mock sets it to 300
assert result.keep_alive_timeout_s == 300.0
# Should initialize middlewares list
assert result.middlewares == []
# Original should not be modified
assert base_http_options.request_timeout_s == 30.0
def test_keep_alive_timeout_override_from_env(self, base_http_options):
"""Test keep alive timeout override from environment variable."""
with patch(
"ray.serve._private.http_util.RAY_SERVE_HTTP_KEEP_ALIVE_TIMEOUT_S", 10
):
result = configure_http_options_with_defaults(base_http_options)
assert result.keep_alive_timeout_s == 10
def test_request_timeout_preserved_when_already_set(self):
"""Test that existing request timeout is preserved when already set."""
http_options = HTTPOptions(
host="0.0.0.0",
port=8000,
request_timeout_s=120.0,
keep_alive_timeout_s=5.0,
middlewares=[],
)
with patch(
"ray.serve._private.http_util.RAY_SERVE_REQUEST_PROCESSING_TIMEOUT_S", 300
):
result = configure_http_options_with_defaults(http_options)
assert result.request_timeout_s == 120.0
@patch("ray.serve._private.http_util.call_function_from_import_path")
@patch(
"ray.serve._private.http_util.RAY_SERVE_HTTP_PROXY_CALLBACK_IMPORT_PATH",
"my.module.callback",
)
def test_callback_middleware_injection(self, mock_call_function, base_http_options):
"""Test that the callback middleware is injected correctly."""
# Arrange: Create a valid middleware by wrapping it with Starlette's Middleware class
class CustomMiddleware(BaseHTTPMiddleware):
async def dispatch(self, request, call_next):
response = await call_next(request) # Simply pass the request through
return response
# Mock the app argument
mock_app = MagicMock()
wrapped_middleware = Middleware(CustomMiddleware, app=mock_app)
mock_call_function.return_value = [
wrapped_middleware
] # Return list of wrapped middleware
# Act
result = configure_http_middlewares(base_http_options)
# Assert
mock_call_function.assert_called_once_with(
"my.module.callback"
) # Verify callback execution
assert len(result.middlewares) == 1 # Ensure one middleware was injected
assert isinstance(result.middlewares[0], Middleware)
def test_callback_middleware_disabled(self, base_http_options):
"""Test that callback middleware is not loaded when disabled."""
with patch(
"ray.serve._private.http_util.RAY_SERVE_HTTP_PROXY_CALLBACK_IMPORT_PATH",
"",
):
result = configure_http_options_with_defaults(base_http_options)
# Assert that no callback middleware is added
assert result.middlewares == []
def test_deep_copy_behavior(self, base_http_options, mock_env_constants):
"""Test that an original HTTPOptions object is not modified."""
original_timeout = base_http_options.request_timeout_s
result = configure_http_options_with_defaults(base_http_options)
# Original should remain unchanged
assert base_http_options.request_timeout_s == original_timeout
# Result should be a different object
assert result is not base_http_options
if __name__ == "__main__":
sys.exit(pytest.main(["-v", "-s", __file__]))
| TestConfigureHttpOptionsWithDefaults |
python | huggingface__transformers | src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py | {
"start": 42525,
"end": 46094
} | class ____(nn.Module):
def __init__(self, config: RTDetrV2Config):
super().__init__()
self.normalize_before = config.normalize_before
# self-attention
self.self_attn = RTDetrV2MultiheadAttention(
embed_dim=config.encoder_hidden_dim,
num_heads=config.num_attention_heads,
dropout=config.dropout,
)
self.self_attn_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.encoder_activation_function]
self.activation_dropout = config.activation_dropout
self.fc1 = nn.Linear(config.encoder_hidden_dim, config.encoder_ffn_dim)
self.fc2 = nn.Linear(config.encoder_ffn_dim, config.encoder_hidden_dim)
self.final_layer_norm = nn.LayerNorm(config.encoder_hidden_dim, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
output_attentions: bool = False,
**kwargs,
):
"""
Args:
hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
attention_mask (`torch.FloatTensor`): attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
position_embeddings (`torch.FloatTensor`, *optional*):
Object queries (also called content embeddings), to be added to the hidden states.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
if self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_embeddings=position_embeddings,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.self_attn_layer_norm(hidden_states)
if self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
if not self.normalize_before:
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights,)
return outputs
| RTDetrV2EncoderLayer |
python | astropy__astropy | astropy/io/ascii/ecsv.py | {
"start": 1258,
"end": 1600
} | class ____(ECSVHeaderSplitter):
"""Special case splitter used for writing header line to quote all the column names.
This is used if the first column name starts with the ECSV comment regex or if any
column names have leading or trailing whitespace. See issue #18710.
"""
quoting = csv.QUOTE_ALL
| ECSVHeaderSplitterQuoteAll |
python | tensorflow__tensorflow | tensorflow/python/distribute/integration_test/tpu_memory_test.py | {
"start": 1630,
"end": 7979
} | class ____(tf.test.TestCase):
def setUp(self):
super().setUp()
# Clear all cached tensors
context._reset_context()
# Run garbage collection to free any tensors from previous
# runs.
gc.collect()
# Run a small program and copy the result to CPU.
# This causes deferred deallocations to be flushed and new memory to be
# allocated in a less fragmented way.
# Turning deferred deallocations off no longer seems to work.
assert tf.reduce_sum(tf.random.uniform(
(1024, 128), dtype=tf.float32)).numpy() > 1.0
self.resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
tpu="", project=None, zone=None)
tf.config.experimental_connect_to_cluster(self.resolver)
tf.tpu.experimental.initialize_tpu_system(self.resolver)
def testAutoDefragInProgramLoading(self):
# This test covers the case when training a large model on TPU. TPU HBM
# is not big enough to hold all TPU buffers and preserve stack for the
# TPU program. Runtime will automatically unload unused TPU program to
# free up space for TPU buffers. Having lots of TPU buffer may also
# introduce fragmentation in HBM to prevent us loading a TPU program
# properly. Runtime will automatically defrag in order to load a large
# TPU program.
strategy = tf.distribute.TPUStrategy(self.resolver)
dataset = get_dataset()
iterator = iter(
strategy.experimental_distribute_dataset(dataset,
tf.distribute.InputOptions()))
# Create a dummy big model that is close to HBM limit (15G):
# Temp HBM: 11G
# Sharded variable size: 2G
# Unsharded variables size: 4G
with strategy.scope():
x = tf.keras.layers.Input(shape=(500, 500, 3), name="input")
y = tf.keras.layers.Conv2D(
384, (15, 15),
strides=(2, 2),
padding="valid",
use_bias=False,
kernel_initializer="he_normal",
name="conv1")(
x)
y = tf.keras.layers.BatchNormalization(
momentum=0.997, center=True, scale=True)(
y)
y = tf.keras.layers.Dense(
10,
activation="softmax",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(
y)
y = tf.keras.layers.Conv2D(
64, (9, 9),
strides=(2, 2),
padding="valid",
use_bias=False,
kernel_initializer="he_normal",
name="conv2")(
y)
y = tf.keras.layers.Flatten()(y)
y = tf.keras.layers.Dense(
1024,
activation="softmax",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(
y)
y = tf.keras.layers.Dense(
1024,
activation="softmax",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(
y)
y = tf.keras.layers.Dense(
NUM_CLASS,
activation="softmax",
kernel_initializer=tf.random_normal_initializer(stddev=0.01))(
y)
model = tf.keras.Model(x, y)
optimizer = tf.keras.optimizers.legacy.SGD(learning_rate=0.1)
loss_obj = tf.keras.losses.CategoricalCrossentropy(
label_smoothing=0.0, reduction=tf.keras.losses.Reduction.NONE)
model.compile(optimizer=optimizer, loss=loss_obj)
@tf.function
def train_step(iterator):
def step_fn(inputs):
images, targets = inputs
with tf.GradientTape() as tape:
outputs = model(images, training=True)
loss = model.loss(targets, outputs)
grads = tape.gradient(loss, model.trainable_variables)
model.optimizer.apply_gradients(zip(grads, model.trainable_variables))
return loss
# Using host training loop here to trigger weight-update-sharding. It will
# introduce shard variable and unshard variable ops into the graph.
# When running unshard variable op, HBM won't have enough space for
# unsharded variables: 11G + 2G + 4G > 15G. So Runtime will have to
# automatically unload step function to free up space for unshard
# variable op.
for _ in tf.range(tf.constant(20)):
strategy.run(step_fn, args=(next(iterator),))
# We want to load the step function again after unshard variable op.
# However, we won't have enough space due to fragamentation:
# 15G - 2G - 4G < 11G. So Runtime will have to automatically defrag
# in order to load the program successfully.
strategy.run(step_fn, args=(next(iterator),))
# A dummy result to indicate this @tf.function has finished.
return 1.0
if FLAGS.tpu_use_tfrt:
result = train_step(iterator)
self.assertAllClose(1.0, result, atol=1e-07)
else:
# TPU StreamExecutor does not support auto-defrag in program loading. So
# it will return a ResourceExhaustedError.
with self.assertRaises(tf.errors.ResourceExhaustedError):
_ = train_step(iterator)
def testAutoDefragInBufferAllocation(self):
if not FLAGS.tpu_use_tfrt:
self.skipTest(
"TPU StreamExecutor does not support auto-defrag in allocation.")
with tf.device("TPU:0"):
# DF has ~15G HBM. Following 7 buffers will consume most HBM.
# pylint: disable=unused-variable
buffer_2g_1 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_2 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_3 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_4 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_5 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_6 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
buffer_2g_7 = tf.random.uniform((2, 256, 1024, 1024), dtype=tf.float32)
# pylint: enable=unused-variable
# Deallocate two buffers.
del buffer_2g_1, buffer_2g_3
gc.collect()
# The buffer we just deallocated doesn't provide enough contiguous region
# for allocating 4G. This allocation will trigger auto-defrag.
buffer_4g = tf.random.uniform((4, 256, 1024, 1024), dtype=tf.float32)
self.assertEndsWith(buffer_4g.device, "device:TPU:0")
if __name__ == "__main__":
tf.test.main()
| TpuMemoryTest |
python | zarr-developers__zarr-python | src/zarr/codecs/numcodecs/_codecs.py | {
"start": 7499,
"end": 7568
} | class ____(_NumcodecsBytesBytesCodec, codec_name="zstd"):
pass
| Zstd |
python | kamyu104__LeetCode-Solutions | Python/number-of-stable-subsequences.py | {
"start": 34,
"end": 557
} | class ____(object):
def countStableSubsequences(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
MOD = 10**9+7
dp = [[0]*2 for _ in xrange(2)] # dp[p][i]: count of subsequences that end with exactly (i+1) consecutive numbers of parity p
for x in nums:
p = x%2
dp[p][1] = (dp[p][1]+dp[p][0])%MOD
dp[p][0] = (dp[p][0]+1+dp[1^p][0]+dp[1^p][1])%MOD
return sum(dp[p][i] for p in xrange(2) for i in xrange(2))%MOD
| Solution |
python | huggingface__transformers | src/transformers/models/qwen3_omni_moe/modular_qwen3_omni_moe.py | {
"start": 100506,
"end": 100545
} | class ____(SnakeBeta):
pass
| SnakeBeta |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/processing_qwen2_5_omni.py | {
"start": 1215,
"end": 1530
} | class ____(VideosKwargs, total=False):
min_pixels: int
max_pixels: int
patch_size: int
temporal_patch_size: int
merge_size: int
min_frames: int
max_frames: int
use_audio_in_video: bool
seconds_per_chunk: float
position_id_per_seconds: Union[int, float]
| Qwen2_5_OmniVideosKwargs |
python | sqlalchemy__sqlalchemy | test/base/test_result.py | {
"start": 27933,
"end": 30886
} | class ____(fixtures.TestBase):
@testing.fixture
def merge_fixture(self):
r1 = result.IteratorResult(
result.SimpleResultMetaData(["user_id", "user_name"]),
iter([(7, "u1"), (8, "u2")]),
)
r2 = result.IteratorResult(
result.SimpleResultMetaData(["user_id", "user_name"]),
iter([(9, "u3")]),
)
r3 = result.IteratorResult(
result.SimpleResultMetaData(["user_id", "user_name"]),
iter([(10, "u4"), (11, "u5")]),
)
r4 = result.IteratorResult(
result.SimpleResultMetaData(["user_id", "user_name"]),
iter([(12, "u6")]),
)
return r1, r2, r3, r4
@testing.fixture
def dupe_fixture(self):
r1 = result.IteratorResult(
result.SimpleResultMetaData(["x", "y", "z"]),
iter([(1, 2, 1), (2, 2, 1)]),
)
r2 = result.IteratorResult(
result.SimpleResultMetaData(["x", "y", "z"]),
iter([(3, 1, 2), (3, 3, 3)]),
)
return r1, r2
def test_merge_results(self, merge_fixture):
r1, r2, r3, r4 = merge_fixture
result = r1.merge(r2, r3, r4)
eq_(result.keys(), ["user_id", "user_name"])
row = result.fetchone()
eq_(row, (7, "u1"))
result.close()
def test_fetchall(self, merge_fixture):
r1, r2, r3, r4 = merge_fixture
result = r1.merge(r2, r3, r4)
eq_(
result.fetchall(),
[
(7, "u1"),
(8, "u2"),
(9, "u3"),
(10, "u4"),
(11, "u5"),
(12, "u6"),
],
)
def test_first(self, merge_fixture):
r1, r2, r3, r4 = merge_fixture
result = r1.merge(r2, r3, r4)
eq_(
result.first(),
(7, "u1"),
)
def test_columns(self, merge_fixture):
r1, r2, r3, r4 = merge_fixture
result = r1.merge(r2, r3, r4)
eq_(
result.columns("user_name").fetchmany(4),
[("u1",), ("u2",), ("u3",), ("u4",)],
)
result.close()
def test_merge_scalars(self, merge_fixture):
r1, r2, r3, r4 = merge_fixture
for r in (r1, r2, r3, r4):
r.scalars(0)
result = r1.merge(r2, r3, r4)
eq_(result.scalars(0).all(), [7, 8, 9, 10, 11, 12])
def test_merge_unique(self, dupe_fixture):
r1, r2 = dupe_fixture
r1.scalars("y")
r2.scalars("y")
result = r1.merge(r2)
# uniqued 2, 2, 1, 3
eq_(result.scalars("y").unique().all(), [2, 1, 3])
def test_merge_preserve_unique(self, dupe_fixture):
r1, r2 = dupe_fixture
r1.unique().scalars("y")
r2.unique().scalars("y")
result = r1.merge(r2)
# unique takes place
eq_(result.scalars("y").all(), [2, 1, 3])
| MergeResultTest |
python | dagster-io__dagster | python_modules/libraries/dagster-aws/dagster_aws/athena/resources.py | {
"start": 564,
"end": 3669
} | class ____:
def __init__(self, client, workgroup="primary", polling_interval=5, max_polls=120):
check.invariant(
polling_interval >= 0, "polling_interval must be greater than or equal to 0"
)
check.invariant(max_polls > 0, "max_polls must be greater than 0")
self.client = client
self.workgroup = workgroup
self.max_polls = max_polls
self.polling_interval = polling_interval
def execute_query(self, query, fetch_results=False):
"""Synchronously execute a single query against Athena. If fetch_results is set to true,
will return a list of rows, where each row is a tuple of stringified values,
e.g. SELECT 1 will return [("1",)].
Args:
query (str): The query to execute.
fetch_results (Optional[bool]): Whether to return the results of executing the query.
Defaults to False, in which case the query will be executed without retrieving the
results.
Returns:
Optional[List[Tuple[Optional[str], ...]]]: Results of the query, as a list of tuples,
when fetch_results is set. Otherwise, return None. All items in the tuple are
represented as strings except for empty columns which are represented as None.
"""
check.str_param(query, "query")
check.bool_param(fetch_results, "fetch_results")
execution_id = self.client.start_query_execution(
QueryString=query, WorkGroup=self.workgroup
)["QueryExecutionId"]
self._poll(execution_id)
if fetch_results:
return self._results(execution_id)
def _poll(self, execution_id):
retries = self.max_polls
state = "QUEUED"
while retries > 0:
execution = self.client.get_query_execution(QueryExecutionId=execution_id)[
"QueryExecution"
]
state = execution["Status"]["State"]
if state not in ["QUEUED", "RUNNING"]:
break
retries -= 1
time.sleep(self.polling_interval)
if retries <= 0:
raise AthenaTimeout()
if state != "SUCCEEDED":
raise AthenaError(execution["Status"]["StateChangeReason"]) # pyright: ignore[reportPossiblyUnboundVariable]
def _results(self, execution_id):
execution = self.client.get_query_execution(QueryExecutionId=execution_id)["QueryExecution"]
s3 = boto3.resource("s3")
output_location = execution["ResultConfiguration"]["OutputLocation"]
bucket = urlparse(output_location).netloc
prefix = urlparse(output_location).path.lstrip("/")
results = []
rows = s3.Bucket(bucket).Object(prefix).get()["Body"].read().decode("utf-8").splitlines()
reader = csv.reader(rows)
next(reader) # Skip the CSV's header row
for row in reader:
results.append(tuple(row))
return results
@deprecated(breaking_version="2.0", additional_warn_text="Use AthenaClientResource instead.")
| AthenaClient |
python | aio-libs__aiohttp | aiohttp/web_exceptions.py | {
"start": 7229,
"end": 7291
} | class ____(HTTPClientError):
status_code = 403
| HTTPForbidden |
python | openai__openai-python | src/openai/resources/uploads/uploads.py | {
"start": 23209,
"end": 23758
} | class ____:
def __init__(self, uploads: Uploads) -> None:
self._uploads = uploads
self.create = _legacy_response.to_raw_response_wrapper(
uploads.create,
)
self.cancel = _legacy_response.to_raw_response_wrapper(
uploads.cancel,
)
self.complete = _legacy_response.to_raw_response_wrapper(
uploads.complete,
)
@cached_property
def parts(self) -> PartsWithRawResponse:
return PartsWithRawResponse(self._uploads.parts)
| UploadsWithRawResponse |
python | pypa__warehouse | tests/unit/email/test_init.py | {
"start": 201408,
"end": 204516
} | class ____:
@pytest.mark.parametrize(
("fn", "template_name"),
[
(email.send_recovery_codes_generated_email, "recovery-codes-generated"),
(email.send_recovery_code_used_email, "recovery-code-used"),
(email.send_recovery_code_reminder_email, "recovery-code-reminder"),
],
)
def test_recovery_code_emails(
self, pyramid_request, pyramid_config, monkeypatch, fn, template_name
):
stub_user = pretend.stub(
id="id",
username="username",
name="",
email="email@example.com",
primary_email=pretend.stub(email="email@example.com", verified=True),
)
subject_renderer = pyramid_config.testing_add_renderer(
f"email/{template_name}/subject.txt"
)
subject_renderer.string_response = "Email Subject"
body_renderer = pyramid_config.testing_add_renderer(
f"email/{template_name}/body.txt"
)
body_renderer.string_response = "Email Body"
html_renderer = pyramid_config.testing_add_renderer(
f"email/{template_name}/body.html"
)
html_renderer.string_response = "Email HTML Body"
send_email = pretend.stub(
delay=pretend.call_recorder(lambda *args, **kwargs: None)
)
pyramid_request.task = pretend.call_recorder(lambda *args, **kwargs: send_email)
monkeypatch.setattr(email, "send_email", send_email)
pyramid_request.db = pretend.stub(
query=lambda a: pretend.stub(
filter=lambda *a: pretend.stub(
one=lambda: pretend.stub(user_id=stub_user.id)
)
),
)
pyramid_request.user = stub_user
pyramid_request.registry.settings = {"mail.sender": "noreply@example.com"}
result = fn(pyramid_request, stub_user)
assert result == {"username": stub_user.username}
subject_renderer.assert_()
body_renderer.assert_(username=stub_user.username)
html_renderer.assert_(username=stub_user.username)
assert pyramid_request.task.calls == [pretend.call(send_email)]
assert send_email.delay.calls == [
pretend.call(
f"{stub_user.username} <{stub_user.email}>",
{
"sender": None,
"subject": "Email Subject",
"body_text": "Email Body",
"body_html": (
"<html>\n<head></head>\n"
"<body><p>Email HTML Body</p></body>\n</html>\n"
),
},
{
"tag": "account:email:sent",
"user_id": stub_user.id,
"additional": {
"from_": "noreply@example.com",
"to": stub_user.email,
"subject": "Email Subject",
"redact_ip": False,
},
},
)
]
| TestRecoveryCodeEmails |
python | openai__openai-python | src/openai/types/responses/response_function_tool_call_param.py | {
"start": 229,
"end": 941
} | class ____(TypedDict, total=False):
arguments: Required[str]
"""A JSON string of the arguments to pass to the function."""
call_id: Required[str]
"""The unique ID of the function tool call generated by the model."""
name: Required[str]
"""The name of the function to run."""
type: Required[Literal["function_call"]]
"""The type of the function tool call. Always `function_call`."""
id: str
"""The unique ID of the function tool call."""
status: Literal["in_progress", "completed", "incomplete"]
"""The status of the item.
One of `in_progress`, `completed`, or `incomplete`. Populated when items are
returned via API.
"""
| ResponseFunctionToolCallParam |
python | spyder-ide__spyder | spyder/plugins/projects/widgets/main_widget.py | {
"start": 42228,
"end": 43580
} | class ____(QWidget):
def __init__(self, directory=None):
QWidget.__init__(self)
self.CONF_SECTION = 'project_explorer'
vlayout = QVBoxLayout()
self.setLayout(vlayout)
self.explorer = ProjectExplorerWidget(None, self, self)
if directory is not None:
self.directory = directory
else:
self.directory = osp.dirname(osp.abspath(__file__))
self.explorer._setup_project(self.directory)
vlayout.addWidget(self.explorer)
hlayout1 = QHBoxLayout()
vlayout.addLayout(hlayout1)
label = QLabel("<b>Open file:</b>")
label.setAlignment(Qt.AlignRight)
hlayout1.addWidget(label)
self.label1 = QLabel()
hlayout1.addWidget(self.label1)
self.explorer.sig_open_file_requested.connect(self.label1.setText)
hlayout3 = QHBoxLayout()
vlayout.addLayout(hlayout3)
label = QLabel("<b>Option changed:</b>")
label.setAlignment(Qt.AlignRight)
hlayout3.addWidget(label)
self.label3 = QLabel()
hlayout3.addWidget(self.label3)
def test():
from spyder.utils.qthelpers import qapplication
app = qapplication()
test = ProjectExplorerTest()
test.resize(250, 480)
test.show()
app.exec_()
if __name__ == "__main__":
test()
| ProjectExplorerTest |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/attributes.py | {
"start": 76580,
"end": 93699
} | class ____(NamedTuple):
"""A 3-tuple of added, unchanged and deleted values,
representing the changes which have occurred on an instrumented
attribute.
The easiest way to get a :class:`.History` object for a particular
attribute on an object is to use the :func:`_sa.inspect` function::
from sqlalchemy import inspect
hist = inspect(myobject).attrs.myattribute.history
Each tuple member is an iterable sequence:
* ``added`` - the collection of items added to the attribute (the first
tuple element).
* ``unchanged`` - the collection of items that have not changed on the
attribute (the second tuple element).
* ``deleted`` - the collection of items that have been removed from the
attribute (the third tuple element).
"""
added: Union[Tuple[()], List[Any]]
unchanged: Union[Tuple[()], List[Any]]
deleted: Union[Tuple[()], List[Any]]
def __bool__(self) -> bool:
return self != HISTORY_BLANK
def empty(self) -> bool:
"""Return True if this :class:`.History` has no changes
and no existing, unchanged state.
"""
return not bool((self.added or self.deleted) or self.unchanged)
def sum(self) -> Sequence[Any]:
"""Return a collection of added + unchanged + deleted."""
return (
(self.added or []) + (self.unchanged or []) + (self.deleted or [])
)
def non_deleted(self) -> Sequence[Any]:
"""Return a collection of added + unchanged."""
return (self.added or []) + (self.unchanged or [])
def non_added(self) -> Sequence[Any]:
"""Return a collection of unchanged + deleted."""
return (self.unchanged or []) + (self.deleted or [])
def has_changes(self) -> bool:
"""Return True if this :class:`.History` has changes."""
return bool(self.added or self.deleted)
def _merge(self, added: Iterable[Any], deleted: Iterable[Any]) -> History:
return History(
list(self.added) + list(added),
self.unchanged,
list(self.deleted) + list(deleted),
)
def as_state(self) -> History:
return History(
[
(c is not None) and instance_state(c) or None
for c in self.added
],
[
(c is not None) and instance_state(c) or None
for c in self.unchanged
],
[
(c is not None) and instance_state(c) or None
for c in self.deleted
],
)
@classmethod
def from_scalar_attribute(
cls,
attribute: _ScalarAttributeImpl,
state: InstanceState[Any],
current: Any,
) -> History:
original = state.committed_state.get(attribute.key, _NO_HISTORY)
deleted: Union[Tuple[()], List[Any]]
if original is _NO_HISTORY:
if current is NO_VALUE:
return cls((), (), ())
else:
return cls((), [current], ())
# don't let ClauseElement expressions here trip things up
elif (
current is not NO_VALUE
and attribute.is_equal(current, original) is True
):
return cls((), [current], ())
else:
# current convention on native scalars is to not
# include information
# about missing previous value in "deleted", but
# we do include None, which helps in some primary
# key situations
if id(original) in _NO_STATE_SYMBOLS:
deleted = ()
# indicate a "del" operation occurred when we don't have
# the previous value as: ([None], (), ())
if id(current) in _NO_STATE_SYMBOLS:
current = None
else:
deleted = [original]
if current is NO_VALUE:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_object_attribute(
cls,
attribute: _ScalarObjectAttributeImpl,
state: InstanceState[Any],
current: Any,
original: Any = _NO_HISTORY,
) -> History:
deleted: Union[Tuple[()], List[Any]]
if original is _NO_HISTORY:
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if original is _NO_HISTORY:
if current is NO_VALUE:
return cls((), (), ())
else:
return cls((), [current], ())
elif current is original and current is not NO_VALUE:
return cls((), [current], ())
else:
# current convention on related objects is to not
# include information
# about missing previous value in "deleted", and
# to also not include None - the dependency.py rules
# ignore the None in any case.
if id(original) in _NO_STATE_SYMBOLS or original is None:
deleted = ()
# indicate a "del" operation occurred when we don't have
# the previous value as: ([None], (), ())
if id(current) in _NO_STATE_SYMBOLS:
current = None
else:
deleted = [original]
if current is NO_VALUE:
return cls((), (), deleted)
else:
return cls([current], (), deleted)
@classmethod
def from_collection(
cls,
attribute: _CollectionAttributeImpl,
state: InstanceState[Any],
current: Any,
) -> History:
original = state.committed_state.get(attribute.key, _NO_HISTORY)
if current is NO_VALUE:
return cls((), (), ())
current = getattr(current, "_sa_adapter")
if original is NO_VALUE:
return cls(list(current), (), ())
elif original is _NO_HISTORY:
return cls((), list(current), ())
else:
current_states = [
((c is not None) and instance_state(c) or None, c)
for c in current
]
original_states = [
((c is not None) and instance_state(c) or None, c)
for c in original
]
current_set = dict(current_states)
original_set = dict(original_states)
return cls(
[o for s, o in current_states if s not in original_set],
[o for s, o in current_states if s in original_set],
[o for s, o in original_states if s not in current_set],
)
# Shared sentinel History meaning "no changes recorded" (empty added /
# unchanged / deleted).
HISTORY_BLANK = History((), (), ())
def get_history(
    obj: object, key: str, passive: PassiveFlag = PASSIVE_OFF
) -> History:
    """Return a :class:`.History` record for the given object
    and attribute key.

    This is the **pre-flush** history for a given attribute, which is
    reset each time the :class:`.Session` flushes changes to the
    current database transaction.

    .. note::

        Prefer to use the :attr:`.AttributeState.history` and
        :meth:`.AttributeState.load_history` accessors to retrieve the
        :class:`.History` for instance attributes.

    :param obj: an object whose class is instrumented by the
      attributes package.

    :param key: string attribute name.

    :param passive: indicates loading behavior for the attribute
       if the value is not already present.  This is a
       bitflag attribute, which defaults to the symbol
       :attr:`.PASSIVE_OFF` indicating all necessary SQL
       should be emitted.

    .. seealso::

        :attr:`.AttributeState.history`

        :meth:`.AttributeState.load_history` - retrieve history
        using loader callables if the value is not locally present.

    """
    # resolve the InstanceState for the object, then delegate
    state = instance_state(obj)
    return get_state_history(state, key, passive)
def get_state_history(
    state: InstanceState[Any], key: str, passive: PassiveFlag = PASSIVE_OFF
) -> History:
    """Return the :class:`.History` for *key* on an
    :class:`.InstanceState`, delegating to the state itself."""
    return state.get_history(key, passive)
def has_parent(
    cls: Type[_O], obj: _O, key: str, optimistic: bool = False
) -> bool:
    """Return whether *obj* is known to be attached to a parent via the
    relationship attribute *key* on class *cls*.

    :param cls: the mapped class owning the attribute.
    :param obj: the instance whose parent linkage is checked.
    :param key: string name of the relationship attribute.
    :param optimistic: passed through to ``ClassManager.has_parent``;
     presumably controls the answer when no history is available —
     TODO confirm against ``ClassManager.has_parent``.
    """
    manager = manager_of_class(cls)
    state = instance_state(obj)
    return manager.has_parent(state, key, optimistic)
def _register_attribute(
    class_: Type[_O],
    key: str,
    *,
    comparator: interfaces.PropComparator[_T],
    parententity: _InternalEntityType[_O],
    doc: Optional[str] = None,
    **kw: Any,
) -> InstrumentedAttribute[_T]:
    """Install both the class-level descriptor and the attribute
    implementation for *key* on *class_*, returning the descriptor."""
    descriptor = _register_descriptor(
        class_,
        key,
        comparator=comparator,
        parententity=parententity,
        doc=doc,
    )
    # remaining keyword arguments configure the attribute implementation
    _register_attribute_impl(class_, key, **kw)
    return descriptor
def _register_attribute_impl(
    class_: Type[_O],
    key: str,
    uselist: bool = False,
    callable_: Optional[_LoaderCallable] = None,
    useobject: bool = False,
    impl_class: Optional[Type[_AttributeImpl]] = None,
    backref: Optional[str] = None,
    **kw: Any,
) -> QueryableAttribute[Any]:
    """Create and attach the attribute implementation for *key* on
    *class_*, choosing the impl class from the ``impl_class`` /
    ``uselist`` / ``useobject`` flags (in that order of precedence).

    :param uselist: True for collection-valued attributes.
    :param callable_: loader callable used to populate the attribute
     lazily, if any.
    :param useobject: True for scalar object (related-instance)
     attributes.
    :param impl_class: explicit impl class overriding the flag-based
     selection.
    :param backref: name of the reverse attribute; when set, backref
     event listeners are installed.
    :return: the instrumented attribute for *key* from the class manager.
    """
    manager = manager_of_class(class_)
    if uselist:
        # instrument the collection class first; the impl needs the
        # resulting type callable.  NOTE: "typecallable" is popped from kw
        # in both branches so it is never forwarded via **kw below.
        factory = kw.pop("typecallable", None)
        typecallable = manager.instrument_collection_class(
            key, factory or list
        )
    else:
        typecallable = kw.pop("typecallable", None)

    dispatch = cast(
        "_Dispatch[QueryableAttribute[Any]]", manager[key].dispatch
    )  # noqa: E501

    impl: _AttributeImpl

    if impl_class:
        # TODO: this appears to be the WriteOnlyAttributeImpl /
        # DynamicAttributeImpl constructor which is hardcoded
        impl = cast("Type[_WriteOnlyAttributeImpl]", impl_class)(
            class_, key, dispatch, **kw
        )
    elif uselist:
        impl = _CollectionAttributeImpl(
            class_, key, callable_, dispatch, typecallable=typecallable, **kw
        )
    elif useobject:
        impl = _ScalarObjectAttributeImpl(
            class_, key, callable_, dispatch, **kw
        )
    else:
        impl = _ScalarAttributeImpl(class_, key, callable_, dispatch, **kw)

    manager[key].impl = impl

    if backref:
        # install listeners that keep the reverse relationship in sync
        _backref_listeners(manager[key], backref, uselist)

    manager.post_configure_attribute(key)
    return manager[key]
def _register_descriptor(
    class_: Type[Any],
    key: str,
    *,
    comparator: interfaces.PropComparator[_T],
    parententity: _InternalEntityType[Any],
    doc: Optional[str] = None,
) -> InstrumentedAttribute[_T]:
    """Build an :class:`.InstrumentedAttribute` descriptor for *key* and
    instrument it onto *class_* via its class manager."""
    descriptor = InstrumentedAttribute(
        class_, key, comparator=comparator, parententity=parententity
    )
    descriptor.__doc__ = doc  # type: ignore

    manager_of_class(class_).instrument_attribute(key, descriptor)
    return descriptor
def _unregister_attribute(class_: Type[Any], key: str) -> None:
    """Remove the instrumented attribute *key* from *class_*."""
    manager = manager_of_class(class_)
    manager.uninstrument_attribute(key)
def init_collection(obj: object, key: str) -> CollectionAdapter:
    """Initialize a collection attribute and return the collection adapter.

    This function is used to provide direct access to collection internals
    for a previously unloaded attribute.  e.g.::

        collection_adapter = init_collection(someobject, "elements")
        for elem in values:
            collection_adapter.append_without_event(elem)

    For an easier way to do the above, see
    :func:`~sqlalchemy.orm.attributes.set_committed_value`.

    :param obj: a mapped object

    :param key: string attribute name where the collection is located.

    """
    # resolve instrumentation state, then delegate to the state-level form
    state = instance_state(obj)
    return init_state_collection(state, state.dict, key)
def init_state_collection(
    state: InstanceState[Any], dict_: _InstanceDict, key: str
) -> CollectionAdapter:
    """Initialize a collection attribute and return the collection adapter.

    Discards any existing collection which may be there.

    """
    attr = state.manager[key].impl

    if TYPE_CHECKING:
        assert isinstance(attr, _HasCollectionAdapter)

    old = dict_.pop(key, None)  # discard old collection
    if old is not None:
        # let the impl detach events/adapters from the replaced collection
        old_collection = old._sa_adapter
        attr._dispose_previous_collection(state, old, old_collection, False)

    # create a fresh empty collection and fetch its adapter without
    # emitting any loader SQL
    user_data = attr._default_value(state, dict_)
    adapter: CollectionAdapter = attr.get_collection(
        state, dict_, user_data, passive=PassiveFlag.PASSIVE_NO_FETCH
    )
    adapter._reset_empty()

    return adapter
def set_committed_value(instance: object, key: str, value: Any) -> None:
    """Set the value of an attribute with no history events.

    Cancels any previous history present.  The value should be
    a scalar value for scalar-holding attributes, or
    an iterable for any collection-holding attribute.

    This is the same underlying method used when a lazy loader
    fires off and loads additional data from the database.
    In particular, this method can be used by application code
    which has loaded additional attributes or collections through
    separate queries, which can then be attached to an instance
    as though it were part of its original loaded state.

    """
    state = instance_state(instance)
    inst_dict = instance_dict(instance)
    state.manager[key].impl.set_committed_value(state, inst_dict, value)
def set_attribute(
    instance: object,
    key: str,
    value: Any,
    initiator: Optional[AttributeEventToken] = None,
) -> None:
    """Set the value of an attribute, firing history events.

    This function may be used regardless of instrumentation
    applied directly to the class, i.e. no descriptors are required.
    Custom attribute management schemes will need to make usage
    of this method to establish attribute state as understood
    by SQLAlchemy.

    :param instance: the object that will be modified

    :param key: string name of the attribute

    :param value: value to assign

    :param initiator: an instance of :class:`.Event` that would have
      been propagated from a previous event listener.  This argument
      is used when the :func:`.set_attribute` function is being used within
      an existing event listening function where an :class:`.Event` object
      is being supplied; the object may be used to track the origin of the
      chain of events.

    """
    state = instance_state(instance)
    inst_dict = instance_dict(instance)
    state.manager[key].impl.set(state, inst_dict, value, initiator)
def get_attribute(instance: object, key: str) -> Any:
    """Get the value of an attribute, firing any callables required.

    This function may be used regardless of instrumentation
    applied directly to the class, i.e. no descriptors are required.
    Custom attribute management schemes will need to make usage
    of this method to make usage of attribute state as understood
    by SQLAlchemy.

    """
    state = instance_state(instance)
    inst_dict = instance_dict(instance)
    return state.manager[key].impl.get(state, inst_dict)
def del_attribute(instance: object, key: str) -> None:
    """Delete the value of an attribute, firing history events.

    This function may be used regardless of instrumentation
    applied directly to the class, i.e. no descriptors are required.
    Custom attribute management schemes will need to make usage
    of this method to establish attribute state as understood
    by SQLAlchemy.

    """
    state = instance_state(instance)
    inst_dict = instance_dict(instance)
    state.manager[key].impl.delete(state, inst_dict)
def flag_modified(instance: object, key: str) -> None:
    """Mark an attribute on an instance as 'modified'.

    This sets the 'modified' flag on the instance and
    establishes an unconditional change event for the given attribute.
    The attribute must have a value present, else an
    :class:`.InvalidRequestError` is raised.

    To mark an object "dirty" without referring to any specific attribute
    so that it is considered within a flush, use the
    :func:`.attributes.flag_dirty` call.

    .. seealso::

        :func:`.attributes.flag_dirty`

    """
    state = instance_state(instance)
    inst_dict = instance_dict(instance)
    impl = state.manager[key].impl
    # fire the "modified" event first, then record the modification on the
    # state itself as a userland-initiated change
    impl.dispatch.modified(state, impl._modified_token)
    state._modified_event(inst_dict, impl, NO_VALUE, is_userland=True)
def flag_dirty(instance: object) -> None:
    """Mark an instance as 'dirty' without any specific attribute mentioned.

    This is a special operation that will allow the object to travel through
    the flush process for interception by events such as
    :meth:`.SessionEvents.before_flush`.   Note that no SQL will be emitted in
    the flush process for an object that has no changes, even if marked dirty
    via this method.  However, a :meth:`.SessionEvents.before_flush` handler
    will be able to see the object in the :attr:`.Session.dirty` collection and
    may establish changes on it, which will then be included in the SQL
    emitted.

    .. seealso::

        :func:`.attributes.flag_modified`

    """
    state = instance_state(instance)
    inst_dict = instance_dict(instance)
    # no attribute impl supplied: the modification is recorded against the
    # instance as a whole
    state._modified_event(inst_dict, None, NO_VALUE, is_userland=True)
| History |
python | mlflow__mlflow | mlflow/models/rag_signatures.py | {
"start": 845,
"end": 1028
} | class ____:
query: str = "What is mlflow?"
history: list[Message] | None = field(default_factory=list)
@deprecated("mlflow.types.llm.ChatChoice")
@dataclass
| MultiturnChatRequest |
python | eventlet__eventlet | tests/websocket_test.py | {
"start": 22270,
"end": 23353
} | class ____(tests.LimitedTestCase):
def setUp(self):
self.mock_socket = s = mock.Mock()
self.environ = env = dict(HTTP_ORIGIN='http://localhost', HTTP_WEBSOCKET_PROTOCOL='ws',
PATH_INFO='test')
self.test_ws = WebSocket(s, env)
super().setUp()
def test_recieve(self):
ws = self.test_ws
ws.socket.recv.return_value = b'\x00hello\xFF'
self.assertEqual(ws.wait(), 'hello')
self.assertEqual(ws._buf, b'')
self.assertEqual(len(ws._msgs), 0)
ws.socket.recv.return_value = b''
self.assertEqual(ws.wait(), None)
self.assertEqual(ws._buf, b'')
self.assertEqual(len(ws._msgs), 0)
def test_send_to_ws(self):
ws = self.test_ws
ws.send('hello')
assert ws.socket.sendall.called_with("\x00hello\xFF")
ws.send(10)
assert ws.socket.sendall.called_with("\x0010\xFF")
def test_close_ws(self):
ws = self.test_ws
ws.close()
assert ws.socket.shutdown.called_with(True)
| TestWebSocketObject |
python | pytorch__pytorch | test/profiler/test_cpp_thread.py | {
"start": 2375,
"end": 7614
} | class ____(TestCase):
ThreadCount = 20 # set to 2 for debugging
EventHandler = None
TraceObject = None
@classmethod
def setUpClass(cls) -> None:
super(TestCase, cls).setUpClass()
CppThreadTestCUDA.EventHandler = PythonProfilerEventHandler()
cpp.ProfilerEventHandler.Register(CppThreadTestCUDA.EventHandler)
@classmethod
def tearDownClass(cls):
if not is_fbcode():
torch.testing._internal.common_utils.remove_cpp_extensions_build_root()
def setUp(self) -> None:
super().setUp()
if not torch.cuda.is_available():
self.skipTest("Test machine does not have cuda")
global device
device = "cuda"
# this clears off events from initialization
self.start_profiler(False)
cpp.start_threads(1, IterationCount, False)
def start_profiler(self, profile_memory):
global KinetoProfiler
KinetoProfiler = torch.profiler.profile(
schedule=torch.profiler.schedule(
wait=1, warmup=1, active=ActivateIteration, repeat=1
),
on_trace_ready=self.set_trace,
with_stack=True,
profile_memory=profile_memory,
record_shapes=True,
)
def set_trace(self, trace_obj) -> None:
CppThreadTestCUDA.TraceObject = trace_obj
def assert_text(self, condition, text, msg):
if condition:
print(f"\33[32m{text}\33[0m")
else:
print(f"\33[31m{text}\33[0m")
self.assertTrue(condition, msg)
def check_trace(self, expected, mem=False) -> None:
blueprint("verifying trace")
event_list = CppThreadTestCUDA.TraceObject.events()
for key, values in expected.items():
count = values[0]
min_count = count * (ActivateIteration - 1)
device = values[1]
filtered = filter(
lambda ev: ev.name == key
and str(ev.device_type) == f"DeviceType.{device}",
event_list,
)
if mem:
actual = 0
for ev in filtered:
sev = str(ev)
has_cuda_memory_usage = (
sev.find("cuda_memory_usage=0 ") < 0
and sev.find("cuda_memory_usage=") > 0
)
if has_cuda_memory_usage:
actual += 1
self.assert_text(
actual >= min_count,
f"{key}: {actual} >= {min_count}",
"not enough event with cuda_memory_usage set",
)
else:
actual = len(list(filtered))
if count == 1: # test_without
count *= ActivateIteration
self.assert_text(
actual == count,
f"{key}: {actual} == {count}",
"baseline event count incorrect",
)
else:
self.assert_text(
actual >= min_count,
f"{key}: {actual} >= {min_count}",
"not enough event recorded",
)
@skipIf(
IS_WINDOWS,
"Failing on windows cuda, see https://github.com/pytorch/pytorch/pull/130037 for slightly more context",
)
def test_with_enable_profiler_in_child_thread_cuda(self) -> None:
self.start_profiler(False)
cpp.start_threads(self.ThreadCount, IterationCount, True)
self.check_trace(
{
"aten::add": [self.ThreadCount, "CPU"],
"user_function": [self.ThreadCount, "CUDA"],
}
)
@skipIf(
IS_WINDOWS,
"Failing on windows cuda, see https://github.com/pytorch/pytorch/pull/130037 for slightly more context",
)
def test_without_enable_profiler_in_child_thread_cuda(self) -> None:
self.start_profiler(False)
cpp.start_threads(self.ThreadCount, IterationCount, False)
self.check_trace(
{
"aten::add": [1, "CPU"],
"user_function": [1, "CUDA"],
}
)
@skipIf(
IS_WINDOWS,
"Failing on windows cuda, see https://github.com/pytorch/pytorch/pull/130037 for slightly more context",
)
def test_profile_memory_cuda(self) -> None:
self.start_profiler(True)
cpp.start_threads(self.ThreadCount, IterationCount, True)
self.check_trace(
{
"aten::add": [self.ThreadCount, "CPU"],
},
mem=True,
)
# Here duplicate the CppThreadTest to enable the xpu cases because the
# instantiate_device_type_tests will call class method setUpClass.
# In function setUpClass, the instantiated class(e.g CppThreadTestCPU, CppThreadTestXPU)
# needs to be called to get it member EventHandler, while in this period,
# the input class in argument cls is CppThreadTest, which is not defined any more.
# We cannot detect which instantiated class is being created in setUpClass, so duplicate here
# for enabling xpu test cases
| CppThreadTestCUDA |
python | getsentry__sentry | tests/sentry/utils/test_linksign.py | {
"start": 281,
"end": 3931
} | class ____(TestCase):
def test_link_signing(self) -> None:
base_url = get_local_region().to_url("/")
assert base_url.startswith("http://")
url = linksign.generate_signed_link(self.user.id, "sentry")
assert url.startswith(base_url)
url = linksign.generate_signed_link(
self.user.id,
"sentry-customer-domain-unsubscribe-project",
referrer="alert_view",
kwargs={"project_id": 1},
)
assert url.startswith(base_url)
assert "referrer=alert_view" in url
assert "unsubscribe/project" in url
def test_link_signing_custom_url_prefix(self) -> None:
if SiloMode.get_current_mode() != SiloMode.MONOLITH:
return
rf = RequestFactory()
# system.url-prefix only influences monolith behavior.
# in siloed deployments url templates are used
with self.options({"system.url-prefix": "https://sentry.io"}):
url = linksign.generate_signed_link(self.user.id, "sentry")
assert url.startswith("https://sentry.io")
req = rf.get("/" + url.split("/", 3)[-1])
signed_user = linksign.process_signature(req)
assert signed_user
assert signed_user.id == self.user.id
def test_process_signature(self) -> None:
rf = RequestFactory()
url = linksign.generate_signed_link(self.user.id, "sentry")
req = rf.get("/" + url.split("/", 3)[-1])
signed_user = linksign.process_signature(req)
assert signed_user
assert signed_user.id == self.user.id
req = rf.get("/what" + url.split("/", 3)[-1])
signed_user = linksign.process_signature(req)
assert signed_user is None
req = rf.get("/" + url.split("/", 3)[-1] + "garbage")
signed_user = linksign.process_signature(req)
assert signed_user is None
def test_generate_signed_unsubscribe_link_path_based(self) -> None:
rf = RequestFactory()
org = self.organization
user = self.user
url = linksign.generate_signed_unsubscribe_link(
org, user_id=user.id, resource="project", resource_id=1, referrer="alert_notification"
)
assert f"http://testserver/unsubscribe/{org.slug}/project/1/" in url
assert "referrer=alert_notification" in url
assert "_=" in url
# signature should be valid for the API endpoint
parsed = urlparse(url)
api_path = reverse("sentry-api-0-organization-unsubscribe-project", args=[org.slug, 1])
req = rf.get(f"{api_path}?{parsed.query}")
signed_user = linksign.process_signature(req)
assert signed_user
def test_generate_signed_unsubscribe_link_domain_based(self) -> None:
rf = RequestFactory()
org = self.organization
user = self.user
with self.feature("system:multi-region"):
url = linksign.generate_signed_unsubscribe_link(
org,
user_id=user.id,
resource="project",
resource_id=1,
referrer="alert_notification",
)
assert f"http://{org.slug}.testserver/unsubscribe/project/1/" in url
assert "referrer=alert_notification" in url
assert "_=" in url
# signature should be valid for the API endpoint
parsed = urlparse(url)
api_path = reverse("sentry-api-0-organization-unsubscribe-project", args=[org.slug, 1])
req = rf.get(f"{api_path}?{parsed.query}")
signed_user = linksign.process_signature(req)
assert signed_user
| LinkSignTestCase |
python | keras-team__keras | guides/making_new_layers_and_models_via_subclassing.py | {
"start": 18507,
"end": 19014
} | class ____(layers.Layer):
"""Converts z, the encoded digit vector, back into a readable digit."""
def __init__(
self, original_dim, intermediate_dim=64, name="decoder", **kwargs
):
super().__init__(name=name, **kwargs)
self.dense_proj = layers.Dense(intermediate_dim, activation="relu")
self.dense_output = layers.Dense(original_dim, activation="sigmoid")
def call(self, inputs):
x = self.dense_proj(inputs)
return self.dense_output(x)
| Decoder |
python | PyCQA__pylint | doc/data/messages/n/non-str-assignment-to-dunder-name/bad.py | {
"start": 0,
"end": 82
} | class ____:
pass
Fruit.__name__ = 1 # [non-str-assignment-to-dunder-name]
| Fruit |
python | django__django | django/db/backends/ddl_references.py | {
"start": 3512,
"end": 4311
} | class ____(Columns):
def __init__(self, table, columns, quote_name, col_suffixes=(), opclasses=()):
self.opclasses = opclasses
super().__init__(table, columns, quote_name, col_suffixes)
def __str__(self):
def col_str(column, idx):
# Index.__init__() guarantees that self.opclasses is the same
# length as self.columns.
col = "{} {}".format(self.quote_name(column), self.opclasses[idx])
try:
suffix = self.col_suffixes[idx]
if suffix:
col = "{} {}".format(col, suffix)
except IndexError:
pass
return col
return ", ".join(
col_str(column, idx) for idx, column in enumerate(self.columns)
)
| IndexColumns |
python | getsentry__sentry | tests/sentry/issues/endpoints/test_actionable_items.py | {
"start": 213,
"end": 2879
} | class ____(APITestCase):
# These tests will not focus on the actual source map debugging functionality as that is covered in
# test_source_map_debug.py. Instead, these tests will focus on the unique parts of this endpoint including the responses,
# and how event errors are handled.
endpoint = "sentry-api-0-event-actionable-items"
def setUp(self) -> None:
self.login_as(self.user)
return super().setUp()
def test_missing_event(self) -> None:
resp = self.get_error_response(
self.organization.slug,
self.project.slug,
"invalid_id",
status_code=status.HTTP_404_NOT_FOUND,
)
assert resp.data["detail"] == "Event not found"
def test_orders_event_errors_by_priority(self) -> None:
event = self.store_event(
data={
"event_id": "a" * 32,
"release": "my-release",
"dist": "my-dist",
"sdk": {
"name": "sentry.javascript.browser",
"version": "7.3.0",
},
"exception": {
"values": [
{
"type": "Error",
"stacktrace": {
"frames": [
{
"abs_path": "https://example.com/application.js",
"lineno": 1,
"colno": 39,
}
]
},
}
]
},
"errors": [
{"type": EventError.INVALID_DATA, "name": "foo"},
{"type": EventError.JS_MISSING_SOURCES_CONTENT, "url": "http://example.com"},
{"type": EventError.UNKNOWN_ERROR, "name": "bar"},
],
},
project_id=self.project.id,
assert_no_errors=False,
)
resp = self.get_success_response(
self.organization.slug,
self.project.slug,
event.event_id,
)
errors = resp.data["errors"]
# Unknown error should be hidden
assert len(errors) == 2
# Missing Error should be first by priority
missing_error = errors[0]
invalid_data = errors[1]
assert missing_error["type"] == EventError.JS_MISSING_SOURCES_CONTENT
assert invalid_data["type"] == EventError.INVALID_DATA
| ActionableItemsEndpointTestCase |
python | ray-project__ray | python/ray/serve/tests/test_config_files/logging_config_test.py | {
"start": 666,
"end": 1530
} | class ____:
def __init__(self, handle):
self.handle = handle
async def __call__(self):
logger.debug("this_is_debug_info_from_router")
log_info = await self.handle.remote()
if len(logger.handlers) == 2:
log_info["router_log_file"] = logger.handlers[1].target.baseFilename
else:
log_info["router_log_file"] = None
log_info["router_log_level"] = logger.level
try:
# Add controller log file path
client = _get_global_client()
_, log_file_path = ray.get(client._controller._get_logging_config.remote())
except RayActorError:
log_file_path = None
log_info["controller_log_file"] = log_file_path
return log_info
model = Router.bind(Model.bind())
@serve.deployment(logging_config={"log_level": "DEBUG"})
| Router |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 658275,
"end": 658696
} | class ____(sgqlc.types.Interface):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("viewer_cannot_update_reasons",)
viewer_cannot_update_reasons = sgqlc.types.Field(
sgqlc.types.non_null(
sgqlc.types.list_of(sgqlc.types.non_null(CommentCannotUpdateReason))
),
graphql_name="viewerCannotUpdateReasons",
)
| UpdatableComment |
python | jazzband__prettytable | src/prettytable/prettytable.py | {
"start": 2180,
"end": 2248
} | class ____(IntEnum):
FRAME = 0
ALL = 1
NONE = 2
| VRuleStyle |
python | huggingface__transformers | src/transformers/models/glm4v/modeling_glm4v.py | {
"start": 30753,
"end": 31327
} | class ____(PreTrainedModel):
config: Glm4vConfig
base_model_prefix = "model"
input_modalities = ("image", "video", "text")
supports_gradient_checkpointing = True
_no_split_modules = ["Glm4vTextDecoderLayer", "Glm4vVisionBlock"]
_skip_keys_device_placement = "past_key_values"
_supports_flash_attn = True
_supports_sdpa = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Glm4vTextDecoderLayer,
"attentions": Glm4vTextAttention,
}
| Glm4vPreTrainedModel |
python | huggingface__transformers | src/transformers/models/seamless_m4t_v2/modeling_seamless_m4t_v2.py | {
"start": 109698,
"end": 117347
} | class ____(PreTrainedModel):
config: SeamlessM4Tv2Config
main_input_name = "input_embeds"
input_modalities = "audio"
_no_split_modules = []
def __init__(self, config):
super().__init__(config)
self.pad_token_id = config.t2u_pad_token_id
embed_dim = config.unit_embed_dim
kernel_size = config.variance_predictor_kernel_size
var_pred_dropout = config.var_pred_dropout
self.dur_predictor = SeamlessM4Tv2VariancePredictor(embed_dim, embed_dim, kernel_size, var_pred_dropout)
self.unit_embedding = nn.Embedding(config.unit_hifi_gan_vocab_size, config.unit_embed_dim)
self.speaker_embedding = nn.Embedding(config.vocoder_num_spkrs, config.spkr_embed_dim)
self.language_embedding = nn.Embedding(config.vocoder_num_langs, config.lang_embed_dim)
self.hifi_gan = SeamlessM4Tv2HifiGan(config)
# Initialize weights and apply final processing
self.post_init()
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan._get_dur_output_lengths
def _get_dur_output_lengths(self, input_ids, dur_out):
"""
Computes the output length after the duration layer.
"""
unit_lengths = (input_ids != self.pad_token_id).sum(1)
# take care of edge cases where no padding or too many padding
unit_lengths = torch.clamp(unit_lengths, 0, dur_out.shape[1] - 1)
cumulative_dur_out = torch.cumsum(dur_out, dim=1)
unit_lengths = cumulative_dur_out.gather(dim=1, index=unit_lengths.unsqueeze(1)).squeeze()
return unit_lengths
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan._get_output_hifigan_lengths
def _get_output_hifigan_lengths(self, input_lengths: Union[torch.LongTensor, int]):
"""
Computes the output length of the hifigan convolutional layers
"""
def _conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
# 1D convolutional layer output length formula taken
# from https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html
return (
torch.div(input_length + 2 * pad - dilation * (kernel_size - 1) - 1, stride, rounding_mode="floor") + 1
)
def _transpose_conv_out_length(input_length, kernel_size, stride, pad, dilation=1):
return (input_length - 1) * stride - 2 * pad + dilation * (kernel_size - 1) + 1
# conv_pre
input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
# upsampler
for i, (upsample_rate, kernel_size) in enumerate(
zip(self.config.upsample_rates, self.config.upsample_kernel_sizes)
):
input_lengths = _transpose_conv_out_length(
input_lengths, kernel_size, upsample_rate, (kernel_size - upsample_rate) // 2
)
# resblock
for i in range(len(self.config.upsample_rates)):
for kernel_size, dilation in zip(self.config.resblock_kernel_sizes, self.config.resblock_dilation_sizes):
for dil in dilation:
input_lengths = _conv_out_length(
input_lengths, kernel_size, 1, (kernel_size - 1) * dil // 2, dilation=dil
)
for dil in dilation:
input_lengths = _conv_out_length(input_lengths, kernel_size, 1, (kernel_size - 1) // 2, dilation=1)
# conv_post
input_lengths = _conv_out_length(input_lengths, 7, 1, 3)
return input_lengths
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan.forward with SeamlessM4T->SeamlessM4Tv2, spkr_id->speaker_id
def forward(
self, input_ids: torch.LongTensor, speaker_id: torch.Tensor, lang_id: torch.Tensor
) -> tuple[torch.Tensor]:
"""
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`SeamlessM4Tv2TextToUnitForConditionalGeneration`]. [What are input
IDs?](../glossary#input-ids)
speaker_id (`int`, *optional*):
The id of the speaker used for speech synthesis. Must be lower than `config.vocoder_num_spkrs`.
tgt_lang (`str`, *optional*):
The language id to use as target language for translation.
"""
hidden_states = self.unit_embedding(input_ids).transpose(1, 2)
spkr = self.speaker_embedding(speaker_id).transpose(1, 2)
lang = self.language_embedding(lang_id).transpose(1, 2)
log_dur_pred = self.dur_predictor(hidden_states.transpose(1, 2))
dur_out = torch.clamp(torch.round(torch.expm1(log_dur_pred)).long(), min=1)
# B x C x T
if hidden_states.size(0) == 1:
hidden_states = torch.repeat_interleave(hidden_states, dur_out.view(-1), dim=2)
else:
# if batched sample, need to interleave per sample, and pad -> loss of parallelism
if hidden_states.shape[0] > 1 and self.training:
logger.warning(
"""`self.training=True` and you use batching. You lose parallelism during the hifigan
forward pass because the samples are interleaved."""
)
hidden_states = [
torch.repeat_interleave(hidden_state, duration, dim=-1).transpose(0, 1)
for (hidden_state, duration) in zip(hidden_states, dur_out)
]
hidden_states = nn.utils.rnn.pad_sequence(hidden_states, batch_first=True).transpose(1, 2)
spkr = spkr.repeat(1, 1, hidden_states.shape[-1])
lang = lang.repeat(1, 1, hidden_states.shape[-1])
hidden_states = torch.cat([lang, hidden_states, spkr], dim=1)
hidden_states = self.hifi_gan(hidden_states)
unit_lengths = self._get_dur_output_lengths(input_ids, dur_out)
lengths = self._get_output_hifigan_lengths(unit_lengths)
return hidden_states, lengths
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan.apply_weight_norm
def apply_weight_norm(self):
weight_norm = nn.utils.weight_norm
if hasattr(nn.utils.parametrizations, "weight_norm"):
weight_norm = nn.utils.parametrizations.weight_norm
weight_norm(self.hifi_gan.conv_pre)
for layer in self.hifi_gan.upsampler:
weight_norm(layer)
for layer in self.hifi_gan.resblocks:
layer.apply_weight_norm()
weight_norm(self.hifi_gan.conv_post)
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TCodeHifiGan.remove_weight_norm
def remove_weight_norm(self):
nn.utils.remove_weight_norm(self.hifi_gan.conv_pre)
for layer in self.hifi_gan.upsampler:
nn.utils.remove_weight_norm(layer)
for layer in self.hifi_gan.resblocks:
layer.remove_weight_norm()
nn.utils.remove_weight_norm(self.hifi_gan.conv_post)
############ WHOLE MODEL related code ################
@auto_docstring(
custom_intro="""
The text-to-text SeamlessM4Tv2 Model transformer which can be used for T2TT.
"""
)
# Copied from transformers.models.seamless_m4t.modeling_seamless_m4t.SeamlessM4TForTextToText with SeamlessM4T->SeamlessM4Tv2,SeamlessM4Tv2Tokenizer->SeamlessM4TTokenizer, SeamlessM4Tv2Processor->SeamlessM4TProcessor, SEAMLESS_M4T->SEAMLESS_M4T_V2
| SeamlessM4Tv2CodeHifiGan |
python | weaviate__weaviate-python-client | weaviate/backup/backup.py | {
"start": 675,
"end": 853
} | class ____(str, Enum):
"""Which backend should be used to write the backup to."""
FILESYSTEM = "filesystem"
S3 = "s3"
GCS = "gcs"
AZURE = "azure"
| BackupStorage |
python | PrefectHQ__prefect | src/prefect/events/schemas/labelling.py | {
"start": 101,
"end": 2131
} | class ____:
"""The LabelDiver supports templating use cases for any Labelled object, by
presenting the labels as a graph of objects that may be accessed by attribute. For
example:
```python
diver = LabelDiver({
'hello.world': 'foo',
'hello.world.again': 'bar'
})
assert str(diver.hello.world) == 'foo'
assert str(diver.hello.world.again) == 'bar'
```
"""
_value: str
_divers: Dict[str, "LabelDiver"]
_labels: Dict[str, str]
def __init__(self, labels: Dict[str, str], value: str = ""):
self._labels = labels.copy()
self._value = value
divers: Dict[str, Dict[str, str]] = {}
values: Dict[str, str] = {}
for key, value in labels.items():
head, _, tail = key.partition(".")
if tail:
if head not in divers:
divers[head] = {}
divers[head][tail] = labels[key]
else:
values[head] = value
# start with keys that had sub-divers...
self._divers: Dict[str, LabelDiver] = {
k: LabelDiver(v, value=values.pop(k, "")) for k, v in divers.items()
}
# ...then mix in any remaining keys that _only_ had values
self._divers.update(**{k: LabelDiver({}, value=v) for k, v in values.items()})
def __str__(self) -> str:
return self._value or ""
def __repr__(self) -> str:
return f"LabelDiver(divers={self._divers!r}, value={self._value!r})"
def __len__(self) -> int:
return len(self._labels)
def __iter__(self) -> Iterator[Tuple[str, str]]:
return iter(self._labels.items())
def __getitem__(self, key: str) -> str:
return self._labels[key]
def __getattr__(self, name: str) -> "LabelDiver":
if name.startswith("_"):
raise AttributeError
try:
return self._divers[name]
except KeyError:
raise AttributeError
| LabelDiver |
python | PrefectHQ__prefect | tests/test_flows.py | {
"start": 156054,
"end": 166284
} | class ____:
@pytest.fixture
def mock_deploy(self, monkeypatch):
mock = AsyncMock()
monkeypatch.setattr("prefect.deployments.runner.deploy", mock)
return mock
@pytest.fixture
def local_flow(self):
@flow
def local_flow_deploy():
pass
return local_flow_deploy
@pytest.fixture
async def remote_flow(self):
remote_flow = await flow.from_source(
entrypoint="flows.py:test_flow", source=MockStorage()
)
return remote_flow
@pytest.fixture
async def mock_create_deployment(self):
with mock.patch(
"prefect.client.orchestration.PrefectClient.create_deployment"
) as mock_create:
mock_create.return_value = uuid.uuid4()
yield mock_create
@pytest.fixture
async def mock_get_inferred_version_info(self):
with mock.patch(
"prefect.deployments.runner.get_inferred_version_info"
) as mock_get_inferred:
mock_get_inferred.return_value = GitVersionInfo(
type="vcs:git",
version="abcdef12",
commit_sha="abcdef12",
message="Initial commit",
branch="main",
url="https://github.com/org/repo",
repository="org/repo",
)
yield mock_get_inferred
async def test_calls_deploy_with_expected_args(
self, mock_deploy, local_flow, work_pool, capsys
):
image = DockerImage(
name="my-repo/my-image", tag="dev", build_kwargs={"pull": False}
)
await local_flow.deploy(
name="test",
tags=["price", "luggage"],
parameters={"name": "Arthur"},
concurrency_limit=42,
description="This is a test",
version="alpha",
version_type=VersionType.SIMPLE,
work_pool_name=work_pool.name,
work_queue_name="line",
job_variables={"foo": "bar"},
image=image,
build=False,
push=False,
enforce_parameter_schema=True,
paused=True,
)
mock_deploy.assert_called_once_with(
await local_flow.to_deployment(
name="test",
tags=["price", "luggage"],
parameters={"name": "Arthur"},
concurrency_limit=42,
description="This is a test",
version="alpha",
version_type=VersionType.SIMPLE,
work_queue_name="line",
job_variables={"foo": "bar"},
enforce_parameter_schema=True,
paused=True,
),
work_pool_name=work_pool.name,
image=image,
build=False,
push=False,
print_next_steps_message=False,
ignore_warnings=False,
)
console_output = capsys.readouterr().out
assert "prefect worker start --pool" in console_output
assert work_pool.name in console_output
assert "prefect deployment run 'local-flow-deploy/test'" in console_output
async def test_calls_deploy_with_expected_args_remote_flow(
self,
mock_deploy,
remote_flow,
work_pool,
):
image = DockerImage(
name="my-repo/my-image", tag="dev", build_kwargs={"pull": False}
)
await remote_flow.deploy(
name="test",
tags=["price", "luggage"],
parameters={"name": "Arthur"},
description="This is a test",
version="alpha",
version_type=VersionType.SIMPLE,
work_pool_name=work_pool.name,
work_queue_name="line",
job_variables={"foo": "bar"},
image=image,
push=False,
enforce_parameter_schema=True,
paused=True,
schedule=Schedule(
interval=3600,
anchor_date=datetime.datetime(2025, 1, 1),
parameters={"number": 42},
),
)
mock_deploy.assert_called_once_with(
await remote_flow.to_deployment(
name="test",
tags=["price", "luggage"],
parameters={"name": "Arthur"},
description="This is a test",
version="alpha",
version_type=VersionType.SIMPLE,
work_queue_name="line",
job_variables={"foo": "bar"},
enforce_parameter_schema=True,
paused=True,
schedule=Schedule(
interval=3600,
anchor_date=datetime.datetime(2025, 1, 1),
parameters={"number": 42},
),
),
work_pool_name=work_pool.name,
image=image,
build=True,
push=False,
print_next_steps_message=False,
ignore_warnings=False,
)
async def test_deploy_non_existent_work_pool(
self,
mock_deploy,
local_flow,
):
with pytest.raises(
ValueError, match="Could not find work pool 'non-existent'."
):
await local_flow.deploy(
name="test",
work_pool_name="non-existent",
image="my-repo/my-image",
)
async def test_no_worker_command_for_push_pool(
self, mock_deploy, local_flow, push_work_pool, capsys
):
await local_flow.deploy(
name="test",
work_pool_name=push_work_pool.name,
image="my-repo/my-image",
)
assert "prefect worker start" not in capsys.readouterr().out
async def test_no_worker_command_for_active_workers(
self, mock_deploy, local_flow, work_pool, capsys, monkeypatch
):
mock_read_workers_for_work_pool = AsyncMock(
return_value=[
Worker(
name="test-worker",
work_pool_id=work_pool.id,
status=WorkerStatus.ONLINE,
)
]
)
monkeypatch.setattr(
"prefect.client.orchestration.PrefectClient.read_workers_for_work_pool",
mock_read_workers_for_work_pool,
)
await local_flow.deploy(
name="test", work_pool_name=work_pool.name, image="my-repo/my-image"
)
assert "prefect worker start" not in capsys.readouterr().out
async def test_suppress_console_output(
self, mock_deploy, local_flow, work_pool, capsys
):
await local_flow.deploy(
name="test",
work_pool_name=work_pool.name,
image="my-repo/my-image",
print_next_steps=False,
)
assert not capsys.readouterr().out
async def test_deploy_from_within_flow(
self, mock_deploy, local_flow, work_pool, prefect_client
):
"""regression test for 17434"""
@flow
def hello_flow():
local_flow.deploy(
name="my-deployment",
work_pool_name=work_pool.name,
)
hello_flow()
assert mock_deploy.call_count == 1
mock_deploy.assert_called_once_with(
await local_flow.to_deployment(
name="my-deployment",
),
work_pool_name=work_pool.name,
image=None,
build=True,
push=True,
print_next_steps_message=False,
ignore_warnings=False,
)
async def test_deploy_infers_version_info(
self,
local_flow,
work_pool_with_image_variable,
mock_create_deployment,
mock_get_inferred_version_info,
):
await local_flow.deploy(
name="my-deployment",
work_pool_name=work_pool_with_image_variable.name,
image="my-repo/my-image",
build=False,
)
mock_get_inferred_version_info.assert_awaited_once()
mock_create_deployment.assert_awaited_once()
passed_version_info = mock_create_deployment.call_args.kwargs["version_info"]
assert passed_version_info == GitVersionInfo(
type="vcs:git",
version="abcdef12",
commit_sha="abcdef12",
message="Initial commit",
branch="main",
url="https://github.com/org/repo",
repository="org/repo",
)
async def test_deploy_infers_version_info_with_name(
self,
local_flow,
work_pool_with_image_variable,
mock_create_deployment,
mock_get_inferred_version_info,
):
await local_flow.deploy(
name="my-deployment",
work_pool_name=work_pool_with_image_variable.name,
image="my-repo/my-image",
build=False,
version="my-version",
)
mock_get_inferred_version_info.assert_awaited_once()
mock_create_deployment.assert_awaited_once()
passed_version_info = mock_create_deployment.call_args.kwargs["version_info"]
assert passed_version_info == GitVersionInfo(
type="vcs:git",
version="my-version",
commit_sha="abcdef12",
message="Initial commit",
branch="main",
url="https://github.com/org/repo",
repository="org/repo",
)
async def test_deploy_uses_flow_version_as_simple_version(
self,
local_flow,
work_pool_with_image_variable,
mock_create_deployment,
):
await local_flow.deploy(
name="my-deployment",
work_pool_name=work_pool_with_image_variable.name,
image="my-repo/my-image",
build=False,
version_type=VersionType.SIMPLE,
)
mock_create_deployment.assert_awaited_once()
passed_version_info = mock_create_deployment.call_args.kwargs["version_info"]
assert passed_version_info == VersionInfo(
type="prefect:simple",
version=local_flow.version,
)
| TestFlowDeploy |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_workflows.py | {
"start": 9131,
"end": 10729
} | class ____:
@mock.patch(BASE_PATH.format("Execution"))
@mock.patch(BASE_PATH.format("WorkflowsHook"))
@mock.patch(BASE_PATH.format("WorkflowsExecutionLink.persist"))
def test_execute(self, mock_link_persist, mock_hook, mock_object):
mock_hook.return_value.create_execution.return_value.name = "name/execution_id"
op = WorkflowsCreateExecutionOperator(
task_id="test_task",
workflow_id=WORKFLOW_ID,
execution=EXECUTION,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
context = mock.MagicMock()
result = op.execute(context=context)
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_execution.assert_called_once_with(
workflow_id=WORKFLOW_ID,
execution=EXECUTION,
location=LOCATION,
project_id=PROJECT_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
mock_link_persist.assert_called_with(
context=context,
location_id=LOCATION,
workflow_id=WORKFLOW_ID,
execution_id=EXECUTION_ID,
project_id=PROJECT_ID,
)
assert result == mock_object.to_dict.return_value
| TestWorkflowExecutionsCreateExecutionOperator |
python | huggingface__transformers | tests/models/conditional_detr/test_modeling_conditional_detr.py | {
"start": 6642,
"end": 21566
} | class ____(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
all_model_classes = (
(
ConditionalDetrModel,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
)
if is_torch_available()
else ()
)
pipeline_model_mapping = (
{"image-feature-extraction": ConditionalDetrModel, "object-detection": ConditionalDetrForObjectDetection}
if is_torch_available()
else {}
)
is_encoder_decoder = True
test_missing_keys = False
zero_init_hidden_state = True
test_torch_exportable = True
# special case for head models
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
if return_labels:
if model_class.__name__ in ["ConditionalDetrForObjectDetection", "ConditionalDetrForSegmentation"]:
labels = []
for i in range(self.model_tester.batch_size):
target = {}
target["class_labels"] = torch.ones(
size=(self.model_tester.n_targets,), device=torch_device, dtype=torch.long
)
target["boxes"] = torch.ones(
self.model_tester.n_targets, 4, device=torch_device, dtype=torch.float
)
target["masks"] = torch.ones(
self.model_tester.n_targets,
self.model_tester.min_size,
self.model_tester.max_size,
device=torch_device,
dtype=torch.float,
)
labels.append(target)
inputs_dict["labels"] = labels
return inputs_dict
def setUp(self):
self.model_tester = ConditionalDetrModelTester(self)
self.config_tester = ConfigTester(self, config_class=ConditionalDetrConfig, has_text_modality=False)
def test_config(self):
self.config_tester.run_common_tests()
def test_conditional_detr_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_conditional_detr_model(*config_and_inputs)
def test_conditional_detr_object_detection_head_model(self):
config_and_inputs = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_conditional_detr_object_detection_head_model(*config_and_inputs)
# TODO: check if this works again for PyTorch 2.x.y
@unittest.skip(reason="Got `CUDA error: misaligned address` with PyTorch 2.0.0.")
def test_multi_gpu_data_parallel_forward(self):
pass
@unittest.skip(reason="Conditional DETR does not use inputs_embeds")
def test_inputs_embeds(self):
pass
@unittest.skip(reason="Conditional DETR does not use inputs_embeds")
def test_inputs_embeds_matches_input_ids(self):
pass
@unittest.skip(reason="Conditional DETR does not have a get_input_embeddings method")
def test_model_get_set_embeddings(self):
pass
@unittest.skip(reason="Conditional DETR is not a generative model")
def test_generate_without_input_ids(self):
pass
@unittest.skip(reason="Conditional DETR does not use token embeddings")
def test_resize_tokens_embeddings(self):
pass
@slow
@unittest.skip(reason="TODO Niels: fix me!")
def test_model_outputs_equivalence(self):
pass
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = self.model_tester.decoder_seq_length
encoder_seq_length = self.model_tester.encoder_seq_length
decoder_key_length = self.model_tester.decoder_seq_length
encoder_key_length = self.model_tester.encoder_seq_length
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = False
config.return_dict = True
model = model_class._from_config(config, attn_implementation="eager")
config = model.config
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
out_len = len(outputs)
if self.is_encoder_decoder:
correct_outlen = 6
# loss is at first position
if "labels" in inputs_dict:
correct_outlen += 1 # loss is added to beginning
# Object Detection model returns pred_logits and pred_boxes
if model_class.__name__ == "ConditionalDetrForObjectDetection":
correct_outlen += 1
# Panoptic Segmentation model returns pred_logits, pred_boxes, pred_masks
if model_class.__name__ == "ConditionalDetrForSegmentation":
correct_outlen += 2
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
self.assertEqual(out_len, correct_outlen)
# decoder attentions
decoder_attentions = outputs.decoder_attentions
self.assertIsInstance(decoder_attentions, (list, tuple))
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
# cross attentions
cross_attentions = outputs.cross_attentions
self.assertIsInstance(cross_attentions, (list, tuple))
self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(cross_attentions[0].shape[-3:]),
[
self.model_tester.num_attention_heads,
decoder_seq_length,
encoder_key_length,
],
)
# Check attention is always last and order is fine
inputs_dict["output_attentions"] = True
inputs_dict["output_hidden_states"] = True
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if hasattr(self.model_tester, "num_hidden_states_types"):
added_hidden_states = self.model_tester.num_hidden_states_types
elif self.is_encoder_decoder:
added_hidden_states = 2
else:
added_hidden_states = 1
self.assertEqual(out_len + added_hidden_states, len(outputs))
self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(self_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
def test_retain_grad_hidden_states_attentions(self):
# removed retain_grad and grad on decoder_hidden_states, as queries don't require grad
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
# no need to test all models as different heads yield the same functionality
model_class = self.all_model_classes[0]
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs = model(**inputs)
output = outputs[0]
encoder_hidden_states = outputs.encoder_hidden_states[0]
encoder_attentions = outputs.encoder_attentions[0]
encoder_hidden_states.retain_grad()
encoder_attentions.retain_grad()
decoder_attentions = outputs.decoder_attentions[0]
decoder_attentions.retain_grad()
cross_attentions = outputs.cross_attentions[0]
cross_attentions.retain_grad()
output.flatten()[0].backward(retain_graph=True)
self.assertIsNotNone(encoder_hidden_states.grad)
self.assertIsNotNone(encoder_attentions.grad)
self.assertIsNotNone(decoder_attentions.grad)
self.assertIsNotNone(cross_attentions.grad)
def test_forward_auxiliary_loss(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.auxiliary_loss = True
# only test for object detection and segmentation model
for model_class in self.all_model_classes[1:]:
model = model_class(config)
model.to(torch_device)
inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
outputs = model(**inputs)
self.assertIsNotNone(outputs.auxiliary_outputs)
self.assertEqual(len(outputs.auxiliary_outputs), self.model_tester.num_hidden_layers - 1)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.forward)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = ["pixel_values", "pixel_mask", "decoder_attention_mask"]
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["pixel_values", "pixel_mask"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_different_timm_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# let's pick a random timm backbone
config.backbone = "tf_mobilenetv3_small_075"
config.backbone_config = None
config.use_timm_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "ConditionalDetrForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
elif model_class.__name__ == "ConditionalDetrForSegmentation":
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
@require_timm
def test_hf_backbone(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
# Load a pretrained HF checkpoint as backbone
config.backbone = "microsoft/resnet-18"
config.backbone_config = None
config.use_timm_backbone = False
config.use_pretrained_backbone = True
config.backbone_kwargs = {"out_indices": [2, 3, 4]}
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict, model_class))
if model_class.__name__ == "ConditionalDetrForObjectDetection":
expected_shape = (
self.model_tester.batch_size,
self.model_tester.num_queries,
self.model_tester.num_labels,
)
self.assertEqual(outputs.logits.shape, expected_shape)
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
elif model_class.__name__ == "ConditionalDetrForSegmentation":
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.conditional_detr.model.backbone.conv_encoder.intermediate_channel_sizes), 3)
else:
# Confirm out_indices was propagated to backbone
self.assertEqual(len(model.backbone.conv_encoder.intermediate_channel_sizes), 3)
self.assertTrue(outputs)
TOLERANCE = 1e-4
# We will verify our results on an image of cute cats
def prepare_img():
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
return image
@require_timm
@require_vision
@slow
| ConditionalDetrModelTest |
python | modin-project__modin | modin/core/dataframe/algebra/default2pandas/resample.py | {
"start": 1011,
"end": 2078
} | class ____:
"""Builder class for resampled aggregation functions."""
@classmethod
def build_resample(cls, func, squeeze_self):
"""
Build function that resamples time-series data and does aggregation.
Parameters
----------
func : callable
Aggregation function to execute under resampled frame.
squeeze_self : bool
Whether or not to squeeze frame before resampling.
Returns
-------
callable
Function that takes pandas DataFrame and applies aggregation
to resampled time-series data.
"""
def fn(df, resample_kwargs, *args, **kwargs):
"""Resample time-series data of the passed frame and apply specified aggregation."""
if squeeze_self:
df = df.squeeze(axis=1)
resampler = df.resample(**resample_kwargs)
if type(func) is property:
return func.fget(resampler)
return func(resampler, *args, **kwargs)
return fn
| Resampler |
python | huggingface__transformers | src/transformers/models/luke/modeling_luke.py | {
"start": 32064,
"end": 32747
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.entity_emb_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.entity_emb_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
| EntityPredictionHeadTransform |
python | getsentry__sentry | src/sentry/incidents/events.py | {
"start": 316,
"end": 496
} | class ____(BaseIncidentEvent):
prev_status: int
status: int
analytics.register(IncidentCreatedEvent)
analytics.register(IncidentStatusUpdatedEvent)
| IncidentStatusUpdatedEvent |
python | astropy__astropy | astropy/utils/masked/tests/test_table.py | {
"start": 4041,
"end": 6569
} | class ____(TestMaskedArrayTable, MaskedQuantityTableSetup):
# Runs tests from TestMaskedArrayTable as well as some extra ones.
def test_table_operations_requiring_masking(self):
t1 = self.t
t2 = QTable({"ma2": Masked([1, 2] * u.m)})
t12 = hstack([t1, t2], join_type="outer")
assert np.all(t12["ma"].mask == [True, False, False])
# 'ma2' is shorter by one so we expect one True from hstack so length matches
assert np.all(t12["ma2"].mask == [False, False, True])
t12 = hstack([t1, t2], join_type="inner")
assert np.all(t12["ma"].mask == [True, False])
assert np.all(t12["ma2"].mask == [False, False])
# Vstack tables with different column names. In this case we get masked
# values
t12 = vstack([t1, t2], join_type="outer")
# ma ma2
# m m
# --- ---
# —— ——
# 5.0 ——
# 0.0 ——
# —— 1.0
# —— 2.0
assert np.all(t12["ma"].mask == [True, False, False, True, True])
assert np.all(t12["ma2"].mask == [True, True, True, False, False])
def test_table_operations_requiring_masking_auto_promote(self):
MaskedQuantity = Masked(u.Quantity)
t1 = QTable({"ma1": [1, 2] * u.m})
t2 = QTable({"ma2": [3, 4, 5] * u.m})
t12 = hstack([t1, t2], join_type="outer")
assert isinstance(t12["ma1"], MaskedQuantity)
assert np.all(t12["ma1"].mask == [False, False, True])
assert np.all(t12["ma1"] == [1, 2, 0] * u.m)
assert not isinstance(t12["ma2"], MaskedQuantity)
assert isinstance(t12["ma2"], u.Quantity)
assert np.all(t12["ma2"] == [3, 4, 5] * u.m)
t12 = hstack([t1, t2], join_type="inner")
assert isinstance(t12["ma1"], u.Quantity)
assert not isinstance(t12["ma1"], MaskedQuantity)
assert isinstance(t12["ma2"], u.Quantity)
assert not isinstance(t12["ma2"], MaskedQuantity)
# Vstack tables with different column names. In this case we get masked
# values
t12 = vstack([t1, t2], join_type="outer")
assert np.all(t12["ma1"].mask == [False, False, True, True, True])
assert np.all(t12["ma2"].mask == [True, True, False, False, False])
t1["a"] = [1, 2]
t2["a"] = [1, 3, 4]
t12 = join(t1, t2, join_type="outer")
assert np.all(t12["ma1"].mask == [False, False, True, True])
assert np.all(t12["ma2"].mask == [False, True, False, False])
| TestMaskedQuantityTable |
python | Pylons__pyramid | docs/quick_tutorial/request_response/tutorial/views.py | {
"start": 122,
"end": 596
} | class ____:
def __init__(self, request):
self.request = request
@view_config(route_name='home')
def home(self):
return HTTPFound(location='/plain')
@view_config(route_name='plain')
def plain(self):
name = self.request.params.get('name', 'No Name Provided')
body = 'URL %s with name: %s' % (self.request.url, name)
return Response(
content_type='text/plain',
body=body
)
| TutorialViews |
python | python-openxml__python-docx | tests/oxml/unitdata/numbering.py | {
"start": 205,
"end": 412
} | class ____(BaseBuilder):
__tag__ = "w:numbering"
__nspfxs__ = ("w",)
__attrs__ = ()
def a_num():
return CT_NumBuilder()
def a_numbering():
return CT_NumberingBuilder()
| CT_NumberingBuilder |
python | catalyst-team__catalyst | examples/detection/criterion.py | {
"start": 4703,
"end": 6942
} | class ____(nn.Module):
def __init__(
self,
num_classes=1,
mask_loss_weight=1.0,
regr_loss_weight=1.0,
size_average=True,
):
"""
Args:
num_classes (int): Number of classes in model.
Default is ``1``.
mask_loss_weight (float): heatmap loss weight coefficient.
Default is ``1.0``.
regr_loss_weight (float): HW regression loss weight coefficient.
Default is ``1.0``.
size_average (bool): loss batch scaling.
Default is ``True``.
"""
super().__init__()
self.num_classes = num_classes
self.mask_loss_weight = mask_loss_weight
self.regr_loss_weight = regr_loss_weight
self.size_average = size_average
def forward(self, predicted_heatmap, predicted_regr, target_heatmap, target_regr):
"""Compute loss for CenterNet.
Args:
predicted_heatmap (torch.Tensor): center heatmap prediction logits,
expected shapes [batch, height, width, num classes].
predicted_regr (torch.Tensor): predicted HW regression,
expected shapes [batch, height, width, 2].
target_heatmap ([type]): ground truth center heatmap,
expected shapes [batch, height, width, num classes],
each value should be in range [0,1].
target_regr (torch.Tensor): ground truth HW regression,
expected shapes [batch, height, width, 2].
Returns:
torch.Tensor with loss value.
"""
pred_mask = torch.sigmoid(predicted_heatmap)
mask_loss = neg_loss(pred_mask, target_heatmap)
mask_loss *= self.mask_loss_weight
regr_loss = (
torch.abs(predicted_regr - target_regr).sum(1)[:, None, :, :]
* target_heatmap
).sum() # .sum(1).sum(1).sum(1)
regr_loss = regr_loss / target_heatmap.sum() # .sum(1).sum(1).sum(1)
regr_loss *= self.regr_loss_weight
loss = mask_loss + regr_loss
if not self.size_average:
loss *= predicted_heatmap.shape[0]
return loss, mask_loss, regr_loss
| CenterNetCriterion |
python | readthedocs__readthedocs.org | readthedocs/subscriptions/products.py | {
"start": 1937,
"end": 5463
} | class ____:
"""A local representation of a Stripe product."""
stripe_id: str
features: dict[str, RTDProductFeature]
# If this product should be available to users to purchase.
listed: bool = False
# If this product is an extra that can be added to a main plan.
# For example, an extra builder.
extra: bool = False
def to_item(self):
"""
Return a tuple with the stripe_id and the product itself.
Useful to use it as a dictionary item.
"""
return self.stripe_id, self
def get_product(stripe_id) -> RTDProduct:
"""Return the product with the given stripe_id."""
return settings.RTD_PRODUCTS.get(stripe_id)
def get_listed_products():
"""Return a list of products that are available to users to purchase."""
return [product for product in settings.RTD_PRODUCTS.values() if product.listed]
def get_products_with_feature(feature_type) -> list[RTDProduct]:
"""Return a list of products that have the given feature."""
return [
product for product in settings.RTD_PRODUCTS.values() if feature_type in product.features
]
def get_feature(obj, feature_type) -> RTDProductFeature:
"""
Get the feature object for the given type of the object.
If the object doesn't have the feature, return the default feature or None.
:param obj: An organization or project instance.
:param type: The type of the feature (readthedocs.subscriptions.constants.TYPE_*).
"""
# Hit the DB only if subscriptions and organizations are enabled.
if not settings.RTD_PRODUCTS or not settings.RTD_ALLOW_ORGANIZATIONS:
return settings.RTD_DEFAULT_FEATURES.get(feature_type)
from readthedocs.organizations.models import Organization
from readthedocs.projects.models import Project
if isinstance(obj, Project):
# Fetch the subscription as well, as it's used just below.
organization = obj.organizations.select_related("stripe_subscription").first()
elif isinstance(obj, Organization):
organization = obj
else:
raise TypeError
# This happens when running tests on .com only.
# In production projects are always associated with an organization.
if not organization:
return settings.RTD_DEFAULT_FEATURES.get(feature_type)
# A subscription can have multiple products, but we only want
# the products from the organization that has the feature we are looking for.
available_stripe_products_id = [
product.stripe_id for product in get_products_with_feature(feature_type)
]
stripe_subscription = organization.stripe_subscription
if stripe_subscription:
subscription_items = stripe_subscription.items.filter(
price__product__id__in=available_stripe_products_id
).select_related("price__product")
final_rtd_feature = None
for subscription_item in subscription_items:
rtd_feature = settings.RTD_PRODUCTS[subscription_item.price.product.id].features[
feature_type
]
if final_rtd_feature is None:
final_rtd_feature = rtd_feature * subscription_item.quantity
else:
final_rtd_feature += rtd_feature * subscription_item.quantity
if final_rtd_feature:
return final_rtd_feature
# Fallback to the default feature if the organization
# doesn't have a subscription with the feature.
return settings.RTD_DEFAULT_FEATURES.get(feature_type)
| RTDProduct |
python | doocs__leetcode | solution/1000-1099/1012.Numbers With Repeated Digits/Solution.py | {
"start": 0,
"end": 604
} | class ____:
def numDupDigitsAtMostN(self, n: int) -> int:
@cache
def dfs(i: int, mask: int, lead: bool, limit: bool) -> int:
if i >= len(s):
return lead ^ 1
up = int(s[i]) if limit else 9
ans = 0
for j in range(up + 1):
if lead and j == 0:
ans += dfs(i + 1, mask, True, False)
elif mask >> j & 1 ^ 1:
ans += dfs(i + 1, mask | 1 << j, False, limit and j == up)
return ans
s = str(n)
return n - dfs(0, 0, True, True)
| Solution |
python | mlflow__mlflow | mlflow/telemetry/events.py | {
"start": 3092,
"end": 3387
} | class ____(Event):
name: str = "create_logged_model"
@classmethod
def parse(cls, arguments: dict[str, Any]) -> dict[str, Any] | None:
if flavor := arguments.get("flavor"):
return {"flavor": flavor.removeprefix("mlflow.")}
return None
| CreateLoggedModelEvent |
python | walkccc__LeetCode | solutions/2211. Count Collisions on a Road/2211.py | {
"start": 0,
"end": 289
} | class ____:
def countCollisions(self, directions: str) -> int:
l = 0
r = len(directions) - 1
while l < len(directions) and directions[l] == 'L':
l += 1
while r >= 0 and directions[r] == 'R':
r -= 1
return sum(c != 'S' for c in directions[l:r + 1])
| Solution |
python | django__django | tests/template_tests/test_library.py | {
"start": 4511,
"end": 5715
} | class ____(SimpleTestCase):
def setUp(self):
self.library = Library()
def test_tag(self):
@self.library.tag
def func(parser, token):
return Node()
self.assertEqual(self.library.tags["func"], func)
def test_tag_parens(self):
@self.library.tag()
def func(parser, token):
return Node()
self.assertEqual(self.library.tags["func"], func)
def test_tag_name_arg(self):
@self.library.tag("name")
def func(parser, token):
return Node()
self.assertEqual(self.library.tags["name"], func)
def test_tag_name_kwarg(self):
@self.library.tag(name="name")
def func(parser, token):
return Node()
self.assertEqual(self.library.tags["name"], func)
def test_tag_call(self):
def func(parser, token):
return Node()
self.library.tag("name", func)
self.assertEqual(self.library.tags["name"], func)
def test_tag_invalid(self):
msg = "Unsupported arguments to Library.tag: (None, '')"
with self.assertRaisesMessage(ValueError, msg):
self.library.tag(None, "")
| TagRegistrationTests |
python | facelessuser__pymdown-extensions | pymdownx/tilde.py | {
"start": 4177,
"end": 5060
} | class ____(util.PatternSequenceProcessor):
"""Smart delete and subscript processor."""
PATTERNS = [
util.PatSeqItem(re.compile(SMART_DEL_SUB, re.DOTALL | re.UNICODE), 'double', 'del,sub'),
util.PatSeqItem(re.compile(SMART_SUB_DEL, re.DOTALL | re.UNICODE), 'double', 'sub,del'),
util.PatSeqItem(re.compile(SMART_DEL_SUB2, re.DOTALL | re.UNICODE), 'double', 'del,sub'),
util.PatSeqItem(re.compile(SMART_DEL_SUB3, re.DOTALL | re.UNICODE), 'double2', 'del,sub'),
util.PatSeqItem(re.compile(SMART_DEL, re.DOTALL | re.UNICODE), 'single', 'del'),
util.PatSeqItem(re.compile(SMART_SUB_DEL2, re.DOTALL | re.UNICODE), 'double2', 'sub,del'),
util.PatSeqItem(re.compile(SUB2, re.DOTALL | re.UNICODE), 'single', 'sub', True),
util.PatSeqItem(re.compile(SUB, re.DOTALL | re.UNICODE), 'single', 'sub')
]
| TildeSmartProcessor |
python | doocs__leetcode | solution/1700-1799/1743.Restore the Array From Adjacent Pairs/Solution2.py | {
"start": 0,
"end": 457
} | class ____:
def restoreArray(self, adjacentPairs: List[List[int]]) -> List[int]:
def dfs(i, fa):
ans.append(i)
for j in g[i]:
if j != fa:
dfs(j, i)
g = defaultdict(list)
for a, b in adjacentPairs:
g[a].append(b)
g[b].append(a)
i = next(i for i, v in g.items() if len(v) == 1)
ans = []
dfs(i, 1e6)
return ans
| Solution |
python | google__jax | jax/_src/typing.py | {
"start": 1554,
"end": 1629
} | class ____(Protocol):
@property
def size(self, /) -> int: ...
| SupportsSize |
python | jazzband__django-oauth-toolkit | tests/test_application_views.py | {
"start": 7262,
"end": 9539
} | class ____(
TestApplicationRegistrationViewRedirectURIWithWildcard
):
def _test_valid(self, uris):
self.client.login(username="foo_user", password="123456")
form_data = {
"name": "Foo app",
"client_id": "client_id",
"client_secret": "client_secret",
"client_type": Application.CLIENT_CONFIDENTIAL,
"allowed_origins": uris,
"redirect_uris": "https://example.com",
"post_logout_redirect_uris": "http://example.com",
"authorization_grant_type": Application.GRANT_AUTHORIZATION_CODE,
"algorithm": "",
}
response = self.client.post(reverse("oauth2_provider:register"), form_data)
self.assertEqual(response.status_code, 302)
app = get_application_model().objects.get(name="Foo app")
self.assertEqual(app.user.username, "foo_user")
app = Application.objects.get()
self.assertEqual(app.name, form_data["name"])
self.assertEqual(app.client_id, form_data["client_id"])
self.assertEqual(app.redirect_uris, form_data["redirect_uris"])
self.assertEqual(app.post_logout_redirect_uris, form_data["post_logout_redirect_uris"])
self.assertEqual(app.client_type, form_data["client_type"])
self.assertEqual(app.authorization_grant_type, form_data["authorization_grant_type"])
self.assertEqual(app.algorithm, form_data["algorithm"])
def _test_invalid(self, uri, error_message):
self.client.login(username="foo_user", password="123456")
form_data = {
"name": "Foo app",
"client_id": "client_id",
"client_secret": "client_secret",
"client_type": Application.CLIENT_CONFIDENTIAL,
"allowed_origins": uri,
"redirect_uris": "http://example.com",
"post_logout_redirect_uris": "http://example.com",
"authorization_grant_type": Application.GRANT_AUTHORIZATION_CODE,
"algorithm": "",
}
response = self.client.post(reverse("oauth2_provider:register"), form_data)
self.assertEqual(response.status_code, 200)
self.assertContains(response, error_message)
| TestApplicationRegistrationViewAllowedOriginWithWildcard |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 14331,
"end": 14404
} | class ____(PydanticValueError):
msg_template = 'Invalid JSON'
| JsonError |
python | pytorch__pytorch | test/cpp_extensions/open_registration_extension/torch_openreg/tests/test_utils.py | {
"start": 121,
"end": 530
} | class ____(TestCase):
def test_open_device_dlpack(self):
x_in = torch.randn(2, 3).to("openreg")
capsule = torch.utils.dlpack.to_dlpack(x_in)
x_out = torch.from_dlpack(capsule)
self.assertTrue(x_out.device == x_in.device)
x_in = x_in.to("cpu")
x_out = x_out.to("cpu")
self.assertEqual(x_in, x_out)
if __name__ == "__main__":
run_tests()
| TestDLPack |
python | PrefectHQ__prefect | src/prefect/server/services/telemetry.py | {
"start": 707,
"end": 5246
} | class ____(RunInEphemeralServers, RunInWebservers, LoopService):
"""
Sends anonymous data to Prefect to help us improve
It can be toggled off with the PREFECT_SERVER_ANALYTICS_ENABLED setting.
"""
loop_seconds: float = 600
@classmethod
def service_settings(cls) -> ServicesBaseSetting:
raise NotImplementedError("Telemetry service does not have settings")
@classmethod
def environment_variable_name(cls) -> str:
return "PREFECT_SERVER_ANALYTICS_ENABLED"
@classmethod
def enabled(cls) -> bool:
return get_current_settings().server.analytics_enabled
def __init__(self, loop_seconds: Optional[int] = None, **kwargs: Any):
super().__init__(loop_seconds=loop_seconds, **kwargs)
self.telemetry_environment: str = os.environ.get(
"PREFECT_API_TELEMETRY_ENVIRONMENT", "production"
)
@db_injector
async def _fetch_or_set_telemetry_session(self, db: PrefectDBInterface):
"""
This method looks for a telemetry session in the configuration table. If there
isn't one, it sets one. It then sets `self.session_id` and
`self.session_start_timestamp`.
Telemetry sessions last until the database is reset.
"""
async with db.session_context(begin_transaction=True) as session:
telemetry_session = await configuration.read_configuration(
session, "TELEMETRY_SESSION"
)
if telemetry_session is None:
self.logger.debug("No telemetry session found, setting")
session_id = str(uuid4())
session_start_timestamp = now("UTC").isoformat()
telemetry_session = Configuration(
key="TELEMETRY_SESSION",
value={
"session_id": session_id,
"session_start_timestamp": session_start_timestamp,
},
)
await configuration.write_configuration(session, telemetry_session)
self.session_id = session_id
self.session_start_timestamp = session_start_timestamp
else:
self.logger.debug("Session information retrieved from database")
self.session_id: str = telemetry_session.value["session_id"]
self.session_start_timestamp: str = telemetry_session.value[
"session_start_timestamp"
]
self.logger.debug(
f"Telemetry Session: {self.session_id}, {self.session_start_timestamp}"
)
return (self.session_start_timestamp, self.session_id)
async def run_once(self) -> None:
"""
Sends a heartbeat to the sens-o-matic
"""
from prefect.client.constants import SERVER_API_VERSION
if not hasattr(self, "session_id"):
await self._fetch_or_set_telemetry_session()
heartbeat = {
"source": "prefect_server",
"type": "heartbeat",
"payload": {
"platform": platform.system(),
"architecture": platform.machine(),
"python_version": platform.python_version(),
"python_implementation": platform.python_implementation(),
"environment": self.telemetry_environment,
"ephemeral_server": bool(os.getenv("PREFECT__SERVER_EPHEMERAL", False)),
"api_version": SERVER_API_VERSION,
"prefect_version": prefect.__version__,
"session_id": self.session_id,
"session_start_timestamp": self.session_start_timestamp,
},
}
try:
async with httpx.AsyncClient() as client:
result = await client.post(
"https://sens-o-matic.prefect.io/",
json=heartbeat,
headers={"x-prefect-event": "prefect_server"},
)
result.raise_for_status()
except Exception as exc:
self.logger.error(
f"Failed to send telemetry: {exc}\nShutting down telemetry service...",
# The traceback is only needed if doing deeper debugging, otherwise
# this looks like an impactful server error
exc_info=PREFECT_DEBUG_MODE.value(),
)
await self.stop(block=False)
if __name__ == "__main__":
asyncio.run(Telemetry(handle_signals=True).start())
| Telemetry |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/execution_api/versions/head/test_dag_runs.py | {
"start": 10886,
"end": 16441
} | class ____:
def setup_method(self):
clear_db_runs()
def teardown_method(self):
clear_db_runs()
def test_get_previous_dag_run_basic(self, client, session, dag_maker):
"""Test getting the previous DAG run without state filtering."""
dag_id = "test_get_previous_basic"
with dag_maker(dag_id=dag_id, session=session, serialized=True):
EmptyOperator(task_id="test_task")
# Create multiple DAG runs
dag_maker.create_dagrun(
run_id="run1", logical_date=timezone.datetime(2025, 1, 1), state=DagRunState.SUCCESS
)
dag_maker.create_dagrun(
run_id="run2", logical_date=timezone.datetime(2025, 1, 5), state=DagRunState.FAILED
)
dag_maker.create_dagrun(
run_id="run3", logical_date=timezone.datetime(2025, 1, 10), state=DagRunState.SUCCESS
)
session.commit()
# Query for previous DAG run before 2025-01-10
response = client.get(
f"/execution/dag-runs/{dag_id}/previous",
params={
"logical_date": timezone.datetime(2025, 1, 10).isoformat(),
},
)
assert response.status_code == 200
result = response.json()
assert result["dag_id"] == dag_id
assert result["run_id"] == "run2" # Most recent before 2025-01-10
assert result["state"] == "failed"
def test_get_previous_dag_run_with_state_filter(self, client, session, dag_maker):
"""Test getting the previous DAG run with state filtering."""
dag_id = "test_get_previous_with_state"
with dag_maker(dag_id=dag_id, session=session, serialized=True):
EmptyOperator(task_id="test_task")
# Create multiple DAG runs with different states
dag_maker.create_dagrun(
run_id="run1", logical_date=timezone.datetime(2025, 1, 1), state=DagRunState.SUCCESS
)
dag_maker.create_dagrun(
run_id="run2", logical_date=timezone.datetime(2025, 1, 5), state=DagRunState.FAILED
)
dag_maker.create_dagrun(
run_id="run3", logical_date=timezone.datetime(2025, 1, 8), state=DagRunState.SUCCESS
)
session.commit()
# Query for previous successful DAG run before 2025-01-10
response = client.get(
f"/execution/dag-runs/{dag_id}/previous",
params={"logical_date": timezone.datetime(2025, 1, 10).isoformat(), "state": "success"},
)
assert response.status_code == 200
result = response.json()
assert result["dag_id"] == dag_id
assert result["run_id"] == "run3" # Most recent successful run before 2025-01-10
assert result["state"] == "success"
def test_get_previous_dag_run_no_previous_found(self, client, session, dag_maker):
"""Test getting previous DAG run when none exists returns null."""
dag_id = "test_get_previous_none"
with dag_maker(dag_id=dag_id, session=session, serialized=True):
EmptyOperator(task_id="test_task")
# Create only one DAG run - no previous should exist
dag_maker.create_dagrun(
run_id="run1", logical_date=timezone.datetime(2025, 1, 1), state=DagRunState.SUCCESS
)
response = client.get(f"/execution/dag-runs/{dag_id}/previous?logical_date=2025-01-01T00:00:00Z")
assert response.status_code == 200
assert response.json() is None # Should return null
def test_get_previous_dag_run_no_matching_state(self, client, session, dag_maker):
"""Test getting previous DAG run with state filter that matches nothing returns null."""
dag_id = "test_get_previous_no_match"
with dag_maker(dag_id=dag_id, session=session, serialized=True):
EmptyOperator(task_id="test_task")
# Create DAG runs with different states
dag_maker.create_dagrun(
run_id="run1", logical_date=timezone.datetime(2025, 1, 1), state=DagRunState.FAILED
)
dag_maker.create_dagrun(
run_id="run2", logical_date=timezone.datetime(2025, 1, 2), state=DagRunState.FAILED
)
# Look for previous success but only failed runs exist
response = client.get(
f"/execution/dag-runs/{dag_id}/previous?logical_date=2025-01-03T00:00:00Z&state=success"
)
assert response.status_code == 200
assert response.json() is None
def test_get_previous_dag_run_dag_not_found(self, client, session):
"""Test getting previous DAG run for non-existent DAG returns 404."""
response = client.get(
"/execution/dag-runs/nonexistent_dag/previous?logical_date=2025-01-01T00:00:00Z"
)
assert response.status_code == 200
assert response.json() is None
def test_get_previous_dag_run_invalid_state_parameter(self, client, session, dag_maker):
"""Test that invalid state parameter returns 422 validation error."""
dag_id = "test_get_previous_invalid_state"
with dag_maker(dag_id=dag_id, session=session, serialized=True):
EmptyOperator(task_id="test_task")
dag_maker.create_dagrun(
run_id="run1", logical_date=timezone.datetime(2025, 1, 1), state=DagRunState.SUCCESS
)
session.commit()
response = client.get(
f"/execution/dag-runs/{dag_id}/previous?logical_date=2025-01-02T00:00:00Z&state=invalid_state"
)
assert response.status_code == 422
| TestGetPreviousDagRun |
python | getsentry__sentry | tests/sentry/monitors/endpoints/test_organization_monitor_environment_details.py | {
"start": 316,
"end": 455
} | class ____(BaseDeleteMonitorTest):
endpoint = "sentry-api-0-organization-monitor-environment-details"
__test__ = True
| DeleteMonitorTest |
python | wandb__wandb | wandb/vendor/watchdog_0_9_0/wandb_watchdog/observers/inotify.py | {
"start": 8049,
"end": 8546
} | class ____(BaseObserver):
"""
Observer thread that schedules watching directories and dispatches
calls to event handlers.
"""
def __init__(self, timeout=DEFAULT_OBSERVER_TIMEOUT, generate_full_events=False):
if (generate_full_events):
BaseObserver.__init__(self, emitter_class=InotifyFullEmitter, timeout=timeout)
else:
BaseObserver.__init__(self, emitter_class=InotifyEmitter,
timeout=timeout)
| InotifyObserver |
python | tensorflow__tensorflow | tensorflow/python/ops/nn_fused_batchnorm_test.py | {
"start": 1452,
"end": 32446
} | class ____(test.TestCase):
def _batch_norm(self, x, mean, var, offset, scale, epsilon):
# We compute the batch norm manually in this function because
# nn_impl.batch_normalization does not support float16 yet.
# TODO(reedwm): Add float16 support to nn_impl.batch_normalization.
inv = math_ops.rsqrt(var + epsilon) * scale
y = math_ops.cast(x, scale.dtype) * inv + (offset - mean * inv)
return math_ops.cast(y, x.dtype)
def _inference_ref(self, x, scale, offset, mean, var, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('data_format must be NCHW or NHWC for 4D tensors or'
'NCDHW or NDHWC for 5D tensors, got %s.' % data_format)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
elif data_format == 'NCDHW':
x = array_ops.transpose(x, [0, 2, 3, 4, 1])
y = self._batch_norm(x, mean, var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
elif data_format == 'NCDHW':
y = array_ops.transpose(y, [0, 4, 1, 2, 3])
return self.evaluate(y)
def _test_inference(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
exponential_avg_factor=1.0,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
mean = constant_op.constant(mean_val, name='mean')
var = constant_op.constant(var_val, name='variance')
epsilon = 0.001
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=var,
epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=False)
y_val = self.evaluate(y)
y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
data_format)
# An atol value of 1e-3 is too small for float16's, because some adjacent
# float16 values that y_val can take are greater than 1e-3 apart, e.g.
# 2.16602 and 2.16797.
atol = 2e-3 if x_dtype in [np.float16, dtypes.bfloat16.as_numpy_dtype
] else 1e-3
self.assertAllClose(y_ref, y_val, atol=atol)
def _running_mean(self, old_mean, new_val, factor):
if factor == 1.0:
return new_val
else:
return (1.0 - factor) * old_mean + factor * new_val
def _training_ref(self, x, scale, offset, old_mean, old_var,
exponential_avg_factor, epsilon, data_format):
if data_format not in ['NHWC', 'NCHW', 'NDHWC', 'NCDHW']:
raise ValueError('data_format must be NCHW or NHWC for 4D tensors or'
'NCDHW or NDHWC for 5D tensors, got %s.' % data_format)
use_4d_tensor = (x.shape.ndims == 4)
if data_format == 'NCHW':
x = array_ops.transpose(x, [0, 2, 3, 1])
elif data_format == 'NCDHW':
x = array_ops.transpose(x, [0, 2, 3, 4, 1])
mean_axis = [0, 1, 2] if use_4d_tensor else [0, 1, 2, 3]
batch_mean, batch_var = nn_impl.moments(
math_ops.cast(x, scale.dtype), mean_axis, keep_dims=False)
y = self._batch_norm(x, batch_mean, batch_var, offset, scale, epsilon)
if data_format == 'NCHW':
y = array_ops.transpose(y, [0, 3, 1, 2])
elif data_format == 'NCDHW':
y = array_ops.transpose(y, [0, 4, 1, 2, 3])
# This is for Bessel's correction. tf.nn.moments uses n, instead of n-1, as
# the denominator in the formula to calculate variance, while
# tf.compat.v1.nn.fused_batch_norm has Bessel's correction built in.
sample_size = math_ops.cast(
array_ops.size(x) / array_ops.size(scale), scale.dtype)
batch_var_corrected = batch_var * sample_size / (
math_ops.maximum(sample_size - 1.0, 1.0))
mean = self._running_mean(old_mean, batch_mean, exponential_avg_factor)
var = self._running_mean(old_var, batch_var_corrected,
exponential_avg_factor)
return self.evaluate(y), self.evaluate(mean), self.evaluate(var)
def _test_training(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
exponential_avg_factor=1.0,
data_format='NHWC'):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
if exponential_avg_factor == 1.0:
old_mean_val = None
old_var_val = None
else:
old_mean_val = np.random.random_sample(scale_shape).astype(scale_dtype)
old_var_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
epsilon = 0.001
y, mean, var = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=old_mean_val,
variance=old_var_val,
epsilon=epsilon,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=True)
y_val, mean_val, var_val = self.evaluate([y, mean, var])
y_ref, mean_ref, var_ref = self._training_ref(x, scale, offset,
old_mean_val, old_var_val,
exponential_avg_factor,
epsilon, data_format)
y_atol = 1e-3
if x_dtype == np.float16:
y_atol = 2e-3
elif x_dtype == dtypes.bfloat16.as_numpy_dtype:
y_atol = 1e-2
self.assertAllClose(y_ref, y_val, atol=y_atol)
self.assertAllClose(mean_ref, mean_val, atol=1e-3)
self.assertAllClose(var_ref, var_val, atol=1e-3)
def _compute_gradient_error_float16(self, x, x32, x_shape, y, y32, y_shape,
x_dtype):
"""Computes the gradient error for float16 inputs and/or outputs.
This returns the same value as gradient_checker.compute_gradient_error. The
difference is that gradient_checker.compute_gradient_error does not
numerically compute the gradients in a numerically stable way for float16
tensors. To fix this, this function requires float32 versions of x and y to
numerically compute the gradients, to compare with the float16 symbolically
computed gradients.
Args:
x: The input tensor.
x32: A float32 version of x.
x_shape: The shape of x.
y: The output tensor.
y32: A float32 version of y. Must be calculated based on x32, not x.
y_shape: The shape of y.
x_dtype: The type of x, float16 or bfloat16.
Returns:
The maximum error in between the two Jacobians, as in
gradient_checker.compute_gradient_error.
"""
x_init_val = np.random.random_sample(x_shape).astype(x_dtype)
x32_init_val = x_init_val.astype(np.float32)
# TODO(reedwm): Do not perform the unnecessary computations in
# compute_gradient, since they double the computation time of this function.
theoretical_grad, _ = gradient_checker.compute_gradient(
x, x_shape, y, y_shape, delta=1e-3, x_init_value=x_init_val)
_, numerical_grad = gradient_checker.compute_gradient(
x32, x_shape, y32, y_shape, delta=1e-3, x_init_value=x32_init_val)
# If grad is empty, no error.
if theoretical_grad.size == 0 and numerical_grad.size == 0:
return 0
return np.fabs(theoretical_grad - numerical_grad).max()
def _test_gradient(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
exponential_avg_factor=1.0,
data_format='NHWC',
is_training=True):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu):
x = constant_op.constant(x_val, name='x')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
if is_training and exponential_avg_factor == 1.0:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=is_training)
if x_dtype not in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
err_x = gradient_checker.compute_gradient_error(x, x_shape, y, x_shape)
err_scale = gradient_checker.compute_gradient_error(
scale, scale_shape, y, x_shape)
err_offset = gradient_checker.compute_gradient_error(
offset, scale_shape, y, x_shape)
else:
x32 = constant_op.constant(x_val, name='x32', dtype=dtypes.float32)
y32, _, _ = nn_impl.fused_batch_norm(
x32,
scale,
offset,
mean=pop_mean,
variance=pop_var,
data_format=data_format,
exponential_avg_factor=exponential_avg_factor,
is_training=is_training)
err_x = self._compute_gradient_error_float16(x, x32, x_shape, y, y32,
x_shape, x_dtype)
err_scale = self._compute_gradient_error_float16(
scale, scale, scale_shape, y, y32, x_shape, x_dtype)
err_offset = self._compute_gradient_error_float16(
offset, offset, scale_shape, y, y32, x_shape, x_dtype)
x_err_tolerance = 1e-3
if x_dtype == np.float16:
x_err_tolerance = 2e-3
elif dtypes.bfloat16.as_numpy_dtype:
x_err_tolerance = 2e-2
scale_err_tolerance = 1e-3
self.assertLess(err_x, x_err_tolerance)
self.assertLess(err_scale, scale_err_tolerance)
self.assertLess(err_offset, scale_err_tolerance)
def _test_grad_grad(self,
x_shape,
x_dtype,
scale_shape,
scale_dtype,
use_gpu=True,
exponential_avg_factor=1.0,
data_format='NHWC',
is_training=True,
err_tolerance=1e-3):
np.random.seed(1)
x_val = np.random.random_sample(x_shape).astype(x_dtype)
grad_y_val = np.random.random_sample(x_shape).astype(x_dtype)
scale_val = np.random.random_sample(scale_shape).astype(scale_dtype)
offset_val = np.random.random_sample(scale_shape).astype(scale_dtype)
with self.cached_session(use_gpu=use_gpu) as sess:
x = constant_op.constant(x_val, name='x')
grad_y = constant_op.constant(grad_y_val, name='grad_y')
scale = constant_op.constant(scale_val, name='scale')
offset = constant_op.constant(offset_val, name='offset')
if is_training and exponential_avg_factor == 1.0:
pop_mean = None
pop_var = None
else:
pop_mean = np.random.random_sample(scale_shape).astype(scale_dtype)
pop_var = np.random.random_sample(scale_shape).astype(scale_dtype)
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=pop_mean,
variance=pop_var,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=is_training)
grad_x, grad_scale, grad_offset = gradients_impl.gradients(
y, [x, scale, offset], grad_y)
if is_training:
epsilon = y.op.get_attr('epsilon')
data_format = y.op.get_attr('data_format')
grad_vals = self.evaluate([grad_x, grad_scale, grad_offset])
grad_internal = nn_fused_batch_norm_grad._BatchNormGrad(
grad_y, x, scale, pop_mean, pop_var, epsilon, data_format)
grad_internal_vals = self.evaluate(list(grad_internal))
for grad_val, grad_internal_val in zip(grad_vals, grad_internal_vals):
self.assertAllClose(grad_val, grad_internal_val, atol=err_tolerance)
if x_dtype not in [np.float16, dtypes.bfloat16.as_numpy_dtype]:
err_grad_grad_y_1 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_x, x_shape)
err_grad_grad_y_2 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_scale, scale_shape)
err_grad_grad_y_3 = gradient_checker.compute_gradient_error(
grad_y, x_shape, grad_offset, scale_shape)
# In freeze mode, grad_x is not a function of x.
if is_training:
err_grad_x_1 = gradient_checker.compute_gradient_error(
x, x_shape, grad_x, x_shape)
err_grad_x_2 = gradient_checker.compute_gradient_error(
x, x_shape, grad_scale, scale_shape)
err_grad_scale = gradient_checker.compute_gradient_error(
scale, scale_shape, grad_x, x_shape)
else:
x32 = constant_op.constant(x_val, dtype=dtypes.float32, name='x32')
grad_y32 = constant_op.constant(
grad_y_val, dtype=dtypes.float32, name='grad_y32')
y32, _, _ = nn_impl.fused_batch_norm(
x32,
scale,
offset,
mean=pop_mean,
variance=pop_var,
exponential_avg_factor=exponential_avg_factor,
data_format=data_format,
is_training=is_training)
grad_x32, grad_scale32, grad_offset32 = gradients_impl.gradients(
y32, [x32, scale, offset], grad_y32)
err_grad_grad_y_1 = self._compute_gradient_error_float16(
grad_y, grad_y32, x_shape, grad_x, grad_x32, x_shape, x_dtype)
err_grad_grad_y_2 = self._compute_gradient_error_float16(
grad_y, grad_y32, x_shape, grad_scale, grad_scale32, scale_shape,
x_dtype)
err_grad_grad_y_3 = self._compute_gradient_error_float16(
grad_y, grad_y32, x_shape, grad_offset, grad_offset32, scale_shape,
x_dtype)
# In freeze mode, grad_x is not a function of x.
if is_training:
err_grad_x_1 = self._compute_gradient_error_float16(
x, x32, x_shape, grad_x, grad_x32, x_shape, x_dtype)
err_grad_x_2 = self._compute_gradient_error_float16(
x, x32, x_shape, grad_scale, grad_scale32, scale_shape, x_dtype)
err_grad_scale = self._compute_gradient_error_float16(
scale, scale, scale_shape, grad_x, grad_x32, x_shape, x_dtype)
self.assertLess(err_grad_grad_y_1, err_tolerance)
self.assertLess(err_grad_grad_y_2, err_tolerance)
self.assertLess(err_grad_grad_y_3, err_tolerance)
if is_training:
self.assertLess(err_grad_x_1, err_tolerance)
self.assertLess(err_grad_x_2, err_tolerance)
self.assertLess(err_grad_scale, err_tolerance)
def _runtests(self, x_shape, is_training, gradient_test=False,
cpu_only=False):
if len(x_shape) == 4:
data_format_list = ['NHWC', 'NCHW']
else:
data_format_list = ['NCDHW', 'NDHWC']
use_gpu_vals = [False]
if test.is_gpu_available() and not cpu_only:
use_gpu_vals += [True]
factors = [1.0, 0.6]
for dtype in [np.float16, np.float32, dtypes.bfloat16.as_numpy_dtype]:
for use_gpu in use_gpu_vals:
for data_format in data_format_list:
if data_format == 'NHWC' or data_format == 'NDHWC':
scale_shape = x_shape[-1:]
else:
scale_shape = x_shape[1:2]
for exponential_avg_factor in factors:
if gradient_test:
self._test_gradient(
x_shape,
dtype,
scale_shape,
np.float32,
use_gpu=use_gpu,
data_format=data_format,
is_training=is_training,
exponential_avg_factor=exponential_avg_factor)
else:
if is_training:
self._test_training(
x_shape,
dtype,
scale_shape,
np.float32,
use_gpu=use_gpu,
data_format=data_format,
exponential_avg_factor=exponential_avg_factor)
else:
self._test_inference(
x_shape,
dtype,
scale_shape,
np.float32,
use_gpu=use_gpu,
data_format=data_format,
exponential_avg_factor=exponential_avg_factor)
def testInferenceShape1(self):
x_shape = [1, 1, 6, 1]
self._runtests(x_shape, False)
def testInferenceShape2(self):
x_shape = [1, 1, 6, 2]
self._runtests(x_shape, False)
def testInferenceShape3(self):
x_shape = [1, 2, 1, 6]
self._runtests(x_shape, False)
def testInferenceShape4(self):
x_shape = [27, 131, 127, 6]
self._runtests(x_shape, False)
def testInferenceShape5(self):
x_shape = [0, 131, 127, 6]
self._runtests(x_shape, False)
def testInferenceShape6(self):
x_shape = [1, 1, 1, 1]
# GPU kernel doesn't properly handle case where non-channel dimensions are 1
self._runtests(x_shape, False, cpu_only=True)
def testInferenceShape7(self):
x_shape = [1, 2, 6, 1, 3]
self._runtests(x_shape, False)
def testTrainingShape1(self):
x_shape = [1, 1, 6, 1]
self._runtests(x_shape, True)
def testTrainingShape2(self):
x_shape = [1, 1, 6, 2]
self._runtests(x_shape, True)
def testTrainingShape3(self):
x_shape = [1, 2, 1, 6]
self._runtests(x_shape, True)
def testTrainingShape4(self):
x_shape = [27, 131, 127, 6]
self._runtests(x_shape, True)
@test_util.disable_xla('b/141236973: Empty inputs wrong on CPU.')
def testTrainingShape5(self):
x_shape = [0, 131, 127, 6]
self._runtests(x_shape, True)
@test_util.run_deprecated_v1
def testTrainingShape6(self):
x_shape = [1, 1, 1, 1]
# GPU kernel doesn't properly handle case where non-channel dimensions are 1
self._runtests(x_shape, True, cpu_only=True)
def testTrainingShape7(self):
x_shape = [1, 2, 6, 1, 3]
self._runtests(x_shape, True)
@test_util.run_deprecated_v1
def testBatchNormGradInferenceShape1(self):
x_shape = [1, 1, 6, 1]
self._runtests(x_shape, is_training=False, gradient_test=True)
@test_util.run_deprecated_v1
def testBatchNormGradInferenceShape2(self):
x_shape = [1, 1, 6, 2]
self._runtests(x_shape, is_training=False, gradient_test=True)
@test_util.run_deprecated_v1
def testBatchNormGradInferenceShape3(self):
x_shape = [1, 2, 1, 6]
self._runtests(x_shape, is_training=False, gradient_test=True)
@test_util.run_deprecated_v1
def testBatchNormGradInferenceShape4(self):
x_shape = [5, 7, 11, 4]
self._runtests(x_shape, is_training=False, gradient_test=True)
@test_util.run_deprecated_v1
@test_util.disable_xla('This test never passed for XLA')
def testBatchNormGradInferenceShape5(self):
x_shape = [0, 7, 11, 4]
self._runtests(x_shape, is_training=False, gradient_test=True)
@test_util.run_deprecated_v1
def testBatchNormGradInferenceShape6(self):
x_shape = [1, 1, 1, 1]
# GPU kernel doesn't properly handle case where non-channel dimensions are 1
self._runtests(x_shape, is_training=False, gradient_test=True,
cpu_only=True)
@test_util.run_deprecated_v1
def testBatchNormGradInferenceShape7(self):
x_shape = [1, 2, 6, 1, 3]
self._runtests(x_shape, is_training=False, gradient_test=True)
@test_util.run_deprecated_v1
def testBatchNormGradTrainingShape1(self):
x_shape = [1, 1, 6, 1]
self._runtests(x_shape, is_training=True, gradient_test=True)
@test_util.run_deprecated_v1
def testBatchNormGradTrainingShape2(self):
x_shape = [1, 1, 6, 2]
self._runtests(x_shape, is_training=True, gradient_test=True)
@test_util.run_deprecated_v1
def testBatchNormGradTrainingShape3(self):
x_shape = [1, 2, 1, 6]
self._runtests(x_shape, is_training=True, gradient_test=True)
@test_util.run_deprecated_v1
def testBatchNormGradTrainingShape4(self):
x_shape = [5, 7, 11, 4]
self._runtests(x_shape, is_training=True, gradient_test=True)
@test_util.run_deprecated_v1
@test_util.disable_xla('This test never passed for XLA')
def testBatchNormGradTrainingShape5(self):
x_shape = [0, 7, 11, 4]
self._runtests(x_shape, is_training=True, gradient_test=True)
@test_util.run_deprecated_v1
def testBatchNormGradTrainingShape6(self):
x_shape = [1, 1, 1, 1]
# GPU kernel doesn't properly handle case where non-channel dimensions are 1
self._runtests(x_shape, is_training=True, gradient_test=True, cpu_only=True)
@test_util.run_deprecated_v1
def testBatchNormGradTrainingShape7(self):
x_shape = [1, 2, 6, 1, 3]
self._runtests(x_shape, is_training=True, gradient_test=True)
def _testBatchNormGradGrad(self, config):
shape = config['shape']
err_tolerance = config['err_tolerance']
dtype = config['dtype']
rank = len(shape)
if rank == 4:
data_format_nhwc, features_nhwc = 'NHWC', shape[3]
data_format_nchw, features_nchw = 'NCHW', shape[1]
else:
data_format_nhwc, features_nhwc = 'NDHWC', shape[4]
data_format_nchw, features_nchw = 'NCDHW', shape[1]
for is_training in [True, False]:
if test.is_gpu_available():
self._test_grad_grad(
shape,
dtype, [features_nhwc],
np.float32,
use_gpu=True,
data_format=data_format_nhwc,
is_training=is_training,
err_tolerance=err_tolerance)
self._test_grad_grad(
shape,
dtype, [features_nchw],
np.float32,
use_gpu=True,
data_format=data_format_nchw,
is_training=is_training,
err_tolerance=err_tolerance)
self._test_grad_grad(
shape,
dtype, [features_nhwc],
np.float32,
use_gpu=False,
data_format=data_format_nhwc,
is_training=is_training,
err_tolerance=err_tolerance)
self._test_grad_grad(
shape,
dtype, [features_nchw],
np.float32,
use_gpu=False,
data_format=data_format_nchw,
is_training=is_training,
err_tolerance=err_tolerance)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig1(self):
config = {
'shape': [2, 3, 4, 5],
'err_tolerance': 1e-2,
'dtype': np.float32,
}
self._testBatchNormGradGrad(config)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig2(self):
config = {
'shape': [2, 3, 2, 2],
'err_tolerance': 1e-3,
'dtype': np.float32,
}
self._testBatchNormGradGrad(config)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig3(self):
config = {
'shape': [2, 3, 4, 5],
'err_tolerance': 2e-2,
'dtype': np.float16,
}
self._testBatchNormGradGrad(config)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig4(self):
config = {
'shape': [2, 3, 2, 2],
'err_tolerance': 2e-3,
'dtype': np.float16,
}
self._testBatchNormGradGrad(config)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig5(self):
config = {
'shape': [2, 3, 2, 2, 2],
'err_tolerance': 2e-3,
'dtype': np.float32,
}
self._testBatchNormGradGrad(config)
@test_util.run_deprecated_v1
def testBatchNormGradGradConfig6(self):
config = {
'shape': [2, 3, 2, 2, 2],
'err_tolerance': 3e-3,
'dtype': np.float16,
}
self._testBatchNormGradGrad(config)
def test5dBatchNormFollowedByRelu(self):
# The remapper grappler pass previously did not properly handle a 5D
# inference FusedBatchNorm followed by Relu. This asserts that this case is
# correctly handled.
np.random.seed(1)
x = np.random.random_sample((2, 3, 2, 2, 3)).astype(np.float32)
scale = np.random.random_sample((3,)).astype(np.float32)
offset = np.random.random_sample((3,)).astype(np.float32)
mean = np.random.random_sample((3,)).astype(np.float32)
var = np.random.random_sample((3,)).astype(np.float32)
epsilon = 0.001
y, _, _ = nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=var,
epsilon=epsilon,
data_format='NCDHW',
is_training=False)
y = nn_ops.relu(y)
y_val = self.evaluate(y)
y_ref = self._inference_ref(x, scale, offset, mean, var, epsilon,
'NCDHW')
y_ref = np.maximum(y_ref, 0.)
self.assertAllClose(y_ref, y_val, atol=1e-3)
def testEagerShapeErrors(self):
with context.eager_mode():
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((3,))
offset = array_ops.ones((2,))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
'scale must have the same number of elements'):
nn_impl.fused_batch_norm(x, scale, offset)
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((2,))
offset = array_ops.ones((3,))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
'offset must have the same number of elements'):
nn_impl.fused_batch_norm(x, scale, offset)
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((2,))
offset = array_ops.ones((2,))
mean = array_ops.ones((0,))
variance = array_ops.ones((2,))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
'When is_training=false, mean must have the same number of elements'):
nn_impl.fused_batch_norm(
x, scale, offset, mean=mean, variance=variance, is_training=False)
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((2,))
offset = array_ops.ones((2,))
mean = array_ops.ones((2,))
variance = array_ops.ones((0,))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
'When is_training=false, variance must have the same number of '
'elements'):
nn_impl.fused_batch_norm(
x, scale, offset, mean=mean, variance=variance, is_training=False)
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((2,))
offset = array_ops.ones((2,))
mean = array_ops.ones((0,))
variance = array_ops.ones((2,))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
'When exponential_avg_factor != 1, mean must have the same number of '
'elements'):
nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=variance,
exponential_avg_factor=0.5)
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((2,))
offset = array_ops.ones((2,))
mean = array_ops.ones((2,))
variance = array_ops.ones((0,))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
'When exponential_avg_factor != 1, variance must have the same '
'number of elements'):
nn_impl.fused_batch_norm(
x,
scale,
offset,
mean=mean,
variance=variance,
exponential_avg_factor=0.5)
def testEagerShapeGradErrors(self):
with context.eager_mode():
y_backprop = array_ops.ones((2, 2, 2, 3))
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((2,))
reserve_space_1 = array_ops.ones((2,))
reserve_space_2 = array_ops.ones((2,))
with self.assertRaisesRegex(errors_impl.InvalidArgumentError,
'x and y_backprop must have same shape,'):
gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale,
reserve_space_1, reserve_space_2)
y_backprop = array_ops.ones((2, 2, 2, 2))
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((3,))
reserve_space_1 = array_ops.ones((2,))
reserve_space_2 = array_ops.ones((2,))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
'scale must have the same number of elements'):
gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale,
reserve_space_1, reserve_space_2)
y_backprop = array_ops.ones((2, 2, 2, 2))
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((2,))
reserve_space_1 = array_ops.ones((3,))
reserve_space_2 = array_ops.ones((2,))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
'reserve_space_1 must have the same number of elements'):
gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale,
reserve_space_1, reserve_space_2)
y_backprop = array_ops.ones((2, 2, 2, 2))
x = array_ops.ones((2, 2, 2, 2))
scale = array_ops.ones((2,))
reserve_space_1 = array_ops.ones((2,))
reserve_space_2 = array_ops.ones((3,))
with self.assertRaisesRegex(
errors_impl.InvalidArgumentError,
'reserve_space_2 must have the same number of elements'):
gen_nn_ops.fused_batch_norm_grad_v2(y_backprop, x, scale,
reserve_space_1, reserve_space_2)
if __name__ == '__main__':
test.main()
| BatchNormalizationTest |
python | ray-project__ray | python/ray/tune/logger/noop.py | {
"start": 174,
"end": 246
} | class ____(Logger):
def on_result(self, result):
pass
| NoopLogger |
python | google__python-fire | fire/console/console_attr.py | {
"start": 4615,
"end": 4886
} | class ____(ProgressTrackerSymbols):
"""Characters used by progress trackers."""
@property
def spin_marks(self):
return ['|', '/', '-', '\\',]
success = 'OK'
failed = 'X'
interrupted = '-'
not_started = '.'
prefix_length = 3
| ProgressTrackerSymbolsAscii |
python | numba__numba | numba/core/typing/arraydecl.py | {
"start": 24042,
"end": 25112
} | class ____(AbstractTemplate):
key = "static_setitem"
def generic(self, args, kws):
# Resolution of members for record and structured arrays
record, idx, value = args
if isinstance(record, types.Record):
if isinstance(idx, str):
expectedty = record.typeof(idx)
if self.context.can_convert(value, expectedty) is not None:
return signature(types.void, record, types.literal(idx),
value)
elif isinstance(idx, int):
if idx >= len(record.fields):
msg = f"Requested index {idx} is out of range"
raise NumbaIndexError(msg)
str_field = list(record.fields)[idx]
expectedty = record.typeof(str_field)
if self.context.can_convert(value, expectedty) is not None:
return signature(types.void, record, types.literal(idx),
value)
@infer_global(operator.setitem)
| StaticSetItemRecord |
python | conda__conda | conda/common/_logic.py | {
"start": 1675,
"end": 3615
} | class ____:
"""
Storage for the CNF clauses, represented as a flat int array.
Each clause is terminated by int(0).
"""
def __init__(self):
self._clause_array = array("i")
# Methods append and extend are directly bound for performance reasons,
# to avoid call overhead and lookups.
self._array_append = self._clause_array.append
self._array_extend = self._clause_array.extend
def extend(self, clauses):
for clause in clauses:
self.append(clause)
def append(self, clause):
self._array_extend(clause)
self._array_append(0)
def get_clause_count(self):
"""
Return number of stored clauses.
This is an O(n) operation since we don't store the number of clauses
explicitly due to performance reasons (Python interpreter overhead in
self.append).
"""
return self._clause_array.count(0)
def save_state(self):
"""
Get state information to be able to revert temporary additions of
supplementary clauses. _ClauseArray: state is the length of the int
array, NOT number of clauses.
"""
return len(self._clause_array)
def restore_state(self, saved_state):
"""
Restore state saved via `save_state`.
Removes clauses that were added after the state has been saved.
"""
len_clause_array = saved_state
self._clause_array[len_clause_array:] = array("i")
def as_list(self):
"""Return clauses as a list of tuples of ints."""
clause = []
for v in self._clause_array:
if v == 0:
yield tuple(clause)
clause.clear()
else:
clause.append(v)
def as_array(self):
"""Return clauses as a flat int array, each clause being terminated by 0."""
return self._clause_array
| _ClauseArray |
python | great-expectations__great_expectations | contrib/capitalone_dataprofiler_expectations/capitalone_dataprofiler_expectations/metrics/data_profiler_metrics/data_profiler_profile_diff.py | {
"start": 414,
"end": 1651
} | class ____(DataProfilerProfileMetricProvider):
metric_name = "data_profiler.profile_diff"
value_keys = ("profile_path",)
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine,
metric_domain_kwargs,
metric_value_kwargs,
metrics,
runtime_configuration,
):
df, _, _ = execution_engine.get_compute_domain(
metric_domain_kwargs, domain_type=MetricDomainTypes.TABLE
)
first_profile = None
try:
first_profile_path = metric_value_kwargs["profile_path"]
first_profile = dp.Profiler.load(first_profile_path)
except FileNotFoundError:
raise ValueError( # noqa: TRY003
"'profile_path' does not point to a valid DataProfiler stored profile."
)
profiler_opts = dp.ProfilerOptions()
profiler_opts.structured_options.multiprocess.is_enabled = False
new_profile = dp.Profiler(df, options=profiler_opts)
report_diff = new_profile.diff(first_profile) # Results in diff of new_prof - first_prof
# Values in this report indicate +/- change from old profile
return report_diff
| DataProfilerProfileDiff |
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 30749,
"end": 31275
} | class ____(PrefectFilterBaseModel):
"""Filter by `TaskRun.state_name`."""
any_: Optional[list[str]] = Field(
default=None, description="A list of task run state names to include"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.TaskRun.state_name.in_(self.any_))
return filters
| TaskRunFilterStateName |
python | pandas-dev__pandas | pandas/tests/io/formats/test_to_string.py | {
"start": 571,
"end": 4426
} | class ____:
def test_keyword_deprecation(self):
# GH 57280
msg = (
"Starting with pandas version 4.0 all arguments of to_string "
"except for the argument 'buf' will be keyword-only."
)
s = Series(["a", "b"])
with tm.assert_produces_warning(Pandas4Warning, match=msg):
s.to_string(None, "NaN")
def test_to_string_masked_ea_with_formatter(self):
# GH#39336
df = DataFrame(
{
"a": Series([0.123456789, 1.123456789], dtype="Float64"),
"b": Series([1, 2], dtype="Int64"),
}
)
result = df.to_string(formatters=["{:.2f}".format, "{:.2f}".format])
expected = dedent(
"""\
a b
0 0.12 1.00
1 1.12 2.00"""
)
assert result == expected
def test_to_string_with_formatters(self):
df = DataFrame(
{
"int": [1, 2, 3],
"float": [1.0, 2.0, 3.0],
"object": [(1, 2), True, False],
},
columns=["int", "float", "object"],
)
formatters = [
("int", lambda x: f"0x{x:x}"),
("float", lambda x: f"[{x: 4.1f}]"),
("object", lambda x: f"-{x!s}-"),
]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=list(zip(*formatters))[1])
assert result == (
" int float object\n"
"0 0x1 [ 1.0] -(1, 2)-\n"
"1 0x2 [ 2.0] -True-\n"
"2 0x3 [ 3.0] -False-"
)
assert result == result2
def test_to_string_with_datetime64_monthformatter(self):
months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
x = DataFrame({"months": months})
def format_func(x):
return x.strftime("%Y-%m")
result = x.to_string(formatters={"months": format_func})
expected = dedent(
"""\
months
0 2016-01
1 2016-02"""
)
assert result.strip() == expected
def test_to_string_with_datetime64_hourformatter(self):
x = DataFrame(
{"hod": to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")}
)
def format_func(x):
return x.strftime("%H:%M")
result = x.to_string(formatters={"hod": format_func})
expected = dedent(
"""\
hod
0 10:10
1 12:12"""
)
assert result.strip() == expected
def test_to_string_with_formatters_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
result = df.to_string(formatters={"c/\u03c3": str})
expected = dedent(
"""\
c/\u03c3
0 1
1 2
2 3"""
)
assert result == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = dedent(
"""\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14"""
)
assert rs == xp
def test_no_extra_space(self):
# GH#52690: Check that no extra space is given
col1 = "TEST"
col2 = "PANDAS"
col3 = "to_string"
expected = f"{col1:<6s} {col2:<7s} {col3:<10s}"
df = DataFrame([{"col1": "TEST", "col2": "PANDAS", "col3": "to_string"}])
d = {"col1": "{:<6s}".format, "col2": "{:<7s}".format, "col3": "{:<10s}".format}
result = df.to_string(index=False, header=False, formatters=d)
assert result == expected
| TestDataFrameToStringFormatters |
python | google__jax | tests/state_test.py | {
"start": 37070,
"end": 38464
} | class ____(NamedTuple):
ref_aval: shaped_array_ref
ref_shape: Shape
indexed_dims: list[bool]
idx_avals: tuple[core.ShapedArray, ...]
idx_shape: Shape
slice_aval: core.ShapedArray
slice_shape: Shape
@hps.composite
def index_params(draw):
ref_shape = draw(hnp.array_shapes(max_dims=4, max_side=7), label='ref_shape')
indexed_dims = draw(hps.lists(hps.booleans(),
min_size=len(ref_shape),
max_size=len(ref_shape)))
idx_shape = draw(hnp.array_shapes(max_dims=3, max_side=5))
if not any(indexed_dims):
slice_shape = ref_shape
else:
sliced_shape = tuple(s for s, b in zip(ref_shape, indexed_dims) if not b)
int_indexers_contiguous = bool(
np.all(np.diff(np.where(indexed_dims)[0]) == 1)
)
if not int_indexers_contiguous:
slice_shape = (*idx_shape, *sliced_shape)
else:
insert_pos = indexed_dims.index(True)
slice_shape = (
*sliced_shape[:insert_pos],
*idx_shape,
*sliced_shape[insert_pos:],
)
ref_aval = shaped_array_ref(ref_shape, np.float32)
idx_avals = tuple(core.ShapedArray(idx_shape, np.int32) for _ in
range(sum(indexed_dims)))
slice_aval = core.ShapedArray(slice_shape, np.float32)
return IndexParam(ref_aval, ref_shape, indexed_dims, idx_avals, idx_shape,
slice_aval, slice_shape)
| IndexParam |
python | pytorch__pytorch | torch/testing/_internal/common_quantization.py | {
"start": 61718,
"end": 62392
} | class ____(torch.nn.Module):
def __init__(self, mod_type):
super().__init__()
self.qconfig = default_dynamic_qconfig
if mod_type == "GRUCell":
self.mod = torch.nn.GRUCell(2, 2).to(dtype=torch.float)
if mod_type == "LSTMCell":
self.mod = torch.nn.LSTMCell(2, 2).to(dtype=torch.float)
if mod_type == "RNNReLU":
self.mod = torch.nn.RNNCell(2, 2, nonlinearity="relu").to(dtype=torch.float)
if mod_type == "RNNTanh":
self.mod = torch.nn.RNNCell(2, 2, nonlinearity="tanh").to(dtype=torch.float)
def forward(self, x):
x = self.mod(x)
return x
| RNNCellDynamicModel |
python | realpython__materials | python-split-list/parallel_demo.py | {
"start": 1105,
"end": 3603
} | class ____:
max_iterations: int
escape_radius: float = 2.0
def __contains__(self, c):
return self.stability(c) == 1
def stability(self, c, smooth=False, clamp=True):
value = self.escape_count(c, smooth) / self.max_iterations
return max(0.0, min(value, 1.0)) if clamp else value
def escape_count(self, c, smooth=False):
z = 0 + 0j
for iteration in range(self.max_iterations):
z = z**2 + c
if abs(z) > self.escape_radius:
if smooth:
return iteration + 1 - log(log(abs(z))) / log(2)
return iteration
return self.max_iterations
def transform(y: int, x: int) -> complex:
"""Transform the given pixel coordinates to the complex plane."""
im = SCALE * (IMAGE_HEIGHT / 2 - y)
re = SCALE * (x - IMAGE_WIDTH / 2)
return complex(re, im) + CENTER
def generate_chunk(bounds: Bounds) -> Chunk:
"""Generate a chunk of pixels for the given bounds."""
chunk = Chunk(bounds)
mandelbrot_set = MandelbrotSet(MAX_ITERATIONS, ESCAPE_RADIUS)
for y, x in bounds:
c = transform(y, x)
instability = 1 - mandelbrot_set.stability(c, smooth=True)
chunk[y, x] = int(instability * 255)
return chunk
def combine(chunks: Iterable[Chunk]) -> Image.Image:
"""Combine the chunks into a single image."""
pixels = np.zeros((IMAGE_HEIGHT, IMAGE_WIDTH), dtype=np.uint8)
for chunk in chunks:
pixels[chunk.bounds.slices()] = chunk.pixels
return Image.fromarray(pixels, mode="L")
def timed(function: Callable) -> Callable:
@functools.wraps(function)
def wrapper(*args, **kwargs):
start = time.perf_counter()
result = function(*args, **kwargs)
end = time.perf_counter()
print(f"{function.__name__}() took {end - start:.2f} seconds")
return result
return wrapper
def process_sequentially(bounds_iter: Iterator[Bounds]) -> Iterator[Chunk]:
return map(generate_chunk, bounds_iter)
def process_in_parallel(bounds_iter: Iterator[Bounds]) -> list[Chunk]:
with multiprocessing.Pool() as pool:
return pool.map(generate_chunk, bounds_iter)
@timed
def compute(worker: Callable) -> Image.Image:
return combine(worker(split_multi(NUM_CHUNKS, IMAGE_HEIGHT, IMAGE_WIDTH)))
def main() -> None:
for worker in (process_sequentially, process_in_parallel):
compute(worker).show()
if __name__ == "__main__":
main()
| MandelbrotSet |
python | great-expectations__great_expectations | great_expectations/core/partitioners.py | {
"start": 1175,
"end": 1374
} | class ____(pydantic.BaseModel):
mod: int
column_name: str
sort_ascending: bool = True
method_name: Literal["partition_on_mod_integer"] = "partition_on_mod_integer"
| PartitionerModInteger |
python | kamyu104__LeetCode-Solutions | Python/minimize-the-maximum-edge-weight-of-graph.py | {
"start": 98,
"end": 1176
} | class ____(object):
def minMaxWeight(self, n, edges, threshold):
"""
:type n: int
:type edges: List[List[int]]
:type threshold: int
:rtype: int
"""
def dijkstra():
best = [float("inf")]*len(adj)
best[0] = 0
min_heap = [(best[0], 0)]
while min_heap:
curr, u = heapq.heappop(min_heap)
if curr != best[u]:
continue
for v, w in adj[u].iteritems():
if not (max(curr, w) < best[v]):
continue
best[v] = max(curr, w)
heapq.heappush(min_heap, (best[v], v))
result = max(best)
return result if result != float("inf") else -1
adj = [collections.defaultdict(lambda: float("inf")) for _ in xrange(n)]
for i, j, w in edges:
adj[j][i] = min(adj[j][i], w)
return dijkstra()
# Time: O(nlogn + e)
# Space: O(n + e)
import collections
import heapq
# prim's algorithm
| Solution |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/components_tests/resolution_tests/test_resolved_from.py | {
"start": 370,
"end": 7609
} | class ____(dg.Model):
foo: str
def test_nested_resolvable():
class ResolvableComponent(dg.Component, dg.Resolvable, dg.Model):
thing: MyModel
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
c = load_component_for_test(
ResolvableComponent,
"""
thing:
foo: hi
""",
)
assert c.thing.foo
@dataclass
class ResolveFromComponent(dg.Component, dg.Resolvable):
thing: MyModel
num: Annotated[
int,
dg.Resolver(
lambda _, v: int(v),
model_field_type=str,
),
]
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
c = load_component_for_test(
ResolveFromComponent,
"""
num: '123'
thing:
foo: hi
""",
)
assert c.thing.foo
@dataclass
class ResolveFromListComponent(dg.Component, dg.Resolvable):
thing: Optional[list[MyModel]]
num: Annotated[
int,
dg.Resolver(
lambda _, v: int(v),
model_field_type=str,
),
]
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
c = load_component_for_test(
ResolveFromListComponent,
"""
num: '123'
thing:
- foo: hi
- foo: bye
""",
)
assert c.thing
assert c.thing[0].foo
def test_class():
class ResolveFromComponent(dg.Component, dg.Resolvable):
def __init__(
self,
thing: MyModel,
num: Annotated[int, dg.Resolver(lambda _, v: int(v), model_field_type=str)],
):
self.thing = thing
self.num = num
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
c = load_component_for_test(
ResolveFromComponent,
"""
num: '123'
thing:
foo: hi
""",
)
assert c.thing.foo
assert c.num == 123
def test_union_resolvable():
class FooModel(dg.Model):
foo: str
class BarModel(dg.Model):
bar: str
@dataclass
class ResolveFromListComponent(dg.Component, dg.Resolvable):
thing: Union[FooModel, BarModel]
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
c = ResolveFromListComponent.resolve_from_yaml(
"""
thing:
foo: hi
""",
)
assert isinstance(c.thing, FooModel)
assert c.thing.foo == "hi"
c = ResolveFromListComponent.resolve_from_yaml(
"""
thing:
bar: hello
""",
)
assert isinstance(c.thing, BarModel)
assert c.thing.bar == "hello"
def test_union_resolvable_complex():
class FooModel(dg.Model):
foo: str
# Test a nested model, in a sequence, with a custom resolver
class NumModel(dg.Model, dg.Resolvable):
num: Annotated[int, dg.Resolver(lambda _, v: int(v), model_field_type=str)]
@dataclass
class ResolveFromListComponent(dg.Component, dg.Resolvable):
thing: Union[FooModel, Sequence[NumModel]]
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
c = load_component_for_test(
ResolveFromListComponent,
"""
thing:
foo: hi
""",
)
assert isinstance(c.thing, FooModel)
assert c.thing.foo == "hi"
c = load_component_for_test(
ResolveFromListComponent,
"""
thing:
- num: '123'
- num: '456'
""",
)
assert isinstance(c.thing, Sequence)
assert len(c.thing) == 2
assert c.thing[0].num == 123
assert c.thing[1].num == 456
def test_union_resolvable_discriminator():
class FooModel(dg.Model):
type: Literal["foo"] = "foo"
value: str
class BarModel(dg.Model):
type: Literal["bar"] = "bar"
value: str
@dataclass
class ResolveFromUnionComponent(dg.Component, dg.Resolvable):
thing: Union[FooModel, BarModel]
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
c = load_component_for_test(
ResolveFromUnionComponent,
"""
thing:
type: foo
value: hi
""",
)
assert isinstance(c.thing, FooModel)
assert c.thing.value == "hi"
c = load_component_for_test(
ResolveFromUnionComponent,
"""
thing:
type: bar
value: hello
""",
)
assert isinstance(c.thing, BarModel)
assert c.thing.value == "hello"
def test_union_nested_custom_resolver():
class FooNonModel:
def __init__(self, foo: str):
self.foo = foo
class BarNonModel:
def __init__(self, bar: str):
self.bar = bar
class FooModel(dg.Model):
foo: str
class BarModel(dg.Model):
bar: str
# We nest complex custom resolvers in the union
# Under the hood, this will choose the resolver whose model_field_type matches the input model type
@dataclass
class ResolveUnionResolversComponent(dg.Component, dg.Resolvable):
thing: Union[
Annotated[
FooNonModel,
dg.Resolver(lambda _, v: FooNonModel(foo=v.foo), model_field_type=FooModel),
],
Annotated[
BarNonModel,
dg.Resolver(lambda _, v: BarNonModel(bar=v.bar), model_field_type=BarModel),
],
]
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
c = ResolveUnionResolversComponent.resolve_from_yaml(
"""
thing:
foo: hi
""",
)
assert isinstance(c.thing, FooNonModel)
assert c.thing.foo == "hi"
c = ResolveUnionResolversComponent.resolve_from_yaml(
"""
thing:
bar: hello
""",
)
assert isinstance(c.thing, BarNonModel)
assert c.thing.bar == "hello"
def _raise_exc():
raise Exception("test")
def test_union_nested_custom_resolver_no_match():
class FooNonModel:
def __init__(self, foo: str):
self.foo = foo
class BarNonModel:
def __init__(self, bar: str):
self.bar = bar
class FooModel(dg.Model):
foo: str
class BarModel(dg.Model):
bar: str
@dataclass
class ResolveUnionResolversComponent(dg.Component, dg.Resolvable):
thing: Union[
Annotated[
FooNonModel,
dg.Resolver(lambda _, v: _raise_exc(), model_field_type=FooModel),
],
Annotated[
BarNonModel,
dg.Resolver(lambda _, v: _raise_exc(), model_field_type=BarModel),
],
]
def build_defs(self, context: ComponentLoadContext) -> dg.Definitions:
return dg.Definitions()
with pytest.raises(
ResolutionException,
match=r"No resolver matched the field value",
):
ResolveUnionResolversComponent.resolve_from_yaml(
"""
thing:
foo: hi
""",
)
| MyModel |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/error_utils.py | {
"start": 858,
"end": 4376
} | class ____(
collections.namedtuple('FrameInfo',
('filename', 'lineno', 'function_name', 'code',
'is_converted', 'is_allowlisted'))):
__slots__ = ()
def _stack_trace_inside_mapped_code(tb, source_map, converter_filename):
"""Summarizes inner traceback frames up to the call to a given function.
This functions locates the innermost (i.e. most recent) frame that corresponds
to code that can be mapped by source_map originated from, and returns a
translated stack trace ending at that frame. If no such frame is found, the
entire stack trace is summarized.
For example, the following code:
def f():
for i in tf.range(1):
z = y + i # z only defined here
Would generate this traceback:
<converted code>
ag__.for_stmt(...)
<for_stmt>
return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
<_known_len_tf_for_stmt>
_disallow_undefs_into_loop(*init_state)
<_disallow_undefs_into_loop>
raise ...
Which is then processed into:
<f>
for i in tf.range(1):
<for_stmt>
return _known_len_tf_for_stmt(iter_, extra_test, body, init_state)
<_known_len_tf_for_stmt>
_disallow_undefs_into_loop(*init_state)
<_disallow_undefs_into_loop>
raise ...
Args:
tb: traceback.FrameSummary, The traceback corresponding to an error.
Typically, the output of traceback.Summary.extract(capture_locals=True).
source_map: Dict[LineLocation, OriginInfo], a source map as created by
origin_info.create_source_map.
converter_filename: str, the file path of the converted module. Call frames
corresponding to this module are elided and their preceding frames are
marked as allowlisted. Note that frames enclosing converted code are
dropped using a different mechanism.
Returns:
List[FrameInfo]
"""
result_frames = []
for filename, line_number, function_name, text in reversed(tb):
loc = origin_info.LineLocation(filename=filename, lineno=line_number)
if loc in source_map:
origin = source_map[loc]
fi = FrameInfo(
filename=origin.loc.filename,
lineno=origin.loc.lineno,
function_name=origin.function_name,
code=origin.source_code_line,
is_converted=True,
is_allowlisted=False)
result_frames.append(fi)
break
if filename == converter_filename:
if result_frames:
prev = result_frames[-1]
assert not prev.is_converted # See the if above.
fi = FrameInfo(
filename=prev.filename,
lineno=prev.lineno,
function_name=prev.function_name,
code=prev.code,
is_converted=False,
is_allowlisted=True)
result_frames[-1] = fi
continue
fi = FrameInfo(
filename=filename,
lineno=line_number,
function_name=function_name,
code=text,
is_converted=False,
is_allowlisted=False)
result_frames.append(fi)
return tuple(result_frames)
KNOWN_STRING_CONSTRUCTOR_ERRORS = (
AssertionError,
AttributeError,
NameError,
NotImplementedError,
RuntimeError,
StopIteration,
TypeError,
UnboundLocalError,
ValueError,
)
# KeyError escapes newlines in strings. We create a special subclass
# that doesn't do that. Overriding the name for display purposes; hopefully
# that won't create too many surprises.
| FrameInfo |
python | django__django | tests/m2m_through/models.py | {
"start": 1940,
"end": 2222
} | class ____(models.Model):
name = models.CharField(max_length=5)
friends = models.ManyToManyField("self", through="Friendship", symmetrical=False)
sym_friends = models.ManyToManyField(
"self", through="SymmetricalFriendship", symmetrical=True
)
| PersonSelfRefM2M |
python | google__jax | jax/_src/export/_export.py | {
"start": 13954,
"end": 14171
} | class ____(Protocol):
def __call__(self, serialized_aux_data: bytes) -> PyTreeAuxData:
"""Deserializes the PyTree node AuxData.
The result will be passed to ``_BuildFromChildren``.
"""
| _DeserializeAuxData |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/type_lookup.py | {
"start": 535,
"end": 5312
} | class ____(
UserDict,
Mapping[ValidTypes, ValidTypes],
):
"""
Dict-like Mapping object that creates keys from values and values from keys.
Because of this, all values must be Hashable.
`NoneType` / `None` is not allowed.
If a Mapping-like object is passed as the first parameter, its key/values will be
unpacked (and combined with kwargs) into the new `TypeDict` object.
Once set, values/keys cannot be overwritten.
"""
def __init__(
self,
dict: Optional[Mapping[ValidTypes, ValidTypes]] = None,
/,
**kwargs: Hashable,
):
__dict = dict or {}
super().__init__(__dict, **kwargs)
def type_names(self) -> Generator[str, None, None]:
"""Yields only the type `str` names of the TypeLookup."""
for k in self:
if isinstance(k, str):
yield k
continue
@overload
def __getitem__(self, key: str) -> Type: ...
@overload
def __getitem__(self, key: Type) -> str: ...
@override
def __getitem__(self, key: ValidTypes) -> ValidTypes:
try:
return super().__getitem__(key)
except KeyError as key_err:
msg = f"{key} was not found."
if isinstance(key, str):
msg = f"type {msg} Available types are: {', '.join(self.type_names())}"
raise LookupError(msg) from key_err
@override
def __delitem__(self, key: ValidTypes):
value = self.data.pop(key)
super().pop(value, None)
@override
def __setitem__(self, key: ValidTypes, value: ValidTypes):
if key in self and value in self and self[key] == value and self[value] == key:
# This key, value pair has already been registered so we return
return
if key is None:
raise TypeLookupError(f"`NoneType` for {value} is not allowed - bad key") # noqa: TRY003 # FIXME CoP
if value is None:
raise TypeLookupError(f"`NoneType` for {key} is not allowed - bad value") # noqa: TRY003 # FIXME CoP
if key in self:
raise TypeLookupError(f"`{key}` already set - bad key") # noqa: TRY003 # FIXME CoP
if value in self:
raise TypeLookupError(f"`{value}` already set - bad value") # noqa: TRY003 # FIXME CoP
super().__setitem__(key, value)
super().__setitem__(value, key)
@override
def __repr__(self) -> str:
return f"{type(self).__name__}({super().__repr__()})"
@override
def __str__(self) -> str:
return str(self.data)
def intersection(self, collection_: Iterable[ValidTypes]) -> Set[ValidTypes]:
"""
Returns the intersection of a list (or other iterable) and the keys/values of
the `TypeLookup` instance.
"""
return set(collection_).intersection(self.keys())
def raise_if_contains(self, collection_: Iterable[ValidTypes]):
"""Raise a TypeLookup error if the passed iterable contains any overlapping items."""
intersection = self.intersection(collection_)
if intersection:
raise TypeLookupError(f"Items are already present - {intersection}") # noqa: TRY003 # FIXME CoP
@override
def clear(self) -> None:
"""Clear all data. Deletes all keys and values."""
return self.data.clear()
@contextlib.contextmanager
def transaction(self) -> Generator[TypeLookup, None, None]:
"""
Context manager that waits until end of transaction to commit changes.
Any exceptions that happen within the context will prevent any of the changes from being committed.
Any exceptions encountered will be re-raised on exit.
Example
-------
>>> t = TypeLookup()
>>> with t.transaction():
... t["my_type"] = tuple
... assert tuple in t, "Should not fail"
... assert True is False, "Should fail"
Traceback (most recent call last):
...
AssertionError: Should fail
>>> print(tuple in t)
False
""" # noqa: E501 # FIXME CoP
txn_exc: Union[Exception, None] = None
backup_data = copy.copy(self.data)
logger.debug("Beginning TypeLookup transaction")
try:
yield self
except Exception as exc:
txn_exc = exc
raise
finally:
if txn_exc:
logger.debug("Transaction of items rolled back")
self.data = backup_data
else:
logger.debug("Transaction committing items")
logger.debug("Completed TypeLookup transaction")
if __name__ == "__main__":
import doctest
doctest.testmod(report=True, verbose=True)
| TypeLookup |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/ext/associationproxy.py | {
"start": 46431,
"end": 47245
} | class ____(_LazyCollectionProtocol[_T]):
def __init__(self, obj: Any, target: str):
self.parent = obj
self.target = target
def __call__(
self,
) -> Union[MutableSet[_T], MutableMapping[Any, _T], MutableSequence[_T]]:
return getattr(self.parent, self.target) # type: ignore[no-any-return]
def __getstate__(self) -> Any:
return {"obj": self.parent, "target": self.target}
def __setstate__(self, state: Any) -> None:
self.parent = state["obj"]
self.target = state["target"]
_IT = TypeVar("_IT", bound="Any")
"""instance type - this is the type of object inside a collection.
this is not the same as the _T of AssociationProxy and
AssociationProxyInstance itself, which will often refer to the
collection[_IT] type.
"""
| _lazy_collection |
python | pydantic__pydantic | tests/mypy/modules/custom_constructor.py | {
"start": 33,
"end": 259
} | class ____(BaseModel):
id: int
name: str
birth_year: int
def __init__(self, id: int) -> None:
super().__init__(id=id, name='Patrick', birth_year=1991)
Person(1)
Person(id=1)
Person(name='Patrick')
| Person |
python | walkccc__LeetCode | solutions/481. Magical String/481.py | {
"start": 0,
"end": 277
} | class ____:
def magicalString(self, n: int) -> int:
s = [' ', '1', '2', '2']
for i in range(3, n + 1):
if i % 2 == 1:
s.extend(['1'] * (int(s[i])))
else:
s.extend(['2'] * (int(s[i])))
return sum(1 for c in s[:n + 1] if c == '1')
| Solution |
python | cython__cython | Cython/Compiler/AutoDocTransforms.py | {
"start": 1414,
"end": 11974
} | class ____(CythonTransform):
def __init__(self, context):
super().__init__(context)
self.class_name = None
self.class_node = None
def _fmt_expr(self, node):
writer = ExpressionWriter(allow_unknown_nodes=True)
result = writer.write(node)
# print(type(node).__name__, '-->', result)
return result
def _fmt_annotation(self, node):
writer = AnnotationWriter()
result = writer.write(node)
# print(type(node).__name__, '-->', result)
return result
def _setup_format(self):
signature_format = self.current_directives['embedsignature.format']
self.is_format_c = signature_format == 'c'
self.is_format_python = signature_format == 'python'
self.is_format_clinic = signature_format == 'clinic'
def _fmt_arg(self, arg):
arg_doc = arg.name
annotation = None
defaultval = None
if arg.is_self_arg:
if self.is_format_clinic:
arg_doc = '$self'
elif arg.is_type_arg:
if self.is_format_clinic:
arg_doc = '$type'
elif self.is_format_c:
if arg.type is not PyrexTypes.py_object_type:
arg_doc = arg.type.declaration_code(arg.name, for_display=1)
elif self.is_format_python:
if not arg.annotation:
annotation = self._fmt_type(arg.type)
if arg.annotation:
if not self.is_format_clinic:
annotation = self._fmt_annotation(arg.annotation)
if arg.default:
defaultval = self._fmt_expr(arg.default)
if annotation:
arg_doc = arg_doc + (': %s' % annotation)
if defaultval:
arg_doc = arg_doc + (' = %s' % defaultval)
elif defaultval:
arg_doc = arg_doc + ('=%s' % defaultval)
return arg_doc
def _fmt_star_arg(self, arg):
arg_doc = arg.name
if arg.annotation:
if not self.is_format_clinic:
annotation = self._fmt_annotation(arg.annotation)
arg_doc = arg_doc + (': %s' % annotation)
return arg_doc
def _fmt_arglist(self, args,
npoargs=0, npargs=0, pargs=None,
nkargs=0, kargs=None,
hide_self=False):
arglist = []
for arg in args:
if not hide_self or not arg.entry.is_self_arg:
arg_doc = self._fmt_arg(arg)
arglist.append(arg_doc)
if pargs:
arg_doc = self._fmt_star_arg(pargs)
arglist.insert(npargs + npoargs, '*%s' % arg_doc)
elif nkargs:
arglist.insert(npargs + npoargs, '*')
if npoargs:
arglist.insert(npoargs, '/')
if kargs:
arg_doc = self._fmt_star_arg(kargs)
arglist.append('**%s' % arg_doc)
return arglist
def _fmt_type(self, type):
if type is PyrexTypes.py_object_type:
return None
elif self.is_format_c:
code = type.declaration_code("", for_display=1)
return code
elif self.is_format_python:
annotation = None
if type.is_string:
annotation = self.current_directives['c_string_type']
elif type.is_numeric:
annotation = type.py_type_name()
if annotation is None:
code = type.declaration_code('', for_display=1)
annotation = code.replace(' ', '_').replace('*', 'p')
return annotation
return None
def _fmt_signature(self, cls_name, func_name, args,
npoargs=0, npargs=0, pargs=None,
nkargs=0, kargs=None,
return_expr=None, return_type=None,
hide_self=False):
arglist = self._fmt_arglist(
args, npoargs, npargs, pargs, nkargs, kargs,
hide_self=hide_self,
)
arglist_doc = ', '.join(arglist)
func_doc = '%s(%s)' % (func_name, arglist_doc)
if self.is_format_c and cls_name:
func_doc = '%s.%s' % (cls_name, func_doc)
if not self.is_format_clinic:
ret_doc = None
if return_expr:
ret_doc = self._fmt_annotation(return_expr)
elif return_type:
ret_doc = self._fmt_type(return_type)
if ret_doc:
func_doc = '%s -> %s' % (func_doc, ret_doc)
return func_doc
def _embed_signature(self, signature, node_doc):
if self.is_format_clinic and self.current_directives['binding']:
return node_doc
if node_doc:
if self.is_format_clinic:
docfmt = "%s\n--\n\n%s"
else:
docfmt = "%s\n\n%s"
node_doc = inspect.cleandoc(node_doc)
return docfmt % (signature, node_doc)
else:
if self.is_format_clinic:
docfmt = "%s\n--\n\n"
else:
docfmt = "%s"
return docfmt % signature
def __call__(self, node):
if not Options.docstrings:
return node
else:
return super().__call__(node)
def visit_ClassDefNode(self, node):
oldname = self.class_name
oldclass = self.class_node
self.class_node = node
try:
# PyClassDefNode
self.class_name = node.name
except AttributeError:
# CClassDefNode
self.class_name = node.class_name
self.visitchildren(node)
self.class_name = oldname
self.class_node = oldclass
return node
def visit_LambdaNode(self, node):
# lambda expressions so not have signature or inner functions
return node
def visit_DefNode(self, node):
if not self.current_directives['embedsignature']:
return node
self._setup_format()
is_constructor = False
hide_self = False
if node.entry.is_special:
is_constructor = self.class_node and node.name == '__init__'
if is_constructor:
class_name = None
func_name = node.name
if self.is_format_c:
func_name = self.class_name
hide_self = True
else:
class_name, func_name = self.class_name, node.name
else:
class_name, func_name = self.class_name, node.name
npoargs = getattr(node, 'num_posonly_args', 0)
nkargs = getattr(node, 'num_kwonly_args', 0)
npargs = len(node.args) - nkargs - npoargs
signature = self._fmt_signature(
class_name, func_name, node.args,
npoargs, npargs, node.star_arg,
nkargs, node.starstar_arg,
return_expr=node.return_type_annotation,
return_type=None, hide_self=hide_self)
if signature:
if is_constructor and self.is_format_c:
doc_holder = self.class_node.entry.type.scope
else:
doc_holder = node.entry
if doc_holder.doc is not None:
old_doc = doc_holder.doc
elif not is_constructor and getattr(node, 'py_func', None) is not None:
old_doc = node.py_func.entry.doc
else:
old_doc = None
new_doc = self._embed_signature(signature, old_doc)
if not node.entry.is_special or is_constructor or node.entry.wrapperbase_cname is not None:
# TODO: the wrapperbase must be generated for __doc__ to exist;
# however this phase is run later in the pipeline than
# Compiler/Nodes.py:declare_pyfunction, so wrapperbase_cname
# may already be set to None
doc_holder.doc = EncodedString(new_doc)
if not is_constructor and getattr(node, 'py_func', None) is not None:
node.py_func.entry.doc = EncodedString(new_doc)
return node
def visit_CFuncDefNode(self, node):
if not node.overridable: # not cpdef FOO(...):
return node
if not self.current_directives['embedsignature']:
return node
self._setup_format()
signature = self._fmt_signature(
self.class_name, node.declarator.base.name,
node.declarator.args,
return_type=node.return_type)
if signature:
if node.entry.doc is not None:
old_doc = node.entry.doc
elif getattr(node, 'py_func', None) is not None:
old_doc = node.py_func.entry.doc
else:
old_doc = None
new_doc = self._embed_signature(signature, old_doc)
node.entry.doc = EncodedString(new_doc)
py_func = getattr(node, 'py_func', None)
if py_func is not None:
py_func.entry.doc = EncodedString(new_doc)
return node
def visit_PropertyNode(self, node):
if not self.current_directives['embedsignature']:
return node
self._setup_format()
entry = node.entry
body = node.body
prop_name = entry.name
type_name = None
if entry.visibility == 'public':
if self.is_format_c:
# property synthesised from a cdef public attribute
type_name = entry.type.declaration_code("", for_display=1)
if not entry.type.is_pyobject:
type_name = "'%s'" % type_name
elif entry.type.is_extension_type:
type_name = entry.type.module_name + '.' + type_name
elif self.is_format_python:
type_name = self._fmt_type(entry.type)
if type_name is None:
for stat in body.stats:
if stat.name != '__get__':
continue
if self.is_format_c:
prop_name = '%s.%s' % (self.class_name, prop_name)
ret_annotation = stat.return_type_annotation
if ret_annotation:
type_name = self._fmt_annotation(ret_annotation)
if type_name is not None :
signature = '%s: %s' % (prop_name, type_name)
new_doc = self._embed_signature(signature, entry.doc)
if not self.is_format_clinic:
entry.doc = EncodedString(new_doc)
return node
| EmbedSignature |
python | jazzband__django-waffle | waffle/tests/test_testutils.py | {
"start": 3456,
"end": 3570
} | class ____(OverrideSwitchMixin, TestCase):
"""
Run tests with Django TestCase
"""
| OverrideSwitchTestCase |
python | readthedocs__readthedocs.org | readthedocs/projects/migrations/0045_project_max_concurrent_builds.py | {
"start": 149,
"end": 629
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("projects", "0044_auto_20190703_1300"),
]
operations = [
migrations.AddField(
model_name="project",
name="max_concurrent_builds",
field=models.IntegerField(
blank=True,
null=True,
verbose_name="Maximum concurrent builds allowed for this project",
),
),
]
| Migration |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/strings_ops/string_split_op_test.py | {
"start": 9602,
"end": 18960
} | class ____(test_util.TensorFlowTestCase, parameterized.TestCase):
@parameterized.named_parameters([
{"testcase_name": "Simple",
"input": [b"pigs on the wing", b"animals"],
"expected": [[b"pigs", b"on", b"the", b"wing"], [b"animals"]]},
{"testcase_name": "MultiCharSeparator",
"input": [b"1<>2<>3", b"<><>4<>5<><>6<>"],
"sep": b"<>",
"expected": [[b"1", b"2", b"3"],
[b"", b"", b"4", b"5", b"", b"6", b""]]},
{"testcase_name": "SimpleSeparator",
"input": [b"1,2,3", b"4,5,,6,"],
"sep": b",",
"expected": [[b"1", b"2", b"3"], [b"4", b"5", b"", b"6", b""]]},
{"testcase_name": "EmptySeparator",
"input": [b"1 2 3", b" 4 5 6 "],
"expected": [[b"1", b"2", b"3"], [b"4", b"5", b"6"]]},
{"testcase_name": "EmptySeparatorEmptyInputString",
"input": [b""],
"expected": [[]]},
{"testcase_name": "EmptyInputVector",
"input": [],
"expected": []},
{"testcase_name": "SimpleSeparatorMaxSplit",
"input": [b"1,2,3", b"4,5,,6,"],
"sep": b",",
"maxsplit": 1,
"expected": [[b"1", b"2,3"], [b"4", b"5,,6,"]]},
{"testcase_name": "EmptySeparatorMaxSplit",
"input": [b"1 2 3", b" 4 5 6 "],
"maxsplit": 1,
"expected": [[b"1", b"2 3"], [b"4", b"5 6 "]]},
{"testcase_name": "ScalarInput",
"input": b"1,2,3",
"sep": b",",
"expected": [b"1", b"2", b"3"]},
{"testcase_name": "Dense2DInput",
"input": [[b"1,2,3", b"4"], [b"5,6", b"7,8,9"]],
"sep": b",",
"expected": [[[b"1", b"2", b"3"], [b"4"]],
[[b"5", b"6"], [b"7", b"8", b"9"]]]},
{"testcase_name": "Ragged2DInput",
"input": [[b"1,2,3", b"4"], [b"5,6"]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]]},
{"testcase_name": "Ragged3DInput",
"input": [[[b"1,2,3", b"4"], [b"5,6"]], [[b"7,8,9"]]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]]},
{"testcase_name": "Ragged4DInput",
"input": [[[[b"1,2,3", b"4"], [b"5,6"]], [[b"7,8,9"]]], [[[b""]]]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]], [[[[b""]]]]]},
{"testcase_name": "Ragged4DInputEmptySeparator",
"input": [[[[b"1 2 3", b"4"], [b"5 6"]], [[b"7 8 9"]]], [[[b""]]]],
"input_is_ragged": True,
"expected": [[[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]], [[[[]]]]]},
]) # pyformat: disable
def testSplitV2(self,
input,
expected,
input_is_ragged=False,
**kwargs): # pylint: disable=redefined-builtin
# Check that we are matching the behavior of Python's str.split:
self.assertEqual(expected, self._py_split(input, **kwargs))
# Prepare the input tensor.
if input_is_ragged:
input = ragged_factory_ops.constant(input, dtype=dtypes.string)
else:
input = constant_op.constant(input, dtype=dtypes.string)
# Check that the public version (which returns a RaggedTensor) works
# correctly.
expected_ragged = ragged_factory_ops.constant(
expected, ragged_rank=input.shape.ndims)
actual_ragged_v2 = ragged_string_ops.string_split_v2(input, **kwargs)
actual_ragged_v2_input_kwarg = ragged_string_ops.string_split_v2(
input=input, **kwargs)
self.assertAllEqual(expected_ragged, actual_ragged_v2)
self.assertAllEqual(expected_ragged, actual_ragged_v2_input_kwarg)
# Check that the internal version (which returns a SparseTensor) works
# correctly. Note: the internal version oly supports vector inputs.
if input.shape.ndims == 1:
expected_sparse = self.evaluate(expected_ragged.to_sparse())
actual_sparse_v2 = string_ops.string_split_v2(input, **kwargs)
self.assertEqual(expected_sparse.indices.tolist(),
self.evaluate(actual_sparse_v2.indices).tolist())
self.assertEqual(expected_sparse.values.tolist(),
self.evaluate(actual_sparse_v2.values).tolist())
self.assertEqual(expected_sparse.dense_shape.tolist(),
self.evaluate(actual_sparse_v2.dense_shape).tolist())
@parameterized.named_parameters([
{"testcase_name": "Simple",
"input": [b"pigs on the wing", b"animals"],
"expected": [[b"pigs", b"on", b"the", b"wing"], [b"animals"]]},
{"testcase_name": "MultiCharSeparator",
"input": [b"1<>2<>3", b"<><>4<>5<><>6<>"],
"sep": b"<>",
"expected": [[b"1", b"2", b"3"],
[b"", b"", b"4", b"5", b"", b"6", b""]]},
{"testcase_name": "SimpleSeparator",
"input": [b"1,2,3", b"4,5,,6,"],
"sep": b",",
"expected": [[b"1", b"2", b"3"], [b"4", b"5", b"", b"6", b""]]},
{"testcase_name": "EmptySeparator",
"input": [b"1 2 3", b" 4 5 6 "],
"expected": [[b"1", b"2", b"3"], [b"4", b"5", b"6"]]},
{"testcase_name": "EmptySeparatorEmptyInputString",
"input": [b""],
"expected": [[]]},
{"testcase_name": "SimpleSeparatorMaxSplit",
"input": [b"1,2,3", b"4,5,,6,"],
"sep": b",",
"maxsplit": 1,
"expected": [[b"1", b"2,3"], [b"4", b"5,,6,"]]},
{"testcase_name": "EmptySeparatorMaxSplit",
"input": [b"1 2 3", b" 4 5 6 "],
"maxsplit": 1,
"expected": [[b"1", b"2 3"], [b"4", b"5 6 "]]},
{"testcase_name": "ScalarInput",
"input": b"1,2,3",
"sep": b",",
"expected": [[b"1", b"2", b"3"]]},
{"testcase_name": "Dense2DInput",
"input": [[b"1,2,3", b"4"], [b"5,6", b"7,8,9"]],
"sep": b",",
"expected": [[[b"1", b"2", b"3"], [b"4"]],
[[b"5", b"6"], [b"7", b"8", b"9"]]]},
{"testcase_name": "Ragged2DInput",
"input": [[b"1,2,3", b"4"], [b"5,6"]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]]},
{"testcase_name": "Ragged3DInput",
"input": [[[b"1,2,3", b"4"], [b"5,6"]], [[b"7,8,9"]]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]]},
{"testcase_name": "Ragged4DInput",
"input": [[[[b"1,2,3", b"4"], [b"5,6"]], [[b"7,8,9"]]], [[[b""]]]],
"input_is_ragged": True,
"sep": b",",
"expected": [[[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]], [[[[b""]]]]]},
{"testcase_name": "Ragged4DInputEmptySeparator",
"input": [[[[b"1 2 3", b"4"], [b"5 6"]], [[b"7 8 9"]]], [[[b""]]]],
"input_is_ragged": True,
"expected": [[[[[b"1", b"2", b"3"], [b"4"]], [[b"5", b"6"]]],
[[[b"7", b"8", b"9"]]]], [[[[]]]]]},
]) # pyformat: disable
def testSplitV1(self, input, expected, input_is_ragged=False, **kwargs): # pylint: disable=redefined-builtin
# Prepare the input tensor.
if input_is_ragged:
input = ragged_factory_ops.constant(input, dtype=dtypes.string)
else:
input = constant_op.constant(input, dtype=dtypes.string)
expected_ragged = ragged_factory_ops.constant(expected)
actual_ragged_v1 = ragged_string_ops.strings_split_v1(
input, result_type="RaggedTensor", **kwargs)
actual_ragged_v1_input_kwarg = ragged_string_ops.strings_split_v1(
input=input, result_type="RaggedTensor", **kwargs)
actual_ragged_v1_source_kwarg = ragged_string_ops.strings_split_v1(
source=input, result_type="RaggedTensor", **kwargs)
self.assertAllEqual(expected_ragged, actual_ragged_v1)
self.assertAllEqual(expected_ragged, actual_ragged_v1_input_kwarg)
self.assertAllEqual(expected_ragged, actual_ragged_v1_source_kwarg)
expected_sparse = self.evaluate(expected_ragged.to_sparse())
actual_sparse_v1 = ragged_string_ops.strings_split_v1(
input, result_type="SparseTensor", **kwargs)
self.assertEqual(expected_sparse.indices.tolist(),
self.evaluate(actual_sparse_v1.indices).tolist())
self.assertEqual(expected_sparse.values.tolist(),
self.evaluate(actual_sparse_v1.values).tolist())
self.assertEqual(expected_sparse.dense_shape.tolist(),
self.evaluate(actual_sparse_v1.dense_shape).tolist())
def testSplitV1BadResultType(self):
with self.assertRaisesRegex(ValueError, "result_type must be .*"):
ragged_string_ops.strings_split_v1("foo", result_type="BouncyTensor")
def _py_split(self, strings, **kwargs):
if isinstance(strings, compat.bytes_or_text_types):
# Note: str.split doesn't accept keyword args.
if "maxsplit" in kwargs:
return strings.split(kwargs.get("sep", None), kwargs["maxsplit"])
else:
return strings.split(kwargs.get("sep", None))
else:
return [self._py_split(s, **kwargs) for s in strings]
if __name__ == "__main__":
test.main()
| StringSplitV2OpTest |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 16169,
"end": 16715
} | class ____(nn.Module):
def __init__(self, config: DPTConfig):
super().__init__()
self.attention = DPTSelfAttention(config)
self.output = DPTViTSelfOutput(config)
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
self_attn_output, _ = self.attention(hidden_states)
output = self.output(self_attn_output, hidden_states)
return output
# Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViTConfig->DPTConfig, ViTIntermediate->DPTViTIntermediate
| DPTViTAttention |
python | sqlalchemy__sqlalchemy | test/orm/test_cache_key.py | {
"start": 21404,
"end": 29089
} | class ____(fixtures.CacheKeyFixture, _poly_fixtures._Polymorphic):
run_setup_mappers = "once"
run_inserts = None
run_deletes = None
def test_wp_objects(self):
Person, Manager, Engineer, Boss = self.classes(
"Person", "Manager", "Engineer", "Boss"
)
self._run_cache_key_fixture(
lambda: (
inspect(with_polymorphic(Person, [Manager, Engineer])),
inspect(with_polymorphic(Person, [Manager])),
inspect(with_polymorphic(Person, [Manager, Engineer, Boss])),
inspect(
with_polymorphic(Person, [Manager, Engineer], flat=True)
),
inspect(
with_polymorphic(
Person,
[Manager, Engineer],
select(Person)
.outerjoin(Manager)
.outerjoin(Engineer)
.subquery(),
)
),
),
compare_values=True,
)
def test_wpoly_cache_keys(self):
Person, Manager, Engineer, Boss = self.classes(
"Person", "Manager", "Engineer", "Boss"
)
meb_stmt = inspect(
with_polymorphic(Person, [Manager, Engineer, Boss])
).selectable
me_stmt = inspect(
with_polymorphic(Person, [Manager, Engineer])
).selectable
self._run_cache_key_fixture(
lambda: (
inspect(Person),
inspect(aliased(Person, me_stmt)),
inspect(aliased(Person, meb_stmt)),
inspect(with_polymorphic(Person, [Manager, Engineer])),
# aliased=True is the same as flat=True for default selectable
inspect(
with_polymorphic(
Person, [Manager, Engineer], aliased=True
),
),
inspect(
with_polymorphic(Person, [Manager, Engineer], flat=True),
),
inspect(
with_polymorphic(
Person, [Manager, Engineer], flat=True, innerjoin=True
),
),
inspect(
with_polymorphic(
Person,
[Manager, Engineer],
flat=True,
_use_mapper_path=True,
),
),
inspect(
with_polymorphic(
Person,
[Manager, Engineer],
flat=True,
adapt_on_names=True,
),
),
inspect(
with_polymorphic(
Person, [Manager, Engineer], selectable=meb_stmt
),
),
inspect(
with_polymorphic(
Person,
[Manager, Engineer],
selectable=meb_stmt,
aliased=True,
),
),
inspect(with_polymorphic(Person, [Manager, Engineer, Boss])),
inspect(
with_polymorphic(
Person,
[Manager, Engineer, Boss],
polymorphic_on=literal_column("foo"),
),
),
inspect(
with_polymorphic(
Person,
[Manager, Engineer, Boss],
polymorphic_on=literal_column("bar"),
),
),
inspect(with_polymorphic(Person, "*", name="foo")),
),
compare_values=True,
)
def test_wp_queries(self):
Person, Manager, Engineer, Boss = self.classes(
"Person", "Manager", "Engineer", "Boss"
)
def two():
wp = with_polymorphic(Person, [Manager, Engineer])
return fixture_session().query(wp)
def three():
wp = with_polymorphic(Person, [Manager, Engineer])
return fixture_session().query(wp).filter(wp.name == "asdfo")
def three_a():
wp = with_polymorphic(Person, [Manager, Engineer], flat=True)
return fixture_session().query(wp).filter(wp.name == "asdfo")
def five():
subq = (
select(Person)
.outerjoin(Manager)
.outerjoin(Engineer)
.subquery()
)
wp = with_polymorphic(Person, [Manager, Engineer], subq)
return fixture_session().query(wp).filter(wp.name == "asdfo")
self._run_cache_key_fixture(
lambda: stmt_20(two(), three(), three_a(), five()),
compare_values=True,
)
def test_wp_joins(self):
Company, Person, Manager, Engineer, Boss = self.classes(
"Company", "Person", "Manager", "Engineer", "Boss"
)
def one():
return (
fixture_session()
.query(Company)
.join(Company.employees)
.filter(Person.name == "asdf")
)
def two():
wp = with_polymorphic(Person, [Manager, Engineer])
return (
fixture_session()
.query(Company)
.join(Company.employees.of_type(wp))
.filter(wp.name == "asdf")
)
def three():
wp = with_polymorphic(Person, [Manager, Engineer])
return (
fixture_session()
.query(Company)
.join(Company.employees.of_type(wp))
.filter(wp.Engineer.name == "asdf")
)
self._run_cache_key_fixture(
lambda: stmt_20(one(), two(), three()),
compare_values=True,
)
@testing.variation(
"exprtype", ["plain_column", "self_standing_case", "case_w_columns"]
)
def test_hybrid_w_case_ac(self, decl_base, exprtype):
"""test #9728"""
class Employees(decl_base):
__tablename__ = "employees"
id = Column(String(128), primary_key=True)
first_name = Column(String(length=64))
@hybrid_property
def name(self):
return self.first_name
@name.expression
def name(
cls,
):
if exprtype.plain_column:
return cls.first_name
elif exprtype.self_standing_case:
return case(
(column("x") == 1, column("q")),
else_=column("q"),
)
elif exprtype.case_w_columns:
return case(
(column("x") == 1, column("q")),
else_=cls.first_name,
)
else:
exprtype.fail()
def go1():
employees_2 = aliased(Employees, name="employees_2")
stmt = select(employees_2.name)
return stmt
def go2():
employees_2 = aliased(Employees, name="employees_2")
stmt = select(employees_2)
return stmt
self._run_cache_key_fixture(
lambda: stmt_20(go1(), go2()),
compare_values=True,
)
| PolyCacheKeyTest |
python | run-llama__llama_index | llama-index-core/llama_index/core/storage/kvstore/types.py | {
"start": 223,
"end": 2193
} | class ____(ABC):
"""Base key-value store."""
@abstractmethod
def put(self, key: str, val: dict, collection: str = DEFAULT_COLLECTION) -> None:
pass
@abstractmethod
async def aput(
self, key: str, val: dict, collection: str = DEFAULT_COLLECTION
) -> None:
pass
def put_all(
self,
kv_pairs: List[Tuple[str, dict]],
collection: str = DEFAULT_COLLECTION,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
# by default, support a batch size of 1
if batch_size != 1:
raise NotImplementedError("Batching not supported by this key-value store.")
else:
for key, val in kv_pairs:
self.put(key, val, collection=collection)
async def aput_all(
self,
kv_pairs: List[Tuple[str, dict]],
collection: str = DEFAULT_COLLECTION,
batch_size: int = DEFAULT_BATCH_SIZE,
) -> None:
# by default, support a batch size of 1
if batch_size != 1:
raise NotImplementedError("Batching not supported by this key-value store.")
else:
for key, val in kv_pairs:
await self.aput(key, val, collection=collection)
@abstractmethod
def get(self, key: str, collection: str = DEFAULT_COLLECTION) -> Optional[dict]:
pass
@abstractmethod
async def aget(
self, key: str, collection: str = DEFAULT_COLLECTION
) -> Optional[dict]:
pass
@abstractmethod
def get_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
pass
@abstractmethod
async def aget_all(self, collection: str = DEFAULT_COLLECTION) -> Dict[str, dict]:
pass
@abstractmethod
def delete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
pass
@abstractmethod
async def adelete(self, key: str, collection: str = DEFAULT_COLLECTION) -> bool:
pass
| BaseKVStore |
python | pyqtgraph__pyqtgraph | pyqtgraph/flowchart/library/Display.py | {
"start": 4038,
"end": 5530
} | class ____(Node):
"""Connection to a Canvas widget."""
nodeName = 'CanvasWidget'
def __init__(self, name):
Node.__init__(self, name, terminals={'In': {'io': 'in', 'multi': True}})
self.canvas = None
self.items = {}
def disconnected(self, localTerm, remoteTerm):
if localTerm is self.In and remoteTerm in self.items:
self.canvas.removeItem(self.items[remoteTerm])
del self.items[remoteTerm]
def setCanvas(self, canvas):
self.canvas = canvas
def getCanvas(self):
return self.canvas
def process(self, In, display=True):
if display:
items = set()
for name, vals in In.items():
if vals is None:
continue
if type(vals) is not list:
vals = [vals]
for val in vals:
vid = id(val)
if vid in self.items:
items.add(vid)
else:
self.canvas.addItem(val)
item = val
self.items[vid] = item
items.add(vid)
for vid in list(self.items.keys()):
if vid not in items:
#print "remove", self.items[vid]
self.canvas.removeItem(self.items[vid])
del self.items[vid]
| CanvasNode |
python | doocs__leetcode | solution/1500-1599/1560.Most Visited Sector in a Circular Track/Solution.py | {
"start": 0,
"end": 255
} | class ____:
def mostVisited(self, n: int, rounds: List[int]) -> List[int]:
if rounds[0] <= rounds[-1]:
return list(range(rounds[0], rounds[-1] + 1))
return list(range(1, rounds[-1] + 1)) + list(range(rounds[0], n + 1))
| Solution |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-databricks/llama_index/embeddings/databricks/base.py | {
"start": 582,
"end": 7426
} | class ____(BaseEmbedding):
"""
Databricks class for text embedding.
Databricks adheres to the OpenAI API, so this integration aligns closely with the existing OpenAIEmbedding class.
Args:
model (str): The unique ID of the embedding model as served by the Databricks endpoint.
endpoint (Optional[str]): The url of the Databricks endpoint. Can be set as an environment variable (`DATABRICKS_SERVING_ENDPOINT`).
api_key (Optional[str]): The Databricks API key to use. Can be set as an environment variable (`DATABRICKS_TOKEN`).
Examples:
`pip install llama-index-embeddings-databricks`
```python
import os
from llama_index.core import Settings
from llama_index.embeddings.databricks import DatabricksEmbedding
# Set up the DatabricksEmbedding class with the required model, API key and serving endpoint
os.environ["DATABRICKS_TOKEN"] = "<MY TOKEN>"
os.environ["DATABRICKS_SERVING_ENDPOINT"] = "<MY ENDPOINT>"
embed_model = DatabricksEmbedding(model="databricks-bge-large-en")
Settings.embed_model = embed_model
# Embed some text
embeddings = embed_model.get_text_embedding("The DatabricksEmbedding integration works great.")
```
"""
additional_kwargs: Dict[str, Any] = Field(
default_factory=dict, description="Additional kwargs as for the OpenAI API."
)
model: str = Field(
description="The ID of a model hosted on the databricks endpoint."
)
api_key: str = Field(description="The Databricks API key.")
endpoint: str = Field(description="The Databricks API endpoint.")
max_retries: int = Field(default=10, description="Maximum number of retries.", ge=0)
timeout: float = Field(default=60.0, description="Timeout for each request.", ge=0)
default_headers: Optional[Dict[str, str]] = Field(
default=None, description="The default headers for API requests."
)
reuse_client: bool = Field(
default=True,
description=(
"Reuse the client between requests. When doing anything with large "
"volumes of async API calls, setting this to false can improve stability."
),
)
_query_engine: str = PrivateAttr()
_text_engine: str = PrivateAttr()
_client: Optional[OpenAI] = PrivateAttr()
_aclient: Optional[AsyncOpenAI] = PrivateAttr()
_http_client: Optional[httpx.Client] = PrivateAttr()
def __init__(
self,
model: str,
endpoint: Optional[str] = None,
embed_batch_size: int = 100,
additional_kwargs: Optional[Dict[str, Any]] = None,
api_key: Optional[str] = None,
max_retries: int = 10,
timeout: float = 60.0,
reuse_client: bool = True,
callback_manager: Optional[CallbackManager] = None,
default_headers: Optional[Dict[str, str]] = None,
http_client: Optional[httpx.Client] = None,
num_workers: Optional[int] = None,
**kwargs: Any,
) -> None:
additional_kwargs = additional_kwargs or {}
api_key = get_from_param_or_env("api_key", api_key, "DATABRICKS_TOKEN")
endpoint = get_from_param_or_env(
"endpoint", endpoint, "DATABRICKS_SERVING_ENDPOINT"
)
super().__init__(
model=model,
endpoint=endpoint,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
model_name=model,
additional_kwargs=additional_kwargs,
api_key=api_key,
max_retries=max_retries,
reuse_client=reuse_client,
timeout=timeout,
default_headers=default_headers,
num_workers=num_workers,
**kwargs,
)
self._client = None
self._aclient = None
self._http_client = http_client
def _get_client(self) -> OpenAI:
if not self.reuse_client:
return OpenAI(**self._get_credential_kwargs())
if self._client is None:
self._client = OpenAI(**self._get_credential_kwargs())
return self._client
def _get_aclient(self) -> AsyncOpenAI:
    """Return an asynchronous OpenAI-compatible client.

    Mirrors :meth:`_get_client`: cached when ``reuse_client`` is enabled,
    freshly constructed per call otherwise.
    """
    if self.reuse_client:
        if self._aclient is None:
            self._aclient = AsyncOpenAI(**self._get_credential_kwargs())
        return self._aclient
    return AsyncOpenAI(**self._get_credential_kwargs())
@classmethod
def class_name(cls) -> str:
    """Return the registry/serialization identifier for this class."""
    return "DatabricksEmbedding"
def _get_credential_kwargs(self) -> Dict[str, Any]:
    """Build the keyword arguments used to construct (Async)OpenAI clients.

    The Databricks serving endpoint is passed as ``base_url`` so the OpenAI
    SDK talks to Databricks instead of api.openai.com.
    """
    return {
        "api_key": self.api_key,
        "base_url": self.endpoint,
        "max_retries": self.max_retries,
        "timeout": self.timeout,
        "default_headers": self.default_headers,
        "http_client": self._http_client,
    }
def _get_query_embedding(self, query: str) -> List[float]:
    """Embed a single query string synchronously.

    Delegates to the shared ``get_embedding`` helper, using ``self.model``
    as the engine name.
    """
    client = self._get_client()
    return get_embedding(
        client,
        query,
        engine=self.model,
        **self.additional_kwargs,
    )
async def _aget_query_embedding(self, query: str) -> List[float]:
    """Embed a single query string asynchronously.

    Async counterpart of :meth:`_get_query_embedding`.
    """
    aclient = self._get_aclient()
    return await aget_embedding(
        aclient,
        query,
        engine=self.model,
        **self.additional_kwargs,
    )
def _get_text_embedding(self, text: str) -> List[float]:
    """Embed a single document/text string synchronously."""
    client = self._get_client()
    return get_embedding(
        client,
        text,
        engine=self.model,
        **self.additional_kwargs,
    )
async def _aget_text_embedding(self, text: str) -> List[float]:
    """Embed a single document/text string asynchronously."""
    aclient = self._get_aclient()
    return await aget_embedding(
        aclient,
        text,
        engine=self.model,
        **self.additional_kwargs,
    )
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
    """Embed a batch of texts synchronously in one API call.

    Uses the batched ``get_embeddings`` helper rather than looping over
    :meth:`_get_text_embedding`, so the whole batch goes in a single request.
    """
    client = self._get_client()
    return get_embeddings(
        client,
        texts,
        engine=self.model,
        **self.additional_kwargs,
    )
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
    """Embed a batch of texts asynchronously in one API call."""
    aclient = self._get_aclient()
    return await aget_embeddings(
        aclient,
        texts,
        engine=self.model,
        **self.additional_kwargs,
    )
| DatabricksEmbedding |
python | jazzband__pip-tools | piptools/build.py | {
"start": 1106,
"end": 1225
} | class ____:
extras: tuple[str, ...]
requirements: tuple[InstallRequirement, ...]
@dataclass
| StaticProjectMetadata |
python | dask__distributed | distributed/tests/test_client.py | {
"start": 57349,
"end": 91282
} | class ____:
def __getstate__(self):
    # Pickles fine: the serialized state is just the integer 1.
    # The failure is injected on the *deserialization* side (__setstate__).
    return 1
def __setstate__(self, state):
    # Deliberately explode during unpickling so tests can observe how a
    # deserialization failure propagates (the message is matched in tests).
    raise TypeError("hello!")
@pytest.mark.skip
@gen_test()
async def test_badly_serialized_input_stderr(capsys, c):
o = BadlySerializedObject()
future = c.submit(inc, o)
while True:
sleep(0.01)
out, err = capsys.readouterr()
if "hello!" in err:
break
assert future.status == "error"
@pytest.mark.parametrize(
"func",
[
str,
repr,
operator.methodcaller("_repr_html_"),
],
)
def test_repr(loop, func):
nworkers = 6
with cluster(nworkers=nworkers, worker_kwargs={"memory_limit": "2 GiB"}) as (
s,
*workers,
):
with Client(s["address"], loop=loop) as c:
# NOTE: Intentionally testing when we have more workers than the default
# in `client.scheduler_info()` (xref https://github.com/dask/distributed/issues/9065)
info = c.scheduler_info()
assert len(info["workers"]) < nworkers
text = func(c)
assert c.scheduler.address in text
assert f"threads={nworkers}" in text or "Total threads: </strong>" in text
assert f"{2 * nworkers}.00 GiB" in text
if "<table" not in text:
assert len(text) < 80
text = func(c)
assert "No scheduler connected" in text
@gen_cluster(client=True)
async def test_repr_async(c, s, a, b):
c._repr_html_()
@gen_cluster(client=True, worker_kwargs={"memory_limit": None})
async def test_repr_no_memory_limit(c, s, a, b):
c._repr_html_()
@gen_test()
async def test_repr_localcluster():
async with (
LocalCluster(
processes=False, dashboard_address=":0", asynchronous=True
) as cluster,
Client(cluster, asynchronous=True) as client,
):
text = client._repr_html_()
assert cluster.scheduler.address in text
assert is_valid_xml(client._repr_html_())
@gen_cluster(client=True)
async def test_forget_simple(c, s, a, b):
x = c.submit(inc, 1, retries=2)
y = c.submit(inc, 2)
z = c.submit(add, x, y, workers=[a.ip], allow_other_workers=True)
await wait([x, y, z])
assert not s.tasks[x.key].waiting_on
assert not s.tasks[y.key].waiting_on
assert set(s.tasks) == {x.key, y.key, z.key}
s.client_releases_keys(keys=[x.key], client=c.id)
assert x.key in s.tasks
s.client_releases_keys(keys=[z.key], client=c.id)
assert x.key not in s.tasks
assert z.key not in s.tasks
assert not s.tasks[y.key].dependents
s.client_releases_keys(keys=[y.key], client=c.id)
assert not s.tasks
@gen_cluster(client=True)
async def test_forget_complex(e, s, A, B):
a, b, c, d = await e.scatter(list(range(4)))
ab = e.submit(add, a, b)
cd = e.submit(add, c, d)
ac = e.submit(add, a, c)
acab = e.submit(add, ac, ab)
await wait([a, b, c, d, ab, ac, cd, acab])
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[ab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ab, ac, cd, acab, a, b, c, d]}
s.client_releases_keys(keys=[b.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, acab, a, c, d]}
s.client_releases_keys(keys=[acab.key], client=e.id)
assert set(s.tasks) == {f.key for f in [ac, cd, a, c, d]}
assert b.key not in s.tasks
while b.key in A.data or b.key in B.data:
await asyncio.sleep(0.01)
s.client_releases_keys(keys=[ac.key], client=e.id)
assert set(s.tasks) == {f.key for f in [cd, a, c, d]}
@gen_cluster(client=True)
async def test_forget_in_flight(e, s, A, B):
delayed2 = partial(delayed, pure=True)
a, b, c, d = (delayed2(slowinc)(i) for i in range(4))
ab = delayed2(slowadd)(a, b, dask_key_name="ab")
cd = delayed2(slowadd)(c, d, dask_key_name="cd")
ac = delayed2(slowadd)(a, c, dask_key_name="ac")
acab = delayed2(slowadd)(ac, ab, dask_key_name="acab")
x, y = e.compute([ac, acab])
s.validate_state()
for _ in range(5):
await asyncio.sleep(0.01)
s.validate_state()
s.client_releases_keys(keys=[y.key], client=e.id)
s.validate_state()
for k in [acab.key, ab.key, b.key]:
assert k not in s.tasks
@gen_cluster(client=True)
async def test_forget_errors(c, s, a, b):
x = c.submit(div, 1, 0)
y = c.submit(inc, x)
z = c.submit(inc, y)
await wait([z])
assert s.tasks[x.key].exception
assert s.tasks[x.key].exception_blame
assert s.tasks[y.key].exception_blame
assert s.tasks[z.key].exception_blame
s.client_releases_keys(keys=[z.key], client=c.id)
assert s.tasks[x.key].exception
assert s.tasks[x.key].exception_blame
assert s.tasks[y.key].exception_blame
assert z.key not in s.tasks
s.client_releases_keys(keys=[x.key], client=c.id)
assert s.tasks[x.key].exception
assert s.tasks[x.key].exception_blame
assert s.tasks[y.key].exception_blame
assert z.key not in s.tasks
s.client_releases_keys(keys=[y.key], client=c.id)
assert x.key not in s.tasks
assert y.key not in s.tasks
assert z.key not in s.tasks
def test_repr_sync(c):
s = str(c)
r = repr(c)
assert c.scheduler.address in s
assert c.scheduler.address in r
assert str(2) in s # nworkers
assert "cores" in s or "threads" in s
@gen_cluster()
async def test_multi_client(s, a, b):
async with Client(s.address, asynchronous=True) as f:
async with Client(s.address, asynchronous=True) as c:
assert set(s.client_comms) == {c.id, f.id}
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
assert {ts.key for ts in s.clients[c.id].wants_what} == {x.key, y.key}
assert {ts.key for ts in s.clients[f.id].wants_what} == {y.key}
while c.id in s.clients:
await asyncio.sleep(0.01)
assert c.id not in s.clients
assert c.id not in s.tasks[y.key].who_wants
assert x.key not in s.tasks
while s.tasks:
await asyncio.sleep(0.01)
def long_running_client_connection(address):
    """Subprocess target: connect a client, run one task, then hang.

    Used by test_cleanup_after_broken_client_connection — the parent process
    kills this process mid-sleep to simulate an abruptly broken client.
    """
    with Client(address, loop=None) as c:
        x = c.submit(lambda x: x + 1, 10)
        x.result()
        sleep(100)
@gen_cluster()
async def test_cleanup_after_broken_client_connection(s, a, b):
proc = get_mp_context().Process(
target=long_running_client_connection, args=(s.address,)
)
proc.daemon = True
proc.start()
while not s.tasks:
await asyncio.sleep(0.01)
proc.terminate()
while s.tasks:
await asyncio.sleep(0.01)
@gen_cluster()
async def test_multi_garbage_collection(s, a, b):
async with (
Client(s.address, asynchronous=True) as c,
Client(s.address, asynchronous=True) as f,
):
x = c.submit(inc, 1)
y = f.submit(inc, 2)
y2 = c.submit(inc, 2)
assert y.key == y2.key
await wait([x, y])
x.__del__()
while x.key in a.data or x.key in b.data:
await asyncio.sleep(0.01)
assert {ts.key for ts in s.clients[c.id].wants_what} == {y.key}
assert {ts.key for ts in s.clients[f.id].wants_what} == {y.key}
y.__del__()
while x.key in {ts.key for ts in s.clients[f.id].wants_what}:
await asyncio.sleep(0.01)
await asyncio.sleep(0.1)
assert y.key in a.data or y.key in b.data
assert {ts.key for ts in s.clients[c.id].wants_what} == {y.key}
assert not s.clients[f.id].wants_what
y2.__del__()
while y.key in a.data or y.key in b.data:
await asyncio.sleep(0.01)
assert not s.tasks
@gen_cluster(client=True, config=NO_AMM)
async def test__broadcast(c, s, a, b):
    # broadcast=True should place every scattered value on every worker.
    x, y = await c.scatter([1, 2], broadcast=True)
    assert a.data == b.data == {x.key: 1, y.key: 2}
@gen_cluster(client=True, nthreads=[("127.0.0.1", 1)] * 4, config=NO_AMM)
async def test__broadcast_integer(c, s, *workers):
    # broadcast=N replicates each value to exactly N of the 4 workers.
    x, y = await c.scatter([1, 2], broadcast=2)
    assert len(s.tasks[x.key].who_has) == 2
    assert len(s.tasks[y.key].who_has) == 2
@gen_cluster(client=True, config=NO_AMM)
async def test__broadcast_dict(c, s, a, b):
    # Scattering a dict keeps the caller-provided keys ("x") on every worker.
    d = await c.scatter({"x": 1}, broadcast=True)
    assert a.data == b.data == {"x": 1}
@gen_cluster(client=True)
async def test_proxy(c, s, a, b):
    # The scheduler can proxy an arbitrary RPC message to a specific worker.
    msg = await c.scheduler.proxy(msg={"op": "identity"}, worker=a.address)
    assert msg["id"] == a.identity()["id"]
@gen_cluster(client=True)
async def test_cancel(c, s, a, b):
    """Cancelling a future also cancels its dependents and clears the scheduler."""
    x = c.submit(slowinc, 1)
    y = c.submit(slowinc, x)
    # Wait until the dependent task is known to the scheduler before cancelling.
    while y.key not in s.tasks:
        await asyncio.sleep(0.01)
    await c.cancel([x])
    assert x.cancelled()
    assert "cancel" in str(x)
    s.validate_state()
    # y depends on x, so it must eventually be cancelled as well.
    while not y.cancelled():
        await asyncio.sleep(0.01)
    # Once both are cancelled the scheduler should hold no tasks at all.
    assert not s.tasks
    s.validate_state()
@gen_cluster(client=True)
async def test_cancel_tuple_key(c, s, a, b):
x = c.submit(inc, 1, key=("x", 0, 1))
await x
await c.cancel(x)
with pytest.raises(CancelledError):
await x
@gen_cluster()
async def test_cancel_multi_client(s, a, b):
async with Client(s.address, asynchronous=True, name="c") as c:
async with Client(s.address, asynchronous=True, name="f") as f:
x = c.submit(slowinc, 1)
y = f.submit(slowinc, 1)
assert x.key == y.key
# Ensure both clients are known to the scheduler.
await y
await x
await c.cancel([x])
# Give the scheduler time to pass messages
await asyncio.sleep(0.1)
assert x.cancelled()
assert not y.cancelled()
out = await y
assert out == 2
with pytest.raises(CancelledError):
await x
@gen_cluster(nthreads=[("", 1)], client=True)
async def test_cancel_before_known_to_scheduler(c, s, a):
f = c.submit(inc, 1)
f2 = c.submit(inc, f)
await c.cancel([f])
assert f.cancelled()
with pytest.raises(CancelledError):
await f2
assert any(f"Scheduler cancels key {f.key}" in msg for _, msg in s.get_logs())
@gen_cluster(client=True)
async def test_cancel_collection(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await c.cancel(x)
await c.cancel([x])
assert all(f.cancelled() for f in L)
while s.tasks:
await asyncio.sleep(0.01)
def test_cancel_sync(c):
    """Synchronous-API variant: cancelling y cancels dependent z but not ancestor x."""
    x = c.submit(slowinc, 1, key="x")
    y = c.submit(slowinc, x, key="y")
    z = c.submit(slowinc, y, key="z")
    c.cancel([y])
    start = time()
    # z depends on the cancelled y, so it must become cancelled too.
    while not z.cancelled():
        sleep(0.01)
    assert time() < start + 30
    # x is upstream of the cancellation and must still complete normally.
    assert x.result() == 2
    # Future.cancel() is the per-future spelling of Client.cancel([...]).
    z.cancel()
    assert z.cancelled()
@gen_cluster(client=True)
async def test_future_type(c, s, a, b):
    # After completion a future reports the Python type of its result,
    # and that type shows up in the repr.
    x = c.submit(inc, 1)
    await wait([x])
    assert x.type == int
    assert "int" in str(x)
@gen_cluster(client=True)
async def test_traceback_clean(c, s, a, b):
x = c.submit(div, 1, 0)
try:
await x
except Exception as e:
f = e
exc_type, exc_value, tb = sys.exc_info()
while tb:
assert "scheduler" not in tb.tb_frame.f_code.co_filename
assert "worker" not in tb.tb_frame.f_code.co_filename
tb = tb.tb_next
@gen_cluster(client=True)
async def test_map_differnet_lengths(c, s, a, b):
    # NOTE(review): "differnet" is a typo for "different"; kept as-is because
    # renaming would change pytest collection/selection of this test.
    # map() over unequal-length iterables truncates to the shorter one (zip-like).
    assert len(c.map(add, [1, 2], [1, 2, 3])) == 2
def test_Future_exception_sync_2(loop, capsys):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
assert dask.base.get_scheduler() == c.get
out, err = capsys.readouterr()
assert len(out.strip().split("\n")) == 1
assert dask.base.get_scheduler() != c.get
@gen_cluster(timeout=60, client=True)
async def test_async_persist(c, s, a, b):
from dask.delayed import Delayed, delayed
x = delayed(1)
y = delayed(inc)(x)
z = delayed(dec)(x)
w = delayed(add)(y, z)
yy, ww = c.persist([y, w])
assert type(yy) == type(y)
assert type(ww) == type(w)
assert len(yy.dask) == 1
assert len(ww.dask) == 1
assert len(w.dask) > 1
assert y.__dask_keys__() == yy.__dask_keys__()
assert w.__dask_keys__() == ww.__dask_keys__()
while y.key not in s.tasks and w.key not in s.tasks:
await asyncio.sleep(0.01)
assert {cs.client_key for cs in s.tasks[y.key].who_wants} == {c.id}
assert {cs.client_key for cs in s.tasks[w.key].who_wants} == {c.id}
yyf, wwf = c.compute([yy, ww])
yyy, www = await c.gather([yyf, wwf])
assert yyy == inc(1)
assert www == add(inc(1), dec(1))
assert isinstance(c.persist(y), Delayed)
assert isinstance(c.persist([y]), (list, tuple))
@gen_cluster(client=True)
async def test_persist_async(c, s, a, b):
pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
g, h = c.compute([y, yy])
gg, hh = await c.gather([g, h])
assert (gg == hh).all()
def test_persist(c):
pytest.importorskip("numpy")
da = pytest.importorskip("dask.array")
x = da.ones((10, 10), chunks=(5, 10))
y = 2 * (x + 1)
assert len(y.dask) == 6
yy = c.persist(y)
assert len(y.dask) == 6
assert len(yy.dask) == 2
assert all(isinstance(v, Future) for v in yy.dask.values())
assert yy.__dask_keys__() == y.__dask_keys__()
zz = yy.compute()
z = y.compute()
assert (zz == z).all()
@gen_cluster(timeout=60, client=True)
async def test_long_traceback(c, s, a, b):
from distributed.protocol.pickle import dumps
def deep(n):
if n == 0:
1 / 0
else:
return deep(n - 1)
x = c.submit(deep, 200)
await wait([x])
assert len(dumps(c.futures[x.key].traceback)) < 10000
assert isinstance(c.futures[x.key].exception, ZeroDivisionError)
@gen_cluster(client=True)
async def test_wait_on_collections(c, s, a, b):
L = c.map(double, [[1], [2], [3]])
x = db.Bag({("b", i): f for i, f in enumerate(L)}, "b", 3)
await wait(x)
assert all(f.key in a.data or f.key in b.data for f in L)
@gen_cluster(client=True)
async def test_futures_of_get(c, s, a, b):
x, y, z = c.map(inc, [1, 2, 3])
assert set(futures_of(0)) == set()
assert set(futures_of(x)) == {x}
assert set(futures_of([x, y, z])) == {x, y, z}
assert set(futures_of([x, [y], [[z]]])) == {x, y, z}
assert set(futures_of({"x": x, "y": [y]})) == {x, y}
b = db.Bag({("b", i): f for i, f in enumerate([x, y, z])}, "b", 3)
assert set(futures_of(b)) == {x, y, z}
def test_futures_of_class():
    # futures_of must ignore class objects (here the dask.array.Array type
    # itself, not an instance) instead of trying to traverse them.
    pytest.importorskip("numpy")
    da = pytest.importorskip("dask.array")
    assert futures_of([da.Array]) == []
@gen_cluster(client=True)
async def test_futures_of_cancelled_raises(c, s, a, b):
x = c.submit(inc, 1)
while x.key not in s.tasks:
await asyncio.sleep(0.01)
await c.cancel([x], reason="testreason")
with pytest.raises(CancelledError, match="reason: testreason"):
await x
while x.key in s.tasks:
await asyncio.sleep(0.01)
with pytest.raises(CancelledError, match="reason: lost dependencies"):
get_obj = c.get({"x": (inc, x), "y": (inc, 2)}, ["x", "y"], sync=False)
gather_obj = c.gather(get_obj)
await gather_obj
with pytest.raises(CancelledError, match="reason: lost dependencies"):
await c.submit(inc, x)
with pytest.raises(CancelledError, match="reason: lost dependencies"):
await c.submit(add, 1, y=x)
with pytest.raises(CancelledError, match="reason: lost dependencies"):
await c.gather(c.map(add, [1], y=x))
@pytest.mark.skip(reason="Use fast random selection now")
@gen_cluster(client=True)
async def test_balance_tasks_by_stacks(c, s, a, b):
x = c.submit(inc, 1)
await wait(x)
y = c.submit(inc, 2)
await wait(y)
assert len(a.data) == len(b.data) == 1
@gen_cluster(client=True)
async def test_run(c, s, a, b):
results = await c.run(inc, 1)
assert results == {a.address: 2, b.address: 2}
results = await c.run(inc, 1, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(inc, 1, workers=[])
assert results == {}
@gen_cluster(client=True)
async def test_run_handles_picklable_data(c, s, a, b):
futures = c.map(inc, range(10))
await wait(futures)
def func():
return {}, set(), [], (), 1, "hello", b"100"
results = await c.run_on_scheduler(func)
assert results == func()
results = await c.run(func)
assert results == {w.address: func() for w in [a, b]}
def test_run_sync(c, s, a, b):
def func(x, y=10):
return x + y
result = c.run(func, 1, y=2)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(func, 1, y=2, workers=[a["address"]])
assert result == {a["address"]: 3}
@gen_cluster(client=True)
async def test_run_coroutine(c, s, a, b):
results = await c.run(asyncinc, 1, delay=0.05)
assert results == {a.address: 2, b.address: 2}
results = await c.run(asyncinc, 1, delay=0.05, workers=[a.address])
assert results == {a.address: 2}
results = await c.run(asyncinc, 1, workers=[])
assert results == {}
with pytest.raises(RuntimeError, match="hello"):
await c.run(throws, 1)
results = await c.run(asyncinc, 2, delay=0.01)
assert results == {a.address: 3, b.address: 3}
def test_run_coroutine_sync(c, s, a, b):
result = c.run(asyncinc, 2, delay=0.01)
assert result == {a["address"]: 3, b["address"]: 3}
result = c.run(asyncinc, 2, workers=[a["address"]])
assert result == {a["address"]: 3}
t1 = time()
result = c.run(asyncinc, 2, delay=10, wait=False)
t2 = time()
assert result is None
assert t2 - t1 <= 1.0
@gen_cluster(client=True)
async def test_run_exception(c, s, a, b):
class MyError(Exception):
pass
def raise_exception(dask_worker, addr):
if addr == dask_worker.address:
raise MyError("informative message")
return 123
with pytest.raises(MyError, match="informative message"):
await c.run(raise_exception, addr=a.address)
with pytest.raises(MyError, match="informative message"):
await c.run(raise_exception, addr=a.address, on_error="raise")
with pytest.raises(ValueError, match="on_error must be"):
await c.run(raise_exception, addr=a.address, on_error="invalid")
out = await c.run(raise_exception, addr=a.address, on_error="return")
assert isinstance(out[a.address], MyError)
assert out[b.address] == 123
out = await c.run(raise_exception, addr=a.address, on_error="ignore")
assert out == {b.address: 123}
@gen_cluster(client=True, config={"distributed.comm.timeouts.connect": "200ms"})
async def test_run_rpc_error(c, s, a, b):
a.stop()
with pytest.raises(OSError, match="Timed out trying to connect"):
await c.run(inc, 1)
with pytest.raises(OSError, match="Timed out trying to connect"):
await c.run(inc, 1, on_error="raise")
out = await c.run(inc, 1, on_error="return")
assert isinstance(out[a.address], OSError)
assert out[b.address] == 2
out = await c.run(inc, 1, on_error="ignore")
assert out == {b.address: 2}
def test_diagnostic_ui(loop):
with cluster() as (s, [a, b]):
a_addr = a["address"]
b_addr = b["address"]
with Client(s["address"], loop=loop) as c:
d = c.nthreads()
assert d == {a_addr: 1, b_addr: 1}
d = c.nthreads([a_addr])
assert d == {a_addr: 1}
d = c.nthreads(a_addr)
assert d == {a_addr: 1}
d = c.nthreads(a["address"])
assert d == {a_addr: 1}
x = c.submit(inc, 1)
y = c.submit(inc, 2)
z = c.submit(inc, 3)
wait([x, y, z])
d = c.who_has()
assert set(d) == {x.key, y.key, z.key}
assert all(w in [a_addr, b_addr] for v in d.values() for w in v)
assert all(d.values())
d = c.who_has([x, y])
assert set(d) == {x.key, y.key}
d = c.who_has(x)
assert set(d) == {x.key}
d = c.has_what()
assert set(d) == {a_addr, b_addr}
assert all(k in [x.key, y.key, z.key] for v in d.values() for k in v)
d = c.has_what([a_addr])
assert set(d) == {a_addr}
d = c.has_what(a_addr)
assert set(d) == {a_addr}
def test_diagnostic_nbytes_sync(c):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
wait(incs + doubles)
assert c.nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert c.nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True)
async def test_diagnostic_nbytes(c, s, a, b):
incs = c.map(inc, [1, 2, 3])
doubles = c.map(double, [1, 2, 3])
await wait(incs + doubles)
assert s.get_nbytes(summary=False) == {k.key: sizeof(1) for k in incs + doubles}
assert s.get_nbytes(summary=True) == {"inc": sizeof(1) * 3, "double": sizeof(1) * 3}
@gen_cluster(client=True, nthreads=[])
async def test_worker_aliases(c, s):
a = Worker(s.address, name="alice")
b = Worker(s.address, name="bob")
w = Worker(s.address, name=3)
await asyncio.gather(a, b, w)
L = c.map(inc, range(10), workers="alice")
future = await c.scatter(123, workers=3)
await wait(L)
assert len(a.data) == 10
assert len(b.data) == 0
assert dict(w.data) == {future.key: 123}
for i, alias in enumerate([3, [3], "alice"]):
result = await c.submit(lambda x: x + 1, i, workers=alias)
assert result == i + 1
await asyncio.gather(a.close(), b.close(), w.close())
def test_persist_get_sync(c):
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
assert xxyy3.compute() == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.parametrize("do_wait", [True, False])
@gen_cluster(client=True)
async def test_persist_get(c, s, a, b, do_wait):
x, y = delayed(1), delayed(2)
xx = delayed(add)(x, x)
yy = delayed(add)(y, y)
xxyy = delayed(add)(xx, yy)
xxyy2 = c.persist(xxyy)
xxyy3 = delayed(add)(xxyy2, 10)
if do_wait:
await wait(xxyy2)
result = await c.gather(c.get(xxyy3.dask, xxyy3.__dask_keys__(), sync=False))
assert result[0] == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
result = await c.compute(xxyy3)
assert result == ((1 + 1) + (2 + 2)) + 10
@pytest.mark.skipif(WINDOWS, reason="num_fds not supported on windows")
def test_client_num_fds(loop):
with cluster() as (s, [a, b]):
proc = psutil.Process()
with Client(s["address"], loop=loop) as c: # first client to start loop
before = proc.num_fds() # measure
for _ in range(4):
with Client(s["address"], loop=loop): # start more clients
pass
start = time()
while proc.num_fds() > before:
sleep(0.01)
assert time() < start + 10, (before, proc.num_fds())
@gen_cluster()
async def test_startup_close_startup(s, a, b):
    # A scheduler must accept a fresh client connection after a previous
    # client has been cleanly closed.
    async with Client(s.address, asynchronous=True):
        pass
    async with Client(s.address, asynchronous=True):
        pass
def test_startup_close_startup_sync(loop):
with cluster() as (s, [a, b]):
with Client(s["address"], loop=loop) as c:
sleep(0.1)
with Client(s["address"], loop=None) as c:
pass
with Client(s["address"], loop=None) as c:
pass
sleep(0.1)
with Client(s["address"], loop=None) as c:
pass
@gen_cluster(client=True)
async def test_badly_serialized_exceptions(c, s, a, b):
def f():
class BadlySerializedException(Exception):
def __reduce__(self):
raise TypeError()
raise BadlySerializedException("hello world")
x = c.submit(f)
with pytest.raises(Exception, match="hello world"):
await x
# Set rebalance() to work predictably on small amounts of managed memory. By default, it
# uses optimistic memory, which would only be possible to test by allocating very large
# amounts of managed memory, so that they would hide variations in unmanaged memory.
# Zeroing sender-min and the sender/recipient gap means *any* imbalance in
# managed memory is eligible for rebalancing, so tiny test payloads move.
REBALANCE_MANAGED_CONFIG = merge(
    NO_AMM,
    {
        "distributed.worker.memory.rebalance.measure": "managed",
        "distributed.worker.memory.rebalance.sender-min": 0,
        "distributed.worker.memory.rebalance.sender-recipient-gap": 0,
    },
)
@gen_cluster(client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance(c, s, a, b):
"""Test Client.rebalance(). These are just to test the Client wrapper around
Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
"""
futures = await c.scatter(range(100), workers=[a.address])
assert len(a.data) == 100
assert len(b.data) == 0
await c.rebalance()
assert len(a.data) == 50
assert len(b.data) == 50
@gen_cluster(nthreads=[("", 1)] * 3, client=True, config=REBALANCE_MANAGED_CONFIG)
async def test_rebalance_workers_and_keys(client, s, a, b, c):
"""Test Client.rebalance(). These are just to test the Client wrapper around
Scheduler.rebalance(); for more thorough tests on the latter see test_scheduler.py.
"""
futures = await client.scatter(range(100), workers=[a.address])
assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)
# Passing empty iterables is not the same as omitting the arguments
await client.rebalance([])
await client.rebalance(workers=[])
assert (len(a.data), len(b.data), len(c.data)) == (100, 0, 0)
# Limit rebalancing to two arbitrary keys and two arbitrary workers.
await client.rebalance([futures[3], futures[7]], [a.address, b.address])
assert (len(a.data), len(b.data), len(c.data)) == (98, 2, 0)
with pytest.raises(KeyError):
await client.rebalance(workers=["notexist"])
def test_rebalance_sync(loop):
with dask.config.set(REBALANCE_MANAGED_CONFIG):
with Client(
n_workers=2, processes=False, dashboard_address=":0", loop=loop
) as c:
s = c.cluster.scheduler
a = c.cluster.workers[0]
b = c.cluster.workers[1]
futures = c.scatter(range(100), workers=[a.address])
assert len(a.data) == 100
assert len(b.data) == 0
c.rebalance()
assert len(a.data) == 50
assert len(b.data) == 50
@gen_cluster(client=True, config=NO_AMM)
async def test_rebalance_unprepared(c, s, a, b):
"""Client.rebalance() internally waits for unfinished futures"""
futures = c.map(slowinc, range(10), delay=0.05, workers=a.address)
# Let the futures reach the scheduler
await asyncio.sleep(0.1)
# We didn't wait enough for futures to complete. However, Client.rebalance() will
# block until all futures are completed before invoking Scheduler.rebalance().
await c.rebalance(futures)
s.validate_state()
@gen_cluster(client=True)
async def test_receive_lost_key(c, s, a, b):
x = c.submit(inc, 1, workers=[a.address])
await x
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True)
async def test_unrunnable_task_runs(c, s, a, b):
x = c.submit(inc, 1, workers=[a.ip])
await x
await a.close()
while x.status == "finished":
await asyncio.sleep(0.01)
assert s.tasks[x.key] in s.unrunnable
assert s.get_task_status(keys=[x.key]) == {x.key: "no-worker"}
async with Worker(s.address):
while x.status != "finished":
await asyncio.sleep(0.01)
assert s.tasks[x.key] not in s.unrunnable
result = await x
assert result == 2
@gen_cluster(client=True, nthreads=[])
async def test_add_worker_after_tasks(c, s):
    # Tasks submitted to a worker-less scheduler must run once a worker
    # (here a Nanny-managed one) joins.
    futures = c.map(inc, range(10))
    async with Nanny(s.address, nthreads=2):
        await c.gather(futures)
@pytest.mark.skipif(not LINUX, reason="Need 127.0.0.2 to mean localhost")
@gen_cluster([("127.0.0.1", 1), ("127.0.0.2", 2)], client=True, config=NO_AMM)
async def test_workers_register_indirect_data(c, s, a, b):
[x] = await c.scatter([1], workers=a.address)
y = c.submit(inc, x, workers=b.ip)
await y
assert b.data[x.key] == 1
assert s.tasks[x.key].who_has == {s.workers[a.address], s.workers[b.address]}
assert s.workers[b.address].has_what == {s.tasks[x.key], s.tasks[y.key]}
s.validate_state()
@gen_cluster(client=True)
async def test_submit_on_cancelled_future(c, s, a, b):
    # Submitting new work that depends on an already-cancelled future
    # must surface a CancelledError rather than silently resubmitting.
    x = c.submit(inc, 1)
    await x
    await c.cancel(x)
    with pytest.raises(CancelledError):
        await c.submit(inc, x)
@pytest.mark.parametrize("validate", [True, False])
@pytest.mark.parametrize("swap_keys", [True, False])
@gen_cluster(client=True)
async def test_compute_partially_forgotten(c, s, *workers, validate, swap_keys):
if not validate:
s.validate = False
# (CPython impl detail)
# While it is not possible to know what the iteration order of a set will
# be, it is deterministic and only depends on the hash of the inserted
# elements. Therefore, converting the set to a list will always yield the
# same order. We're initializing the keys in this very specific order to
# ensure that the scheduler internally arranges the keys in this way
# We'll need the list to be
# ['key', 'lost_dep_of_key']
# At the time of writing, it is unclear why the lost_dep_of_key is part of
# keys but this triggers an observed error
keys = key, lost_dep_of_key = list({"foo", "bar"})
if swap_keys:
keys = lost_dep_of_key, key = [key, lost_dep_of_key]
# Ordinarily this is not submitted as a graph but it could be if a persist
# was leading up to this
task = Task(key, inc, TaskRef(lost_dep_of_key))
# Only happens if it is submitted twice. The first submission leaves a
# zombie task around after triggering the "lost deps" exception. That zombie
# causes the second one to trigger the transition error.
res = c.get({task.key: task}, keys, sync=False)
res = c.get({task.key: task}, keys, sync=False)
with pytest.raises(CancelledError, match="lost dependencies"):
await res[1].result()
with pytest.raises(CancelledError, match="lost dependencies"):
await res[0].result()
# No transition errors
while (
# This waits until update-graph is truly finished
len([msg[1]["action"] == "update-graph" for msg in s.get_events("scheduler")])
< 2
):
await asyncio.sleep(0.01)
assert not s.get_events("transitions")
assert not s.tasks
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1)] * 10,
config=NO_AMM,
)
async def test_replicate(c, s, *workers):
[a, b] = await c.scatter([1, 2])
await s.replicate(keys=[a.key, b.key], n=5)
s.validate_state()
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers) == 5
assert sum(b.key in w.data for w in workers) == 5
@gen_cluster(client=True, config=NO_AMM)
async def test_replicate_tuple_keys(c, s, a, b):
x = delayed(inc)(1, dask_key_name=("x", 1))
f = c.persist(x)
await c.replicate(f, n=5)
s.validate_state()
assert a.data and b.data
await c.rebalance(f)
s.validate_state()
@gen_cluster(
client=True,
nthreads=[("127.0.0.1", 1)] * 10,
config=NO_AMM,
)
async def test_replicate_workers(c, s, *workers):
[a, b] = await c.scatter([1, 2], workers=[workers[0].address])
await s.replicate(
keys=[a.key, b.key], n=5, workers=[w.address for w in workers[:5]]
)
assert len(s.tasks[a.key].who_has) == 5
assert len(s.tasks[b.key].who_has) == 5
assert sum(a.key in w.data for w in workers[:5]) == 5
assert sum(b.key in w.data for w in workers[:5]) == 5
assert sum(a.key in w.data for w in workers[5:]) == 0
assert sum(b.key in w.data for w in workers[5:]) == 0
await s.replicate(keys=[a.key, b.key], n=1)
assert len(s.tasks[a.key].who_has) == 1
assert len(s.tasks[b.key].who_has) == 1
assert sum(a.key in w.data for w in workers) == 1
assert sum(b.key in w.data for w in workers) == 1
s.validate_state()
await s.replicate(keys=[a.key, b.key], n=None) # all
assert len(s.tasks[a.key].who_has) == 10
assert len(s.tasks[b.key].who_has) == 10
s.validate_state()
await s.replicate(
keys=[a.key, b.key], n=1, workers=[w.address for w in workers[:5]]
)
assert sum(a.key in w.data for w in workers[:5]) == 1
assert sum(b.key in w.data for w in workers[:5]) == 1
assert sum(a.key in w.data for w in workers[5:]) == 5
assert sum(b.key in w.data for w in workers[5:]) == 5
s.validate_state()
| BadlySerializedObject |
python | google__jax | jax/_src/pallas/core.py | {
"start": 36981,
"end": 44902
} | class ____:
"""Encodes the grid parameters for :func:`jax.experimental.pallas.pallas_call`.
See the documentation for :func:`jax.experimental.pallas.pallas_call`,
and also :ref:`pallas_grids_and_blockspecs` for a more detailed
description of the parameters.
"""
# A canonicalized internal version is in GridMapping.
grid: TupleGrid
grid_names: tuple[Hashable, ...] | None
in_specs: BlockSpecTree
out_specs: BlockSpecTree
scratch_shapes: ScratchShapeTree = ()
def __init__(
self,
grid: Grid = (),
in_specs: BlockSpecTree = no_block_spec,
out_specs: BlockSpecTree = no_block_spec,
scratch_shapes: ScratchShapeTree = (),
):
# Be more lenient for in/out_specs
if isinstance(in_specs, list):
in_specs = tuple(in_specs)
elif in_specs is not no_block_spec and not isinstance(in_specs, Sequence):
raise ValueError(f"`in_specs` must be a tuple or a list. Found: {in_specs}")
if isinstance(out_specs, list):
out_specs = tuple(out_specs)
self.in_specs = in_specs
self.out_specs = out_specs
self.scratch_shapes = tuple(scratch_shapes)
grid_names = None
if isinstance(grid, int):
grid = (grid,)
elif grid and isinstance(grid[0], tuple): # Check if we have a named grid
grid_names, grid = util.unzip2(grid) # type: ignore
# TODO(b/353730556): allow NumPy scalars in grids
if not all(_is_valid_grid_dim(g) for g in grid): # type: ignore
raise ValueError(
f"Grid must be a tuple of integers or jax.Array, got {grid}"
)
self.grid = grid # type: ignore
self.grid_names = grid_names
def _make_scalar_ref_aval(self, aval):
assert False # Not needed in GridSpec
def get_grid_mapping(
grid_spec: GridSpec,
in_avals: Sequence[jax_core.AbstractValue],
in_tree: tree_util.PyTreeDef,
in_origins: Sequence[OriginStr],
out_avals: Sequence[jax_core.AbstractValue],
out_tree: tree_util.PyTreeDef,
out_origins: Sequence[OriginStr],
debug: bool = False,
) -> tuple[tuple[jax_core.AbstractValue, ...], GridMapping]:
if dynamic_shapes_export_enabled():
dim_check : Any = jax_core.is_dim
else:
dim_check : Any = jax_core.is_constant_dim # type: ignore[no-redef]
assert all(i is None or dim_check(i) for i in grid_spec.grid)
grid_mapping_grid = tuple(
dynamic_grid_dim if d is None else d for d in grid_spec.grid
)
# The inputs for the index maps
index_map_avals = (
index_map_grid_aval.update(sharding=jax_core.get_cur_mesh_sharding()),
) * len(grid_spec.grid)
index_map_tree = tree_util.tree_structure((index_map_avals, {}))
num_scalar_prefetch: int = getattr(grid_spec, "num_scalar_prefetch", 0)
if num_scalar_prefetch:
all_avals = tree_util.tree_unflatten(in_tree, in_avals)
scalar_avals, unflat_in_avals = split_list(
all_avals, [num_scalar_prefetch])
flat_scalar_avals, scalar_tree = tree_util.tree_flatten(scalar_avals)
num_flat_scalar_prefetch = len(flat_scalar_avals)
scalar_ref_avals = [
grid_spec._make_scalar_ref_aval(aval)
for aval in flat_scalar_avals]
jaxpr_scalar_ref_avals = tree_util.tree_unflatten(
scalar_tree, scalar_ref_avals)
in_avals, in_tree = tree_util.tree_flatten(tuple(unflat_in_avals))
index_map_tree = tree_util.tree_structure(((*index_map_avals,
*scalar_avals), {}))
index_map_avals = (*index_map_avals, *scalar_ref_avals)
del scalar_ref_avals, flat_scalar_avals, scalar_tree
del scalar_avals, unflat_in_avals, all_avals
else:
num_flat_scalar_prefetch = 0
jaxpr_scalar_ref_avals = ()
if grid_spec.scratch_shapes:
flat_scratch_shapes, scratch_tree = tree_util.tree_flatten(
grid_spec.scratch_shapes)
flat_scratch_avals = tuple(s.get_ref_aval() for s in flat_scratch_shapes)
jaxpr_scratch_avals = tree_util.tree_unflatten(
scratch_tree, flat_scratch_avals)
if not isinstance(jaxpr_scratch_avals, (tuple, list)):
jaxpr_scratch_avals = (jaxpr_scratch_avals,)
del flat_scratch_shapes, scratch_tree
else:
flat_scratch_avals = ()
jaxpr_scratch_avals = ()
if grid_spec.in_specs is not no_block_spec:
flat_in_specs, in_specs_tree = tree_util.tree_flatten(grid_spec.in_specs)
if in_specs_tree != in_tree:
raise ValueError(
pytreedef_mismatch_err_msg("`in_specs`", in_specs_tree,
"`inputs`", in_tree))
else:
flat_in_specs = [no_block_spec] * len(in_avals)
in_block_mappings = map(
partial(
_convert_block_spec_to_block_mapping,
index_map_avals=index_map_avals,
index_map_tree=index_map_tree,
grid=grid_mapping_grid, # type: ignore[arg-type]
vmapped_dims=(),
debug=debug,
),
flat_in_specs,
in_origins[num_flat_scalar_prefetch:],
in_avals,
)
if grid_spec.out_specs is not no_block_spec:
flat_out_specs, out_specs_tree = tree_util.tree_flatten(grid_spec.out_specs)
if out_specs_tree != out_tree:
raise ValueError(
pytreedef_mismatch_err_msg("`out_specs`", out_specs_tree,
"`out_shape`", out_tree))
else:
flat_out_specs = [no_block_spec] * len(out_avals)
out_block_mappings = map(
partial(
_convert_block_spec_to_block_mapping,
index_map_avals=index_map_avals,
index_map_tree=index_map_tree,
grid=grid_mapping_grid, # type: ignore[arg-type]
vmapped_dims=(),
debug=debug,
),
flat_out_specs,
out_origins,
out_avals,
)
grid_mapping = GridMapping(
grid=grid_mapping_grid, # type: ignore[arg-type]
grid_names=grid_spec.grid_names,
block_mappings=(*in_block_mappings, *out_block_mappings),
index_map_avals=index_map_avals,
index_map_tree=index_map_tree,
vmapped_dims=(),
num_index_operands=num_flat_scalar_prefetch,
num_inputs=len(flat_in_specs),
num_outputs=len(flat_out_specs),
scratch_avals=flat_scratch_avals,
debug=debug,
)
grid_mapping.check_invariants()
in_ref_avals = [bm.ref_aval for bm in in_block_mappings]
jaxpr_in_ref_avals = tree_util.tree_unflatten(in_tree, in_ref_avals)
jaxpr_in_avals = (*jaxpr_scalar_ref_avals,
*jaxpr_in_ref_avals)
out_ref_avals = [bm.ref_aval for bm in out_block_mappings]
jaxpr_out_avals = tree_util.tree_unflatten(out_tree, out_ref_avals)
if not isinstance(jaxpr_out_avals, (tuple, list)):
jaxpr_out_avals = (jaxpr_out_avals,)
return (*jaxpr_in_avals, *jaxpr_out_avals,
*jaxpr_scratch_avals), grid_mapping
def unzip_dynamic_grid_bounds(
grid_spec: GridSpec) -> tuple[GridSpec, tuple[Any, ...]]:
if dynamic_shapes_export_enabled():
new_grid : Any = grid_spec.grid
else:
new_grid : Any = tuple(d if isinstance(d, int) else None for d in grid_spec.grid) # type: ignore[no-redef]
dynamic_bounds = tuple(d for d in grid_spec.grid if not isinstance(d, int))
# We can't use dataclasses.replace, because our fields are incompatible
# with __init__'s signature.
static_self = copy.copy(grid_spec)
static_self.grid = new_grid
return static_self, dynamic_bounds
def pytreedef_mismatch_err_msg(
what1: str, tree1: tree_util.PyTreeDef,
what2: str, tree2: tree_util.PyTreeDef) -> str:
errs = list(tree_util.equality_errors_pytreedef(tree1, tree2))
msg = []
msg.append(
f"Pytree for {what1} and {what2} do not match. "
f"There are {len(errs)} mismatches, including:")
for path, thing1, thing2, explanation in errs:
where = f"at {tree_util.keystr(path)}, " if path else ""
msg.append(
f" * {where}{what1} is a {thing1} but"
f" {what2} is a {thing2}, so {explanation}")
return "\n".join(msg)
@dataclasses.dataclass(frozen=True)
| GridSpec |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/llm.py | {
"start": 3966,
"end": 4491
} | class ____(BaseEvent):
"""
LLMChatStartEvent.
Args:
messages (List[ChatMessage]): List of chat messages.
additional_kwargs (dict): Additional keyword arguments.
model_dict (dict): Model dictionary.
"""
model_config = ConfigDict(protected_namespaces=("pydantic_model_",))
messages: List[ChatMessage]
additional_kwargs: dict
model_dict: dict
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMChatStartEvent"
| LLMChatStartEvent |
python | doocs__leetcode | solution/1200-1299/1217.Minimum Cost to Move Chips to The Same Position/Solution.py | {
"start": 0,
"end": 174
} | class ____:
def minCostToMoveChips(self, position: List[int]) -> int:
a = sum(p % 2 for p in position)
b = len(position) - a
return min(a, b)
| Solution |
python | pyca__cryptography | src/cryptography/hazmat/primitives/_serialization.py | {
"start": 1289,
"end": 1554
} | class ____(utils.Enum):
SubjectPublicKeyInfo = "X.509 subjectPublicKeyInfo with PKCS#1"
PKCS1 = "Raw PKCS#1"
OpenSSH = "OpenSSH"
Raw = "Raw"
CompressedPoint = "X9.62 Compressed Point"
UncompressedPoint = "X9.62 Uncompressed Point"
| PublicFormat |
python | huggingface__transformers | src/transformers/models/altclip/modeling_altclip.py | {
"start": 8945,
"end": 11754
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.FloatTensor] = None,
output_attentions: Optional[bool] = False,
) -> tuple[torch.Tensor]:
input_shape = hidden_states.shape[:-1]
hidden_shape = (*input_shape, -1, self.attention_head_size)
query_layer = self.query(hidden_states).view(hidden_shape).transpose(1, 2)
key_layer = self.key(hidden_states).view(hidden_shape).transpose(1, 2)
value_layer = self.value(hidden_states).view(hidden_shape).transpose(1, 2)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in AltRobertaModel forward() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = nn.functional.softmax(attention_scores, dim=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(new_context_layer_shape)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
# Copied from transformers.models.roberta.modeling_roberta.RobertaSelfOutput
| AltRobertaSelfAttention |
python | scrapy__scrapy | tests/mockserver/dns.py | {
"start": 817,
"end": 1767
} | class ____:
def __enter__(self):
self.proc = Popen(
[sys.executable, "-u", "-m", "tests.mockserver.dns"],
stdout=PIPE,
env=get_script_run_env(),
)
self.host = "127.0.0.1"
self.port = int(
self.proc.stdout.readline().strip().decode("ascii").split(":")[1]
)
return self
def __exit__(self, exc_type, exc_value, traceback):
self.proc.kill()
self.proc.communicate()
def main() -> None:
from twisted.internet import reactor
clients = [MockDNSResolver()]
factory = DNSServerFactory(clients=clients)
protocol = dns.DNSDatagramProtocol(controller=factory)
listener = reactor.listenUDP(0, protocol)
def print_listening():
host = listener.getHost()
print(f"{host.host}:{host.port}")
reactor.callWhenRunning(print_listening)
reactor.run()
if __name__ == "__main__":
main()
| MockDNSServer |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.