language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | numpy__numpy | numpy/lib/tests/test_index_tricks.py | {
"start": 19552,
"end": 24407
} | class ____:
def test_diag_indices_from(self):
x = np.random.random((4, 4))
r, c = diag_indices_from(x)
assert_array_equal(r, np.arange(4))
assert_array_equal(c, np.arange(4))
def test_error_small_input(self):
x = np.ones(7)
with assert_raises_regex(ValueError, "at least 2-d"):
diag_indices_from(x)
def test_error_shape_mismatch(self):
x = np.zeros((3, 3, 2, 3), int)
with assert_raises_regex(ValueError, "equal length"):
diag_indices_from(x)
def test_ndindex():
x = list(ndindex(1, 2, 3))
expected = [ix for ix, e in ndenumerate(np.zeros((1, 2, 3)))]
assert_array_equal(x, expected)
x = list(ndindex((1, 2, 3)))
assert_array_equal(x, expected)
# Test use of scalars and tuples
x = list(ndindex((3,)))
assert_array_equal(x, list(ndindex(3)))
# Make sure size argument is optional
x = list(ndindex())
assert_equal(x, [()])
x = list(ndindex(()))
assert_equal(x, [()])
# Make sure 0-sized ndindex works correctly
x = list(ndindex(*[0]))
assert_equal(x, [])
def test_ndindex_zero_dimensions_explicit():
"""Test ndindex produces empty iterators for explicit
zero-length dimensions."""
assert list(np.ndindex(0, 3)) == []
assert list(np.ndindex(3, 0, 2)) == []
assert list(np.ndindex(0)) == []
@pytest.mark.parametrize("bad_shape", [2.5, "2", [2, 3], (2.0, 3)])
def test_ndindex_non_integer_dimensions(bad_shape):
"""Test that non-integer dimensions raise TypeError."""
with pytest.raises(TypeError):
# Passing invalid_shape_arg directly to ndindex. It will try to use it
# as a dimension and should trigger a TypeError.
list(np.ndindex(bad_shape))
def test_ndindex_stop_iteration_behavior():
"""Test that StopIteration is raised properly after exhaustion."""
it = np.ndindex(2, 2)
# Exhaust the iterator
list(it)
# Should raise StopIteration on subsequent calls
with pytest.raises(StopIteration):
next(it)
def test_ndindex_iterator_independence():
"""Test that each ndindex instance creates independent iterators."""
shape = (2, 3)
iter1 = np.ndindex(*shape)
iter2 = np.ndindex(*shape)
next(iter1)
next(iter1)
assert_equal(next(iter2), (0, 0))
assert_equal(next(iter1), (0, 2))
def test_ndindex_tuple_vs_args_consistency():
"""Test that ndindex(shape) and ndindex(*shape) produce same results."""
# Single dimension
assert_equal(list(np.ndindex(5)), list(np.ndindex((5,))))
# Multiple dimensions
assert_equal(list(np.ndindex(2, 3)), list(np.ndindex((2, 3))))
# Complex shape
shape = (2, 1, 4)
assert_equal(list(np.ndindex(*shape)), list(np.ndindex(shape)))
def test_ndindex_against_ndenumerate_compatibility():
"""Test ndindex produces same indices as ndenumerate."""
for shape in [(1, 2, 3), (3,), (2, 2), ()]:
ndindex_result = list(np.ndindex(shape))
ndenumerate_indices = [ix for ix, _ in np.ndenumerate(np.zeros(shape))]
assert_array_equal(ndindex_result, ndenumerate_indices)
def test_ndindex_multidimensional_correctness():
"""Test ndindex produces correct indices for multidimensional arrays."""
shape = (2, 1, 3)
result = list(np.ndindex(*shape))
expected = [
(0, 0, 0),
(0, 0, 1),
(0, 0, 2),
(1, 0, 0),
(1, 0, 1),
(1, 0, 2),
]
assert_equal(result, expected)
def test_ndindex_large_dimensions_behavior():
"""Test ndindex behaves correctly when initialized with large dimensions."""
large_shape = (1000, 1000)
iter_obj = np.ndindex(*large_shape)
first_element = next(iter_obj)
assert_equal(first_element, (0, 0))
def test_ndindex_empty_iterator_behavior():
"""Test detailed behavior of empty iterators."""
empty_iter = np.ndindex(0, 5)
assert_equal(list(empty_iter), [])
empty_iter2 = np.ndindex(3, 0, 2)
with pytest.raises(StopIteration):
next(empty_iter2)
@pytest.mark.parametrize(
"negative_shape_arg",
[
(-1,), # Single negative dimension
(2, -3, 4), # Negative dimension in the middle
(5, 0, -2), # Mix of valid (0) and invalid (negative) dimensions
],
)
def test_ndindex_negative_dimensions(negative_shape_arg):
"""Test that negative dimensions raise ValueError."""
with pytest.raises(ValueError):
ndindex(negative_shape_arg)
def test_ndindex_empty_shape():
import numpy as np
# ndindex() and ndindex(()) should return a single empty tuple
assert list(np.ndindex()) == [()]
assert list(np.ndindex(())) == [()]
def test_ndindex_negative_dim_raises():
# ndindex(-1) should raise a ValueError
with pytest.raises(ValueError):
list(np.ndindex(-1))
| TestDiagIndicesFrom |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 56480,
"end": 56602
} | class ____(ReturnTypeFromArgs[_T]): # noqa: A001
"""The SQL MAX() aggregate function."""
inherit_cache = True
| max |
python | spack__spack | lib/spack/spack/resource.py | {
"start": 289,
"end": 754
} | class ____:
"""Represents any resource to be fetched by a package.
This includes the main tarball or source archive, as well as extra archives defined
by the resource() directive.
Aggregates a name, a fetcher, a destination and a placement.
"""
def __init__(self, name, fetcher, destination, placement):
self.name = name
self.fetcher = fetcher
self.destination = destination
self.placement = placement
| Resource |
python | airbytehq__airbyte | airbyte-integrations/bases/connector-acceptance-test/connector_acceptance_test/tests/test_core.py | {
"start": 33461,
"end": 35216
} | class ____(BaseTest):
async def test_check(self, connector_config, inputs: ConnectionTestConfig, docker_runner: ConnectorRunner):
if inputs.status == ConnectionTestConfig.Status.Succeed:
output = await docker_runner.call_check(config=connector_config)
con_messages = filter_output(output, Type.CONNECTION_STATUS)
assert len(con_messages) == 1, "Connection status message should be emitted exactly once"
assert con_messages[0].connectionStatus.status == Status.SUCCEEDED
elif inputs.status == ConnectionTestConfig.Status.Failed:
output = await docker_runner.call_check(config=connector_config)
con_messages = filter_output(output, Type.CONNECTION_STATUS)
assert len(con_messages) == 1, "Connection status message should be emitted exactly once"
assert con_messages[0].connectionStatus.status == Status.FAILED
elif inputs.status == ConnectionTestConfig.Status.Exception:
output = await docker_runner.call_check(config=connector_config, raise_container_error=False)
trace_messages = filter_output(output, Type.TRACE)
assert len(trace_messages) == 1, "A trace message should be emitted in case of unexpected errors"
trace = trace_messages[0].trace
assert isinstance(trace, AirbyteTraceMessage)
assert trace.error is not None
assert trace.error.message is not None
# Running tests in parallel can sometime delay the execution of the tests if downstream services are not able to handle the load.
# This is why we set a timeout on tests that call command that should return quickly, like discover
@pytest.mark.default_timeout(FIVE_MINUTES)
| TestConnection |
python | huggingface__transformers | src/transformers/modeling_outputs.py | {
"start": 63928,
"end": 68152
} | class ____(ModelOutput):
"""
Base class for outputs of sequence-to-sequence sentence classification models.
Args:
loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `label` is provided):
Classification (or regression if config.num_labels==1) loss.
logits (`torch.FloatTensor` of shape `(batch_size, config.num_labels)`):
Classification (or regression if config.num_labels==1) scores (before SoftMax).
past_key_values (`EncoderDecoderCache`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
It is a [`~cache_utils.EncoderDecoderCache`] instance. For more details, see our [kv cache guide](https://huggingface.co/docs/transformers/en/kv_cache).
Contains pre-computed hidden-states (key and values in the self-attention blocks and in the cross-attention
blocks) that can be used (see `past_key_values` input) to speed up sequential decoding.
decoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the decoder at the output of each layer plus the initial embedding outputs.
decoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the decoder's cross-attention layer, after the attention softmax, used to compute the
weighted average in the cross-attention heads.
encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
Sequence of hidden-states at the output of the last layer of the encoder of the model.
encoder_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
Hidden-states of the encoder at the output of each layer plus the initial embedding outputs.
encoder_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights of the encoder, after the attention softmax, used to compute the weighted average in the
self-attention heads.
"""
loss: Optional[torch.FloatTensor] = None
logits: Optional[torch.FloatTensor] = None
past_key_values: Optional[EncoderDecoderCache] = None
decoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
decoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
cross_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_last_hidden_state: Optional[torch.FloatTensor] = None
encoder_hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
encoder_attentions: Optional[tuple[torch.FloatTensor, ...]] = None
@dataclass
| Seq2SeqSequenceClassifierOutput |
python | huggingface__transformers | src/transformers/models/mask2former/modeling_mask2former.py | {
"start": 49367,
"end": 53382
} | class ____(nn.Module):
def __init__(self, config: Mask2FormerConfig):
super().__init__()
self.embed_dim = config.feature_size
self.self_attn = Mask2FormerPixelDecoderEncoderMultiscaleDeformableAttention(
embed_dim=self.embed_dim,
num_heads=config.num_attention_heads,
n_levels=3,
n_points=4,
)
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
self.dropout = config.dropout
self.activation_fn = nn.functional.relu
self.activation_dropout = config.dropout
self.fc1 = nn.Linear(self.embed_dim, config.encoder_feedforward_dim)
self.fc2 = nn.Linear(config.encoder_feedforward_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes_list=None,
level_start_index=None,
output_attentions: bool = False,
):
"""
Args:
hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
Input to the layer.
attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`):
Attention mask.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings, to be added to `hidden_states`.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes_list (`list` of `tuple`):
Spatial shapes of the backbone feature maps as a list of tuples.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Apply Multi-scale Deformable Attention Module on the multi-scale feature maps.
hidden_states, attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=hidden_states,
encoder_attention_mask=attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
if self.training:
if torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any():
clamp_value = torch.finfo(hidden_states.dtype).max - 1000
hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value)
outputs = (hidden_states,)
if output_attentions:
outputs += (attn_weights.transpose(1, 0),)
return outputs
# Modified from from transformers.models.detr.modeling_deformable_detr.DeformableDetrEncoder with DeformableDetrEncoder->Mask2FormerPixelDecoderEncoderOnly
| Mask2FormerPixelDecoderEncoderLayer |
python | getsentry__sentry | tests/sentry/integrations/api/endpoints/test_user_organizationintegration.py | {
"start": 951,
"end": 2309
} | class ____(APITestCase):
endpoint = "sentry-api-0-user-organization-integrations"
method = "get"
def setUp(self) -> None:
super().setUp()
self.login_as(self.user)
def test_simple(self) -> None:
integration = self.create_provider_integration(provider="github")
self.create_organization_integration(
organization_id=self.organization.id, integration_id=integration.id
)
response = self.get_success_response(self.user.id)
assert response.data[0]["organizationId"] == self.organization.id
def test_billing_users_dont_see_integrations(self) -> None:
integration = self.create_provider_integration(provider="github")
self.create_organization_integration(
organization_id=self.organization.id, integration_id=integration.id
)
mock_org_roles = MockOrganizationRoles()
with patch("sentry.roles.organization_roles.get", mock_org_roles.get):
alice = self.create_user()
self.create_member(user=alice, organization=self.organization, role="alice")
self.login_as(alice)
response = self.get_success_response(alice.id)
assert response.status_code == 200
content = orjson.loads(response.content)
assert content == []
| UserOrganizationIntegationTest |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_remove_timezone.py | {
"start": 617,
"end": 2382
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("remove_timezone01.xlsx")
def test_remove_timezone_none(self):
"""Test write_datetime without timezones."""
workbook = Workbook(self.got_filename, {"remove_timezone": False})
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 20)
format1 = workbook.add_format({"num_format": 20})
format2 = workbook.add_format({"num_format": 14})
format3 = workbook.add_format({"num_format": 22})
date1 = time(12, 0, 0)
date2 = date(2016, 9, 23)
date3 = datetime.strptime("2016-09-12 12:00", "%Y-%m-%d %H:%M")
worksheet.write_datetime(0, 0, date1, format1)
worksheet.write_datetime(1, 0, date2, format2)
worksheet.write_datetime(2, 0, date3, format3)
workbook.close()
self.assertExcelEqual()
def test_remove_timezone_gmt(self):
"""Test write_datetime with timezones."""
workbook = Workbook(self.got_filename, {"remove_timezone": True})
worksheet = workbook.add_worksheet()
worksheet.set_column(0, 0, 20)
format1 = workbook.add_format({"num_format": 20})
format2 = workbook.add_format({"num_format": 14})
format3 = workbook.add_format({"num_format": 22})
date1 = time(12, 0, 0, tzinfo=GMT())
date2 = date(2016, 9, 23)
date3 = datetime.strptime("2016-09-12 12:00", "%Y-%m-%d %H:%M")
date3 = date3.replace(tzinfo=GMT())
worksheet.write_datetime(1, 0, date2, format2)
worksheet.write_datetime(2, 0, date3, format3)
workbook.close()
| TestCompareXLSXFiles |
python | huggingface__transformers | src/transformers/models/qwen2_5_omni/modeling_qwen2_5_omni.py | {
"start": 133920,
"end": 134646
} | class ____(nn.Module):
def __init__(self, dim):
super().__init__()
self.silu = nn.SiLU()
self.linear = nn.Linear(dim, dim * 6)
self.norm = nn.LayerNorm(dim, elementwise_affine=False, eps=1e-6)
def forward(self, hidden_states, emb=None):
emb = self.linear(self.silu(emb))
shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = torch.chunk(emb, 6, dim=1)
hidden_states = self.norm(hidden_states) * (1 + scale_msa[:, None]) + shift_msa[:, None]
return hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp
# AdaLayerNormZero for final layer
# return only with modulated x for attn input, cuz no more mlp modulation
| Qwen2_5_OmniAdaLayerNormZero |
python | PrefectHQ__prefect | src/prefect/utilities/importtools.py | {
"start": 9358,
"end": 17760
} | class ____(Loader):
def __init__(
self,
alias: str,
callback: Optional[Callable[[str], None]],
real_spec: ModuleSpec,
):
self.alias = alias
self.callback = callback
self.real_spec = real_spec
def exec_module(self, module: ModuleType) -> None:
root_module = importlib.import_module(self.real_spec.name)
if self.callback is not None:
self.callback(self.alias)
sys.modules[self.alias] = root_module
def safe_load_namespace(
source_code: str, filepath: Optional[str] = None
) -> dict[str, Any]:
"""
Safely load a namespace from source code, optionally handling relative imports.
If a `filepath` is provided, `sys.path` is modified to support relative imports.
Changes to `sys.path` are reverted after completion, but this function is not thread safe
and use of it in threaded contexts may result in undesirable behavior.
Args:
source_code: The source code to load
filepath: Optional file path of the source code. If provided, enables relative imports.
Returns:
The namespace loaded from the source code.
"""
parsed_code = ast.parse(source_code)
namespace: dict[str, Any] = {"__name__": "prefect_safe_namespace_loader"}
# Remove the body of the if __name__ == "__main__": block
new_body = [node for node in parsed_code.body if not _is_main_block(node)]
parsed_code.body = new_body
temp_module = None
original_sys_path = None
if filepath:
# Setup for relative imports
file_dir = os.path.dirname(os.path.abspath(filepath))
package_name = os.path.basename(file_dir)
parent_dir = os.path.dirname(file_dir)
# Save original sys.path and modify it
original_sys_path = sys.path.copy()
sys.path.insert(0, parent_dir)
sys.path.insert(0, file_dir)
# Create a temporary module for import context
temp_module = ModuleType(package_name)
temp_module.__file__ = filepath
temp_module.__package__ = package_name
# Create a spec for the module
temp_module.__spec__ = ModuleSpec(package_name, None)
temp_module.__spec__.loader = None
temp_module.__spec__.submodule_search_locations = [file_dir]
try:
for node in parsed_code.body:
if isinstance(node, ast.Import):
for alias in node.names:
module_name = alias.name
as_name = alias.asname or module_name
try:
namespace[as_name] = importlib.import_module(module_name)
logger.debug("Successfully imported %s", module_name)
except ImportError as e:
logger.debug(f"Failed to import {module_name}: {e}")
elif isinstance(node, ast.ImportFrom):
module_name = node.module or ""
if filepath:
try:
if node.level > 0:
# For relative imports, use the parent package to inform the import
if TYPE_CHECKING:
assert temp_module is not None
assert temp_module.__package__ is not None
package_parts = temp_module.__package__.split(".")
if len(package_parts) < node.level:
raise ImportError(
"Attempted relative import beyond top-level package"
)
parent_package = ".".join(
package_parts[: (1 - node.level)]
if node.level > 1
else package_parts
)
module = importlib.import_module(
f".{module_name}" if module_name else "",
package=parent_package,
)
else:
# Absolute imports are handled as normal
module = importlib.import_module(module_name)
for alias in node.names:
name = alias.name
asname = alias.asname or name
if name == "*":
# Handle 'from module import *'
module_dict = {
k: v
for k, v in module.__dict__.items()
if not k.startswith("_")
}
namespace.update(module_dict)
else:
try:
attribute = getattr(module, name)
namespace[asname] = attribute
except AttributeError as e:
logger.debug(
"Failed to retrieve %s from %s: %s",
name,
module_name,
e,
)
except ImportError as e:
logger.debug("Failed to import from %s: %s", module_name, e)
else:
# Handle as absolute import when no filepath is provided
try:
module = importlib.import_module(module_name)
for alias in node.names:
name = alias.name
asname = alias.asname or name
if name == "*":
# Handle 'from module import *'
module_dict = {
k: v
for k, v in module.__dict__.items()
if not k.startswith("_")
}
namespace.update(module_dict)
else:
try:
attribute = getattr(module, name)
namespace[asname] = attribute
except AttributeError as e:
logger.debug(
"Failed to retrieve %s from %s: %s",
name,
module_name,
e,
)
except ImportError as e:
logger.debug("Failed to import from %s: %s", module_name, e)
# Handle local definitions
for node in parsed_code.body:
if isinstance(node, (ast.ClassDef, ast.FunctionDef, ast.Assign)):
try:
code = compile(
ast.Module(body=[node], type_ignores=[]),
filename="<ast>",
mode="exec",
)
exec(code, namespace)
except Exception as e:
logger.debug("Failed to compile: %s", e)
finally:
# Restore original sys.path if it was modified
if original_sys_path:
sys.path[:] = original_sys_path
return namespace
def _is_main_block(node: ast.AST):
"""
Check if the node is an `if __name__ == "__main__":` block.
"""
if isinstance(node, ast.If):
try:
# Check if the condition is `if __name__ == "__main__":`
if (
isinstance(node.test, ast.Compare)
and isinstance(node.test.left, ast.Name)
and node.test.left.id == "__name__"
and isinstance(node.test.comparators[0], ast.Constant)
and node.test.comparators[0].value == "__main__"
):
return True
except AttributeError:
pass
return False
| AliasedModuleLoader |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-clarifai/llama_index/llms/clarifai/base.py | {
"start": 754,
"end": 7799
} | class ____(LLM):
"""
Clarifai LLM.
Examples:
`pip install llama-index-llms-clarifai`
```python
from llama_index.llms.clarifai import Clarifai
llm = Clarifai(
user_id="clarifai",
app_id="ml",
model_name="llama2-7b-alternative-4k",
model_url=(
"https://clarifai.com/clarifai/ml/models/llama2-7b-alternative-4k"
)
response = llm.complete("Hello World!")
print(response)
```
"""
model_url: Optional[str] = Field(
description=f"Full URL of the model. e.g. `{EXAMPLE_URL}`"
)
model_version_id: Optional[str] = Field(description="Model Version ID.")
app_id: Optional[str] = Field(description="Clarifai application ID of the model.")
user_id: Optional[str] = Field(description="Clarifai user ID of the model.")
pat: Optional[str] = Field(
description="Personal Access Tokens(PAT) to validate requests."
)
_model: Any = PrivateAttr()
_is_chat_model: bool = PrivateAttr()
def __init__(
self,
model_name: Optional[str] = None,
model_url: Optional[str] = None,
model_version_id: Optional[str] = "",
app_id: Optional[str] = None,
user_id: Optional[str] = None,
pat: Optional[str] = None,
temperature: float = 0.1,
max_tokens: int = 512,
additional_kwargs: Optional[Dict[str, Any]] = None,
callback_manager: Optional[CallbackManager] = None,
system_prompt: Optional[str] = None,
messages_to_prompt: Optional[Callable[[Sequence[ChatMessage]], str]] = None,
completion_to_prompt: Optional[Callable[[str], str]] = None,
pydantic_program_mode: PydanticProgramMode = PydanticProgramMode.DEFAULT,
output_parser: Optional[BaseOutputParser] = None,
):
if pat is None and os.environ.get("CLARIFAI_PAT") is not None:
pat = os.environ.get("CLARIFAI_PAT")
if not pat and os.environ.get("CLARIFAI_PAT") is None:
raise ValueError(
"Set `CLARIFAI_PAT` as env variable or pass `pat` as constructor argument"
)
if model_url is not None and model_name is not None:
raise ValueError("You can only specify one of model_url or model_name.")
if model_url is None and model_name is None:
raise ValueError("You must specify one of model_url or model_name.")
model = None
if model_name is not None:
if app_id is None or user_id is None:
raise ValueError(
f"Missing one app ID or user ID of the model: {app_id=}, {user_id=}"
)
else:
model = Model(
user_id=user_id,
app_id=app_id,
model_id=model_name,
model_version={"id": model_version_id},
pat=pat,
)
if model_url is not None:
model = Model(model_url, pat=pat)
model_name = model.id
is_chat_model = False
if "chat" in model.app_id or "chat" in model.id:
is_chat_model = True
additional_kwargs = additional_kwargs or {}
super().__init__(
temperature=temperature,
max_tokens=max_tokens,
additional_kwargs=additional_kwargs,
callback_manager=callback_manager,
model_name=model_name,
system_prompt=system_prompt,
messages_to_prompt=messages_to_prompt,
completion_to_prompt=completion_to_prompt,
pydantic_program_mode=pydantic_program_mode,
output_parser=output_parser,
)
self._model = model
self._is_chat_model = is_chat_model
@classmethod
def class_name(cls) -> str:
return "ClarifaiLLM"
@property
def metadata(self) -> LLMMetadata:
"""LLM metadata."""
return LLMMetadata(
context_window=self.context_window,
num_output=self.max_tokens,
model_name=self._model,
is_chat_model=self._is_chat_model,
)
# TODO: When the Clarifai python SDK supports inference params, add here.
def chat(
self,
messages: Sequence[ChatMessage],
inference_params: Optional[Dict] = {},
**kwargs: Any,
) -> ChatResponse:
"""Chat endpoint for LLM."""
prompt = "".join([str(m) for m in messages])
try:
response = (
self._model.predict_by_bytes(
input_bytes=prompt.encode(encoding="UTF-8"),
input_type="text",
inference_params=inference_params,
)
.outputs[0]
.data.text.raw
)
except Exception as e:
raise Exception(f"Prediction failed: {e}")
return ChatResponse(message=ChatMessage(content=response))
def complete(
self,
prompt: str,
formatted: bool = False,
inference_params: Optional[Dict] = {},
**kwargs: Any,
) -> CompletionResponse:
"""Completion endpoint for LLM."""
try:
response = (
self._model.predict_by_bytes(
input_bytes=prompt.encode(encoding="utf-8"),
input_type="text",
inference_params=inference_params,
)
.outputs[0]
.data.text.raw
)
except Exception as e:
raise Exception(f"Prediction failed: {e}")
return CompletionResponse(text=response)
def stream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseGen:
raise NotImplementedError(
"Clarifai does not currently support streaming completion."
)
def stream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseGen:
raise NotImplementedError(
"Clarifai does not currently support streaming completion."
)
@llm_chat_callback()
async def achat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponse:
raise NotImplementedError("Currently not supported.")
@llm_completion_callback()
async def acomplete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponse:
return self.complete(prompt, **kwargs)
@llm_chat_callback()
async def astream_chat(
self, messages: Sequence[ChatMessage], **kwargs: Any
) -> ChatResponseAsyncGen:
raise NotImplementedError("Currently not supported.")
@llm_completion_callback()
async def astream_complete(
self, prompt: str, formatted: bool = False, **kwargs: Any
) -> CompletionResponseAsyncGen:
raise NotImplementedError("Clarifai does not currently support this function.")
| Clarifai |
python | kamyu104__LeetCode-Solutions | Python/checking-existence-of-edge-length-limited-paths-ii.py | {
"start": 5133,
"end": 6378
} | class ____(object): # Time: O(n * α(n)), Space: O(n)
def __init__(self, n):
self.snap_id = 0
self.set = SnapshotArray(n)
for i in xrange(n):
self.set.set(i, i, self.snap_id)
self.rank = SnapshotArray(n)
def find_set(self, x, snap_id):
stk = []
while self.set.get(x, snap_id) != x: # path compression
stk.append(x)
x = self.set.get(x, snap_id)
while stk:
self.set.set(stk.pop(), x, snap_id)
return x
def union_set(self, x, y):
x_root = self.find_set(x, self.snap_id)
y_root = self.find_set(y, self.snap_id)
if x_root == y_root:
return False
if self.rank.get(x_root, self.snap_id) < self.rank.get(y_root, self.snap_id): # union by rank
self.set.set(x_root, y_root, self.snap_id)
elif self.rank.get(x_root, self.snap_id) > self.rank.get(y_root, self.snap_id):
self.set.set(y_root, x_root, self.snap_id)
else:
self.set.set(y_root, x_root, self.snap_id)
self.rank.set(x_root, self.rank.get(x_root, self.snap_id)+1, self.snap_id)
return True
def snap(self):
self.snap_id += 1
| VersionedUnionFind |
python | django__django | django/db/migrations/questioner.py | {
"start": 303,
"end": 3478
} | class ____:
"""
Give the autodetector responses to questions it might have.
This base class has a built-in noninteractive mode, but the
interactive subclass is what the command-line arguments will use.
"""
def __init__(self, defaults=None, specified_apps=None, dry_run=None):
self.defaults = defaults or {}
self.specified_apps = specified_apps or set()
self.dry_run = dry_run
def ask_initial(self, app_label):
"""Should we create an initial migration for the app?"""
# If it was specified on the command line, definitely true
if app_label in self.specified_apps:
return True
# Otherwise, we look to see if it has a migrations module
# without any Python files in it, apart from __init__.py.
# Apps from the new app template will have these; the Python
# file check will ensure we skip South ones.
try:
app_config = apps.get_app_config(app_label)
except LookupError: # It's a fake app.
return self.defaults.get("ask_initial", False)
migrations_import_path, _ = MigrationLoader.migrations_module(app_config.label)
if migrations_import_path is None:
# It's an application with migrations disabled.
return self.defaults.get("ask_initial", False)
try:
migrations_module = importlib.import_module(migrations_import_path)
except ImportError:
return self.defaults.get("ask_initial", False)
else:
if getattr(migrations_module, "__file__", None):
filenames = os.listdir(os.path.dirname(migrations_module.__file__))
elif hasattr(migrations_module, "__path__"):
if len(migrations_module.__path__) > 1:
return False
filenames = os.listdir(list(migrations_module.__path__)[0])
return not any(x.endswith(".py") for x in filenames if x != "__init__.py")
def ask_not_null_addition(self, field_name, model_name):
"""Adding a NOT NULL field to a model."""
# None means quit
return None
def ask_not_null_alteration(self, field_name, model_name):
"""Changing a NULL field to NOT NULL."""
# None means quit
return None
def ask_rename(self, model_name, old_name, new_name, field_instance):
"""Was this field really renamed?"""
return self.defaults.get("ask_rename", False)
def ask_rename_model(self, old_model_state, new_model_state):
"""Was this model really renamed?"""
return self.defaults.get("ask_rename_model", False)
def ask_merge(self, app_label):
"""Should these migrations really be merged?"""
return self.defaults.get("ask_merge", False)
def ask_auto_now_add_addition(self, field_name, model_name):
"""Adding an auto_now_add field to a model."""
# None means quit
return None
def ask_unique_callable_default_addition(self, field_name, model_name):
"""Adding a unique field with a callable default."""
# None means continue.
return None
| MigrationQuestioner |
python | PrefectHQ__prefect | src/integrations/prefect-kubernetes/prefect_kubernetes/worker.py | {
"start": 6723,
"end": 6928
} | class ____(enum.Enum):
"""Enum representing the image pull policy options for a Kubernetes job."""
IF_NOT_PRESENT = "IfNotPresent"
ALWAYS = "Always"
NEVER = "Never"
| KubernetesImagePullPolicy |
python | ansible__ansible | test/lib/ansible_test/_internal/http.py | {
"start": 3041,
"end": 3649
} | class ____:
"""HTTP response."""
def __init__(self, method: str, url: str, status_code: int, response: str) -> None:
self.method = method
self.url = url
self.status_code = status_code
self.response = response
def json(self) -> t.Any:
"""Return the response parsed as JSON, raising an exception if parsing fails."""
try:
return json.loads(self.response)
except ValueError:
raise HttpError(self.status_code, 'Cannot parse response to %s %s as JSON:\n%s' % (self.method, self.url, self.response)) from None
| HttpResponse |
python | jazzband__django-formtools | formtools/wizard/storage/base.py | {
"start": 162,
"end": 4998
} | class ____:
step_key = 'step'
step_data_key = 'step_data'
step_files_key = 'step_files'
extra_data_key = 'extra_data'
def __init__(self, prefix, request=None, file_storage=None):
self.prefix = 'wizard_%s' % prefix
self.request = request
self.file_storage = file_storage
self._files = {}
self._tmp_files = []
def init_data(self):
self.data = {
self.step_key: None,
self.step_data_key: {},
self.step_files_key: {},
self.extra_data_key: {},
}
def reset(self):
# Store unused temporary file names in order to delete them
# at the end of the response cycle through a callback attached in
# `update_response`.
wizard_files = self.data[self.step_files_key]
for step_files in wizard_files.values():
for step_file in step_files.values():
self._tmp_files.append(step_file['tmp_name'])
self.init_data()
def _get_current_step(self):
return self.data[self.step_key]
def _set_current_step(self, step):
self.data[self.step_key] = step
@property
def current_step(self):
return self._get_current_step()
@current_step.setter
def current_step(self, value):
return self._set_current_step(value)
def _get_extra_data(self):
return self.data[self.extra_data_key]
def _set_extra_data(self, extra_data):
self.data[self.extra_data_key] = extra_data
@property
def extra_data(self):
return self._get_extra_data()
@extra_data.setter
def extra_data(self, value):
return self._set_extra_data(value)
def get_step_data(self, step):
# When reading the serialized data, upconvert it to a MultiValueDict,
# some serializers (json) don't preserve the type of the object.
values = self.data[self.step_data_key].get(step, None)
if values is not None:
values = MultiValueDict(values)
return values
def set_step_data(self, step, cleaned_data):
# If the value is a MultiValueDict, convert it to a regular dict of the
# underlying contents. Some serializers call the public API on it (as
# opposed to the underlying dict methods), in which case the content
# can be truncated (__getitem__ returns only the first item).
if isinstance(cleaned_data, MultiValueDict):
cleaned_data = dict(cleaned_data.lists())
self.data[self.step_data_key][step] = cleaned_data
@property
def current_step_data(self):
return self.get_step_data(self.current_step)
def get_step_files(self, step):
wizard_files = self.data[self.step_files_key].get(step, {})
if wizard_files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
files = {}
for field, field_dict in wizard_files.items():
field_dict = field_dict.copy()
tmp_name = field_dict.pop('tmp_name')
if (step, field) not in self._files:
self._files[(step, field)] = UploadedFile(
file=self.file_storage.open(tmp_name), **field_dict)
files[field] = self._files[(step, field)]
return files or None
def set_step_files(self, step, files):
if files and not self.file_storage:
raise NoFileStorageConfigured(
"You need to define 'file_storage' in your "
"wizard view in order to handle file uploads.")
if step not in self.data[self.step_files_key]:
self.data[self.step_files_key][step] = {}
for field, field_file in (files or {}).items():
tmp_filename = self.file_storage.save(field_file.name, field_file)
file_dict = {
'tmp_name': tmp_filename,
'name': field_file.name,
'content_type': field_file.content_type,
'size': field_file.size,
'charset': field_file.charset
}
self.data[self.step_files_key][step][field] = file_dict
@property
def current_step_files(self):
return self.get_step_files(self.current_step)
def update_response(self, response):
def post_render_callback(response):
for file in self._files.values():
if not file.closed:
file.close()
for tmp_file in self._tmp_files:
self.file_storage.delete(tmp_file)
if hasattr(response, 'render'):
response.add_post_render_callback(post_render_callback)
else:
post_render_callback(response)
| BaseStorage |
python | crytic__slither | slither/core/declarations/solidity_variables.py | {
"start": 5343,
"end": 5923
} | class ____(SolidityVariable):
def _check_name(self, name: str) -> None:
assert name in SOLIDITY_VARIABLES_COMPOSED
@property
def name(self) -> str:
return self._name
@property
def type(self) -> ElementaryType:
return ElementaryType(SOLIDITY_VARIABLES_COMPOSED[self.name])
def __str__(self) -> str:
return self._name
def __eq__(self, other: Any) -> bool:
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self) -> int:
return hash(self.name)
| SolidityVariableComposed |
python | openai__openai-python | src/openai/types/responses/web_search_tool.py | {
"start": 525,
"end": 1218
} | class ____(BaseModel):
city: Optional[str] = None
"""Free text input for the city of the user, e.g. `San Francisco`."""
country: Optional[str] = None
"""
The two-letter [ISO country code](https://en.wikipedia.org/wiki/ISO_3166-1) of
the user, e.g. `US`.
"""
region: Optional[str] = None
"""Free text input for the region of the user, e.g. `California`."""
timezone: Optional[str] = None
"""
The [IANA timezone](https://timeapi.io/documentation/iana-timezones) of the
user, e.g. `America/Los_Angeles`.
"""
type: Optional[Literal["approximate"]] = None
"""The type of location approximation. Always `approximate`."""
| UserLocation |
python | cython__cython | tests/run/py3k_super.py | {
"start": 2788,
"end": 3073
} | class ____:
"""
>>> obj = D()
>>> obj.method(1)
1
>>> obj.method(0) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
UnboundLocalError: ... '__class__' ...
"""
def method(self, x):
if x: __class__ = x
print(__class__)
| D |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-baseten/llama_index/llms/baseten/utils.py | {
"start": 648,
"end": 4455
} | class ____(BaseModel):
"""
Model information for Baseten models.
Args:
id: unique identifier for the model, passed as model parameter for requests
model_type: API type (defaults to "chat")
client: client name
"""
id: str
model_type: str = "chat"
client: str = "Baseten"
def __hash__(self) -> int:
return hash(self.id)
def validate_model_slug(model_id: str) -> None:
"""
Validate that the model_id is a supported model slug for Baseten Model APIs.
Args:
model_id: The model ID to validate
Raises:
ValueError: If the model_id is not a supported model slug
"""
if model_id not in SUPPORTED_MODEL_SLUGS:
raise ValueError(
f"Model '{model_id}' is not supported by Baseten Model APIs. "
f"Supported models are: {', '.join(SUPPORTED_MODEL_SLUGS)}"
)
def is_supported_model_slug(model_id: str) -> bool:
"""
Check if the model_id is a supported model slug for Baseten Model APIs.
Args:
model_id: The model ID to check
Returns:
True if the model_id is supported, False otherwise
"""
return model_id in SUPPORTED_MODEL_SLUGS
def get_supported_models() -> List[str]:
"""
Get a list of all supported model slugs for Baseten Model APIs.
Returns:
List of supported model slugs
"""
return SUPPORTED_MODEL_SLUGS.copy()
def get_available_models_dynamic(client) -> List[Model]:
"""
Dynamically fetch available models from Baseten Model APIs.
Args:
client: The OpenAI-compatible client instance
Returns:
List of Model objects representing available models
"""
models = []
try:
for element in client.models.list().data:
model = Model(id=element.id)
models.append(model)
# Filter out models that might not work properly with chat completions
# (Currently no exclusions, but this allows for future filtering)
exclude = set()
return [model for model in models if model.id not in exclude]
except Exception as e:
warnings.warn(
f"Failed to fetch models dynamically: {e}. Falling back to static list."
)
# Fallback to current static list
return [Model(id=slug) for slug in SUPPORTED_MODEL_SLUGS]
def validate_model_dynamic(client, model_name: str) -> None:
"""
Validate model against dynamically fetched list from Baseten Model APIs.
Args:
client: The OpenAI-compatible client instance
model_name: The model name to validate
Raises:
ValueError: If the model is not available
"""
try:
available_models = get_available_models_dynamic(client)
available_model_ids = [model.id for model in available_models]
if model_name not in available_model_ids:
# Try to find partial matches for helpful error messages
candidates = [
model_id for model_id in available_model_ids if model_name in model_id
]
if candidates:
suggestion = f"Did you mean one of: {candidates[:3]}"
else:
suggestion = f"Available models: {available_model_ids[:5]}{'...' if len(available_model_ids) > 5 else ''}"
raise ValueError(
f"Model '{model_name}' not found in available models. {suggestion}"
)
except Exception as e:
if "not found in available models" in str(e):
# Re-raise our validation error
raise
else:
# For other errors, fall back to static validation
warnings.warn(f"Dynamic validation failed: {e}. Using static validation.")
validate_model_slug(model_name)
| Model |
python | great-expectations__great_expectations | contrib/experimental/great_expectations_experimental/expectations/expect_column_values_to_not_be_outliers.py | {
"start": 747,
"end": 2419
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
# Please see https://docs.greatexpectations.io/en/latest/reference/core_concepts/metrics.html#metrics
# for information on how to choose an id string for your Metric.
condition_metric_name = "column_values.not_outliers"
condition_value_keys = ("method", "multiplier")
# This method defines the business logic for evaluating your metric when using a PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, method="iqr", multiplier=1.5, **kwargs):
if method == "iqr":
iqr = stats.iqr(column)
median = column.median()
return (column - median).abs() < multiplier * iqr
elif method == "std":
std = column.std()
mean = column.mean()
return (column - mean).abs() < multiplier * std
else:
raise NotImplementedError(f"method {method} has not been implemented")
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# return column.in_([3])
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# return column.isin([3])
# This class defines the Expectation itself
# The main business logic for calculation lives here.
| ColumnValuesNotOutliers |
python | doocs__leetcode | solution/1600-1699/1639.Number of Ways to Form a Target String Given a Dictionary/Solution.py | {
"start": 0,
"end": 613
} | class ____:
def numWays(self, words: List[str], target: str) -> int:
@cache
def dfs(i: int, j: int) -> int:
if i >= m:
return 1
if j >= n:
return 0
ans = dfs(i + 1, j + 1) * cnt[j][ord(target[i]) - ord('a')]
ans = (ans + dfs(i, j + 1)) % mod
return ans
m, n = len(target), len(words[0])
cnt = [[0] * 26 for _ in range(n)]
for w in words:
for j, c in enumerate(w):
cnt[j][ord(c) - ord('a')] += 1
mod = 10**9 + 7
return dfs(0, 0)
| Solution |
python | spack__spack | var/spack/test_repos/spack_repo/tutorial/packages/hdf5/package.py | {
"start": 290,
"end": 24954
} | class ____(CMakePackage):
"""HDF5 is a data model, library, and file format for storing and managing
data. It supports an unlimited variety of datatypes, and is designed for
flexible and efficient I/O and for high volume and complex data.
"""
homepage = "https://portal.hdfgroup.org"
url = "https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-1.10/hdf5-1.10.7/src/hdf5-1.10.7.tar.gz"
list_url = "https://support.hdfgroup.org/ftp/HDF5/releases"
list_depth = 3
git = "https://github.com/HDFGroup/hdf5.git"
maintainers(
"lrknox",
"brtnfld",
"byrnHDF",
"ChristopherHogan",
"epourmal",
"gheber",
"hyoklee",
"lkurz",
"soumagne",
)
test_requires_compiler = True
# The 'develop' version is renamed so that we could uninstall (or patch) it
# without affecting other develop version.
version("develop-1.13", branch="develop")
version("develop-1.12", branch="hdf5_1_12")
version("develop-1.10", branch="hdf5_1_10")
version("develop-1.8", branch="hdf5_1_8")
version("1.12.1", sha256="79c66ff67e666665369396e9c90b32e238e501f345afd2234186bfb8331081ca")
version("1.12.0", sha256="a62dcb276658cb78e6795dd29bf926ed7a9bc4edf6e77025cd2c689a8f97c17a")
# HDF5 1.12 broke API compatibility, so we currently prefer the latest
# 1.10 release. packages that want later versions of HDF5 should specify,
# e.g., depends_on("hdf5@1.12:") to get 1.12 or higher.
version(
"1.10.7",
sha256="7a1a0a54371275ce2dfc5cd093775bb025c365846512961e7e5ceaecb437ef15",
preferred=True,
)
version("1.10.6", sha256="5f9a3ee85db4ea1d3b1fa9159352aebc2af72732fc2f58c96a3f0768dba0e9aa")
version("1.10.5", sha256="6d4ce8bf902a97b050f6f491f4268634e252a63dadd6656a1a9be5b7b7726fa8")
version("1.10.4", sha256="8f60dc4dd6ab5fcd23c750d1dc5bca3d0453bdce5c8cdaf0a4a61a9d1122adb2")
version("1.10.3", sha256="b600d7c914cfa80ae127cd1a1539981213fee9994ac22ebec9e3845e951d9b39")
version("1.10.2", sha256="bfec1be8c366965a99812cf02ddc97e4b708c1754fccba5414d4adccdc073866")
version("1.10.1", sha256="048a9d149fb99aaa1680a712963f5a78e9c43b588d0e79d55e06760ec377c172")
version(
"1.10.0-patch1", sha256="6e78cfe32a10e6e0629393cdfddf6cfa536571efdaf85f08e35326e1b4e9eff0"
)
version("1.10.0", sha256="81f6201aba5c30dced5dcd62f5d5477a2790fd5850e02ac514ca8bf3e2bb375a")
version("1.8.22", sha256="8406d96d9355ef8961d2739fb8fd5474ad4cdf52f3cfac657733defd9709bfaa")
version("1.8.21", sha256="87d8c82eba5cf766d97cd06c054f4639c1049c4adeaa3a79f77f8bd374f80f37")
version("1.8.19", sha256="a4335849f19fae88c264fd0df046bc321a78c536b2548fc508627a790564dc38")
version("1.8.18", sha256="cdb195ad8d9e6782acf24b2488061289f615628c2ccda8457b0a0c3fb7a8a063")
version("1.8.17", sha256="d9cda297ee76ade9881c4208987939250d397bae6252d0ccb66fa7d24d67e263")
version("1.8.16", sha256="ed17178abd9928a7237f30370189ba767b9e39e0db45917c2ac4665eb9cb4771")
version("1.8.15", sha256="4e963216b7d32469596bc1321a8c3f6e0c278dcbbdb7be6414c63c081b34c275")
version("1.8.14", sha256="1dbefeeef7f591897c632b2b090db96bb8d35ad035beaa36bc39cb2bc67e0639")
version("1.8.13", sha256="82f6b38eec103b4fccfbf14892786e0c27a8135d3252d8601cf5bf20066d38c1")
version("1.8.12", sha256="b5cccea850096962b5fd9e96f22c4f47d2379224bb41130d9bc038bb6c37dfcb")
version("1.8.10", sha256="4813b79c5fb8701a625b9924b8203bc7154a77f9b826ad4e034144b4056a160a")
variant("shared", default=True, description="Builds a shared version of the library")
variant("hl", default=False, description="Enable the high-level library")
variant("cxx", default=False, description="Enable C++ support")
variant("fortran", default=False, description="Enable Fortran support")
variant("java", default=False, description="Enable Java support")
variant("threadsafe", default=False, description="Enable thread-safe capabilities")
variant("tools", default=True, description="Enable building tools")
variant("mpi", default=True, description="Enable MPI support")
variant("szip", default=False, description="Enable szip support")
# Build HDF5 with API compatibility.
variant(
"api",
default="default",
description="Choose api compatibility for earlier version",
values=("default", "v114", "v112", "v110", "v18", "v16"),
multi=False,
)
depends_on("cmake@3.12:", type="build")
depends_on("mpi", when="+mpi")
depends_on("java", type=("build", "run"), when="+java")
# numactl does not currently build on darwin
if sys.platform != "darwin":
depends_on("numactl", when="+mpi+fortran")
depends_on("szip", when="+szip")
depends_on("zlib-api")
# The compiler wrappers (h5cc, h5fc, etc.) run 'pkg-config'.
depends_on("pkgconfig", type="run")
conflicts("api=v114", when="@1.6.0:1.12", msg="v114 is not compatible with this release")
conflicts("api=v112", when="@1.6.0:1.10", msg="v112 is not compatible with this release")
conflicts("api=v110", when="@1.6.0:1.8", msg="v110 is not compatible with this release")
conflicts("api=v18", when="@1.6.0:1.6", msg="v18 is not compatible with this release")
# The Java wrappers and associated libhdf5_java library
# were first available in 1.10
conflicts("+java", when="@:1.9")
# The Java wrappers cannot be built without shared libs.
conflicts("+java", when="~shared")
# There are several officially unsupported combinations of the features:
# 1. Thread safety is not guaranteed via high-level C-API but in some cases
# it works.
# conflicts('+threadsafe+hl')
# 2. Thread safety is not guaranteed via Fortran (CXX) API, but it's
# possible for a dependency tree to contain a package that uses Fortran
# (CXX) API in a single thread and another one that uses low-level C-API
# in multiple threads. To allow for such scenarios, we don't specify the
# following conflicts.
# conflicts('+threadsafe+cxx')
# conflicts('+threadsafe+fortran')
# 3. Parallel features are not supported via CXX API, but for the reasons
# described in #2 we allow for such combination.
# conflicts('+mpi+cxx')
# There are known build failures with intel@18.0.1. This issue is
# discussed and patch is provided at
# https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/747951.
patch("h5f90global-mult-obj-same-equivalence-same-common-block.patch", when="@1.10.1%intel@18")
# Turn line comments into block comments to conform with pre-C99 language
# standards. Versions of hdf5 after 1.8.10 don't require this patch,
# either because they conform to pre-C99 or neglect to ask for pre-C99
# language standards from their compiler. The hdf5 build system adds
# the -ansi cflag (run 'man gcc' for info on -ansi) for some versions
# of some compilers (see hdf5-1.8.10/config/gnu-flags). The hdf5 build
# system does not provide an option to disable -ansi, but since the
# pre-C99 code is restricted to just five lines of line comments in
# three src files, this patch accomplishes the simple task of patching the
# three src files and leaves the hdf5 build system alone.
patch("pre-c99-comments.patch", when="@1.8.10")
# There are build errors with GCC 8, see
# https://forum.hdfgroup.org/t/1-10-2-h5detect-compile-error-gcc-8-1-0-on-centos-7-2-solved/4441
patch(
"https://salsa.debian.org/debian-gis-team/hdf5/raw/bf94804af5f80f662cad80a5527535b3c6537df6/debian/patches/gcc-8.patch",
sha256="57cee5ff1992b4098eda079815c36fc2da9b10e00a9056df054f2384c4fc7523",
when="@1.10.2%gcc@8:",
)
# Disable MPI C++ interface when C++ is disabled, otherwise downstream
# libraries fail to link; see https://github.com/spack/spack/issues/12586
patch(
"h5public-skip-mpicxx.patch",
when="@1.8.10:1.8.21,1.10.0:1.10.5+mpi~cxx",
sha256="b61e2f058964ad85be6ee5ecea10080bf79e73f83ff88d1fa4b602d00209da9c",
)
# Fixes BOZ literal constant error when compiled with GCC 10.
# The issue is described here: https://github.com/spack/spack/issues/18625
patch(
"hdf5_1.8_gcc10.patch",
when="@:1.8.21",
sha256="0e20187cda3980a4fdff410da92358b63de7ebef2df1d7a425371af78e50f666",
)
# The argument 'buf_size' of the C function 'h5fget_file_image_c' is
# declared as intent(in) though it is modified by the invocation. As a
# result, aggressive compilers such as Fujitsu's may do a wrong
# optimization to cause an error.
def patch(self):
filter_file(
"INTEGER(SIZE_T), INTENT(IN) :: buf_size",
"INTEGER(SIZE_T), INTENT(OUT) :: buf_size",
"fortran/src/H5Fff.F90",
string=True,
ignore_absent=True,
)
filter_file(
"INTEGER(SIZE_T), INTENT(IN) :: buf_size",
"INTEGER(SIZE_T), INTENT(OUT) :: buf_size",
"fortran/src/H5Fff_F03.f90",
string=True,
ignore_absent=True,
)
# The parallel compiler wrappers (i.e. h5pcc, h5pfc, etc.) reference MPI
# compiler wrappers and do not need to be changed.
filter_compiler_wrappers(
"h5cc", "h5hlcc", "h5fc", "h5hlfc", "h5c++", "h5hlc++", relative_root="bin"
)
def url_for_version(self, version):
url = (
"https://support.hdfgroup.org/ftp/HDF5/releases/hdf5-{0}/hdf5-{1}/src/hdf5-{1}.tar.gz"
)
return url.format(version.up_to(2), version)
def flag_handler(self, name, flags):
cmake_flags = []
if name == "cflags":
if self.spec.satisfies("%gcc") or self.spec.satisfies("%clang"):
# Quiet warnings/errors about implicit declaration of functions
# in C99:
cmake_flags.append("-Wno-implicit-function-declaration")
# Note that this flag will cause an error if building %nvhpc.
if self.spec.satisfies("@:1.8.12~shared"):
# More recent versions set CMAKE_POSITION_INDEPENDENT_CODE to
# True and build with PIC flags.
cmake_flags.append(self.compiler.cc_pic_flag)
elif name == "cxxflags":
if self.spec.satisfies("@:1.8.12+cxx~shared"):
cmake_flags.append(self.compiler.cxx_pic_flag)
elif name == "fflags":
if self.spec.satisfies("@:1.8.12+fortran~shared"):
cmake_flags.append(self.compiler.fc_pic_flag)
elif name == "ldlibs":
if "+fortran %fj" in self.spec:
cmake_flags.extend(["-lfj90i", "-lfj90f", "-lfjsrcinfo", "-lelf"])
return flags, None, (cmake_flags or None)
@property
def libs(self):
"""HDF5 can be queried for the following parameters:
- "hl": high-level interface
- "cxx": C++ APIs
- "fortran": Fortran APIs
- "java": Java APIs
:return: list of matching libraries
"""
query_parameters = self.spec.last_query.extra_parameters
shared = "+shared" in self.spec
# This map contains a translation from query_parameters
# to the libraries needed
query2libraries = {
tuple(): ["libhdf5"],
("cxx", "fortran", "hl", "java"): [
# When installed with Autotools, the basename of the real
# library file implementing the High-level Fortran interface is
# 'libhdf5hl_fortran'. Starting versions 1.8.22, 1.10.5 and
# 1.12.0, the Autotools installation also produces a symbolic
# link 'libhdf5_hl_fortran.<so/a>' to
# 'libhdf5hl_fortran.<so/a>'. Note that in the case of the
# dynamic library, the latter is a symlink to the real sonamed
# file 'libhdf5_fortran.so.<abi-version>'. This means that all
# dynamically linked executables/libraries of the dependent
# packages need 'libhdf5_fortran.so.<abi-version>' with the same
# DT_SONAME entry. However, the CMake installation (at least
# starting version 1.8.10) does not produce it. Instead, the
# basename of the library file is 'libhdf5_hl_fortran'. Which
# means that switching to CMake requires rebuilding of all
# dependant packages that use the High-level Fortran interface.
# Therefore, we do not try to preserve backward compatibility
# with Autotools installations by creating symlinks. The only
# packages that could benefit from it would be those that
# hardcode the library name in their building systems. Such
# packages should simply be patched.
"libhdf5_hl_fortran",
"libhdf5_hl_f90cstub",
"libhdf5_hl_cpp",
"libhdf5_hl",
"libhdf5_fortran",
"libhdf5_f90cstub",
"libhdf5_java",
"libhdf5",
],
("cxx", "hl"): ["libhdf5_hl_cpp", "libhdf5_hl", "libhdf5"],
("fortran", "hl"): [
"libhdf5_hl_fortran",
"libhdf5_hl_f90cstub",
"libhdf5_hl",
"libhdf5_fortran",
"libhdf5_f90cstub",
"libhdf5",
],
("hl",): ["libhdf5_hl", "libhdf5"],
("cxx", "fortran"): ["libhdf5_fortran", "libhdf5_f90cstub", "libhdf5_cpp", "libhdf5"],
("cxx",): ["libhdf5_cpp", "libhdf5"],
("fortran",): ["libhdf5_fortran", "libhdf5_f90cstub", "libhdf5"],
("java",): ["libhdf5_java", "libhdf5"],
}
# Turn the query into the appropriate key
key = tuple(sorted(query_parameters))
libraries = query2libraries[key]
return find_libraries(libraries, root=self.prefix, shared=shared, recursive=True)
@when("@:1.8.21,1.10.0:1.10.5+szip")
def setup_build_environment(self, env: EnvironmentModifications) -> None:
env.set("SZIP_INSTALL", self.spec["szip"].prefix)
@run_before("cmake")
def fortran_check(self):
if "+fortran" in self.spec and not self.compiler.fc:
msg = "cannot build a Fortran variant without a Fortran compiler"
raise RuntimeError(msg)
def cmake_args(self):
spec = self.spec
if spec.satisfies("@:1.8.15+shared"):
tty.warn("hdf5@:1.8.15+shared does not produce static libraries")
args = [
# Always enable this option. This does not actually enable any
# features: it only *allows* the user to specify certain
# combinations of other arguments.
self.define("ALLOW_UNSUPPORTED", True),
# Speed-up the building by skipping the examples:
self.define("HDF5_BUILD_EXAMPLES", False),
self.define(
"BUILD_TESTING",
self.run_tests or
# Version 1.8.22 fails to build the tools when shared libraries
# are enabled but the tests are disabled.
spec.satisfies("@1.8.22+shared+tools"),
),
self.define("HDF5_ENABLE_Z_LIB_SUPPORT", True),
self.define_from_variant("HDF5_ENABLE_SZIP_SUPPORT", "szip"),
self.define_from_variant("HDF5_ENABLE_SZIP_ENCODING", "szip"),
self.define_from_variant("BUILD_SHARED_LIBS", "shared"),
self.define("ONLY_SHARED_LIBS", False),
self.define_from_variant("HDF5_ENABLE_PARALLEL", "mpi"),
self.define_from_variant("HDF5_ENABLE_THREADSAFE", "threadsafe"),
self.define_from_variant("HDF5_BUILD_HL_LIB", "hl"),
self.define_from_variant("HDF5_BUILD_CPP_LIB", "cxx"),
self.define_from_variant("HDF5_BUILD_FORTRAN", "fortran"),
self.define_from_variant("HDF5_BUILD_JAVA", "java"),
self.define_from_variant("HDF5_BUILD_TOOLS", "tools"),
]
api = spec.variants["api"].value
if api != "default":
args.append(self.define("DEFAULT_API_VERSION", api))
if "+mpi" in spec:
args.append(self.define("CMAKE_C_COMPILER", spec["mpi"].mpicc))
if "+cxx" in self.spec:
args.append(self.define("CMAKE_CXX_COMPILER", spec["mpi"].mpicxx))
if "+fortran" in self.spec:
args.append(self.define("CMAKE_Fortran_COMPILER", spec["mpi"].mpifc))
return args
@run_after("install")
def ensure_parallel_compiler_wrappers(self):
# When installed with Autotools and starting at least version 1.8.10,
# the package produces C compiler wrapper called either 'h5cc' (when MPI
# support is disabled) or 'h5pcc' (when MPI support is enabled). The
# CMake installation produces the wrapper called 'h5cc' (regardless of
# whether MPI support is enabled) only starting versions 1.8.21, 1.10.2
# and 1.12.0. The current develop versions also produce 'h5pcc' when MPI
# support is enabled and the file is identical to 'h5cc'. Here, we make
# sure that 'h5pcc' is available when MPI support is enabled (only for
# versions that generate 'h5cc').
if self.spec.satisfies("@1.8.21:1.8.22,1.10.2:1.10.7,1.12.0+mpi"):
with working_dir(self.prefix.bin):
# No try/except here, fix the condition above instead:
symlink("h5cc", "h5pcc")
# The same as for 'h5pcc'. However, the CMake installation produces the
# Fortran compiler wrapper called 'h5fc' only starting versions 1.8.22,
# 1.10.6 and 1.12.0. The current develop versions do not produce 'h5pfc'
# at all. Here, we make sure that 'h5pfc' is available when Fortran and
# MPI support are enabled (only for versions that generate 'h5fc').
if self.spec.satisfies(
"@1.8.22:1.8," "1.10.6:1.10," "1.12.0:1.12," "develop:" "+fortran+mpi"
):
with working_dir(self.prefix.bin):
# No try/except here, fix the condition above instead:
symlink("h5fc", "h5pfc")
@run_after("install")
def fix_package_config(self):
# We need to fix the pkg-config files, which are also used by the
# compiler wrappers. The files are created starting versions 1.8.21,
# 1.10.2 and 1.12.0. However, they are broken (except for the version
# 1.8.22): the files are named <name>-<version>.pc but reference <name>
# packages. This was fixed in the develop versions at some point: the
# files started referencing <name>-<version> packages but got broken
# again: the files got names <name>.pc but references had not been
# updated accordingly. Another issue, which we address here, is that
# some Linux distributions install pkg-config files named hdf5.pc and we
# want to override them. Therefore, the following solution makes sure
# that each <name>-<version>.pc file is symlinked by <name>.pc and all
# references to <name>-<version> packages in the original files are
# replaced with references to <name> packages.
pc_files = find(self.prefix.lib.pkgconfig, "hdf5*.pc", recursive=False)
if not pc_files:
# This also tells us that the pkgconfig directory does not exist.
return
# Replace versioned references in all pkg-config files:
filter_file(
r"(Requires(?:\.private)?:.*)(hdf5[^\s,]*)(?:-[^\s,]*)(.*)",
r"\1\2\3",
*pc_files,
backup=False,
)
# Create non-versioned symlinks to the versioned pkg-config files:
with working_dir(self.prefix.lib.pkgconfig):
for f in pc_files:
src_filename = os.path.basename(f)
version_sep_idx = src_filename.find("-")
if version_sep_idx > -1:
tgt_filename = src_filename[:version_sep_idx] + ".pc"
if not os.path.exists(tgt_filename):
symlink(src_filename, tgt_filename)
@run_after("install")
@on_package_attributes(run_tests=True)
def check_install(self):
self._check_install()
def _check_install(self):
# Build and run a small program to test the installed HDF5 library
spec = self.spec
print("Checking HDF5 installation...")
checkdir = "spack-check"
with working_dir(checkdir, create=True):
source = r"""
#include <hdf5.h>
#include <assert.h>
#include <stdio.h>
int main(int argc, char **argv) {
unsigned majnum, minnum, relnum;
herr_t herr = H5get_libversion(&majnum, &minnum, &relnum);
assert(!herr);
printf("HDF5 version %d.%d.%d %u.%u.%u\n", H5_VERS_MAJOR, H5_VERS_MINOR,
H5_VERS_RELEASE, majnum, minnum, relnum);
return 0;
}
"""
expected = """\
HDF5 version {version} {version}
""".format(
version=str(spec.version.up_to(3))
)
with open("check.c", "w", encoding="utf-8") as f:
f.write(source)
if "+mpi" in spec:
cc = Executable(spec["mpi"].mpicc)
else:
cc = Executable(self.compiler.cc)
cc(*(["-c", "check.c"] + spec["hdf5"].headers.cpp_flags.split()))
cc(*(["-o", "check", "check.o"] + spec["hdf5"].libs.ld_flags.split()))
try:
check = Executable("./check")
output = check(output=str)
except ProcessError:
output = ""
success = output == expected
if not success:
print("Produced output does not match expected output.")
print("Expected output:")
print("-" * 80)
print(expected)
print("-" * 80)
print("Produced output:")
print("-" * 80)
print(output)
print("-" * 80)
raise RuntimeError("HDF5 install check failed")
shutil.rmtree(checkdir)
def _test_check_versions(self):
"""Perform version checks on selected installed package binaries."""
spec_vers_str = "Version {0}".format(self.spec.version)
exes = [
"h5copy",
"h5diff",
"h5dump",
"h5format_convert",
"h5ls",
"h5mkgrp",
"h5repack",
"h5stat",
"h5unjam",
]
use_short_opt = ["h52gif", "h5repart", "h5unjam"]
for exe in exes:
reason = "test: ensuring version of {0} is {1}".format(exe, spec_vers_str)
option = "-V" if exe in use_short_opt else "--version"
self.run_test(
exe, option, spec_vers_str, installed=True, purpose=reason, skip_missing=True
)
def _test_example(self):
"""This test performs copy, dump, and diff on an example hdf5 file."""
test_data_dir = self.test_suite.current_test_data_dir
filename = "spack.h5"
h5_file = test_data_dir.join(filename)
reason = "test: ensuring h5dump produces expected output"
expected = get_escaped_text_output(test_data_dir.join("dump.out"))
self.run_test(
"h5dump",
filename,
expected,
installed=True,
purpose=reason,
skip_missing=True,
work_dir=test_data_dir,
)
reason = "test: ensuring h5copy runs"
options = ["-i", h5_file, "-s", "Spack", "-o", "test.h5", "-d", "Spack"]
self.run_test(
"h5copy", options, [], installed=True, purpose=reason, skip_missing=True, work_dir="."
)
reason = "test: ensuring h5diff shows no differences between orig and" " copy"
self.run_test(
"h5diff",
[h5_file, "test.h5"],
[],
installed=True,
purpose=reason,
skip_missing=True,
work_dir=".",
)
def test(self):
"""Perform smoke tests on the installed package."""
# Simple version check tests on known binaries
self._test_check_versions()
# Run sequence of commands on an hdf5 file
self._test_example()
# Run existing install check
self._check_install()
| Hdf5 |
python | ray-project__ray | python/ray/data/_internal/execution/operators/base_physical_operator.py | {
"start": 9115,
"end": 9817
} | class ____(PhysicalOperator):
"""An operator that has multiple input dependencies and one output.
This operator serves as the base for union, zip, etc.
"""
def __init__(
self,
data_context: DataContext,
*input_ops: LogicalOperator,
):
"""Create a OneToOneOperator.
Args:
input_op: Operator generating input data for this op.
name: The name of this operator.
"""
input_names = ", ".join([op._name for op in input_ops])
op_name = f"{self.__class__.__name__}({input_names})"
super().__init__(
op_name,
list(input_ops),
data_context,
)
| NAryOperator |
python | pytorch__pytorch | torch/testing/_internal/distributed/distributed_test.py | {
"start": 16361,
"end": 18928
} | class ____(MultiProcessTestCase):
@classmethod
def setUpClass(cls):
os.environ["MASTER_ADDR"] = str(MASTER_ADDR)
# Not setting MASTER_PORT and get a random free port
super().setUpClass()
def setUp(self):
super().setUp()
# initialize temp directories
initialize_temp_directories()
# initialize Barrier
Barrier.init()
# Skip return code checking for following tests as they are expected to
# crash a process due to TORCH_NCCL_ASYNC_ERROR_HANDLING.
self.skip_return_code_checks = [self.test_ddp_has_finalized.__wrapped__]
def tearDown(self):
cleanup_temp_dir()
super().tearDown()
@property
def init_method(self):
return f"{FILE_SCHEMA}{self.file_name}"
@property
def destroy_pg_upon_exit(self) -> bool:
# Overriding base test class: do not auto destroy PG upon exit.
return False
@classmethod
def _run(cls, rank, test_name, file_name, pipe, **kwargs):
if BACKEND == "nccl" and not torch.cuda.is_available():
sys.exit(TEST_SKIPS["no_cuda"].exit_code)
self = cls(test_name)
self.rank = rank
self.file_name = file_name
if torch.cuda.is_available() and torch.cuda.device_count() < int(
self.world_size
):
sys.exit(TEST_SKIPS[f"multi-gpu-{self.world_size}"].exit_code)
try:
pg_timeout_seconds = CUSTOM_PG_TIMEOUT.get(test_name, default_pg_timeout)
timeout = timedelta(seconds=pg_timeout_seconds)
dist.init_process_group(
init_method=self.init_method,
backend=BACKEND,
world_size=int(self.world_size),
rank=self.rank,
timeout=timeout,
)
except RuntimeError as e:
if "recompile" in e.args[0]:
sys.exit(TEST_SKIPS["backend_unavailable"].exit_code)
raise
# Execute barrier prior to running test to ensure that every process
# has finished initialization and that the following test
# immediately exiting due to a skip doesn't cause flakiness.
self._barrier()
self.run_test(test_name, pipe)
self._barrier()
dist.destroy_process_group()
sys.exit(0)
# Needed since MultiProcessTestCase assumes a world_size of 4, but we
# run these tests under other various world_sizes.
@property
def world_size(self):
return os.environ["WORLD_SIZE"]
| TestDistBackend |
python | sqlalchemy__sqlalchemy | test/orm/inheritance/test_relationship.py | {
"start": 97077,
"end": 100668
} | class ____(
AssertsCompiledSQL, fixtures.DeclarativeMappedTest
):
"""test for #12843 / discussion #12842"""
@classmethod
def setup_classes(cls):
Base = cls.DeclarativeBasic
class LogEntry(ComparableEntity, Base):
__tablename__ = "log_entry"
id: Mapped[int] = mapped_column(primary_key=True)
timestamp: Mapped[int] = mapped_column(Integer)
type: Mapped[str]
__mapper_args__ = {
"polymorphic_on": "type",
"polymorphic_identity": "log_entry",
}
class StartEntry(LogEntry):
__mapper_args__ = {
"polymorphic_identity": "start_entry",
}
StartAlias = aliased(StartEntry)
next_start_ts = (
select(func.min(StartAlias.timestamp))
.where(
StartAlias.timestamp > LogEntry.timestamp,
)
.scalar_subquery()
)
StartEntry.next_start_ts = column_property(next_start_ts)
LogAlias = aliased(LogEntry)
StartEntry.associated_entries = relationship(
LogAlias,
primaryjoin=and_(
foreign(LogAlias.timestamp) >= LogEntry.timestamp,
or_(
next_start_ts == None,
LogAlias.timestamp < next_start_ts,
),
),
viewonly=True,
order_by=LogAlias.id,
)
@classmethod
def insert_data(cls, connection):
LogEntry, StartEntry = cls.classes.LogEntry, cls.classes.StartEntry
with Session(connection) as sess:
s1 = StartEntry(timestamp=1)
l1 = LogEntry(timestamp=2)
l2 = LogEntry(timestamp=3)
s2 = StartEntry(timestamp=4)
l3 = LogEntry(timestamp=5)
sess.add_all([s1, l1, l2, s2, l3])
sess.commit()
def test_assoc_entries(self):
LogEntry, StartEntry = self.classes.LogEntry, self.classes.StartEntry
sess = fixture_session()
s1 = sess.scalars(select(StartEntry).filter_by(timestamp=1)).one()
with self.sql_execution_asserter(testing.db) as asserter:
eq_(
s1.associated_entries,
[
StartEntry(timestamp=1),
LogEntry(timestamp=2),
LogEntry(timestamp=3),
],
)
asserter.assert_(
CompiledSQL(
"SELECT log_entry_1.id, "
"log_entry_1.timestamp, "
"log_entry_1.type "
"FROM log_entry AS log_entry_1 "
"WHERE log_entry_1.timestamp >= :param_1 AND "
"((SELECT min(log_entry_2.timestamp) AS min_1 "
"FROM log_entry AS log_entry_2 "
"WHERE log_entry_2.timestamp > :param_1 "
"AND log_entry_2.type IN (__[POSTCOMPILE_type_1])) IS NULL "
"OR log_entry_1.timestamp < "
"(SELECT min(log_entry_2.timestamp) AS min_1 "
"FROM log_entry AS log_entry_2 "
"WHERE log_entry_2.timestamp > :param_1 "
"AND log_entry_2.type IN (__[POSTCOMPILE_type_2]))) "
"ORDER BY log_entry_1.id",
params=[
{
"param_1": 1,
"type_1": ["start_entry"],
"type_2": ["start_entry"],
}
],
)
)
| SingleSubclassInRelationship |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/selectable.py | {
"start": 150901,
"end": 151103
} | class ____(Enum):
UNION = "UNION"
UNION_ALL = "UNION ALL"
EXCEPT = "EXCEPT"
EXCEPT_ALL = "EXCEPT ALL"
INTERSECT = "INTERSECT"
INTERSECT_ALL = "INTERSECT ALL"
| _CompoundSelectKeyword |
python | getsentry__sentry | src/sentry/models/releases/util.py | {
"start": 1074,
"end": 9515
} | class ____(BaseQuerySet["Release"]):
def annotate_prerelease_column(self):
"""
Adds a `prerelease_case` column to the queryset which is used to properly sort
by prerelease. We treat an empty (but not null) prerelease as higher than any
other value.
"""
return self.annotate(
prerelease_case=Case(
When(prerelease="", then=1), default=0, output_field=models.IntegerField()
)
)
def filter_to_semver(self) -> Self:
"""
Filters the queryset to only include semver compatible rows
"""
return self.filter(major__isnull=False)
def filter_by_semver_build(
self,
organization_id: int,
operator: str,
build: str,
project_ids: Sequence[int] | None = None,
negated: bool = False,
) -> Self:
"""
Filters released by build. If the passed `build` is a numeric string, we'll filter on
`build_number` and make use of the passed operator.
If it is a non-numeric string, then we'll filter on `build_code` instead. We support a
wildcard only at the end of this string, so that we can filter efficiently via the index.
"""
qs = self.filter(organization_id=organization_id)
query_func = "exclude" if negated else "filter"
if project_ids:
qs = qs.filter(
id__in=ReleaseProject.objects.filter(project_id__in=project_ids).values_list(
"release_id", flat=True
)
)
if build.isdecimal() and validate_bigint(int(build)):
qs = getattr(qs, query_func)(**{f"build_number__{operator}": int(build)})
else:
if not build or build.endswith("*"):
qs = getattr(qs, query_func)(build_code__startswith=build[:-1])
else:
qs = getattr(qs, query_func)(build_code=build)
return qs
def filter_by_semver(
self,
organization_id: int,
semver_filter: SemverFilter,
project_ids: Sequence[int] | None = None,
) -> Self:
"""
Filters releases based on a based `SemverFilter` instance.
`SemverFilter.version_parts` can contain up to 6 components, which should map
to the columns defined in `Release.SEMVER_COLS`. If fewer components are
included, then we will exclude later columns from the filter.
`SemverFilter.package` is optional, and if included we will filter the `package`
column using the provided value.
`SemverFilter.operator` should be a Django field filter.
Typically we build a `SemverFilter` via `sentry.search.events.filter.parse_semver`
"""
qs = self.filter(organization_id=organization_id).annotate_prerelease_column()
query_func = "exclude" if semver_filter.negated else "filter"
if semver_filter.package:
if isinstance(semver_filter.package, str):
qs = getattr(qs, query_func)(package=semver_filter.package)
else:
qs = getattr(qs, query_func)(package__in=semver_filter.package)
if project_ids:
qs = qs.filter(
id__in=ReleaseProject.objects.filter(project_id__in=project_ids).values_list(
"release_id", flat=True
)
)
if semver_filter.version_parts:
filter_func = Func(
*(
Value(part) if isinstance(part, str) else part
for part in semver_filter.version_parts
),
function="ROW",
)
cols = self.model.SEMVER_COLS[: len(semver_filter.version_parts)]
qs = qs.annotate(
semver=Func(
*(F(col) for col in cols), function="ROW", output_field=models.JSONField()
)
)
qs = getattr(qs, query_func)(**{f"semver__{semver_filter.operator}": filter_func})
return qs
def filter_by_stage(
self,
organization_id: int,
operator: str,
value,
project_ids: Sequence[int] | None = None,
environments: Sequence[str | int] | None = None,
) -> Self:
from sentry.models.releaseprojectenvironment import ReleaseProjectEnvironment, ReleaseStages
from sentry.search.events.filter import to_list
if not environments or len(environments) != 1:
raise InvalidSearchQuery("Choose a single environment to filter by release stage.")
filters = {
ReleaseStages.ADOPTED: Q(adopted__isnull=False, unadopted__isnull=True),
ReleaseStages.REPLACED: Q(adopted__isnull=False, unadopted__isnull=False),
ReleaseStages.LOW_ADOPTION: Q(adopted__isnull=True, unadopted__isnull=True),
}
value = to_list(value)
operator_conversions = {"=": "IN", "!=": "NOT IN"}
operator = operator_conversions.get(operator, operator)
for stage in value:
if stage not in filters:
raise InvalidSearchQuery("Unsupported release.stage value.")
rpes = ReleaseProjectEnvironment.objects.filter(
release__organization_id=organization_id,
).select_related("release")
if project_ids:
rpes = rpes.filter(project_id__in=project_ids)
query = Q()
if operator == "IN":
for stage in value:
query |= filters[stage]
elif operator == "NOT IN":
for stage in value:
query &= ~filters[stage]
qs = self.filter(id__in=Subquery(rpes.filter(query).values_list("release_id", flat=True)))
return qs
def order_by_recent(self) -> Self:
return self.order_by("-date_added", "-id")
@staticmethod
def massage_semver_cols_into_release_object_data(kwargs):
"""
Helper function that takes kwargs as an argument and massages into it the release semver
columns (if possible)
Inputs:
* kwargs: data of the release that is about to be created
"""
if "version" in kwargs:
try:
version_info = parse_release(kwargs["version"])
package = version_info.get("package")
version_parsed = version_info.get("version_parsed")
if version_parsed is not None and all(
validate_bigint(version_parsed[field])
for field in ("major", "minor", "patch", "revision")
):
build_code = version_parsed.get("build_code")
build_number = ReleaseQuerySet._convert_build_code_to_build_number(build_code)
kwargs.update(
{
"major": version_parsed.get("major"),
"minor": version_parsed.get("minor"),
"patch": version_parsed.get("patch"),
"revision": version_parsed.get("revision"),
"prerelease": version_parsed.get("pre") or "",
"build_code": build_code,
"build_number": build_number,
"package": package,
}
)
except RelayError:
# This can happen on invalid legacy releases
pass
@staticmethod
def _convert_build_code_to_build_number(build_code):
"""
Helper function that takes the build_code and checks if that build code can be parsed into
a 64 bit integer
Inputs:
* build_code: str
Returns:
* build_number
"""
build_number = None
if build_code is not None:
try:
build_code_as_int = int(build_code)
if validate_bigint(build_code_as_int):
build_number = build_code_as_int
except ValueError:
pass
return build_number
def parse_semver_pre_save(instance, **kwargs):
if instance.id:
return
ReleaseQuerySet.massage_semver_cols_into_release_object_data(instance.__dict__)
pre_save.connect(
parse_semver_pre_save, sender="sentry.Release", dispatch_uid="parse_semver_pre_save"
)
| ReleaseQuerySet |
python | keras-team__keras | keras/src/ops/node.py | {
"start": 154,
"end": 4214
} | class ____:
"""A `Node` describes an operation `__call__()` event.
A Keras Function is a DAG with `Node` instances as nodes, and
`KerasTensor` instances as edges. Nodes aren't `Operation` instances,
because a single operation could be called multiple times, which would
result in graph cycles.
A `__call__()` event involves input tensors (and other input arguments),
the operation that was called, and the resulting output tensors.
A `Node` will include all this information.
Since a single `Operation` could be called multiple times,
the `Node` instances are stored on operations as a list.
Each time an operation is called, a node is added to `op._inbound_nodes`.
Each time the output of an operation is used by another operation,
a node is added to `op._outbound_nodes`.
Every `KerasTensor` instance has a `KerasHistory` object attached,
which tracks the `Node` that records the `__call__()` event that created
the tensor. By recursively walking through `Node` instances
via the `KerasHistory` metadata of `KerasTensor` instances, once can
retrieve the entire DAG of a Keras Function.
Args:
operation: The Operation that was called in the `op.__call__()`
event that this node represents.
call_args: The positional arguments the operation was called with.
call_kwargs: The keyword arguments the operation was called with.
outputs: The output tensors of the `op.__call__()` call.
"""
def __init__(
self, operation, call_args=None, call_kwargs=None, outputs=None
):
self.operation = operation
self.arguments = SymbolicArguments(*call_args, **call_kwargs)
self.outputs = [] if outputs is None else tree.flatten(outputs)
for x in self.outputs:
if not isinstance(x, KerasTensor):
raise ValueError(
"All operation outputs must be tensors. "
f"Operation {operation} returned a non-tensor. "
f"Non-tensor received: {x}"
)
zero_history = any(
not x.record_history for x in self.arguments.keras_tensors
)
# If inputs don't have metadata yet, add it.
if not zero_history:
for tensor in self.arguments.keras_tensors:
if not hasattr(tensor, "_keras_history"):
tensor._keras_history = KerasHistory(
operation=None, node_index=0, tensor_index=0
)
# Wire up Node to Operations.
self.operation._inbound_nodes.append(self)
for kt in self.arguments.keras_tensors:
inbound_op = kt._keras_history.operation
if inbound_op is not None: # It's a graph entry point.
inbound_op._outbound_nodes.append(self)
# Set metadata on outputs.
if not zero_history:
node_index = len(self.operation._inbound_nodes) - 1
for i, tensor in enumerate(self.outputs):
tensor._keras_history = KerasHistory(
operation=operation, node_index=node_index, tensor_index=i
)
# Whether this is a root node.
self.is_input = not self.arguments.keras_tensors
def __repr__(self):
return f"<Node operation={self.operation.name}, id={id(self)}>"
@property
def input_tensors(self):
return self.arguments.keras_tensors
@property
def output_tensors(self):
return self.outputs
@property
def parent_nodes(self):
"""The parent `Node`s.
Returns:
all the `Node`s whose output this node immediately depends on.
"""
node_deps = []
for kt in self.arguments.keras_tensors:
op = kt._keras_history.operation
node_index = kt._keras_history.node_index
if op is not None: # `None` for `Input` tensors.
node_deps.append(op._inbound_nodes[node_index])
return node_deps
| Node |
python | allegroai__clearml | clearml/backend_api/services/v2_9/workers.py | {
"start": 43264,
"end": 46023
} | class ____(Request):
"""
Returns count of active company workers in the selected time range.
:param from_date: Starting time (in seconds from epoch) for collecting
statistics
:type from_date: float
:param to_date: Ending time (in seconds from epoch) for collecting statistics
:type to_date: float
:param interval: Time interval in seconds for a single statistics point. The
minimal value is 1
:type interval: int
"""
_service = "workers"
_action = "get_activity_report"
_version = "2.9"
_schema = {
"definitions": {},
"properties": {
"from_date": {
"description": "Starting time (in seconds from epoch) for collecting statistics",
"type": "number",
},
"interval": {
"description": "Time interval in seconds for a single statistics point. The minimal value is 1",
"type": "integer",
},
"to_date": {
"description": "Ending time (in seconds from epoch) for collecting statistics",
"type": "number",
},
},
"required": ["from_date", "to_date", "interval"],
"type": "object",
}
def __init__(self, from_date: float, to_date: float, interval: int, **kwargs: Any) -> None:
super(GetActivityReportRequest, self).__init__(**kwargs)
self.from_date = from_date
self.to_date = to_date
self.interval = interval
@schema_property("from_date")
def from_date(self) -> float:
return self._property_from_date
@from_date.setter
def from_date(self, value: float) -> None:
if value is None:
self._property_from_date = None
return
self.assert_isinstance(value, "from_date", six.integer_types + (float,))
self._property_from_date = value
@schema_property("to_date")
def to_date(self) -> float:
return self._property_to_date
@to_date.setter
def to_date(self, value: float) -> None:
if value is None:
self._property_to_date = None
return
self.assert_isinstance(value, "to_date", six.integer_types + (float,))
self._property_to_date = value
@schema_property("interval")
def interval(self) -> int:
return self._property_interval
@interval.setter
def interval(self, value: int) -> None:
if value is None:
self._property_interval = None
return
if isinstance(value, float) and value.is_integer():
value = int(value)
self.assert_isinstance(value, "interval", six.integer_types)
self._property_interval = value
| GetActivityReportRequest |
python | dagster-io__dagster | python_modules/dagster/dagster_tests/storage_tests/test_event_log.py | {
"start": 1394,
"end": 2201
} | class ____(TestEventLogStorage):
__test__ = True
@pytest.fixture(scope="function", name="storage")
def event_log_storage(self, instance):
yield instance.event_log_storage
@pytest.fixture(name="instance", scope="function")
def instance(self):
with DagsterInstance.ephemeral() as the_instance:
yield the_instance
def can_wipe_asset_partitions(self) -> bool:
return False
@pytest.mark.skipif(
sys.version_info >= (3, 12) and sqlalchemy_version.startswith("1.4."),
reason="flaky Sqlite issues on certain version combinations",
)
def test_basic_get_logs_for_run_multiple_runs_cursors(self, instance, storage):
super().test_basic_get_logs_for_run_multiple_runs_cursors(instance, storage)
| TestInMemoryEventLogStorage |
python | huggingface__transformers | src/transformers/models/t5/modeling_t5.py | {
"start": 3940,
"end": 5230
} | class ____(nn.Module):
def __init__(self, config: T5Config):
super().__init__()
self.wi_0 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wi_1 = nn.Linear(config.d_model, config.d_ff, bias=False)
self.wo = nn.Linear(config.d_ff, config.d_model, bias=False)
self.dropout = nn.Dropout(config.dropout_rate)
self.act = ACT2FN[config.dense_act_fn]
def forward(self, hidden_states):
hidden_gelu = self.act(self.wi_0(hidden_states))
hidden_linear = self.wi_1(hidden_states)
hidden_states = hidden_gelu * hidden_linear
hidden_states = self.dropout(hidden_states)
# To make 8bit quantization work for google/flan-t5-xxl, self.wo is kept in float32.
# See https://github.com/huggingface/transformers/issues/20287
# we also make sure the weights are not in `int8` in case users will force `_keep_in_fp32_modules` to be `None``
if (
isinstance(self.wo.weight, torch.Tensor)
and hidden_states.dtype != self.wo.weight.dtype
and self.wo.weight.dtype != torch.int8
):
hidden_states = hidden_states.to(self.wo.weight.dtype)
hidden_states = self.wo(hidden_states)
return hidden_states
| T5DenseGatedActDense |
python | cython__cython | Cython/Compiler/PyrexTypes.py | {
"start": 198292,
"end": 223624
} | class ____(Exception):
def __init__(self, errors, candidate_count):
if len(errors) == 1 or len({msg for _, msg in errors}) == 1:
_, errmsg = errors[0]
elif candidate_count:
errmsg = f"no suitable method found (candidates: {candidate_count})"
else:
# No candidates at all. This can happen with fused types,
# in which case the error is reported elsewhere.
errmsg = ""
super().__init__(errmsg)
def map_argument_type(src_type, dst_type):
"""Return a tuple (src_type, target_type, needs_coercion).
"""
if dst_type.assignable_from(src_type):
return (src_type, dst_type)
# Now take care of unprefixed string literals. So when you call a cdef
# function that takes a char *, the coercion will mean that the
# type will simply become bytes. We need to do this coercion
# manually for overloaded and fused functions
c_src_type = None
if src_type.is_pyobject:
if src_type.is_builtin_type and src_type.name == 'str' and dst_type.resolve().is_string:
c_src_type = dst_type.resolve()
else:
c_src_type = src_type.default_coerced_ctype()
elif src_type.is_pythran_expr:
c_src_type = src_type.org_buffer
if c_src_type is not None and dst_type.assignable_from(c_src_type):
return (c_src_type, dst_type)
return (src_type, None)
def best_match(arg_types, functions, fail_if_empty=False, arg_is_lvalue_array=None):
"""
Given a list args of arguments and a list of functions, choose one
to call which seems to be the "best" fit for this list of arguments.
This function is used, e.g., when deciding which overloaded method
to dispatch for C++ classes.
We first eliminate functions based on arity, and if only one
function has the correct arity, we return it. Otherwise, we weight
functions based on how much work must be done to convert the
arguments, with the following priorities:
* identical types or pointers to identical types
* promotions
* non-Python types
That is, we prefer functions where no arguments need converted,
and failing that, functions where only promotions are required, and
so on.
If no function is deemed a good fit, or if two or more functions have
the same weight, we return None (as there is no best match). If pos
is not None, we also generate an error.
"""
actual_nargs = len(arg_types)
candidates = []
errors = []
for func in functions:
error_mesg = ""
func_type = func.type
if func_type.is_ptr:
func_type = func_type.base_type
# Check function type
if not func_type.is_cfunction:
if not func_type.is_error and fail_if_empty:
error_mesg = f"Calling non-function type '{func_type}'"
errors.append((func, error_mesg))
continue
# Check no. of args
max_nargs = len(func_type.args)
min_nargs = max_nargs - func_type.optional_arg_count
if actual_nargs < min_nargs or (not func_type.has_varargs and actual_nargs > max_nargs):
if max_nargs == min_nargs and not func_type.has_varargs:
expectation = max_nargs
elif actual_nargs < min_nargs:
expectation = f"at least {min_nargs}"
else:
expectation = f"at most {max_nargs}"
errors.append((func, f"Call with wrong number of arguments (expected {expectation}, got {actual_nargs})"))
continue
if func_type.templates:
# For any argument/parameter pair A/P, if P is a forwarding reference,
# use lvalue-reference-to-A for deduction in place of A when the
# function call argument is an lvalue. See:
# https://en.cppreference.com/w/cpp/language/template_argument_deduction#Deduction_from_a_function_call
arg_types_for_deduction = list(arg_types)
if func.type.is_cfunction and arg_is_lvalue_array:
for i, formal_arg in enumerate(func.type.args):
if formal_arg.is_forwarding_reference():
if arg_is_lvalue_array[i]:
arg_types_for_deduction[i] = c_ref_type(arg_types[i])
deductions = reduce(
merge_template_deductions,
[pattern.type.deduce_template_params(actual) for (pattern, actual) in zip(func_type.args, arg_types_for_deduction)],
{})
if deductions is None:
errors.append((func, "Unable to deduce type parameters for %s given (%s)" % (
func_type, ', '.join(map(str, arg_types_for_deduction)))))
elif len(deductions) < len(func_type.templates):
errors.append((func, "Unable to deduce type parameter %s" % (
", ".join([param.name for param in func_type.templates if param not in deductions]))))
else:
type_list = [deductions[param] for param in func_type.templates]
from .Symtab import Entry
specialization = Entry(
name = func.name + "[%s]" % ",".join([str(t) for t in type_list]),
cname = func.cname + "<%s>" % ",".join([t.empty_declaration_code() for t in type_list]),
type = func_type.specialize(deductions),
pos = func.pos)
specialization.scope = func.scope
candidates.append((specialization, specialization.type))
else:
candidates.append((func, func_type))
# Optimize the most common case of no overloading...
if len(candidates) == 1:
return candidates[0][0]
elif not candidates:
if fail_if_empty:
raise NoMatchFound(errors, len(functions))
return None
possibilities = []
bad_types = []
for index, (func, func_type) in enumerate(candidates):
score = [0,0,0,0,0,0,0]
for i in range(min(actual_nargs, len(func_type.args))):
src_type, dst_type = map_argument_type(arg_types[i], func_type.args[i].type)
if dst_type is None:
bad_types.append((func, f"Invalid conversion from '{arg_types[i]}' to '{func_type.args[i].type}'"))
break
if src_type == dst_type or dst_type.same_as(src_type):
pass # score 0
elif func_type.is_strict_signature:
break # exact match requested but not found
elif is_promotion(src_type, dst_type):
score[2] += 1
elif ((src_type.is_int and dst_type.is_int) or
(src_type.is_float and dst_type.is_float)):
src_is_unsigned = not src_type.signed
dst_is_unsigned = not dst_type.signed
score[2] += abs(dst_type.rank + dst_is_unsigned -
(src_type.rank + src_is_unsigned)) + 1
# Prefer assigning to larger types over smaller types, unless they have different signedness.
score[3] += (dst_type.rank < src_type.rank) * 2 + (src_is_unsigned != dst_is_unsigned)
elif dst_type.is_ptr and src_type.is_ptr:
if dst_type.base_type == c_void_type:
score[4] += 1
elif src_type.base_type.is_cpp_class and src_type.base_type.is_subclass(dst_type.base_type):
score[6] += src_type.base_type.subclass_dist(dst_type.base_type)
else:
score[5] += 1
elif not src_type.is_pyobject:
score[1] += 1
else:
score[0] += 1
else:
possibilities.append((score, index, func)) # so we can sort it
if possibilities:
possibilities.sort()
if len(possibilities) > 1:
score1 = possibilities[0][0]
score2 = possibilities[1][0]
if score1 == score2:
if fail_if_empty:
raise NoMatchFound([(None, "ambiguous overloaded method")], len(functions))
return None
function = possibilities[0][-1]
return function
if fail_if_empty:
raise NoMatchFound(bad_types, len(functions))
return None
def merge_template_deductions(a, b):
# Used to reduce lists of deduced template mappings into one mapping.
if a is None or b is None:
return None
add_if_missing = a.setdefault
for param, value in b.items():
if add_if_missing(param, value) != value:
# Found mismatch, cannot merge.
return None
return a
def widest_numeric_type(type1, type2):
"""Given two numeric types, return the narrowest type encompassing both of them.
"""
if type1.is_reference:
type1 = type1.ref_base_type
if type2.is_reference:
type2 = type2.ref_base_type
if type1.is_cv_qualified:
type1 = type1.cv_base_type
if type2.is_cv_qualified:
type2 = type2.cv_base_type
if type1 == type2:
widest_type = type1
elif type1.is_complex or type2.is_complex:
def real_type(ntype):
if ntype.is_complex:
return ntype.real_type
return ntype
widest_type = CComplexType(
widest_numeric_type(
real_type(type1),
real_type(type2)))
if type1 is soft_complex_type or type2 is soft_complex_type:
type1_is_other_complex = type1 is not soft_complex_type and type1.is_complex
type2_is_other_complex = type2 is not soft_complex_type and type2.is_complex
if (not type1_is_other_complex and not type2_is_other_complex and
widest_type.real_type == soft_complex_type.real_type):
# ensure we can do an actual "is" comparison
# (this possibly goes slightly wrong when mixing long double and soft complex)
widest_type = soft_complex_type
elif type1.is_enum and type2.is_enum:
widest_type = c_int_type
elif type1.rank < type2.rank:
widest_type = type2
elif type1.rank > type2.rank:
widest_type = type1
elif type1.signed < type2.signed:
widest_type = type1
elif type1.signed > type2.signed:
widest_type = type2
elif type1.is_typedef > type2.is_typedef:
widest_type = type1
else:
widest_type = type2
return widest_type
def result_type_of_builtin_operation(builtin_type, type2):
"""
Try to find a suitable (C) result type for a binary operation with a known builtin type.
"""
if builtin_type.name == 'float':
if type2.is_numeric:
return widest_numeric_type(c_double_type, type2)
elif type2.is_builtin_type and type2.name in ('int', 'float'):
return c_double_type
elif type2.is_builtin_type and type2.name == 'complex':
return type2
elif builtin_type.name == 'int':
if type2 == builtin_type or type2.is_int:
return builtin_type
elif type2.is_float or type2.is_builtin_type and type2.name == 'float':
return c_double_type
elif type2.is_builtin_type and type2.name == 'complex':
return type2
elif builtin_type.name == 'complex':
if type2.is_complex:
return CComplexType(widest_numeric_type(c_double_type, type2.real_type))
elif type2.is_numeric:
return CComplexType(widest_numeric_type(c_double_type, type2))
elif type2.is_builtin_type and type2.name in ('int', 'float', 'complex'):
return CComplexType(c_double_type)
return None
def numeric_type_fits(small_type, large_type):
return widest_numeric_type(small_type, large_type) == large_type
def independent_spanning_type(type1, type2):
# Return a type assignable independently from both type1 and
# type2, but do not require any interoperability between the two.
# For example, in "True * 2", it is safe to assume an integer
# result type (so spanning_type() will do the right thing),
# whereas "x = True or 2" must evaluate to a type that can hold
# both a boolean value and an integer, so this function works
# better.
if type1.is_reference ^ type2.is_reference:
if type1.is_reference:
type1 = type1.ref_base_type
else:
type2 = type2.ref_base_type
resolved_type1 = type1.resolve()
resolved_type2 = type2.resolve()
if resolved_type1 == resolved_type2:
return type1
elif ((resolved_type1 is c_bint_type or resolved_type2 is c_bint_type)
and (type1.is_numeric and type2.is_numeric)):
# special case: if one of the results is a bint and the other
# is another C integer, we must prevent returning a numeric
# type so that we do not lose the ability to coerce to a
# Python bool if we have to.
return py_object_type
elif resolved_type1.is_pyobject != resolved_type2.is_pyobject:
# e.g. PyFloat + double => double
if resolved_type1.is_pyobject and resolved_type1.equivalent_type == resolved_type2:
return resolved_type2
if resolved_type2.is_pyobject and resolved_type2.equivalent_type == resolved_type1:
return resolved_type1
# PyInt + C int => PyInt
if resolved_type1.is_int and resolved_type2.is_builtin_type and resolved_type2.name == 'int':
return resolved_type2
if resolved_type2.is_int and resolved_type1.is_builtin_type and resolved_type1.name == 'int':
return resolved_type1
# e.g. PyInt + double => object
return py_object_type
span_type = _spanning_type(type1, type2)
if span_type is None:
return error_type
return span_type
def spanning_type(type1, type2):
    """Return a type assignable from both type1 and type2, falling back to
    py_object_type if no better type is found.

    Assumes the caller attempts a coercion afterwards, which will fail if
    the types cannot actually coerce to a py_object_type.
    """
    if type1 == type2:
        return type1
    if type1 is py_object_type or type2 is py_object_type:
        return py_object_type
    if type1 is c_py_unicode_type or type2 is c_py_unicode_type:
        # Py_UNICODE behaves more like a string than an int.
        return py_object_type
    span_type = _spanning_type(type1, type2)
    return py_object_type if span_type is None else span_type
def _spanning_type(type1, type2):
    """Shared worker for spanning_type() / independent_spanning_type().

    Returns a type assignable from both inputs, or None when no suitable
    type exists (callers substitute their own fallback).  Branch order
    matters: numeric widening and builtin handling take precedence over
    the generic assignability checks.
    """
    if type1.is_numeric and type2.is_numeric:
        return widest_numeric_type(type1, type2)
    elif type1.is_builtin_type:
        return result_type_of_builtin_operation(type1, type2) or py_object_type
    elif type2.is_builtin_type:
        return result_type_of_builtin_operation(type2, type1) or py_object_type
    elif type1.is_extension_type and type2.is_extension_type:
        # Find a common base class of the two extension types.
        return widest_extension_type(type1, type2)
    elif type1.is_pyobject or type2.is_pyobject:
        return py_object_type
    elif type1.assignable_from(type2):
        if type1.is_extension_type and type1.typeobj_is_imported():
            # external types are unsafe, so we use PyObject instead
            return py_object_type
        return type1
    elif type2.assignable_from(type1):
        if type2.is_extension_type and type2.typeobj_is_imported():
            # external types are unsafe, so we use PyObject instead
            return py_object_type
        return type2
    elif type1.is_ptr and type2.is_ptr:
        if type1.base_type.is_cpp_class and type2.base_type.is_cpp_class:
            # Pointers to C++ classes may span to a pointer to a common base.
            common_base = widest_cpp_type(type1.base_type, type2.base_type)
            if common_base:
                return CPtrType(common_base)
        # incompatible pointers, void* will do as a result
        return c_void_ptr_type
    else:
        return None
def widest_extension_type(type1, type2):
    """Return a common ancestor of two extension types by walking both
    inheritance chains upwards in lock-step; falls back to py_object_type."""
    if type1.typeobj_is_imported() or type2.typeobj_is_imported():
        # External types are unsafe to unify at the C level.
        return py_object_type
    while True:
        if type1.subtype_of(type2):
            return type2
        if type2.subtype_of(type1):
            return type1
        type1 = type1.base_type
        type2 = type2.base_type
        if type1 is None or type2 is None:
            # Ran off the top of a hierarchy without finding a common base.
            return py_object_type
def widest_cpp_type(type1, type2):
    """Return the single most-derived common base class of two C++ classes,
    or None when there is no unique best candidate."""
    @cached_function
    def bases(cls):
        result = set()
        for base in cls.base_classes:
            result.add(base)
            result.update(bases(base))
        return result
    common_bases = bases(type1).intersection(bases(type2))
    # Collect everything that is itself a base of a common base; those are
    # less derived and thus not candidates.
    ancestors = set()
    for b in common_bases:
        ancestors.update(bases(b))
    candidates = [b for b in common_bases if b not in ancestors]
    if len(candidates) == 1:
        return candidates[0]
    # Ambiguous or empty: the caller falls back to void* for now.
    return None
def simple_c_type(signed, longness, name):
    """Look up the type descriptor for a simple C type from its name and
    signedness/longness modifiers.  Returns None if the combination does
    not make sense."""
    key = (signed, longness, name)
    return modifiers_and_name_to_type.get(key)
def parse_basic_type(name: str):
    """Parse a basic type name string (e.g. 'int', 'p_long', 'double*',
    'const_int', 'ulonglong') into a type object, or return None if the
    name is not recognised.
    """
    base = None
    # Pointer spellings: 'p_<base>', 'p<base>' or trailing '*'.
    if name.startswith('p_'):
        base = parse_basic_type(name[2:])
    elif name.startswith('p'):
        base = parse_basic_type(name[1:])
    elif name.endswith('*'):
        base = parse_basic_type(name[:-1])
    if base:
        return CPtrType(base)
    # Qualified spellings: 'const_<base>' / 'volatile_<base>'.
    if name.startswith(('const_', 'volatile_')):
        modifier, _, base_name = name.partition('_')
        base = parse_basic_type(base_name)
        if base:
            return CConstOrVolatileType(
                base, is_const=modifier == 'const', is_volatile=modifier == 'volatile')
    #
    basic_type = parse_basic_ctype(name)
    if basic_type:
        return basic_type
    #
    # Signedness prefix: 'u' => unsigned (0), 's' => explicitly signed (2),
    # default signed (1).  'short' starts with 's' but is a longness word.
    if name.startswith('u'):
        name = name[1:]
        signed = 0
    elif (name.startswith('s') and
            not name.startswith('short')):
        name = name[1:]
        signed = 2
    else:
        signed = 1
    # We parse both (cy) 'long long' and (py) 'longlong' style names here.
    longness = 0
    while name.startswith(('long', 'short')):
        if name.startswith('long'):
            name = name[4:].lstrip()
            longness += 1
        else:
            name = name[5:].lstrip()
            longness -= 1
    if longness != 0 and not name:
        name = 'int'  # long/short [int]
    return simple_c_type(signed, longness, name)
def parse_basic_ctype(name):
    """Parse a space-free C type name (what a NameNode can represent).

    Compound spellings such as 'longlong' or 'p_long' are deliberately not
    handled here.  Returns the matching type descriptor or None.
    """
    if name in fixed_sign_int_types:
        return fixed_sign_int_types[name][1]
    if "complex" in name and name != "complex":
        # Compound complex spellings are not "simple" names.
        return None
    result = simple_c_type(1, 0, name)
    return result if result else None
def _construct_type_from_base(cls, base_type, *args):
    """Build cls(base_type, *args), propagating error_type unchanged instead
    of wrapping it."""
    return error_type if base_type is error_type else cls(base_type, *args)
def c_array_type(base_type, size):
    """Construct a C array type of `size` elements of `base_type`."""
    return _construct_type_from_base(CArrayType, base_type, size)
def c_ptr_type(base_type):
    """Construct a C pointer type.

    A pointer to a reference collapses into a pointer to the referenced type.
    """
    target = base_type.ref_base_type if base_type.is_reference else base_type
    return _construct_type_from_base(CPtrType, target)
def c_ref_type(base_type):
    """Construct a C reference type to `base_type`."""
    return _construct_type_from_base(CReferenceType, base_type)
def cpp_rvalue_ref_type(base_type):
    """Construct a C++ rvalue reference type to `base_type`."""
    return _construct_type_from_base(CppRvalueReferenceType, base_type)
def c_const_or_volatile_type(base_type, is_const=False, is_volatile=False):
    """Construct a const- and/or volatile-qualified version of `base_type`."""
    return _construct_type_from_base(
        CConstOrVolatileType, base_type, is_const, is_volatile)
def same_type(type1, type2):
    """Free-function wrapper around type1.same_as(type2)."""
    return type1.same_as(type2)
def assignable_from(type1, type2):
    """Free-function wrapper around type1.assignable_from(type2)."""
    return type1.assignable_from(type2)
def typecast(to_type, from_type, expr_code):
    """Return `expr_code` cast to a C type assignable to `to_type`, assuming
    its existing C type is `from_type`.  Emits no cast when none is needed."""
    if to_type is from_type:
        return expr_code
    if not to_type.is_pyobject and assignable_from(to_type, from_type):
        return expr_code
    if (to_type is py_object_type and from_type and
            from_type.is_builtin_type and from_type.name != 'type'):
        # No cast needed, builtins are PyObject* already.
        return expr_code
    #print "typecast: to", to_type, "from", from_type ###
    return to_type.cast_code(expr_code)
def type_list_identifier(types):
    """Build a length-capped identifier encoding a sequence of types,
    joined by '__and_'."""
    parts = (type_identifier(t) for t in types)
    return cap_length('__and_'.join(parts))
# Substrings that can occur in a C type declaration, mapped to replacement
# tokens that are safe inside a C identifier.
_special_type_characters = {
    '__': '__dunder',
    'const ': '__const_',
    ' ': '__space_',
    '*': '__ptr',
    '&': '__ref',
    '&&': '__fwref',
    '[': '__lArr',
    ']': '__rArr',
    '<': '__lAng',
    '>': '__rAng',
    '(': '__lParen',
    ')': '__rParen',
    ',': '__comma_',
    '...': '__EL',
    '::': '__in_',
    ':': '__D',
}

# Pre-bound substitution function: rewrites every special substring in a
# declaration (swallowing optional surrounding spaces) using the map above.
_escape_special_type_characters = partial(re.compile(
    # join substrings in reverse order to put longer matches first, e.g. "::" before ":"
    " ?(%s) ?" % "|".join(re.escape(s) for s in sorted(_special_type_characters, reverse=True))
).sub, lambda match: _special_type_characters[match.group(1)])
def type_identifier(type, pyrex=False):
    """Return a safe identifier for `type`, mangled with the type's scope
    when its entry has one."""
    decl = type.empty_declaration_code(pyrex=pyrex)
    entry = getattr(type, "entry", None)
    scope = entry.scope if entry and entry.scope else None
    return type_identifier_from_declaration(decl, scope=scope)
# Memoized results of type_identifier_from_declaration(), keyed by
# (declaration string, scope).
_type_identifier_cache = {}

def type_identifier_from_declaration(decl, scope = None):
    """Turn a C declaration string into a safe, length-capped identifier.

    Results are cached per (decl, scope) pair.
    """
    key = (decl, scope)
    cached = _type_identifier_cache.get(key)
    if cached is not None:
        return cached
    safe = decl
    if scope:
        safe = scope.mangle(prefix="", name=safe)
    # Collapse runs of spaces, then drop spaces around non-identifier chars.
    safe = re.sub(' +', ' ', safe)
    safe = re.sub(' ?([^a-zA-Z0-9_]) ?', r'\1', safe)
    # Replace known special character sequences with readable tokens,
    # hex-escape whatever remains, and cap the overall length.
    safe = _escape_special_type_characters(safe)
    safe = cap_length(re.sub('[^a-zA-Z0-9_]', lambda m: '__%X' % ord(m.group(0)), safe))
    _type_identifier_cache[key] = safe
    return safe
def cap_length(s, max_len=63):
    """Shorten `s` to at most 63 characters for use as a C identifier.

    Long strings are replaced by a 6-hex-digit sha256 prefix, a truncated
    slice of the original, and an '__etc' suffix so results stay unique
    and recognisable.
    """
    if len(s) <= max_len:
        return s
    digest_prefix = hashlib.sha256(s.encode('ascii')).hexdigest()[:6]
    return f"{digest_prefix}__{s[:max_len - 17]}__etc"
def write_noexcept_performance_hint(pos, env,
                                    function_name=None, void_return=False, is_call=False,
                                    is_from_pxd=False):
    """Emit a performance hint that an implicit exception check will always
    need the GIL, with suggested fixes tailored to the situation.

    `is_call` distinguishes call sites from declarations; `void_return` and
    `is_from_pxd` add extra suggested solutions.
    """
    if function_name:
        # we need it escaped everywhere we use it
        function_name = "'%s'" % function_name
    if is_call:
        on_what = "after calling %s " % (function_name or 'function')
    elif function_name:
        on_what = "on %s " % function_name
    else:
        on_what =''
    msg = (
        "Exception check %swill always require the GIL to be acquired."
    ) % on_what
    the_function = function_name if function_name else "the function"
    if is_call and not function_name:
        the_function = the_function + " you are calling"
    # First solution always applies; the others are conditional.
    solutions = ["Declare %s as 'noexcept' if you control the definition and "
                 "you're sure you don't want the function to raise exceptions."
                 % the_function]
    if void_return:
        solutions.append(
            "Use an 'int' return type on %s to allow an error code to be returned." %
            the_function)
    if is_from_pxd and not void_return:
        solutions.append(
            "Declare any exception value explicitly for functions in pxd files.")
    if len(solutions) == 1:
        msg = "%s %s" % (msg, solutions[0])
    else:
        # Multiple suggestions: format as a numbered list.
        solutions = ["\t%s. %s" % (i+1, s) for i, s in enumerate(solutions)]
        msg = "%s\nPossible solutions:\n%s" % (msg, "\n".join(solutions))
    performance_hint(pos, msg, env)
def remove_cv_ref(tp, remove_fakeref=False):
    """Strip const/volatile qualifiers and reference wrappers from a type.

    Named by analogy with C++ std::remove_cv / remove_reference.  Fake
    references are only stripped when `remove_fakeref` is True.  Iterates
    in case qualifiers and references are nested in either order.
    """
    previous = None
    while tp != previous:
        previous = tp
        if tp.is_cv_qualified:
            tp = tp.cv_base_type
        if tp.is_reference and (not tp.is_fake_reference or remove_fakeref):
            tp = tp.ref_base_type
    return tp
def get_all_subtypes(tp, _seen=None):
    """Generate all transitive subtypes of the given type, in top-down order.

    `_seen` threads the set of already-yielded types through the recursion
    to guard against duplicates and cycles.
    """
    if _seen is None:
        _seen = set()
    yield tp
    _seen.add(tp)
    for attr_name in tp.subtypes:
        value = getattr(tp, attr_name)
        if not value:
            continue
        # A subtype attribute holds either a single type or an iterable.
        children = [value] if isinstance(value, BaseType) else value
        for child in children:
            if child not in _seen:
                yield from get_all_subtypes(child, _seen)
| NoMatchFound |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_M.py | {
"start": 19766,
"end": 20837
} | class ____(Benchmark):
r"""
MultiModal objective function.
This class defines the MultiModal global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{MultiModal}}(x) = \left( \sum_{i=1}^n \lvert x_i \rvert
\right) \left( \prod_{i=1}^n \lvert x_i \rvert \right)
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-10, 10]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 0` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-10.0] * self.N, [10.0] * self.N))
self.custom_bounds = [(-5, 5), (-5, 5)]
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 0.0
def fun(self, x, *args):
self.nfev += 1
return sum(abs(x)) * prod(abs(x))
| MultiModal |
python | scipy__scipy | scipy/optimize/tests/test_quadratic_assignment.py | {
"start": 7442,
"end": 9909
} | class ____(QAPCommonTests):
method = "faq"
def test_options(self):
# cost and distance matrices of QAPLIB instance chr12c
rng = np.random.default_rng(4358764578823597324)
A, B, opt_perm = chr12c()
n = len(A)
# check that max_iter is obeying with low input value
res = quadratic_assignment(A, B, options={'maxiter': 5})
assert_equal(res.nit, 5)
# test with shuffle
res = quadratic_assignment(A, B, options={'shuffle_input': True})
assert_(11156 <= res.fun < 21000)
# test with randomized init
res = quadratic_assignment(A, B, options={'rng': rng, 'P0': "randomized"})
assert_(11156 <= res.fun < 21000)
# check with specified P0
K = np.ones((n, n)) / float(n)
K = _doubly_stochastic(K)
res = quadratic_assignment(A, B, options={'P0': K})
assert_(11156 <= res.fun < 21000)
def test_specific_input_validation(self):
A = np.identity(2)
B = A
# method is implicitly faq
# ValueError Checks: making sure single value parameters are of
# correct value
with pytest.raises(ValueError, match="Invalid 'P0' parameter"):
quadratic_assignment(A, B, options={'P0': "random"})
with pytest.raises(
ValueError, match="'maxiter' must be a positive integer"):
quadratic_assignment(A, B, options={'maxiter': -1})
with pytest.raises(ValueError, match="'tol' must be a positive float"):
quadratic_assignment(A, B, options={'tol': -1})
# TypeError Checks: making sure single value parameters are of
# correct type
with pytest.raises(TypeError):
quadratic_assignment(A, B, options={'maxiter': 1.5})
# test P0 matrix input
with pytest.raises(
ValueError,
match="`P0` matrix must have shape m' x m', where m'=n-m"):
quadratic_assignment(
np.identity(4), np.identity(4),
options={'P0': np.ones((3, 3))}
)
K = [[0.4, 0.2, 0.3],
[0.3, 0.6, 0.2],
[0.2, 0.2, 0.7]]
# matrix that isn't quite doubly stochastic
with pytest.raises(
ValueError, match="`P0` matrix must be doubly stochastic"):
quadratic_assignment(
np.identity(3), np.identity(3), options={'P0': K}
)
| TestFAQ |
python | django-import-export__django-import-export | tests/core/tests/test_widgets.py | {
"start": 24846,
"end": 27254
} | class ____(TestCase, RowDeprecationTestMixin):
def setUp(self):
self.widget = widgets.ManyToManyWidget(Category)
self.widget_name = widgets.ManyToManyWidget(Category, field="name")
self.cat1 = Category.objects.create(name="Cat úňíčóďě")
self.cat2 = Category.objects.create(name="Cat 2")
def test_clean(self):
value = f"{self.cat1.pk},{self.cat2.pk}"
cleaned_data = self.widget.clean(value)
self.assertEqual(len(cleaned_data), 2)
self.assertIn(self.cat1, cleaned_data)
self.assertIn(self.cat2, cleaned_data)
def test_clean_field(self):
value = f"{self.cat1.name},{self.cat2.name}"
cleaned_data = self.widget_name.clean(value)
self.assertEqual(len(cleaned_data), 2)
self.assertIn(self.cat1, cleaned_data)
self.assertIn(self.cat2, cleaned_data)
def test_clean_field_spaces(self):
value = f"{self.cat1.name}, {self.cat2.name}"
cleaned_data = self.widget_name.clean(value)
self.assertEqual(len(cleaned_data), 2)
self.assertIn(self.cat1, cleaned_data)
self.assertIn(self.cat2, cleaned_data)
def test_clean_typo(self):
value = "%s," % self.cat1.pk
cleaned_data = self.widget.clean(value)
self.assertEqual(len(cleaned_data), 1)
self.assertIn(self.cat1, cleaned_data)
@mock.patch("core.models.Category.objects.none")
def test_clean_handles_None_value(self, mock_none):
self.widget.clean(None)
self.assertEqual(1, mock_none.call_count)
def test_int(self):
value = self.cat1.pk
cleaned_data = self.widget.clean(value)
self.assertEqual(len(cleaned_data), 1)
self.assertIn(self.cat1, cleaned_data)
def test_float(self):
value = float(self.cat1.pk)
cleaned_data = self.widget.clean(value)
self.assertEqual(len(cleaned_data), 1)
self.assertIn(self.cat1, cleaned_data)
def test_render(self):
self.assertEqual(
self.widget.render(Category.objects.order_by("id")),
f"{self.cat1.pk},{self.cat2.pk}",
)
self.assertEqual(
self.widget_name.render(Category.objects.order_by("id")),
f"{self.cat1.name},{self.cat2.name}",
)
def test_render_value_none_as_blank(self):
self.assertEqual("", self.widget.render(None))
| ManyToManyWidget |
python | getsentry__sentry | src/sentry/api/bases/organization.py | {
"start": 6440,
"end": 6583
} | class ____(OrganizationPermission):
scope_map = {"GET": ["project:read", "project:write", "project:admin"]}
| OrganizationUserReportsPermission |
python | pypa__pip | src/pip/_vendor/resolvelib/resolvers/exceptions.py | {
"start": 817,
"end": 1283
} | class ____(ResolverException, Generic[RT, CT]):
def __init__(self, candidate: CT, criterion: Criterion[RT, CT]):
super().__init__(candidate, criterion)
self.candidate = candidate
self.criterion = criterion
def __str__(self) -> str:
return "Provided candidate {!r} does not satisfy {}".format(
self.candidate,
", ".join(repr(r) for r in self.criterion.iter_requirement()),
)
| InconsistentCandidate |
python | getsentry__sentry | src/sentry/issues/endpoints/organization_group_search_view_details.py | {
"start": 1373,
"end": 4939
} | class ____(OrganizationEndpoint):
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
"PUT": ApiPublishStatus.EXPERIMENTAL,
"DELETE": ApiPublishStatus.EXPERIMENTAL,
}
owner = ApiOwner.ISSUES
permission_classes = (GroupSearchViewPermission,)
def get(self, request: Request, organization: Organization, view_id: str) -> Response:
"""
Get an issue view for the current organization member.
"""
try:
view = GroupSearchView.objects.get(id=view_id, organization=organization)
except GroupSearchView.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
return Response(
serialize(
view,
request.user,
serializer=GroupSearchViewSerializer(
organization=organization,
),
),
status=status.HTTP_200_OK,
)
def put(self, request: Request, organization: Organization, view_id: str) -> Response:
"""
Update an issue view for the current organization member.
"""
if not features.has("organizations:issue-views", organization, actor=request.user):
return Response(status=status.HTTP_404_NOT_FOUND)
try:
view = GroupSearchView.objects.get(id=view_id, organization=organization)
except GroupSearchView.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
self.check_object_permissions(request, view)
serializer = ViewValidator(
data=request.data,
context={"organization": organization},
)
if not serializer.is_valid():
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
validated_data = serializer.validated_data
view.name = validated_data["name"]
view.query = validated_data["query"]
view.query_sort = validated_data["querySort"]
view.is_all_projects = validated_data["isAllProjects"]
view.environments = validated_data["environments"]
view.time_filters = validated_data["timeFilters"]
view.projects.set(validated_data["projects"])
view.save()
return Response(
serialize(
view,
request.user,
serializer=GroupSearchViewSerializer(
organization=organization,
),
),
status=status.HTTP_200_OK,
)
def delete(self, request: Request, organization: Organization, view_id: str) -> Response:
"""
Delete an issue view for the current organization member.
"""
try:
view = GroupSearchView.objects.get(id=view_id, organization=organization)
except GroupSearchView.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
self.check_object_permissions(request, view)
try:
GroupSearchViewStarred.objects.clear_starred_view_for_all_members(
organization=organization, view=view
)
except GroupSearchViewStarred.DoesNotExist:
pass
try:
GroupSearchViewLastVisited.objects.filter(
organization=organization, group_search_view=view
).delete()
except GroupSearchViewLastVisited.DoesNotExist:
pass
view.delete()
return Response(status=status.HTTP_204_NO_CONTENT)
| OrganizationGroupSearchViewDetailsEndpoint |
python | pennersr__django-allauth | allauth/mfa/webauthn/views.py | {
"start": 2892,
"end": 3616
} | class ____(NextRedirectMixin, DeleteView):
object: Authenticator # https://github.com/typeddjango/django-stubs/issues/1227
template_name = (
"mfa/webauthn/authenticator_confirm_delete."
+ account_settings.TEMPLATE_EXTENSION
)
success_url = reverse_lazy("mfa_list_webauthn")
def get_queryset(self):
return Authenticator.objects.filter(
user=self.request.user, type=Authenticator.Type.WEBAUTHN
)
def form_valid(self, form):
authenticator = self.get_object()
flows.remove_authenticator(self.request, authenticator)
return HttpResponseRedirect(self.get_success_url())
remove_webauthn = RemoveWebAuthnView.as_view()
| RemoveWebAuthnView |
python | PrefectHQ__prefect | tests/test_tasks.py | {
"start": 26881,
"end": 27775
} | class ____:
def test_task_version_defaults_to_null(self):
@task
def my_task():
pass
assert my_task.version is None
def test_task_version_can_be_provided(self):
@task(version="test-dev-experimental")
def my_task():
pass
assert my_task.version == "test-dev-experimental"
async def test_task_version_is_set_in_backend(
self, prefect_client, events_pipeline
):
@task(version="test-dev-experimental")
def my_task():
pass
@flow
def test():
return my_task(return_state=True)
task_state = test()
await events_pipeline.process_events()
task_run = await prefect_client.read_task_run(
task_state.state_details.task_run_id
)
assert task_run.task_version == "test-dev-experimental"
| TestTaskVersion |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/postgresql/json.py | {
"start": 2510,
"end": 2942
} | class ____(JSONPathType):
"""JSON Path Type.
This is usually required to cast literal values to json path when using
json search like function, such as ``jsonb_path_query_array`` or
``jsonb_path_exists``::
stmt = sa.select(
sa.func.jsonb_path_query_array(
table.c.jsonb_col, cast("$.address.id", JSONPATH)
)
)
"""
__visit_name__ = "JSONPATH"
| JSONPATH |
python | jazzband__django-model-utils | tests/test_managers/test_join_manager.py | {
"start": 130,
"end": 1372
} | class ____(TestCase):
def setUp(self) -> None:
for i in range(20):
BoxJoinModel.objects.create(name=f'name_{i}')
JoinItemForeignKey.objects.create(
weight=10, belonging=BoxJoinModel.objects.get(name='name_1')
)
JoinItemForeignKey.objects.create(weight=20)
def test_self_join(self) -> None:
a_slice = BoxJoinModel.objects.all()[0:10]
with self.assertNumQueries(1):
result = a_slice.join()
self.assertEqual(result.count(), 10)
def test_self_join_with_where_statement(self) -> None:
qs = BoxJoinModel.objects.filter(name='name_1')
result = qs.join()
self.assertEqual(result.count(), 1)
def test_join_with_other_qs(self) -> None:
item_qs = JoinItemForeignKey.objects.filter(weight=10)
boxes = BoxJoinModel.objects.all().join(qs=item_qs)
self.assertEqual(boxes.count(), 1)
self.assertEqual(boxes[0].name, 'name_1')
def test_reverse_join(self) -> None:
box_qs = BoxJoinModel.objects.filter(name='name_1')
items = JoinItemForeignKey.objects.all().join(box_qs)
self.assertEqual(items.count(), 1)
self.assertEqual(items[0].weight, 10)
| JoinManagerTest |
python | eth-brownie__brownie | brownie/_gui/source.py | {
"start": 279,
"end": 5615
} | class ____(ttk.Notebook):
def __init__(self, parent):
super().__init__(parent)
self.root = self._root()
self._scope = None
self.configure(padding=0)
self._frames = []
self.bind_count = 0
self.root.bind("<Left>", self.key_left)
self.root.bind("<Right>", self.key_right)
base_path = self.root.active_project._path.joinpath(
self.root.active_project._structure["contracts"]
)
for path in base_path.glob("**/*"):
if path.suffix in (".sol", ".vy"):
self.add(path)
self.set_visible([])
self.bind("<<NotebookTabChanged>>", self.on_tab_change)
def add(self, path):
path = Path(path)
label = path.name
if label in [i._label for i in self._frames]:
return
with path.open() as fp:
frame = SourceFrame(self, fp.read(), path.suffix)
super().add(frame, text=f" {label} ")
frame._id = len(self._frames)
frame._label = label
frame._visible = True
frame._path = str(path)
self._frames.append(frame)
def get_frame(self, label):
label = Path(label).name
return next(i for i in self._frames if i._label == label)
def hide(self, label):
frame = self.get_frame(label)
if frame._visible:
super().hide(frame)
frame._visible = False
def show(self, label):
label = Path(label).name
frame = next(i for i in self._frames if i._label == label)
if frame._visible:
return
frame._visible = True
super().add(frame, text=f" {label} ")
def on_tab_change(self, event):
if self.select():
tab = event.widget.tab("current")["text"]
self.root.toolbar.report.set_values(Path(tab).stem.strip())
def set_visible(self, labels):
labels = [Path(i).name for i in labels]
for label in [i._label for i in self._frames]:
if label in labels:
self.show(label)
else:
self.hide(label)
def active_frame(self):
id_ = self.index(self.select())
return self._frames[id_]
def set_active(self, label):
self.select(self.get_frame(label))
def key_left(self, event):
self._key([i for i in self._frames if i._visible][::-1])
def key_right(self, event):
self._key([i for i in self._frames if i._visible])
def _key(self, visible):
if not visible:
return
f = self.active_frame()
if visible[-1] == f:
self.select(visible[0])
else:
self.select(visible[visible.index(f) + 1])
def apply_scope(self, start, stop):
self.clear_scope()
frame = self.active_frame()
self._scope = [frame, start, stop]
frame.tag_add("dark", 0, start)
frame.tag_add("dark", stop, "end")
for f in [v for v in self._frames if v != frame]:
f.tag_add("dark", 0, "end")
def clear_scope(self):
self.unmark_all("dark")
self._scope = None
def show_msg(self, frame, tag, msg):
text = self.root.main.console.read()
frame.tag_bind(tag, "<Leave>", lambda e: self.root.main.console.write(text))
self.root.main.console.write(msg)
def mark(self, label, tag, start, stop, msg=None):
frame = self.get_frame(label)
frame.tag_add(tag, start, stop)
self.root.main.console.read()
if msg:
bind_tag = f"bind-{self.bind_count}"
frame.tag_add(bind_tag, start, stop)
frame.tag_bind(bind_tag, "<Enter>", lambda e: self.show_msg(frame, bind_tag, msg))
self.bind_count += 1
def unmark(self, label, tag):
frame = self.get_frame(label)
frame.tag_remove(tag)
def unmark_all(self, *tags):
for frame in self._frames:
for tag in tags:
frame.tag_remove(tag)
def unbind_all(self):
for frame in self._frames:
for tag in (f"bind-{i}" for i in range(self.bind_count)):
frame.tag_remove(tag)
frame.tag_unbind(tag, "<Enter>")
frame.tag_unbind(tag, "<Leave>")
self.bind_count = 0
def _search(self, event):
frame = self.active_frame()
tree = self.root.main.oplist
if not frame.tag_ranges("sel"):
tree.clear_selection()
return
start, stop = frame.tag_ranges("sel")
if self._scope and (
frame != self._scope[0] or start < self._scope[1] or stop > self._scope[2]
):
pc = False
else:
pc = [
k
for k, v in self.root.pcMap.items()
if "path" in v
and frame._label in self.root.pathMap[v["path"]]
and is_inside_offset((start, stop), v["offset"])
]
if not pc:
frame.clear_highlight()
tree.clear_selection()
return
def key(k):
return (start - self.root.pcMap[k]["offset"][0]) + (
self.root.pcMap[k]["offset"][1] - stop
)
id_ = sorted(pc, key=key)[0]
tree.selection_set(id_)
| SourceNoteBook |
python | ray-project__ray | rllib/utils/actor_manager.py | {
"start": 6188,
"end": 46065
} | class ____:
"""A manager that is aware of the healthiness of remote actors.
.. testcode::
import time
import ray
from ray.rllib.utils.actor_manager import FaultTolerantActorManager
@ray.remote
class MyActor:
def apply(self, fn):
return fn(self)
def do_something(self):
return True
actors = [MyActor.remote() for _ in range(3)]
manager = FaultTolerantActorManager(
actors, max_remote_requests_in_flight_per_actor=2,
)
# Synchronous remote calls.
results = manager.foreach_actor(lambda actor: actor.do_something())
# Print results ignoring returned errors.
print([r.get() for r in results.ignore_errors()])
# Asynchronous remote calls.
manager.foreach_actor_async(lambda actor: actor.do_something())
time.sleep(2) # Wait for the tasks to finish.
for r in manager.fetch_ready_async_reqs():
# Handle result and errors.
if r.ok:
print(r.get())
else:
print("Error: {}".format(r.get()))
"""
@dataclass
class _ActorState:
"""State of a single actor."""
# Num of outstanding async requests for this actor by tag.
num_in_flight_async_requests_by_tag: Dict[Optional[str], int] = field(
default_factory=dict
)
# Whether this actor is in a healthy state.
is_healthy: bool = True
def get_num_in_flight_requests(self, tag: Optional[str] = None) -> int:
"""Get number of in-flight requests for a specific tag or all tags."""
if tag is None:
return sum(self.num_in_flight_async_requests_by_tag.values())
return self.num_in_flight_async_requests_by_tag.get(tag, 0)
def increment_requests(self, tag: Optional[str] = None) -> None:
"""Increment the count of in-flight requests for a tag."""
if tag not in self.num_in_flight_async_requests_by_tag:
self.num_in_flight_async_requests_by_tag[tag] = 0
self.num_in_flight_async_requests_by_tag[tag] += 1
def decrement_requests(self, tag: Optional[str] = None) -> None:
"""Decrement the count of in-flight requests for a tag."""
if tag in self.num_in_flight_async_requests_by_tag:
self.num_in_flight_async_requests_by_tag[tag] -= 1
if self.num_in_flight_async_requests_by_tag[tag] <= 0:
del self.num_in_flight_async_requests_by_tag[tag]
def __init__(
self,
actors: Optional[List[ActorHandle]] = None,
max_remote_requests_in_flight_per_actor: int = 2,
init_id: int = 0,
):
"""Construct a FaultTolerantActorManager.
Args:
actors: A list of ray remote actors to manage on. These actors must have an
``apply`` method which takes a function with only one parameter (the
actor instance itself).
max_remote_requests_in_flight_per_actor: The maximum number of remote
requests that can be in flight per actor. Any requests made to the pool
that cannot be scheduled because the limit has been reached will be
dropped. This only applies to the asynchronous remote call mode.
init_id: The initial ID to use for the next remote actor. Default is 0.
"""
# For round-robin style async requests, keep track of which actor to send
# a new func next (current).
self._next_id = self._current_actor_id = init_id
# Actors are stored in a map and indexed by a unique (int) ID.
self._actors: Dict[int, ActorHandle] = {}
self._remote_actor_states: Dict[int, self._ActorState] = {}
self._restored_actors = set()
self.add_actors(actors or [])
# Maps outstanding async requests to the IDs of the actor IDs that
# are executing them.
self._in_flight_req_to_actor_id: Dict[ray.ObjectRef, int] = {}
self._max_remote_requests_in_flight_per_actor = (
max_remote_requests_in_flight_per_actor
)
# Useful metric.
self._num_actor_restarts = 0
@DeveloperAPI
def actor_ids(self) -> List[int]:
"""Returns a list of all worker IDs (healthy or not)."""
return list(self._actors.keys())
@DeveloperAPI
def healthy_actor_ids(self) -> List[int]:
"""Returns a list of worker IDs that are healthy."""
return [k for k, v in self._remote_actor_states.items() if v.is_healthy]
@DeveloperAPI
def add_actors(self, actors: List[ActorHandle]):
"""Add a list of actors to the pool.
Args:
actors: A list of ray remote actors to be added to the pool.
"""
for actor in actors:
self._actors[self._next_id] = actor
self._remote_actor_states[self._next_id] = self._ActorState()
self._next_id += 1
@DeveloperAPI
def remove_actor(self, actor_id: int) -> ActorHandle:
"""Remove an actor from the pool.
Args:
actor_id: ID of the actor to remove.
Returns:
Handle to the actor that was removed.
"""
actor = self._actors[actor_id]
# Remove the actor from the pool.
del self._actors[actor_id]
del self._remote_actor_states[actor_id]
self._restored_actors.discard(actor_id)
self._remove_async_state(actor_id)
return actor
@DeveloperAPI
def num_actors(self) -> int:
"""Return the total number of actors in the pool."""
return len(self._actors)
@DeveloperAPI
def num_healthy_actors(self) -> int:
"""Return the number of healthy remote actors."""
return sum(s.is_healthy for s in self._remote_actor_states.values())
@DeveloperAPI
def total_num_restarts(self) -> int:
"""Return the number of remote actors that have been restarted."""
return self._num_actor_restarts
@DeveloperAPI
def num_outstanding_async_reqs(self, tag: Optional[str] = None) -> int:
"""Return the number of outstanding async requests."""
return sum(
s.get_num_in_flight_requests(tag)
for s in self._remote_actor_states.values()
)
@DeveloperAPI
def is_actor_healthy(self, actor_id: int) -> bool:
"""Whether a remote actor is in healthy state.
Args:
actor_id: ID of the remote actor.
Returns:
True if the actor is healthy, False otherwise.
"""
if actor_id not in self._remote_actor_states:
raise ValueError(f"Unknown actor id: {actor_id}")
return self._remote_actor_states[actor_id].is_healthy
@DeveloperAPI
def set_actor_state(self, actor_id: int, healthy: bool) -> None:
"""Update activate state for a specific remote actor.
Args:
actor_id: ID of the remote actor.
healthy: Whether the remote actor is healthy.
"""
if actor_id not in self._remote_actor_states:
raise ValueError(f"Unknown actor id: {actor_id}")
was_healthy = self._remote_actor_states[actor_id].is_healthy
# Set from unhealthy to healthy -> Add to restored set.
if not was_healthy and healthy:
self._restored_actors.add(actor_id)
# Set from healthy to unhealthy -> Remove from restored set.
elif was_healthy and not healthy:
self._restored_actors.discard(actor_id)
self._remote_actor_states[actor_id].is_healthy = healthy
if not healthy:
# Remove any async states.
self._remove_async_state(actor_id)
@DeveloperAPI
def clear(self):
"""Clean up managed actors."""
for actor in self._actors.values():
ray.kill(actor)
self._actors.clear()
self._remote_actor_states.clear()
self._restored_actors.clear()
self._in_flight_req_to_actor_id.clear()
@DeveloperAPI
def foreach_actor(
self,
func: Union[Callable[[Any], Any], List[Callable[[Any], Any]], str, List[str]],
*,
kwargs: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
healthy_only: bool = True,
remote_actor_ids: Optional[List[int]] = None,
timeout_seconds: Optional[float] = None,
return_obj_refs: bool = False,
mark_healthy: bool = False,
) -> RemoteCallResults:
"""Calls the given function with each actor instance as arg.
Automatically marks actors unhealthy if they crash during the remote call.
Args:
func: A single Callable applied to all specified remote actors or a list
of Callables, that get applied on the list of specified remote actors.
In the latter case, both list of Callables and list of specified actors
must have the same length. Alternatively, you can use the name of the
remote method to be called, instead, or a list of remote method names.
kwargs: An optional single kwargs dict or a list of kwargs dict matching the
list of provided `func` or `remote_actor_ids`. In the first case (single
dict), use `kwargs` on all remote calls. The latter case (list of
dicts) allows you to define individualized kwarg dicts per actor.
healthy_only: If True, applies `func` only to actors currently tagged
"healthy", otherwise to all actors. If `healthy_only=False` and
`mark_healthy=True`, will send `func` to all actors and mark those
actors "healthy" that respond to the request within `timeout_seconds`
and are currently tagged as "unhealthy".
remote_actor_ids: Apply func on a selected set of remote actors. Use None
(default) for all actors.
timeout_seconds: Time to wait (in seconds) for results. Set this to 0.0 for
fire-and-forget. Set this to None (default) to wait infinitely (i.e. for
synchronous execution).
return_obj_refs: whether to return ObjectRef instead of actual results.
Note, for fault tolerance reasons, these returned ObjectRefs should
never be resolved with ray.get() outside of the context of this manager.
mark_healthy: Whether to mark all those actors healthy again that are
currently marked unhealthy AND that returned results from the remote
call (within the given `timeout_seconds`).
Note that actors are NOT set unhealthy, if they simply time out
(only if they return a RayActorError).
Also not that this setting is ignored if `healthy_only=True` (b/c this
setting only affects actors that are currently tagged as unhealthy).
Returns:
The list of return values of all calls to `func(actor)`. The values may be
actual data returned or exceptions raised during the remote call in the
format of RemoteCallResults.
"""
remote_actor_ids = remote_actor_ids or self.actor_ids()
if healthy_only:
func, kwargs, remote_actor_ids = self._filter_by_healthy_state(
func=func, kwargs=kwargs, remote_actor_ids=remote_actor_ids
)
# Send out remote requests.
remote_calls = self._call_actors(
func=func,
kwargs=kwargs,
remote_actor_ids=remote_actor_ids,
)
# Collect remote request results (if available given timeout and/or errors).
_, remote_results = self._fetch_result(
remote_actor_ids=remote_actor_ids,
remote_calls=remote_calls,
tags=[None] * len(remote_calls),
timeout_seconds=timeout_seconds,
return_obj_refs=return_obj_refs,
mark_healthy=mark_healthy,
)
return remote_results
@DeveloperAPI
def foreach_actor_async(
self,
func: Union[Callable[[Any], Any], List[Callable[[Any], Any]], str, List[str]],
tag: Optional[str] = None,
*,
kwargs: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
healthy_only: bool = True,
remote_actor_ids: Optional[List[int]] = None,
) -> int:
"""Calls given functions against each actors without waiting for results.
Args:
func: A single Callable applied to all specified remote actors or a list
of Callables, that get applied on the list of specified remote actors.
In the latter case, both list of Callables and list of specified actors
must have the same length. Alternatively, you can use the name of the
remote method to be called, instead, or a list of remote method names.
tag: A tag to identify the results from this async call.
kwargs: An optional single kwargs dict or a list of kwargs dict matching the
list of provided `func` or `remote_actor_ids`. In the first case (single
dict), use `kwargs` on all remote calls. The latter case (list of
dicts) allows you to define individualized kwarg dicts per actor.
healthy_only: If True, applies `func` only to actors currently tagged
"healthy", otherwise to all actors. If `healthy_only=False` and
later, `self.fetch_ready_async_reqs()` is called with
`mark_healthy=True`, will send `func` to all actors and mark those
actors "healthy" that respond to the request within `timeout_seconds`
and are currently tagged as "unhealthy".
remote_actor_ids: Apply func on a selected set of remote actors.
Note, for fault tolerance reasons, these returned ObjectRefs should
never be resolved with ray.get() outside of the context of this manager.
Returns:
The number of async requests that are actually fired.
"""
# TODO(avnishn, jungong): so thinking about this a bit more, it would be the
# best if we can attach multiple tags to an async all, like basically this
# parameter should be tags:
# For sync calls, tags would be ().
# For async call users, they can attached multiple tags for a single call, like
# ("rollout_worker", "sync_weight").
# For async fetch result, we can also specify a single, or list of tags. For
# example, ("eval", "sample") will fetch all the sample() calls on eval
# workers.
if not remote_actor_ids:
remote_actor_ids = self.actor_ids()
num_calls = (
len(func)
if isinstance(func, list)
else len(kwargs)
if isinstance(kwargs, list)
else len(remote_actor_ids)
)
# Perform round-robin assignment of all provided calls for any number of our
# actors. Note that this way, some actors might receive more than 1 request in
# this call.
if num_calls != len(remote_actor_ids):
remote_actor_ids = [
(self._current_actor_id + i) % self.num_actors()
for i in range(num_calls)
]
# Update our round-robin pointer.
self._current_actor_id += num_calls
self._current_actor_id %= self.num_actors()
if healthy_only:
func, kwargs, remote_actor_ids = self._filter_by_healthy_state(
func=func, kwargs=kwargs, remote_actor_ids=remote_actor_ids
)
num_calls_to_make: Dict[int, int] = defaultdict(lambda: 0)
# Drop calls to actors that are too busy for this specific tag.
if isinstance(func, list):
assert len(func) == len(remote_actor_ids)
limited_func = []
limited_kwargs = []
limited_remote_actor_ids = []
for i, (f, raid) in enumerate(zip(func, remote_actor_ids)):
num_outstanding_reqs_for_tag = self._remote_actor_states[
raid
].get_num_in_flight_requests(tag)
if (
num_outstanding_reqs_for_tag + num_calls_to_make[raid]
< self._max_remote_requests_in_flight_per_actor
):
num_calls_to_make[raid] += 1
k = kwargs[i] if isinstance(kwargs, list) else (kwargs or {})
limited_func.append(f)
limited_kwargs.append(k)
limited_remote_actor_ids.append(raid)
else:
limited_func = func
limited_kwargs = kwargs
limited_remote_actor_ids = []
for raid in remote_actor_ids:
num_outstanding_reqs_for_tag = self._remote_actor_states[
raid
].get_num_in_flight_requests(tag)
if (
num_outstanding_reqs_for_tag + num_calls_to_make[raid]
< self._max_remote_requests_in_flight_per_actor
):
num_calls_to_make[raid] += 1
limited_remote_actor_ids.append(raid)
if not limited_remote_actor_ids:
return 0
remote_calls = self._call_actors(
func=limited_func,
kwargs=limited_kwargs,
remote_actor_ids=limited_remote_actor_ids,
)
# Save these as outstanding requests.
for id, call in zip(limited_remote_actor_ids, remote_calls):
self._remote_actor_states[id].increment_requests(tag)
self._in_flight_req_to_actor_id[call] = (tag, id)
return len(remote_calls)
@DeveloperAPI
def fetch_ready_async_reqs(
self,
*,
tags: Union[str, List[str], Tuple[str, ...]] = (),
timeout_seconds: Optional[float] = 0.0,
return_obj_refs: bool = False,
mark_healthy: bool = False,
) -> RemoteCallResults:
"""Get results from outstanding async requests that are ready.
Automatically mark actors unhealthy if they fail to respond.
Note: If tags is an empty tuple then results from all ready async requests are
returned.
Args:
timeout_seconds: ray.get() timeout. Default is 0, which only fetched those
results (immediately) that are already ready.
tags: A tag or a list of tags to identify the results from this async call.
return_obj_refs: Whether to return ObjectRef instead of actual results.
mark_healthy: Whether to mark all those actors healthy again that are
currently marked unhealthy AND that returned results from the remote
call (within the given `timeout_seconds`).
Note that actors are NOT set to unhealthy, if they simply time out,
meaning take a longer time to fulfil the remote request. We only ever
mark an actor unhealthy, if they raise a RayActorError inside the remote
request.
Also note that this settings is ignored if the preceding
`foreach_actor_async()` call used the `healthy_only=True` argument (b/c
`mark_healthy` only affects actors that are currently tagged as
unhealthy).
Returns:
A list of return values of all calls to `func(actor)` that are ready.
The values may be actual data returned or exceptions raised during the
remote call in the format of RemoteCallResults.
"""
# Construct the list of in-flight requests filtered by tag.
remote_calls, remote_actor_ids, valid_tags = self._filter_calls_by_tag(tags)
ready, remote_results = self._fetch_result(
remote_actor_ids=remote_actor_ids,
remote_calls=remote_calls,
tags=valid_tags,
timeout_seconds=timeout_seconds,
return_obj_refs=return_obj_refs,
mark_healthy=mark_healthy,
)
for obj_ref, result in zip(ready, remote_results):
# Get the tag for this request and decrease outstanding request count by 1.
if obj_ref in self._in_flight_req_to_actor_id:
tag, actor_id = self._in_flight_req_to_actor_id[obj_ref]
self._remote_actor_states[result.actor_id].decrement_requests(tag)
# Remove this call from the in-flight list.
del self._in_flight_req_to_actor_id[obj_ref]
return remote_results
@DeveloperAPI
def foreach_actor_async_fetch_ready(
self,
func: Union[Callable[[Any], Any], List[Callable[[Any], Any]], str, List[str]],
tag: Optional[str] = None,
*,
kwargs: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
timeout_seconds: Optional[float] = 0.0,
return_obj_refs: bool = False,
mark_healthy: bool = False,
healthy_only: bool = True,
remote_actor_ids: Optional[List[int]] = None,
ignore_ray_errors: bool = True,
return_actor_ids: bool = False,
) -> List[Union[Tuple[int, Any], Any]]:
"""Calls the given function asynchronously and returns previous results if any.
This is a convenience function that calls `fetch_ready_async_reqs()` to get
previous results and then `foreach_actor_async()` to start new async calls.
Args:
func: A single Callable applied to all specified remote actors or a list
of Callables, that get applied on the list of specified remote actors.
In the latter case, both list of Callables and list of specified actors
must have the same length. Alternatively, you can use the name of the
remote method to be called, instead, or a list of remote method names.
tag: A tag to identify the results from this async call.
kwargs: An optional single kwargs dict or a list of kwargs dict matching the
list of provided `func` or `remote_actor_ids`. In the first case (single
dict), use `kwargs` on all remote calls. The latter case (list of
dicts) allows you to define individualized kwarg dicts per actor.
timeout_seconds: Time to wait for results from previous calls. Default is 0,
meaning those requests that are already ready.
return_obj_refs: Whether to return ObjectRef instead of actual results.
mark_healthy: Whether to mark all those actors healthy again that are
currently marked unhealthy AND that returned results from the remote
call (within the given `timeout_seconds`).
healthy_only: Apply `func` on known-to-be healthy actors only.
remote_actor_ids: Apply func on a selected set of remote actors.
ignore_ray_errors: Whether to ignore RayErrors in results.
return_actor_ids: Whether to return actor IDs in the results.
If True, the results will be a list of (actor_id, result) tuples.
If False, the results will be a list of results.
Returns:
The results from previous async requests that were ready.
"""
# First fetch any ready results from previous async calls
remote_results = self.fetch_ready_async_reqs(
tags=tag,
timeout_seconds=timeout_seconds,
return_obj_refs=return_obj_refs,
mark_healthy=mark_healthy,
)
# Then start new async calls
self.foreach_actor_async(
func,
tag=tag,
kwargs=kwargs,
healthy_only=healthy_only,
remote_actor_ids=remote_actor_ids,
)
# Handle errors the same way as fetch_ready_async_reqs does
FaultTolerantActorManager.handle_remote_call_result_errors(
remote_results,
ignore_ray_errors=ignore_ray_errors,
)
if return_actor_ids:
return [(r.actor_id, r.get()) for r in remote_results.ignore_errors()]
else:
return [r.get() for r in remote_results.ignore_errors()]
@staticmethod
def handle_remote_call_result_errors(
results_or_errors: RemoteCallResults,
*,
ignore_ray_errors: bool,
) -> None:
"""Checks given results for application errors and raises them if necessary.
Args:
results_or_errors: The results or errors to check.
ignore_ray_errors: Whether to ignore RayErrors within the elements of
`results_or_errors`.
"""
for result_or_error in results_or_errors:
# Good result.
if result_or_error.ok:
continue
# RayError, but we ignore it.
elif ignore_ray_errors:
logger.exception(result_or_error.get())
# Raise RayError.
else:
raise result_or_error.get()
@DeveloperAPI
def probe_unhealthy_actors(
self,
timeout_seconds: Optional[float] = None,
mark_healthy: bool = False,
) -> List[int]:
"""Ping all unhealthy actors to try bringing them back.
Args:
timeout_seconds: Timeout in seconds (to avoid pinging hanging workers
indefinitely).
mark_healthy: Whether to mark all those actors healthy again that are
currently marked unhealthy AND that respond to the `ping` remote request
(within the given `timeout_seconds`).
Note that actors are NOT set to unhealthy, if they simply time out,
meaning take a longer time to fulfil the remote request. We only ever
mark and actor unhealthy, if they return a RayActorError from the remote
request.
Also note that this settings is ignored if `healthy_only=True` (b/c this
setting only affects actors that are currently tagged as unhealthy).
Returns:
A list of actor IDs that were restored by the `ping.remote()` call PLUS
those actors that were previously restored via other remote requests.
The cached set of such previously restored actors will be erased in this
call.
"""
# Collect recently restored actors (from `self._fetch_result` calls other than
# the one triggered here via the `ping`).
already_restored_actors = list(self._restored_actors)
# Which actors are currently marked unhealthy?
unhealthy_actor_ids = [
actor_id
for actor_id in self.actor_ids()
if not self.is_actor_healthy(actor_id)
]
# Some unhealthy actors -> `ping()` all of them to trigger a new fetch and
# gather the just restored ones (b/c of a successful `ping` response).
just_restored_actors = []
if unhealthy_actor_ids:
remote_results = self.foreach_actor(
func=lambda actor: actor.ping(),
remote_actor_ids=unhealthy_actor_ids,
healthy_only=False, # We specifically want to ping unhealthy actors.
timeout_seconds=timeout_seconds,
return_obj_refs=False,
mark_healthy=mark_healthy,
)
just_restored_actors = [
result.actor_id for result in remote_results if result.ok
]
# Clear out previously restored actors (b/c of other successful request
# responses, outside of this method).
self._restored_actors.clear()
# Return all restored actors (previously and just).
return already_restored_actors + just_restored_actors
def _call_actors(
self,
func: Union[Callable[[Any], Any], List[Callable[[Any], Any]], str, List[str]],
*,
kwargs: Optional[Union[Dict[str, Any], List[Dict[str, Any]]]] = None,
remote_actor_ids: List[int] = None,
) -> List[ray.ObjectRef]:
"""Apply functions on a list of remote actors.
Args:
func: A single Callable applied to all specified remote actors or a list
of Callables, that get applied on the list of specified remote actors.
In the latter case, both list of Callables and list of specified actors
must have the same length. Alternatively, you can use the name of the
remote method to be called, instead, or a list of remote method names.
kwargs: An optional single kwargs dict or a list of kwargs dict matching the
list of provided `func` or `remote_actor_ids`. In the first case (single
dict), use `kwargs` on all remote calls. The latter case (list of
dicts) allows you to define individualized kwarg dicts per actor.
remote_actor_ids: Apply func on this selected set of remote actors.
Returns:
A list of ObjectRefs returned from the remote calls.
"""
if remote_actor_ids is None:
remote_actor_ids = self.actor_ids()
calls = []
if isinstance(func, list):
assert len(remote_actor_ids) == len(
func
), "Funcs must have the same number of callables as actor indices."
assert isinstance(
kwargs, list
), "If func is a list of functions, kwargs has to be a list of kwargs."
for i, (raid, f) in enumerate(zip(remote_actor_ids, func)):
if isinstance(f, str):
calls.append(
getattr(self._actors[raid], f).remote(
**(
kwargs[i]
if isinstance(kwargs, list)
else (kwargs or {})
)
)
)
else:
calls.append(self._actors[raid].apply.remote(f))
elif isinstance(func, str):
for i, raid in enumerate(remote_actor_ids):
calls.append(
getattr(self._actors[raid], func).remote(
**(kwargs[i] if isinstance(kwargs, list) else (kwargs or {}))
)
)
else:
for raid in remote_actor_ids:
calls.append(self._actors[raid].apply.remote(func=func, **kwargs or {}))
return calls
@DeveloperAPI
def _fetch_result(
self,
*,
remote_actor_ids: List[int],
remote_calls: List[ray.ObjectRef],
tags: List[str],
timeout_seconds: Optional[float] = None,
return_obj_refs: bool = False,
mark_healthy: bool = False,
) -> Tuple[List[ray.ObjectRef], RemoteCallResults]:
"""Try fetching results from remote actor calls.
Mark whether an actor is healthy or not accordingly.
Args:
remote_actor_ids: IDs of the actors these remote
calls were fired against.
remote_calls: List of remote calls to fetch.
tags: List of tags used for identifying the remote calls.
timeout_seconds: Timeout (in sec) for the ray.wait() call. Default is None,
meaning wait indefinitely for all results.
return_obj_refs: Whether to return ObjectRef instead of actual results.
mark_healthy: Whether to mark certain actors healthy based on the results
of these remote calls. Useful, for example, to make sure actors
do not come back without proper state restoration.
Returns:
A list of ready ObjectRefs mapping to the results of those calls.
"""
# Notice that we do not return the refs to any unfinished calls to the
# user, since it is not safe to handle such remote actor calls outside the
# context of this actor manager. These requests are simply dropped.
timeout = float(timeout_seconds) if timeout_seconds is not None else None
# This avoids calling ray.init() in the case of 0 remote calls.
# This is useful if the number of remote workers is 0.
if not remote_calls:
return [], RemoteCallResults()
readies, _ = ray.wait(
remote_calls,
num_returns=len(remote_calls),
timeout=timeout,
# Make sure remote results are fetched locally in parallel.
fetch_local=not return_obj_refs,
)
# Remote data should already be fetched to local object store at this point.
remote_results = RemoteCallResults()
for ready in readies:
# Find the corresponding actor ID for this remote call.
actor_id = remote_actor_ids[remote_calls.index(ready)]
tag = tags[remote_calls.index(ready)]
# If caller wants ObjectRefs, return directly without resolving.
if return_obj_refs:
remote_results.add_result(actor_id, ResultOrError(result=ready), tag)
continue
# Try getting the ready results.
try:
result = ray.get(ready)
# Any error type other than `RayError` happening during ray.get() ->
# Throw exception right here (we don't know how to handle these non-remote
# worker issues and should therefore crash).
except RayError as e:
# Return error to the user.
remote_results.add_result(actor_id, ResultOrError(error=e), tag)
# Mark the actor as unhealthy, take it out of service, and wait for
# Ray Core to restore it.
if self.is_actor_healthy(actor_id):
logger.error(
f"Ray error ({str(e)}), taking actor {actor_id} out of service."
)
self.set_actor_state(actor_id, healthy=False)
# If no errors, add result to `RemoteCallResults` to be returned.
else:
# Return valid result to the user.
remote_results.add_result(actor_id, ResultOrError(result=result), tag)
# Actor came back from an unhealthy state. Mark this actor as healthy
# and add it to our healthy set.
if mark_healthy and not self.is_actor_healthy(actor_id):
logger.warning(
f"Bringing previously unhealthy, now-healthy actor {actor_id} "
"back into service."
)
self.set_actor_state(actor_id, healthy=True)
self._num_actor_restarts += 1
# Make sure, to-be-returned results are sound.
assert len(readies) == len(remote_results)
return readies, remote_results
def _filter_by_healthy_state(
self,
*,
func: Union[Callable[[Any], Any], List[Callable[[Any], Any]]],
kwargs: Optional[Union[Dict, List[Dict]]] = None,
remote_actor_ids: List[int],
):
"""Filter out func and remote worker ids by actor state.
Args:
func: A single, or a list of Callables.
kwargs: An optional single kwargs dict or a list of kwargs dicts matching
the list of provided `func` or `remote_actor_ids`. In case of a single
dict, uses `kwargs` on all remote calls. In case of a list of dicts,
the given kwarg dicts are per actor `func` or per `remote_actor_ids`.
remote_actor_ids: IDs of potential remote workers to apply func on.
Returns:
A tuple of (filtered func, filtered remote worker ids).
"""
if isinstance(func, list):
assert len(remote_actor_ids) == len(
func
), "Func must have the same number of callables as remote actor ids."
# We are given a list of functions to apply.
# Need to filter the functions together with worker IDs.
temp_func = []
temp_remote_actor_ids = []
temp_kwargs = []
for i, (f, raid) in enumerate(zip(func, remote_actor_ids)):
if self.is_actor_healthy(raid):
k = kwargs[i] if isinstance(kwargs, list) else (kwargs or {})
temp_func.append(f)
temp_kwargs.append(k)
temp_remote_actor_ids.append(raid)
func = temp_func
kwargs = temp_kwargs
remote_actor_ids = temp_remote_actor_ids
else:
# Simply filter the worker IDs.
remote_actor_ids = [i for i in remote_actor_ids if self.is_actor_healthy(i)]
return func, kwargs, remote_actor_ids
def _filter_calls_by_tag(
self, tags: Optional[Union[str, List[str], Tuple[str, ...]]] = None
) -> Tuple[List[ray.ObjectRef], List[ActorHandle], List[str]]:
"""Return all the in flight requests that match the given tags, if any.
Args:
tags: A str or a list/tuple of str. If tags is empty or None, return all the in
flight requests.
Returns:
A tuple consisting of a list of the remote calls that match the tag(s),
a list of the corresponding remote actor IDs for these calls (same length),
and a list of the tags corresponding to these calls (same length).
"""
if tags is None:
tags = set()
elif isinstance(tags, str):
tags = {tags}
elif isinstance(tags, (list, tuple)):
tags = set(tags)
else:
raise ValueError(
f"tags must be either a str or a list/tuple of str, got {type(tags)}."
)
remote_calls = []
remote_actor_ids = []
valid_tags = []
for call, (tag, actor_id) in self._in_flight_req_to_actor_id.items():
# the default behavior is to return all ready results.
if len(tags) == 0 or tag in tags:
remote_calls.append(call)
remote_actor_ids.append(actor_id)
valid_tags.append(tag)
return remote_calls, remote_actor_ids, valid_tags
def _remove_async_state(self, actor_id: int):
"""Remove internal async state of for a given actor.
This is called when an actor is removed from the pool or being marked
unhealthy.
Args:
actor_id: The id of the actor.
"""
# Remove any outstanding async requests for this actor.
# Use `list` here to not change a looped generator while we mutate the
# underlying dict.
for req, (tag, id) in list(self._in_flight_req_to_actor_id.items()):
if id == actor_id:
del self._in_flight_req_to_actor_id[req]
# Clear all tag-based request counts for this actor
if actor_id in self._remote_actor_states:
self._remote_actor_states[
actor_id
].num_in_flight_async_requests_by_tag.clear()
def actors(self):
# TODO(jungong) : remove this API once EnvRunnerGroup.remote_workers()
# and EnvRunnerGroup._remote_workers() are removed.
return self._actors
| FaultTolerantActorManager |
python | pydantic__pydantic | tests/mypy/outputs/mypy-plugin_ini/plugin_fail_baseConfig.py | {
"start": 5166,
"end": 5449
} | class ____(BaseModel):
x: str = Field(..., alias=x_alias)
z: int
class Config:
validate_by_name = True
DynamicAliasModel2(y='y', z=1)
# MYPY: error: Missing named argument "x" for "DynamicAliasModel2" [call-arg]
DynamicAliasModel2(x='y', z=1)
| DynamicAliasModel2 |
python | pydata__xarray | xarray/coding/variables.py | {
"start": 24699,
"end": 25102
} | class ____(VariableCoder):
def encode(self):
raise NotImplementedError
def decode(self, variable: Variable, name: T_Name = None) -> Variable:
if variable.dtype.kind == "O" and variable.encoding.get("dtype", False) is str:
variable = variable.astype(variable.encoding["dtype"])
return variable
else:
return variable
| ObjectVLenStringCoder |
python | huggingface__transformers | tests/quantization/compressed_tensors_integration/test_compressed_tensors.py | {
"start": 368,
"end": 4173
} | class ____(unittest.TestCase):
tinyllama_w8a16 = "nm-testing/tinyllama-w8a16-dense"
tinyllama_w4a16 = "nm-testing/tinyllama-w4a16-compressed"
tinyllama_w8a8 = "nm-testing/tinyllama-w8a8-compressed"
llama3_8b_fp8 = "nm-testing/Meta-Llama-3-8B-Instruct-fp8-hf_compat"
prompt = "Paris is the capital of which country?"
def tearDown(self):
gc.collect()
backend_empty_cache(torch_device)
gc.collect()
def test_config_args(self):
with self.assertRaises(ValueError):
# passing quant scheme directly is not allowed
CompressedTensorsConfig(config_groups={"weights": {"num_bits": 8}})
CompressedTensorsConfig(
config_groups={"FP8": ["Linear"]},
ignore=["lm_head"],
quantization_status="frozen",
sparsity_config={"format": "dense"},
)
def test_config_to_from_dict(self):
config = CompressedTensorsConfig(config_groups={"FP8": ["Linear"]}, sparsity_config={"format": "dense"})
config_dict = config.to_dict()
config_from_dict = CompressedTensorsConfig.from_dict(config_dict)
from compressed_tensors import QuantizationConfig, SparsityCompressionConfig
self.assertIsInstance(config_from_dict.quantization_config, QuantizationConfig)
self.assertIsInstance(config_from_dict.sparsity_config, SparsityCompressionConfig)
def test_tinyllama_w8a8(self):
expected_out = "<s> Paris is the capital of which country?\n\n**A) 10** Paris is the capital of which country?\n\n**B) 11** Paris is the capital of which country?\n\n**C) 1"
self._test_quantized_model(self.tinyllama_w8a8, expected_out)
def test_tinyllama_w4a16(self):
expected_out = "<s> Paris is the capital of which country?\nAnswer: Paris is the capital of France.\nQuestion: Which country is the capital of which city?\nAnswer: The capital of the city of New York is New York.\nQuestion: Which"
self._test_quantized_model(self.tinyllama_w4a16, expected_out)
def test_tinyllama_w8a16(self):
expected_out = "<s> Paris is the capital of which country?\nA. France\nB. Germany\nC. Spain\nD. Italy\nE. Switzerland\nQ10. Which of the following is not a country in the European Union?\nA."
self._test_quantized_model(self.tinyllama_w8a16, expected_out)
def test_llama_8b_fp8(self):
expected_out = "<|begin_of_text|>Paris is the capital of which country? France\nWhat is the name of the famous art museum in Paris? The Louvre\nWhat is the name of the famous bridge in Paris? Pont des Arts\nWhat is the name of the famous opera? "
self._test_quantized_model(self.llama3_8b_fp8, expected_out)
def _test_quantized_model(self, model_name: str, expected_output: str):
"""Carry out generation"""
quantized_model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
device = quantized_model.device
self.assertIsNotNone(
quantized_model.config.quantization_config,
"quantization_config should not be None",
)
self.assertTrue(
any(
key
for key, tensor in quantized_model.state_dict().items()
if "scale" in key and not torch.all(tensor == 1.0)
),
"quantized model should load a non-trivial scale into the state dict",
)
inputs = tokenizer(self.prompt, return_tensors="pt").to(device)
generated_ids = quantized_model.generate(**inputs, max_length=50, do_sample=False)
outputs = tokenizer.batch_decode(generated_ids)
self.assertIsNotNone(outputs)
self.assertEqual(outputs[0], expected_output)
| CompressedTensorsTest |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_memorystore.py | {
"start": 25184,
"end": 29040
} | class ____(GoogleCloudBaseOperator):
"""
Lists all Redis instances owned by a project in either the specified location (region) or all locations.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:CloudMemorystoreListInstancesOperator`
:param location: The location of the Cloud Memorystore instance (for example europe-west1)
If it is specified as ``-`` (wildcard), then all regions available to the project are
queried, and the results are aggregated.
:param page_size: The maximum number of resources contained in the underlying API response. If page
streaming is performed per- resource, this parameter does not affect the return value. If page
streaming is performed per-page, this determines the maximum number of resources in a page.
:param project_id: Project ID of the project that contains the instance. If set
to None or missing, the default project_id from the Google Cloud connection is used.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
:param gcp_conn_id: The connection ID to use connecting to Google Cloud.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = (
"location",
"page_size",
"project_id",
"retry",
"timeout",
"metadata",
"gcp_conn_id",
"impersonation_chain",
)
operator_extra_links = (RedisInstanceListLink(),)
def __init__(
self,
*,
location: str,
page_size: int,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.location = location
self.page_size = page_size
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudMemorystoreHook(
gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain
)
result = hook.list_instances(
location=self.location,
page_size=self.page_size,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
RedisInstanceListLink.persist(
context=context,
project_id=self.project_id or hook.project_id,
)
instances = [Instance.to_dict(a) for a in result]
return instances
| CloudMemorystoreListInstancesOperator |
python | pypa__pip | src/pip/_vendor/distlib/util.py | {
"start": 51863,
"end": 52967
} | class ____(xmlrpclib.Transport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.Transport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, x509 = self.get_host_info(host)
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPConnection(h)
return self._connection[1]
if ssl:
class SafeTransport(xmlrpclib.SafeTransport):
def __init__(self, timeout, use_datetime=0):
self.timeout = timeout
xmlrpclib.SafeTransport.__init__(self, use_datetime)
def make_connection(self, host):
h, eh, kwargs = self.get_host_info(host)
if not kwargs:
kwargs = {}
kwargs['timeout'] = self.timeout
if not self._connection or host != self._connection[0]:
self._extra_headers = eh
self._connection = host, httplib.HTTPSConnection(h, None, **kwargs)
return self._connection[1]
| Transport |
python | lepture__authlib | authlib/oidc/registration/claims.py | {
"start": 133,
"end": 17264
} | class ____(BaseClaims):
REGISTERED_CLAIMS = [
"token_endpoint_auth_signing_alg",
"application_type",
"sector_identifier_uri",
"subject_type",
"id_token_signed_response_alg",
"id_token_encrypted_response_alg",
"id_token_encrypted_response_enc",
"userinfo_signed_response_alg",
"userinfo_encrypted_response_alg",
"userinfo_encrypted_response_enc",
"default_max_age",
"require_auth_time",
"default_acr_values",
"initiate_login_uri",
"request_object_signing_alg",
"request_object_encryption_alg",
"request_object_encryption_enc",
"request_uris",
]
def validate(self):
self._validate_essential_claims()
self.validate_token_endpoint_auth_signing_alg()
self.validate_application_type()
self.validate_sector_identifier_uri()
self.validate_subject_type()
self.validate_id_token_signed_response_alg()
self.validate_id_token_encrypted_response_alg()
self.validate_id_token_encrypted_response_enc()
self.validate_userinfo_signed_response_alg()
self.validate_userinfo_encrypted_response_alg()
self.validate_userinfo_encrypted_response_enc()
self.validate_default_max_age()
self.validate_require_auth_time()
self.validate_default_acr_values()
self.validate_initiate_login_uri()
self.validate_request_object_signing_alg()
self.validate_request_object_encryption_alg()
self.validate_request_object_encryption_enc()
self.validate_request_uris()
def _validate_uri(self, key):
uri = self.get(key)
uris = uri if isinstance(uri, list) else [uri]
for uri in uris:
if uri and not is_valid_url(uri):
raise InvalidClaimError(key)
@classmethod
def get_claims_options(self, metadata):
"""Generate claims options validation from Authorization Server metadata."""
options = {}
if acr_values_supported := metadata.get("acr_values_supported"):
def _validate_default_acr_values(claims, value):
return not value or set(value).issubset(set(acr_values_supported))
options["default_acr_values"] = {"validate": _validate_default_acr_values}
values_mapping = {
"token_endpoint_auth_signing_alg_values_supported": "token_endpoint_auth_signing_alg",
"subject_types_supported": "subject_type",
"id_token_signing_alg_values_supported": "id_token_signed_response_alg",
"id_token_encryption_alg_values_supported": "id_token_encrypted_response_alg",
"id_token_encryption_enc_values_supported": "id_token_encrypted_response_enc",
"userinfo_signing_alg_values_supported": "userinfo_signed_response_alg",
"userinfo_encryption_alg_values_supported": "userinfo_encrypted_response_alg",
"userinfo_encryption_enc_values_supported": "userinfo_encrypted_response_enc",
"request_object_signing_alg_values_supported": "request_object_signing_alg",
"request_object_encryption_alg_values_supported": "request_object_encryption_alg",
"request_object_encryption_enc_values_supported": "request_object_encryption_enc",
}
def make_validator(metadata_claim_values):
def _validate(claims, value):
return not value or value in metadata_claim_values
return _validate
for metadata_claim_name, request_claim_name in values_mapping.items():
if metadata_claim_values := metadata.get(metadata_claim_name):
options[request_claim_name] = {
"validate": make_validator(metadata_claim_values)
}
return options
def validate_token_endpoint_auth_signing_alg(self):
"""JWS [JWS] alg algorithm [JWA] that MUST be used for signing the JWT [JWT]
used to authenticate the Client at the Token Endpoint for the private_key_jwt
and client_secret_jwt authentication methods.
All Token Requests using these authentication methods from this Client MUST be
rejected, if the JWT is not signed with this algorithm. Servers SHOULD support
RS256. The value none MUST NOT be used. The default, if omitted, is that any
algorithm supported by the OP and the RP MAY be used.
"""
if self.get("token_endpoint_auth_signing_alg") == "none":
raise InvalidClaimError("token_endpoint_auth_signing_alg")
self._validate_claim_value("token_endpoint_auth_signing_alg")
def validate_application_type(self):
"""Kind of the application.
The default, if omitted, is web. The defined values are native or web. Web
Clients using the OAuth Implicit Grant Type MUST only register URLs using the
https scheme as redirect_uris; they MUST NOT use localhost as the hostname.
Native Clients MUST only register redirect_uris using custom URI schemes or
loopback URLs using the http scheme; loopback URLs use localhost or the IP
loopback literals 127.0.0.1 or [::1] as the hostname. Authorization Servers MAY
place additional constraints on Native Clients. Authorization Servers MAY
reject Redirection URI values using the http scheme, other than the loopback
case for Native Clients. The Authorization Server MUST verify that all the
registered redirect_uris conform to these constraints. This prevents sharing a
Client ID across different types of Clients.
"""
self.setdefault("application_type", "web")
if self.get("application_type") not in ("web", "native"):
raise InvalidClaimError("application_type")
self._validate_claim_value("application_type")
def validate_sector_identifier_uri(self):
"""URL using the https scheme to be used in calculating Pseudonymous Identifiers
by the OP.
The URL references a file with a single JSON array of redirect_uri values.
Please see Section 5. Providers that use pairwise sub (subject) values SHOULD
utilize the sector_identifier_uri value provided in the Subject Identifier
calculation for pairwise identifiers.
"""
self._validate_uri("sector_identifier_uri")
def validate_subject_type(self):
"""subject_type requested for responses to this Client.
The subject_types_supported discovery parameter contains a list of the supported
subject_type values for the OP. Valid types include pairwise and public.
"""
self._validate_claim_value("subject_type")
def validate_id_token_signed_response_alg(self):
"""JWS alg algorithm [JWA] REQUIRED for signing the ID Token issued to this
Client.
The value none MUST NOT be used as the ID Token alg value unless the Client uses
only Response Types that return no ID Token from the Authorization Endpoint
(such as when only using the Authorization Code Flow). The default, if omitted,
is RS256. The public key for validating the signature is provided by retrieving
the JWK Set referenced by the jwks_uri element from OpenID Connect Discovery 1.0
[OpenID.Discovery].
"""
if self.get(
"id_token_signed_response_alg"
) == "none" and "id_token" in self.get("response_type", ""):
raise InvalidClaimError("id_token_signed_response_alg")
self.setdefault("id_token_signed_response_alg", "RS256")
self._validate_claim_value("id_token_signed_response_alg")
def validate_id_token_encrypted_response_alg(self):
"""JWE alg algorithm [JWA] REQUIRED for encrypting the ID Token issued to this
Client.
If this is requested, the response will be signed then encrypted, with the
result being a Nested JWT, as defined in [JWT]. The default, if omitted, is that
no encryption is performed.
"""
self._validate_claim_value("id_token_encrypted_response_alg")
def validate_id_token_encrypted_response_enc(self):
"""JWE enc algorithm [JWA] REQUIRED for encrypting the ID Token issued to this
Client.
If id_token_encrypted_response_alg is specified, the default
id_token_encrypted_response_enc value is A128CBC-HS256. When
id_token_encrypted_response_enc is included, id_token_encrypted_response_alg
MUST also be provided.
"""
if self.get("id_token_encrypted_response_enc") and not self.get(
"id_token_encrypted_response_alg"
):
raise InvalidClaimError("id_token_encrypted_response_enc")
if self.get("id_token_encrypted_response_alg"):
self.setdefault("id_token_encrypted_response_enc", "A128CBC-HS256")
self._validate_claim_value("id_token_encrypted_response_enc")
def validate_userinfo_signed_response_alg(self):
"""JWS alg algorithm [JWA] REQUIRED for signing UserInfo Responses.
If this is specified, the response will be JWT [JWT] serialized, and signed
using JWS. The default, if omitted, is for the UserInfo Response to return the
Claims as a UTF-8 [RFC3629] encoded JSON object using the application/json
content-type.
"""
self._validate_claim_value("userinfo_signed_response_alg")
def validate_userinfo_encrypted_response_alg(self):
"""JWE [JWE] alg algorithm [JWA] REQUIRED for encrypting UserInfo Responses.
If both signing and encryption are requested, the response will be signed then
encrypted, with the result being a Nested JWT, as defined in [JWT]. The default,
if omitted, is that no encryption is performed.
"""
self._validate_claim_value("userinfo_encrypted_response_alg")
def validate_userinfo_encrypted_response_enc(self):
"""JWE enc algorithm [JWA] REQUIRED for encrypting UserInfo Responses.
If userinfo_encrypted_response_alg is specified, the default
userinfo_encrypted_response_enc value is A128CBC-HS256. When
userinfo_encrypted_response_enc is included, userinfo_encrypted_response_alg
MUST also be provided.
"""
if self.get("userinfo_encrypted_response_enc") and not self.get(
"userinfo_encrypted_response_alg"
):
raise InvalidClaimError("userinfo_encrypted_response_enc")
if self.get("userinfo_encrypted_response_alg"):
self.setdefault("userinfo_encrypted_response_enc", "A128CBC-HS256")
self._validate_claim_value("userinfo_encrypted_response_enc")
def validate_default_max_age(self):
"""Default Maximum Authentication Age.
Specifies that the End-User MUST be actively authenticated if the End-User was
authenticated longer ago than the specified number of seconds. The max_age
request parameter overrides this default value. If omitted, no default Maximum
Authentication Age is specified.
"""
if self.get("default_max_age") is not None and not isinstance(
self["default_max_age"], (int, float)
):
raise InvalidClaimError("default_max_age")
self._validate_claim_value("default_max_age")
def validate_require_auth_time(self):
"""Boolean value specifying whether the auth_time Claim in the ID Token is
REQUIRED.
It is REQUIRED when the value is true. (If this is false, the auth_time Claim
can still be dynamically requested as an individual Claim for the ID Token using
the claims request parameter described in Section 5.5.1 of OpenID Connect Core
1.0 [OpenID.Core].) If omitted, the default value is false.
"""
self.setdefault("require_auth_time", False)
if self.get("require_auth_time") is not None and not isinstance(
self["require_auth_time"], bool
):
raise InvalidClaimError("require_auth_time")
self._validate_claim_value("require_auth_time")
def validate_default_acr_values(self):
"""Default requested Authentication Context Class Reference values.
Array of strings that specifies the default acr values that the OP is being
requested to use for processing requests from this Client, with the values
appearing in order of preference. The Authentication Context Class satisfied by
the authentication performed is returned as the acr Claim Value in the issued ID
Token. The acr Claim is requested as a Voluntary Claim by this parameter. The
acr_values_supported discovery element contains a list of the supported acr
values supported by the OP. Values specified in the acr_values request parameter
or an individual acr Claim request override these default values.
"""
self._validate_claim_value("default_acr_values")
def validate_initiate_login_uri(self):
"""RI using the https scheme that a third party can use to initiate a login by
the RP, as specified in Section 4 of OpenID Connect Core 1.0 [OpenID.Core].
The URI MUST accept requests via both GET and POST. The Client MUST understand
the login_hint and iss parameters and SHOULD support the target_link_uri
parameter.
"""
self._validate_uri("initiate_login_uri")
def validate_request_object_signing_alg(self):
"""JWS [JWS] alg algorithm [JWA] that MUST be used for signing Request Objects
sent to the OP.
All Request Objects from this Client MUST be rejected, if not signed with this
algorithm. Request Objects are described in Section 6.1 of OpenID Connect Core
1.0 [OpenID.Core]. This algorithm MUST be used both when the Request Object is
passed by value (using the request parameter) and when it is passed by reference
(using the request_uri parameter). Servers SHOULD support RS256. The value none
MAY be used. The default, if omitted, is that any algorithm supported by the OP
and the RP MAY be used.
"""
self._validate_claim_value("request_object_signing_alg")
def validate_request_object_encryption_alg(self):
"""JWE [JWE] alg algorithm [JWA] the RP is declaring that it may use for
encrypting Request Objects sent to the OP.
This parameter SHOULD be included when symmetric encryption will be used, since
this signals to the OP that a client_secret value needs to be returned from
which the symmetric key will be derived, that might not otherwise be returned.
The RP MAY still use other supported encryption algorithms or send unencrypted
Request Objects, even when this parameter is present. If both signing and
encryption are requested, the Request Object will be signed then encrypted, with
the result being a Nested JWT, as defined in [JWT]. The default, if omitted, is
that the RP is not declaring whether it might encrypt any Request Objects.
"""
self._validate_claim_value("request_object_encryption_alg")
def validate_request_object_encryption_enc(self):
"""JWE enc algorithm [JWA] the RP is declaring that it may use for encrypting
Request Objects sent to the OP.
If request_object_encryption_alg is specified, the default
request_object_encryption_enc value is A128CBC-HS256. When
request_object_encryption_enc is included, request_object_encryption_alg MUST
also be provided.
"""
if self.get("request_object_encryption_enc") and not self.get(
"request_object_encryption_alg"
):
raise InvalidClaimError("request_object_encryption_enc")
if self.get("request_object_encryption_alg"):
self.setdefault("request_object_encryption_enc", "A128CBC-HS256")
self._validate_claim_value("request_object_encryption_enc")
def validate_request_uris(self):
"""Array of request_uri values that are pre-registered by the RP for use at the
OP.
These URLs MUST use the https scheme unless the target Request Object is signed
in a way that is verifiable by the OP. Servers MAY cache the contents of the
files referenced by these URIs and not retrieve them at the time they are used
in a request. OPs can require that request_uri values used be pre-registered
with the require_request_uri_registration discovery parameter. If the contents
of the request file could ever change, these URI values SHOULD include the
base64url-encoded SHA-256 hash value of the file contents referenced by the URI
as the value of the URI fragment. If the fragment value used for a URI changes,
that signals the server that its cached value for that URI with the old fragment
value is no longer valid.
"""
self._validate_uri("request_uris")
| ClientMetadataClaims |
python | sphinx-doc__sphinx | sphinx/ext/autosummary/__init__.py | {
"start": 5743,
"end": 21388
} | class ____(SphinxDirective):
"""Pretty table containing short signatures and summaries of functions etc.
autosummary can also optionally generate a hidden toctree:: node.
"""
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
has_content = True
option_spec: ClassVar[OptionSpec] = {
'caption': directives.unchanged_required,
'class': directives.class_option,
'toctree': directives.unchanged,
'nosignatures': directives.flag,
'recursive': directives.flag,
'signatures': directives.unchanged,
'template': directives.unchanged,
}
def run(self) -> list[Node]:
names = [
x.strip().split()[0]
for x in self.content
if x.strip() and re.search(r'^[~a-zA-Z_]', x.strip()[0])
]
items = self.get_items(names)
nodes = self.get_table(items)
if 'toctree' in self.options:
dirname = posixpath.dirname(self.env.current_document.docname)
tree_prefix = self.options['toctree'].strip()
docnames = []
excluded = Matcher(self.config.exclude_patterns)
filename_map = self.config.autosummary_filename_map
for _name, _sig, _summary, real_name in items:
real_name = filename_map.get(real_name, real_name)
docname = posixpath.join(tree_prefix, real_name)
docname = posixpath.normpath(posixpath.join(dirname, docname))
if docname not in self.env.found_docs:
if excluded(str(self.env.doc2path(docname, False))):
msg = __(
'autosummary references excluded document %r. Ignored.'
)
else:
msg = __(
'autosummary: stub file not found %r. '
'Check your autosummary_generate setting.'
)
logger.warning(msg, real_name, location=self.get_location())
continue
docnames.append(docname)
if docnames:
tocnode = addnodes.toctree()
tocnode['includefiles'] = docnames
tocnode['entries'] = [(None, docn) for docn in docnames]
tocnode['maxdepth'] = -1
tocnode['glob'] = None
tocnode['caption'] = self.options.get('caption')
nodes.append(autosummary_toc('', '', tocnode))
if 'toctree' not in self.options and 'caption' in self.options:
logger.warning(
__('A captioned autosummary requires :toctree: option. ignored.'),
location=nodes[-1],
)
return nodes
def import_by_name(
self, name: str, prefixes: list[str | None]
) -> tuple[str, Any, Any, str]:
with mock(self.config.autosummary_mock_imports):
try:
return import_by_name(name, prefixes)
except ImportExceptionGroup as exc:
# check existence of instance attribute
try:
return import_ivar_by_name(name, prefixes)
except ImportError as exc2:
if exc2.__cause__:
errors: list[BaseException] = [*exc.exceptions, exc2.__cause__]
else:
errors = [*exc.exceptions, exc2]
raise ImportExceptionGroup(exc.args[0], errors) from None
def get_items(self, names: list[str]) -> list[tuple[str, str | None, str, str]]:
"""Try to import the given names, and return a list of
``[(name, signature, summary_string, real_name), ...]``.
signature is already formatted and is None if :nosignatures: option was given.
"""
prefixes = get_import_prefixes_from_env(self.env)
items: list[tuple[str, str | None, str, str]] = []
signatures_option = self.options.get('signatures')
if signatures_option is None:
signatures_option = 'none' if 'nosignatures' in self.options else 'long'
if signatures_option not in {'none', 'short', 'long'}:
msg = (
'Invalid value for autosummary :signatures: option: '
f"{signatures_option!r}. Valid values are 'none', 'short', 'long'"
)
raise ValueError(msg)
document_settings = self.state.document.settings
env = self.env
config = _AutodocConfig.from_config(env.config)
current_document = env.current_document
events = env.events
get_attr = _AutodocAttrGetter(env._registry.autodoc_attrgetters)
opts = _AutoDocumenterOptions()
ref_context = env.ref_context
reread_always = env.reread_always
max_item_chars = 50
for name in names:
display_name = name
if name.startswith('~'):
name = name[1:]
display_name = name.split('.')[-1]
try:
real_name, obj, parent, modname = self.import_by_name(
name, prefixes=prefixes
)
except ImportExceptionGroup as exc:
errors = list({f'* {type(e).__name__}: {e}' for e in exc.exceptions})
logger.warning(
__('autosummary: failed to import %s.\nPossible hints:\n%s'),
name,
'\n'.join(errors),
location=self.get_location(),
)
continue
obj_type = _get_documenter(obj, parent)
if isinstance(obj, ModuleType):
full_name = real_name
else:
# give explicitly separated module name, so that members
# of inner classes can be documented
full_name = f'{modname}::{real_name[len(modname) + 1 :]}'
# NB. using full_name here is important, since Documenters
# handle module prefixes slightly differently
props = _load_object_by_name(
name=full_name,
objtype=obj_type,
current_document=current_document,
config=config,
events=events,
get_attr=get_attr,
options=opts,
ref_context=ref_context,
reread_always=reread_always,
)
if props is None:
logger.warning(
__('failed to import object %s'),
real_name,
location=self.get_location(),
)
items.append((display_name, '', '', real_name))
continue
# -- Grab the signature
if signatures_option == 'none':
sig = None
elif not props.signatures:
sig = ''
elif signatures_option == 'short':
sig = '()' if props.signatures == ('()',) else '(…)'
else: # signatures_option == 'long'
max_chars = max(10, max_item_chars - len(display_name))
sig = mangle_signature('\n'.join(props.signatures), max_chars=max_chars)
# -- Grab the summary
# get content from docstrings or attribute documentation
summary = extract_summary(props.docstring_lines, document_settings)
items.append((display_name, sig, summary, real_name))
return items
def get_table(self, items: list[tuple[str, str | None, str, str]]) -> list[Node]:
"""Generate a proper list of table nodes for autosummary:: directive.
*items* is a list produced by :meth:`get_items`.
"""
table_spec = addnodes.tabular_col_spec()
table_spec['spec'] = r'\X{1}{2}\X{1}{2}'
table = autosummary_table('')
real_table = nodes.table(
'', classes=['autosummary', 'longtable', *self.options.get('class', ())]
)
table.append(real_table)
group = nodes.tgroup('', cols=2)
real_table.append(group)
group.append(nodes.colspec('', colwidth=10))
group.append(nodes.colspec('', colwidth=90))
body = nodes.tbody('')
group.append(body)
def append_row(*column_texts: str) -> None:
row = nodes.row('')
source, line = self.state_machine.get_source_and_line()
for text in column_texts:
vl = StringList([text], f'{source}:{line}:<autosummary>')
with switch_source_input(self.state, vl):
col_nodes = nested_parse_to_nodes(
self.state, vl, allow_section_headings=False
)
if col_nodes and isinstance(col_nodes[0], nodes.paragraph):
node = col_nodes[0]
else:
node = nodes.paragraph('')
row.append(nodes.entry('', node))
body.append(row)
for name, sig, summary, real_name in items:
qualifier = 'obj'
if sig is None:
col1 = f':py:{qualifier}:`{name} <{real_name}>`'
else:
col1 = f':py:{qualifier}:`{name} <{real_name}>`\\ {rst.escape(sig)}'
col2 = summary
append_row(col1, col2)
return [table_spec, table]
def strip_arg_typehint(s: str) -> str:
"""Strip a type hint from argument definition."""
return s.partition(':')[0].strip()
def _cleanup_signature(s: str) -> str:
"""Clean up signature using inspect.signautre() for mangle_signature()"""
try:
sig = signature_from_str(s)
parameters = list(sig.parameters.values())
for i, param in enumerate(parameters):
if param.annotation is not Parameter.empty:
# Remove typehints
param = param.replace(annotation=Parameter.empty)
if param.default is not Parameter.empty:
# Replace default value by "None"
param = param.replace(default=None)
parameters[i] = param
sig = sig.replace(parameters=parameters, return_annotation=Parameter.empty)
return str(sig)
except Exception:
# Return the original signature string if failed to clean (ex. parsing error)
return s
def mangle_signature(sig: str, max_chars: int = 30) -> str:
"""Reformat a function signature to a more compact form."""
s = _cleanup_signature(sig)
# Strip return type annotation
s = re.sub(r'\)\s*->\s.*$', ')', s)
# Remove parenthesis
s = re.sub(r'^\((.*)\)$', r'\1', s).strip()
# Strip literals (which can contain things that confuse the code below)
s = re.sub(r'\\\\', '', s) # escaped backslash (maybe inside string)
s = re.sub(r"\\'", '', s) # escaped single quote
s = re.sub(r'\\"', '', s) # escaped double quote
s = re.sub(r"'[^']*'", '', s) # string literal (w/ single quote)
s = re.sub(r'"[^"]*"', '', s) # string literal (w/ double quote)
# Strip complex objects (maybe default value of arguments)
while re.search(
r'\([^)]*\)', s
): # contents of parenthesis (ex. NamedTuple(attr=...))
s = re.sub(r'\([^)]*\)', '', s)
while re.search(r'<[^>]*>', s): # contents of angle brackets (ex. <object>)
s = re.sub(r'<[^>]*>', '', s)
while re.search(r'{[^}]*}', s): # contents of curly brackets (ex. dict)
s = re.sub(r'{[^}]*}', '', s)
# Parse the signature to arguments + options
args: list[str] = []
opts: list[str] = []
opt_re = re.compile(r'^(.*, |)([a-zA-Z0-9_*]+)\s*=\s*')
while s:
m = opt_re.search(s)
if not m:
# The rest are arguments
args = s.split(', ')
break
opts.insert(0, m.group(2))
s = m.group(1)[:-2]
# Strip typehints
for i, arg in enumerate(args):
args[i] = strip_arg_typehint(arg)
for i, opt in enumerate(opts):
opts[i] = strip_arg_typehint(opt)
# Produce a more compact signature
sig = limited_join(', ', args, max_chars=max_chars - 2)
if opts:
if not sig:
sig = '[%s]' % limited_join(', ', opts, max_chars=max_chars - 4)
elif len(sig) < max_chars - 4 - 2 - 3:
sig += '[, %s]' % limited_join(
', ', opts, max_chars=max_chars - len(sig) - 4 - 2
)
return '(%s)' % sig
def extract_summary(doc: Sequence[str], settings: Any) -> str:
"""Extract summary from docstring."""
# Find the first stanza (heading, sentence, paragraph, etc.).
# If there's a blank line, then we can assume that the stanza has ended,
# so anything after shouldn't be part of the summary.
first_stanza = []
content_started = False
for line in doc:
is_blank_line = not line or line.isspace()
if not content_started:
# Skip any blank lines at the start
if is_blank_line:
continue
content_started = True
if content_started:
if is_blank_line:
break
first_stanza.append(line)
if not first_stanza:
return ''
# parse the docstring
node = _parse_summary(first_stanza, settings)
if isinstance(node[0], nodes.section):
# document starts with a section heading, so use that.
summary = node[0].astext().strip()
elif not isinstance(node[0], nodes.paragraph):
# document starts with non-paragraph: pick up the first line
summary = first_stanza[0].strip()
else:
# Try to find the "first sentence", which may span multiple lines
sentences = periods_re.split(' '.join(first_stanza))
if len(sentences) == 1:
summary = sentences[0].strip()
else:
summary = ''
for i in range(len(sentences)):
summary = '. '.join(sentences[: i + 1]).rstrip('.') + '.'
node[:] = []
node = _parse_summary(first_stanza, settings)
if summary.endswith(WELL_KNOWN_ABBREVIATIONS):
pass
elif not any(node.findall(nodes.system_message)):
# considered as that splitting by period does not break inline markups
break
# strip literal notation mark ``::`` from tail of summary
summary = literal_re.sub('.', summary)
return summary
def _parse_summary(doc: Sequence[str], settings: Any) -> nodes.document:
state_machine = RSTStateMachine(state_classes, 'Body')
node = new_document('', settings)
node.reporter = NullReporter()
state_machine.run(doc, node)
return node
def limited_join(
sep: str, items: list[str], max_chars: int = 30, overflow_marker: str = '...'
) -> str:
"""Join a number of strings into one, limiting the length to *max_chars*.
If the string overflows this limit, replace the last fitting item by
*overflow_marker*.
Returns: joined_string
"""
full_str = sep.join(items)
if len(full_str) < max_chars:
return full_str
n_chars = 0
n_items = 0
for item in items:
n_chars += len(item) + len(sep)
if n_chars < max_chars - len(overflow_marker):
n_items += 1
else:
break
return sep.join([*list(items[:n_items]), overflow_marker])
# -- Importing items -----------------------------------------------------------
| Autosummary |
python | apache__airflow | dev/breeze/src/airflow_breeze/utils/kubernetes_utils.py | {
"start": 17814,
"end": 19043
} | class ____(NamedTuple):
kubernetes_version: str
python_version: str
def _get_k8s_python_version(
index: int, kubernetes_version_array: list[str], python_version_array: list[str]
) -> KubernetesPythonVersion:
current_python = python_version_array[index % len(python_version_array)]
current_kubernetes_version = kubernetes_version_array[index % len(kubernetes_version_array)]
return KubernetesPythonVersion(
kubernetes_version=current_kubernetes_version, python_version=current_python
)
def get_kubernetes_python_combos(
kubernetes_version_array, python_version_array
) -> tuple[list[str], list[str], list[KubernetesPythonVersion]]:
num_tests = max(len(python_version_array), len(kubernetes_version_array))
combos: list[KubernetesPythonVersion] = [
_get_k8s_python_version(i, kubernetes_version_array, python_version_array) for i in range(num_tests)
]
combo_titles = [
get_kind_cluster_name(python=combo.python_version, kubernetes_version=combo.kubernetes_version)
for combo in combos
]
short_combo_titles = [combo[len("airflow-python-") :] for combo in combo_titles]
return combo_titles, short_combo_titles, combos
| KubernetesPythonVersion |
python | getsentry__sentry | tests/sentry/api/test_authentication.py | {
"start": 29026,
"end": 31439
} | class ____(TestCase):
def test_generate_signature(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
shared_secrets = ["secret-key"]
service_name = "TestService"
signature = generate_service_request_signature(url, body, shared_secrets, service_name)
assert signature.startswith("rpc0:")
assert len(signature) > 5 # Should have actual signature data after prefix
def test_generate_signature_uses_first_key(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
shared_secrets = ["first-key", "second-key"]
service_name = "TestService"
signature = generate_service_request_signature(url, body, shared_secrets, service_name)
# Verify it uses the first key by checking it validates with first key only
result = compare_service_signature(url, body, signature, ["first-key"], service_name)
assert result is True
# Should not validate with second key only
result = compare_service_signature(url, body, signature, ["second-key"], service_name)
assert result is False
def test_generate_signature_no_shared_secrets(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
service_name = "TestService"
with pytest.raises(RpcAuthenticationSetupException):
generate_service_request_signature(url, body, [], service_name)
def test_consistent_signatures(self) -> None:
url = "/test/endpoint"
body = b'{"test": "data"}'
shared_secrets = ["secret-key"]
service_name = "TestService"
signature1 = generate_service_request_signature(url, body, shared_secrets, service_name)
signature2 = generate_service_request_signature(url, body, shared_secrets, service_name)
assert signature1 == signature2
def test_different_bodies_different_signatures(self) -> None:
url = "/test/endpoint"
body1 = b'{"test": "data1"}'
body2 = b'{"test": "data2"}'
shared_secrets = ["secret-key"]
service_name = "TestService"
signature1 = generate_service_request_signature(url, body1, shared_secrets, service_name)
signature2 = generate_service_request_signature(url, body2, shared_secrets, service_name)
assert signature1 != signature2
@no_silo_test
| TestGenerateServiceRequestSignature |
python | PyCQA__pylint | tests/config/test_argparse_config.py | {
"start": 764,
"end": 2026
} | class ____:
"""Tests for the argparse implementation of OptionsProviderMixIn.
The logger checker is used as an example checker for this implementation.
"""
@staticmethod
def test_logger_without_options() -> None:
"""Check that we raise messages when we do not supply any options."""
with pytest.raises(SystemExit) as ex:
Run([LOGGING_TEST])
assert ex.value.code == 2
@staticmethod
def test_logger_commandline() -> None:
"""Check that we parse command-line options for the logging checker correctly."""
with pytest.raises(SystemExit) as ex:
Run([LOGGING_TEST, "--logging-format-style=new"])
assert ex.value.code == 0
@staticmethod
def test_logger_rcfile() -> None:
"""Check that we parse the rcfile for the logging checker correctly."""
with pytest.raises(SystemExit) as ex:
# replace only the last .py in the string with .rc
# we do so by inverting the string and replace the first occurrence (of the inverted tokens!)
_rcfile = LOGGING_TEST[::-1].replace("yp.", "cr.", 1)[::-1]
Run([LOGGING_TEST, f"--rcfile={_rcfile}"])
assert ex.value.code == 0
| TestArgparseOptionsProviderMixin |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/definitions/declarative_automation/operands/run_operands.py | {
"start": 3795,
"end": 6667
} | class ____(SubsetAutomationCondition[AssetKey]):
tag_keys: Optional[Set[str]] = None
tag_values: Optional[Mapping[str, str]] = None
@property
@abstractmethod
def base_name(self) -> str: ...
@property
def name(self) -> str:
return _get_run_tag_filter_name(self.base_name, self.tag_keys, self.tag_values)
@abstractmethod
def match_candidate_runs(
self, candidate_run_ids: Set[str], matching_run_ids: Set[str]
) -> bool: ...
async def _get_run_records_from_materializations(
self,
materializations: Sequence["EventLogRecord"],
context: LoadingContext,
) -> Sequence["RunRecord"]:
from dagster._core.storage.dagster_run import RunRecord
run_ids = list({record.run_id for record in materializations if record.run_id})
if not run_ids:
return []
run_records = await RunRecord.gen_many(context, run_ids)
return [record for record in run_records if record]
async def compute_subset( # pyright: ignore[reportIncompatibleMethodOverride]
self,
context: AutomationContext,
) -> EntitySubset[AssetKey]:
if (
not context.previous_temporal_context
or not context.previous_temporal_context.last_event_id
):
return context.get_empty_subset()
new_materializations = context.asset_graph_view.get_inner_queryer_for_back_compat().get_asset_materializations_updated_after_cursor(
asset_key=context.key,
after_cursor=context.previous_temporal_context.last_event_id,
)
if not new_materializations:
return context.get_empty_subset()
run_records = await self._get_run_records_from_materializations(
new_materializations,
context.asset_graph_view,
)
matching_run_ids = {
run_record.dagster_run.run_id
for run_record in run_records
if _run_tag_filter_fn(run_record, self.tag_keys, self.tag_values)
}
partitions_to_run_ids = defaultdict(set)
for materialization in new_materializations:
partitions_to_run_ids[materialization.partition_key].add(materialization.run_id)
matching_partition_keys = set()
for partition_key, run_ids in partitions_to_run_ids.items():
if self.match_candidate_runs(run_ids, matching_run_ids):
matching_partition_keys.add(partition_key)
return context.asset_graph_view.get_asset_subset_from_asset_partitions(
key=context.key,
asset_partitions={
AssetKeyPartitionKey(context.key, partition_key)
for partition_key in matching_partition_keys
},
validate_existence=True,
)
@whitelist_for_serdes
@record
| NewUpdatesWithRunTagsCondition |
python | huggingface__transformers | src/transformers/models/auto/modeling_auto.py | {
"start": 81765,
"end": 81877
} | class ____(_BaseAutoModelClass):
_model_mapping = MODEL_FOR_MASK_GENERATION_MAPPING
| AutoModelForMaskGeneration |
python | hynek__structlog | src/structlog/_output.py | {
"start": 8617,
"end": 9115
} | class ____:
r"""
Produce `BytesLogger`\ s.
To be used with `structlog.configure`\ 's ``logger_factory``.
Args:
file: File to print to. (default: `sys.stdout`\ ``.buffer``)
Positional arguments are silently ignored.
.. versionadded:: 20.2.0
"""
__slots__ = ("_file",)
def __init__(self, file: BinaryIO | None = None):
self._file = file
def __call__(self, *args: Any) -> BytesLogger:
return BytesLogger(self._file)
| BytesLoggerFactory |
python | pytorch__pytorch | torch/_numpy/_ndarray.py | {
"start": 8339,
"end": 21362
} | class ____:
def __init__(self, t=None):
if t is None:
self.tensor = torch.Tensor()
elif isinstance(t, torch.Tensor):
self.tensor = t
else:
raise ValueError(
"ndarray constructor is not recommended; prefer"
"either array(...) or zeros/empty(...)"
)
# Register NumPy functions as methods
for method, name in methods.items():
fn = getattr(_funcs, name or method)
vars()[method] = create_method(fn, method)
# Regular methods but coming from ufuncs
conj = create_method(_ufuncs.conjugate, "conj")
conjugate = create_method(_ufuncs.conjugate)
for method, name in dunder.items():
fn = getattr(_ufuncs, name or method)
method = f"__{method}__"
vars()[method] = create_method(fn, method)
for method, name in ri_dunder.items():
fn = getattr(_ufuncs, name or method)
plain = f"__{method}__"
vars()[plain] = create_method(fn, plain)
rvar = f"__r{method}__"
vars()[rvar] = create_method(lambda self, other, fn=fn: fn(other, self), rvar)
ivar = f"__i{method}__"
vars()[ivar] = create_method(
lambda self, other, fn=fn: fn(self, other, out=self), ivar
)
# There's no __idivmod__
__divmod__ = create_method(_ufuncs.divmod, "__divmod__")
__rdivmod__ = create_method(
lambda self, other: _ufuncs.divmod(other, self), "__rdivmod__"
)
# prevent loop variables leaking into the ndarray class namespace
del ivar, rvar, name, plain, fn, method
@property
def shape(self):
return tuple(self.tensor.shape)
@property
def size(self):
return self.tensor.numel()
@property
def ndim(self):
return self.tensor.ndim
@property
def dtype(self):
return _dtypes.dtype(self.tensor.dtype)
@property
def strides(self):
elsize = self.tensor.element_size()
return tuple(stride * elsize for stride in self.tensor.stride())
@property
def itemsize(self):
return self.tensor.element_size()
@property
def flags(self):
# Note contiguous in torch is assumed C-style
return Flags(
{
"C_CONTIGUOUS": self.tensor.is_contiguous(),
"F_CONTIGUOUS": self.T.tensor.is_contiguous(),
"OWNDATA": self.tensor._base is None,
"WRITEABLE": True, # pytorch does not have readonly tensors
}
)
@property
def data(self):
return self.tensor.data_ptr()
@property
def nbytes(self):
return self.tensor.storage().nbytes()
@property
def T(self):
return self.transpose()
@property
def real(self):
return _funcs.real(self)
@real.setter
def real(self, value):
self.tensor.real = asarray(value).tensor
@property
def imag(self):
return _funcs.imag(self)
@imag.setter
def imag(self, value):
self.tensor.imag = asarray(value).tensor
# ctors
def astype(self, dtype, order="K", casting="unsafe", subok=True, copy=True):
if order != "K":
raise NotImplementedError(f"astype(..., order={order} is not implemented.")
if casting != "unsafe":
raise NotImplementedError(
f"astype(..., casting={casting} is not implemented."
)
if not subok:
raise NotImplementedError(f"astype(..., subok={subok} is not implemented.")
if not copy:
raise NotImplementedError(f"astype(..., copy={copy} is not implemented.")
torch_dtype = _dtypes.dtype(dtype).torch_dtype
t = self.tensor.to(torch_dtype)
return ndarray(t)
@normalizer
def copy(self: ArrayLike, order: NotImplementedType = "C"):
return self.clone()
@normalizer
def flatten(self: ArrayLike, order: NotImplementedType = "C"):
return torch.flatten(self)
def resize(self, *new_shape, refcheck=False):
# NB: differs from np.resize: fills with zeros instead of making repeated copies of input.
if refcheck:
raise NotImplementedError(
f"resize(..., refcheck={refcheck} is not implemented."
)
if new_shape in [(), (None,)]:
return
# support both x.resize((2, 2)) and x.resize(2, 2)
if len(new_shape) == 1:
new_shape = new_shape[0]
if isinstance(new_shape, int):
new_shape = (new_shape,)
if builtins.any(x < 0 for x in new_shape):
raise ValueError("all elements of `new_shape` must be non-negative")
new_numel, old_numel = math.prod(new_shape), self.tensor.numel()
self.tensor.resize_(new_shape)
if new_numel >= old_numel:
# zero-fill new elements
assert self.tensor.is_contiguous()
b = self.tensor.flatten() # does not copy
b[old_numel:].zero_()
def view(self, dtype=_Unspecified.unspecified, type=_Unspecified.unspecified):
if dtype is _Unspecified.unspecified:
dtype = self.dtype
if type is not _Unspecified.unspecified:
raise NotImplementedError(f"view(..., type={type} is not implemented.")
torch_dtype = _dtypes.dtype(dtype).torch_dtype
tview = self.tensor.view(torch_dtype)
return ndarray(tview)
@normalizer
def fill(self, value: ArrayLike):
# Both Pytorch and NumPy accept 0D arrays/tensors and scalars, and
# error out on D > 0 arrays
self.tensor.fill_(value)
def tolist(self):
return self.tensor.tolist()
def __iter__(self):
return (ndarray(x) for x in self.tensor.__iter__())
def __str__(self):
return (
str(self.tensor)
.replace("tensor", "torch.ndarray")
.replace("dtype=torch.", "dtype=")
)
__repr__ = create_method(__str__)
def __eq__(self, other):
try:
return _ufuncs.equal(self, other)
except (RuntimeError, TypeError):
# Failed to convert other to array: definitely not equal.
falsy = torch.full(self.shape, fill_value=False, dtype=bool)
return asarray(falsy)
def __ne__(self, other):
return ~(self == other)
def __index__(self):
try:
return operator.index(self.tensor.item())
except Exception as exc:
raise TypeError(
"only integer scalar arrays can be converted to a scalar index"
) from exc
def __bool__(self):
return bool(self.tensor)
def __int__(self):
return int(self.tensor)
def __float__(self):
return float(self.tensor)
def __complex__(self):
return complex(self.tensor)
def is_integer(self):
try:
v = self.tensor.item()
result = int(v) == v
except Exception:
result = False
return result
def __len__(self):
return self.tensor.shape[0]
def __contains__(self, x):
return self.tensor.__contains__(x)
def transpose(self, *axes):
# np.transpose(arr, axis=None) but arr.transpose(*axes)
return _funcs.transpose(self, axes)
def reshape(self, *shape, order="C"):
# arr.reshape(shape) and arr.reshape(*shape)
return _funcs.reshape(self, shape, order=order)
def sort(self, axis=-1, kind=None, order=None):
# ndarray.sort works in-place
_funcs.copyto(self, _funcs.sort(self, axis, kind, order))
def item(self, *args):
# Mimic NumPy's implementation with three special cases (no arguments,
# a flat index and a multi-index):
# https://github.com/numpy/numpy/blob/main/numpy/_core/src/multiarray/methods.c#L702
if args == ():
return self.tensor.item()
elif len(args) == 1:
# int argument
return self.ravel()[args[0]]
else:
return self.__getitem__(args)
def __getitem__(self, index):
tensor = self.tensor
def neg_step(i, s):
if not (isinstance(s, slice) and s.step is not None and s.step < 0):
return s
nonlocal tensor
tensor = torch.flip(tensor, (i,))
# Account for the fact that a slice includes the start but not the end
assert isinstance(s.start, int) or s.start is None
assert isinstance(s.stop, int) or s.stop is None
start = s.stop + 1 if s.stop else None
stop = s.start + 1 if s.start else None
return slice(start, stop, -s.step)
if isinstance(index, Sequence):
index = type(index)(neg_step(i, s) for i, s in enumerate(index))
else:
index = neg_step(0, index)
index = _util.ndarrays_to_tensors(index)
index = _upcast_int_indices(index)
# Apply NumPy-compatible indexing conversion
index = _numpy_compatible_indexing(index)
# Apply NumPy-compatible empty ellipsis behavior
index, maybe_squeeze, _ = _numpy_empty_ellipsis_patch(index, tensor.ndim)
return maybe_squeeze(ndarray(tensor.__getitem__(index)))
def __setitem__(self, index, value):
index = _util.ndarrays_to_tensors(index)
index = _upcast_int_indices(index)
# Apply NumPy-compatible indexing conversion
index = _numpy_compatible_indexing(index)
# Apply NumPy-compatible empty ellipsis behavior
index, _, maybe_unsqueeze = _numpy_empty_ellipsis_patch(index, self.tensor.ndim)
if not _dtypes_impl.is_scalar(value):
value = normalize_array_like(value)
value = _util.cast_if_needed(value, self.tensor.dtype)
return self.tensor.__setitem__(index, maybe_unsqueeze(value))
take = _funcs.take
put = _funcs.put
def __dlpack__(self, *, stream=None):
return self.tensor.__dlpack__(stream=stream)
def __dlpack_device__(self):
return self.tensor.__dlpack_device__()
def _tolist(obj):
"""Recursively convert tensors into lists."""
a1 = []
for elem in obj:
if isinstance(elem, (list, tuple)):
elem = _tolist(elem)
if isinstance(elem, ndarray):
a1.append(elem.tensor.tolist())
else:
a1.append(elem)
return a1
# This is the ideally the only place which talks to ndarray directly.
# The rest goes through asarray (preferred) or array.
def array(obj, dtype=None, *, copy=True, order="K", subok=False, ndmin=0, like=None):
if subok is not False:
raise NotImplementedError("'subok' parameter is not supported.")
if like is not None:
raise NotImplementedError("'like' parameter is not supported.")
if order != "K":
raise NotImplementedError
# a happy path
if (
isinstance(obj, ndarray)
and copy is False
and dtype is None
and ndmin <= obj.ndim
):
return obj
if isinstance(obj, (list, tuple)):
# FIXME and they have the same dtype, device, etc
if obj and all(isinstance(x, torch.Tensor) for x in obj):
# list of arrays: *under torch.Dynamo* these are FakeTensors
obj = torch.stack(obj)
else:
# XXX: remove tolist
# lists of ndarrays: [1, [2, 3], ndarray(4)] convert to lists of lists
obj = _tolist(obj)
# is obj an ndarray already?
if isinstance(obj, ndarray):
obj = obj.tensor
# is a specific dtype requested?
torch_dtype = None
if dtype is not None:
torch_dtype = _dtypes.dtype(dtype).torch_dtype
tensor = _util._coerce_to_tensor(obj, torch_dtype, copy, ndmin)
return ndarray(tensor)
def asarray(a, dtype=None, order="K", *, like=None):
return array(a, dtype=dtype, order=order, like=like, copy=False, ndmin=0)
def ascontiguousarray(a, dtype=None, *, like=None):
arr = asarray(a, dtype=dtype, like=like)
if not arr.tensor.is_contiguous():
arr.tensor = arr.tensor.contiguous()
return arr
def from_dlpack(x, /):
t = torch.from_dlpack(x)
return ndarray(t)
def _extract_dtype(entry):
try:
dty = _dtypes.dtype(entry)
except Exception:
dty = asarray(entry).dtype
return dty
def can_cast(from_, to, casting="safe"):
from_ = _extract_dtype(from_)
to_ = _extract_dtype(to)
return _dtypes_impl.can_cast_impl(from_.torch_dtype, to_.torch_dtype, casting)
def result_type(*arrays_and_dtypes):
tensors = []
for entry in arrays_and_dtypes:
try:
t = asarray(entry).tensor
except (RuntimeError, ValueError, TypeError):
dty = _dtypes.dtype(entry)
t = torch.empty(1, dtype=dty.torch_dtype)
tensors.append(t)
torch_dtype = _dtypes_impl.result_type_impl(*tensors)
return _dtypes.dtype(torch_dtype)
| ndarray |
python | django__django | tests/admin_views/test_breadcrumbs.py | {
"start": 182,
"end": 1041
} | class ____(TestCase):
@classmethod
def setUpTestData(cls):
cls.superuser = User.objects.create_superuser(
username="super",
password="secret",
email="super@example.com",
)
def setUp(self):
self.client.force_login(self.superuser)
def test_breadcrumbs_absent(self):
response = self.client.get(reverse("admin:index"))
self.assertNotContains(response, '<nav aria-label="Breadcrumbs">')
def test_breadcrumbs_present(self):
response = self.client.get(reverse("admin:auth_user_add"))
self.assertContains(response, '<nav aria-label="Breadcrumbs">')
response = self.client.get(
reverse("admin:app_list", kwargs={"app_label": "auth"})
)
self.assertContains(response, '<nav aria-label="Breadcrumbs">')
| AdminBreadcrumbsTests |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1546058,
"end": 1549769
} | class ____(VegaLiteSchema):
"""
UnitSpec schema wrapper.
Base interface for a unit (single-view) specification.
Parameters
----------
mark : dict, :class:`Mark`, :class:`AnyMark`, :class:`BoxPlot`, :class:`MarkDef`, :class:`ErrorBar`, :class:`ErrorBand`, :class:`BoxPlotDef`, :class:`ErrorBarDef`, :class:`ErrorBandDef`, :class:`CompositeMark`, :class:`CompositeMarkDef`, Literal['arc', 'area', 'bar', 'image', 'line', 'point', 'rect', 'rule', 'text', 'tick', 'trail', 'circle', 'square', 'geoshape', 'boxplot', 'errorband', 'errorbar']
A string describing the mark type (one of ``"bar"``, ``"circle"``, ``"square"``,
``"tick"``, ``"line"``, ``"area"``, ``"point"``, ``"rule"``, ``"geoshape"``, and
``"text"``) or a `mark definition object
<https://vega.github.io/vega-lite/docs/mark.html#mark-def>`__.
data : dict, :class:`Data`, :class:`UrlData`, :class:`Generator`, :class:`NamedData`, :class:`DataSource`, :class:`InlineData`, :class:`SphereGenerator`, :class:`SequenceGenerator`, :class:`GraticuleGenerator`, None
An object describing the data source. Set to ``null`` to ignore the parent's data
source. If no data is set, it is derived from the parent.
description : str
Description of this mark for commenting purpose.
encoding : dict, :class:`Encoding`
A key-value mapping between encoding channels and definition of fields.
name : str
Name of the visualization for later reference.
params : Sequence[dict, :class:`SelectionParameter`]
An array of parameters that may either be simple variables, or more complex
selections that map user input to data queries.
projection : dict, :class:`Projection`
An object defining properties of geographic projection, which will be applied to
``shape`` path for ``"geoshape"`` marks and to ``latitude`` and ``"longitude"``
channels for other marks.
title : str, dict, :class:`Text`, Sequence[str], :class:`TitleParams`
Title for the plot.
transform : Sequence[dict, :class:`Transform`, :class:`BinTransform`, :class:`FoldTransform`, :class:`LoessTransform`, :class:`PivotTransform`, :class:`StackTransform`, :class:`ExtentTransform`, :class:`FilterTransform`, :class:`ImputeTransform`, :class:`LookupTransform`, :class:`SampleTransform`, :class:`WindowTransform`, :class:`DensityTransform`, :class:`FlattenTransform`, :class:`QuantileTransform`, :class:`TimeUnitTransform`, :class:`AggregateTransform`, :class:`CalculateTransform`, :class:`RegressionTransform`, :class:`JoinAggregateTransform`]
An array of data transformations such as filter and new field calculation.
"""
_schema = {"$ref": "#/definitions/UnitSpec"}
def __init__(
self,
mark: Optional[SchemaBase | Map | Mark_T | CompositeMark_T] = Undefined,
data: Optional[SchemaBase | ChartDataType | Map | None] = Undefined,
description: Optional[str] = Undefined,
encoding: Optional[SchemaBase | Map] = Undefined,
name: Optional[str] = Undefined,
params: Optional[Sequence[SchemaBase | Map]] = Undefined,
projection: Optional[SchemaBase | Map] = Undefined,
title: Optional[str | SchemaBase | Sequence[str] | Map] = Undefined,
transform: Optional[Sequence[SchemaBase | Map]] = Undefined,
**kwds,
):
super().__init__(
mark=mark,
data=data,
description=description,
encoding=encoding,
name=name,
params=params,
projection=projection,
title=title,
transform=transform,
**kwds,
)
| UnitSpec |
python | django__django | tests/delete_regress/models.py | {
"start": 636,
"end": 768
} | class ____(models.Model):
pagecount = models.IntegerField()
owner = models.ForeignKey("Child", models.CASCADE, null=True)
| Book |
python | getsentry__sentry | src/sentry/api/endpoints/internal/mail.py | {
"start": 391,
"end": 1917
} | class ____(Endpoint):
owner = ApiOwner.HYBRID_CLOUD
publish_status = {
"GET": ApiPublishStatus.PRIVATE,
"POST": ApiPublishStatus.PRIVATE,
}
permission_classes = (SuperuserPermission,)
def get(self, request: Request) -> Response:
assert request.user.is_authenticated
data = {
"mailHost": options.get("mail.host"),
"mailPassword": bool(options.get("mail.password")),
"mailUsername": options.get("mail.username"),
"mailPort": options.get("mail.port"),
"mailUseTls": options.get("mail.use-tls"),
"mailUseSsl": options.get("mail.use-ssl"),
"mailFrom": options.get("mail.from"),
"mailListNamespace": options.get("mail.list-namespace"),
"testMailEmail": request.user.email,
}
return Response(data)
def post(self, request: Request) -> Response:
assert request.user.is_authenticated
error = None
body = (
"""This email was sent as a request to test the Sentry outbound email configuration."""
)
try:
send_mail(
"{} Test Email".format(options.get("mail.subject-prefix")),
body,
options.get("mail.from"),
[request.user.email],
fail_silently=False,
)
except Exception as e:
error = str(e)
return Response({"error": error}, status=500 if error else 200)
| InternalMailEndpoint |
python | pydantic__pydantic | pydantic-core/tests/validators/test_int.py | {
"start": 19265,
"end": 21336
} | class ____(float):
pass
def test_float_subclass() -> None:
v = SchemaValidator(cs.int_schema())
v_lax = v.validate_python(FloatSubclass(1))
assert v_lax == 1
assert type(v_lax) == int
def test_int_subclass_plain_enum() -> None:
v = SchemaValidator(cs.int_schema())
from enum import Enum
class PlainEnum(Enum):
ONE = 1
v_lax = v.validate_python(PlainEnum.ONE)
assert v_lax == 1
assert type(v_lax) == int
def test_allow_inf_nan_true_json() -> None:
v = SchemaValidator(cs.int_schema(), config=cs.CoreConfig(allow_inf_nan=True))
assert v.validate_json('123') == 123
with pytest.raises(ValidationError, match=r'Input should be a finite number \[type=finite_number'):
v.validate_json('NaN')
with pytest.raises(ValidationError, match=r'Input should be a finite number \[type=finite_number'):
v.validate_json('Infinity')
with pytest.raises(ValidationError, match=r'Input should be a finite number \[type=finite_number'):
v.validate_json('-Infinity')
def test_allow_inf_nan_false_json() -> None:
v = SchemaValidator(cs.int_schema(), config=cs.CoreConfig(allow_inf_nan=False))
assert v.validate_json('123') == 123
with pytest.raises(ValidationError, match=r'Input should be a finite number \[type=finite_number'):
v.validate_json('NaN')
with pytest.raises(ValidationError, match=r'Input should be a finite number \[type=finite_number'):
v.validate_json('Infinity')
with pytest.raises(ValidationError, match=r'Input should be a finite number \[type=finite_number'):
v.validate_json('-Infinity')
def test_json_big_int_key():
v = SchemaValidator(cs.dict_schema(keys_schema=cs.int_schema(), values_schema=cs.str_schema()))
big_integer = 1433352099889938534014333520998899385340
assert v.validate_python({big_integer: 'x'}) == {big_integer: 'x'}
assert v.validate_json('{"' + str(big_integer) + '": "x"}') == {big_integer: 'x'}
assert v.validate_strings({str(big_integer): 'x'}) == {big_integer: 'x'}
| FloatSubclass |
python | PrefectHQ__prefect | src/prefect/utilities/templating.py | {
"start": 992,
"end": 16017
} | class ____(NamedTuple):
full_match: str
name: str
type: PlaceholderType
def determine_placeholder_type(name: str) -> PlaceholderType:
"""
Determines the type of a placeholder based on its name.
Args:
name: The name of the placeholder
Returns:
The type of the placeholder
"""
if name.startswith(BLOCK_DOCUMENT_PLACEHOLDER_PREFIX):
return PlaceholderType.BLOCK_DOCUMENT
elif name.startswith(VARIABLE_PLACEHOLDER_PREFIX):
return PlaceholderType.VARIABLE
elif name.startswith(ENV_VAR_PLACEHOLDER_PREFIX):
return PlaceholderType.ENV_VAR
else:
return PlaceholderType.STANDARD
def find_placeholders(template: T) -> set[Placeholder]:
"""
Finds all placeholders in a template.
Args:
template: template to discover placeholders in
Returns:
A set of all placeholders in the template
"""
seed: set[Placeholder] = set()
if isinstance(template, (int, float, bool)):
return seed
if isinstance(template, str):
result = PLACEHOLDER_CAPTURE_REGEX.findall(template)
return {
Placeholder(full_match, name, determine_placeholder_type(name))
for full_match, name in result
}
elif isinstance(template, dict):
return seed.union(*[find_placeholders(value) for value in template.values()])
elif isinstance(template, list):
return seed.union(*[find_placeholders(item) for item in template])
else:
raise ValueError(f"Unexpected type: {type(template)}")
@overload
def apply_values(
template: T,
values: dict[str, Any],
remove_notset: Literal[True] = True,
warn_on_notset: bool = False,
) -> T: ...
@overload
def apply_values(
template: T,
values: dict[str, Any],
remove_notset: Literal[False] = False,
warn_on_notset: bool = False,
) -> Union[T, type[NotSet]]: ...
@overload
def apply_values(
template: T,
values: dict[str, Any],
remove_notset: bool = False,
warn_on_notset: bool = False,
) -> Union[T, type[NotSet]]: ...
def apply_values(
template: T,
values: dict[str, Any],
remove_notset: bool = True,
warn_on_notset: bool = False,
) -> Union[T, type[NotSet]]:
"""
Replaces placeholders in a template with values from a supplied dictionary.
Will recursively replace placeholders in dictionaries and lists.
If a value has no placeholders, it will be returned unchanged.
If a template contains only a single placeholder, the placeholder will be
fully replaced with the value.
If a template contains text before or after a placeholder or there are
multiple placeholders, the placeholders will be replaced with the
corresponding variable values.
If a template contains a placeholder that is not in `values`, NotSet will
be returned to signify that no placeholder replacement occurred. If
`template` is a dictionary that contains a key with a value of NotSet,
the key will be removed in the return value unless `remove_notset` is set to False.
Args:
template: template to discover and replace values in
values: The values to apply to placeholders in the template
remove_notset: If True, remove keys with an unset value
warn_on_notset: If True, warn when a placeholder is not found in `values`
Returns:
The template with the values applied
"""
if template in (NotSet, None) or isinstance(template, (int, float)):
return template
if isinstance(template, str):
placeholders = find_placeholders(template)
if not placeholders:
# If there are no values, we can just use the template
return template
elif (
len(placeholders) == 1
and list(placeholders)[0].full_match == template
and list(placeholders)[0].type is PlaceholderType.STANDARD
):
# If there is only one variable with no surrounding text,
# we can replace it. If there is no variable value, we
# return NotSet to indicate that the value should not be included.
value = get_from_dict(values, list(placeholders)[0].name, NotSet)
if value is NotSet and warn_on_notset:
logger.warning(
f"Value for placeholder {list(placeholders)[0].name!r} not found in provided values. Please ensure that "
"the placeholder is spelled correctly and that the corresponding value is provided.",
)
return value
else:
for full_match, name, placeholder_type in placeholders:
if placeholder_type is PlaceholderType.STANDARD:
value = get_from_dict(values, name, NotSet)
elif placeholder_type is PlaceholderType.ENV_VAR:
name = name.lstrip(ENV_VAR_PLACEHOLDER_PREFIX)
value = os.environ.get(name, NotSet)
else:
continue
if value is NotSet:
if warn_on_notset:
logger.warning(
f"Value for placeholder {full_match!r} not found in provided values. Please ensure that "
"the placeholder is spelled correctly and that the corresponding value is provided.",
)
if remove_notset:
template = template.replace(full_match, "")
else:
template = template.replace(full_match, str(value))
return template
elif isinstance(template, dict):
updated_template: dict[str, Any] = {}
for key, value in template.items():
updated_value = apply_values(
value,
values,
remove_notset=remove_notset,
warn_on_notset=warn_on_notset,
)
if updated_value is not NotSet:
updated_template[key] = updated_value
elif not remove_notset:
updated_template[key] = value
return cast(T, updated_template)
elif isinstance(template, list):
updated_list: list[Any] = []
for value in template:
updated_value = apply_values(
value,
values,
remove_notset=remove_notset,
warn_on_notset=warn_on_notset,
)
if updated_value is not NotSet:
updated_list.append(updated_value)
return cast(T, updated_list)
else:
raise ValueError(f"Unexpected template type {type(template).__name__!r}")
@inject_client
async def resolve_block_document_references(
template: T,
client: Optional["PrefectClient"] = None,
value_transformer: Optional[Callable[[str, Any], Any]] = None,
) -> Union[T, dict[str, Any]]:
"""
Resolve block document references in a template by replacing each reference with
its value or the return value of the transformer function if provided.
Recursively searches for block document references in dictionaries and lists.
Identifies block document references by the as dictionary with the following
structure:
```
{
"$ref": {
"block_document_id": <block_document_id>
}
}
```
where `<block_document_id>` is the ID of the block document to resolve.
Once the block document is retrieved from the API, the data of the block document
is used to replace the reference.
Accessing Values:
-----------------
To access different values in a block document, use dot notation combined with the block document's prefix, slug, and block name.
For a block document with the structure:
```json
{
"value": {
"key": {
"nested-key": "nested-value"
},
"list": [
{"list-key": "list-value"},
1,
2
]
}
}
```
examples of value resolution are as follows:
1. Accessing a nested dictionary:
Format: `prefect.blocks.<block_type_slug>.<block_document_name>.value.key`
Example: Returns `{"nested-key": "nested-value"}`
2. Accessing a specific nested value:
Format: `prefect.blocks.<block_type_slug>.<block_document_name>.value.key.nested-key`
Example: Returns `"nested-value"`
3. Accessing a list element's key-value:
Format: `prefect.blocks.<block_type_slug>.<block_document_name>.value.list[0].list-key`
Example: Returns `"list-value"`
Default Resolution for System Blocks:
-------------------------------------
For system blocks, which only contain a `value` attribute, this attribute is resolved by default.
Args:
template: The template to resolve block documents in
value_transformer: A function that takes the block placeholder and the block value and returns replacement text for the template
Returns:
The template with block documents resolved
"""
if TYPE_CHECKING:
# The @inject_client decorator takes care of providing the client, but
# the function signature must mark it as optional to callers.
assert client is not None
if isinstance(template, dict):
block_document_id = template.get("$ref", {}).get("block_document_id")
if block_document_id:
block_document = await client.read_block_document(block_document_id)
return block_document.data
updated_template: dict[str, Any] = {}
for key, value in template.items():
updated_value = await resolve_block_document_references(
value, value_transformer=value_transformer, client=client
)
updated_template[key] = updated_value
return updated_template
elif isinstance(template, list):
return [
await resolve_block_document_references(
item, value_transformer=value_transformer, client=client
)
for item in template
]
elif isinstance(template, str):
placeholders = find_placeholders(template)
has_block_document_placeholder = any(
placeholder.type is PlaceholderType.BLOCK_DOCUMENT
for placeholder in placeholders
)
if not (placeholders and has_block_document_placeholder):
return template
elif (
len(placeholders) == 1
and list(placeholders)[0].full_match == template
and list(placeholders)[0].type is PlaceholderType.BLOCK_DOCUMENT
):
# value_keypath will be a list containing a dot path if additional
# attributes are accessed and an empty list otherwise.
[placeholder] = placeholders
parts = placeholder.name.replace(
BLOCK_DOCUMENT_PLACEHOLDER_PREFIX, ""
).split(".", 2)
block_type_slug, block_document_name, *value_keypath = parts
block_document = await client.read_block_document_by_name(
name=block_document_name, block_type_slug=block_type_slug
)
data = block_document.data
value: Union[T, dict[str, Any]] = data
# resolving system blocks to their data for backwards compatibility
if len(data) == 1 and "value" in data:
# only resolve the value if the keypath is not already pointing to "value"
if not (value_keypath and value_keypath[0].startswith("value")):
data = value = value["value"]
# resolving keypath/block attributes
if value_keypath:
from_dict: Any = get_from_dict(data, value_keypath[0], default=NotSet)
if from_dict is NotSet:
raise ValueError(
f"Invalid template: {template!r}. Could not resolve the"
" keypath in the block document data."
)
value = from_dict
if value_transformer:
value = value_transformer(placeholder.full_match, value)
return value
else:
raise ValueError(
f"Invalid template: {template!r}. Only a single block placeholder is"
" allowed in a string and no surrounding text is allowed."
)
return template
@inject_client
async def resolve_variables(template: T, client: Optional["PrefectClient"] = None) -> T:
"""
Resolve variables in a template by replacing each variable placeholder with the
value of the variable.
Recursively searches for variable placeholders in dictionaries and lists.
Strips variable placeholders if the variable is not found.
Args:
template: The template to resolve variables in
Returns:
The template with variables resolved
"""
if TYPE_CHECKING:
# The @inject_client decorator takes care of providing the client, but
# the function signature must mark it as optional to callers.
assert client is not None
if isinstance(template, str):
placeholders = find_placeholders(template)
has_variable_placeholder = any(
placeholder.type is PlaceholderType.VARIABLE for placeholder in placeholders
)
if not placeholders or not has_variable_placeholder:
# If there are no values, we can just use the template
return template
elif (
len(placeholders) == 1
and list(placeholders)[0].full_match == template
and list(placeholders)[0].type is PlaceholderType.VARIABLE
):
variable_name = list(placeholders)[0].name.replace(
VARIABLE_PLACEHOLDER_PREFIX, ""
)
variable = await client.read_variable_by_name(name=variable_name)
if variable is None:
return ""
else:
return cast(T, variable.value)
else:
for full_match, name, placeholder_type in placeholders:
if placeholder_type is PlaceholderType.VARIABLE:
variable_name = name.replace(VARIABLE_PLACEHOLDER_PREFIX, "")
variable = await client.read_variable_by_name(name=variable_name)
if variable is None:
template = template.replace(full_match, "")
else:
template = template.replace(full_match, str(variable.value))
return template
elif isinstance(template, dict):
return {
key: await resolve_variables(value, client=client)
for key, value in template.items()
}
elif isinstance(template, list):
return [await resolve_variables(item, client=client) for item in template]
else:
return template
| Placeholder |
python | pypa__pip | tests/unit/test_finder.py | {
"start": 4231,
"end": 7204
} | class ____:
def test_skip_invalid_wheel_link(
self, caplog: pytest.LogCaptureFixture, data: TestData
) -> None:
"""
Test if PackageFinder skips invalid wheel filenames
"""
caplog.set_level(logging.DEBUG)
req = install_req_from_line("invalid")
# data.find_links contains "invalid.whl", which is an invalid wheel
finder = make_test_finder(find_links=[data.find_links])
with pytest.raises(DistributionNotFound):
finder.find_requirement(req, True)
assert (
"Could not find a version that satisfies the requirement invalid"
" (from versions:" in caplog.text
)
def test_not_find_wheel_not_supported(self, data: TestData) -> None:
"""
Test not finding an unsupported wheel.
"""
req = install_req_from_line("simple.dist")
target_python = TargetPython()
# Make sure no tags will match.
target_python._valid_tags = []
finder = make_test_finder(
find_links=[data.find_links],
target_python=target_python,
)
with pytest.raises(DistributionNotFound):
finder.find_requirement(req, True)
def test_find_wheel_supported(
self, data: TestData, monkeypatch: pytest.MonkeyPatch
) -> None:
"""
Test finding supported wheel.
"""
monkeypatch.setattr(
pip._internal.utils.compatibility_tags,
"get_supported",
lambda **kw: [("py2", "none", "any")],
)
req = install_req_from_line("simple.dist")
finder = make_test_finder(find_links=[data.find_links])
found = finder.find_requirement(req, True)
assert found is not None
assert found.link.url.endswith("simple.dist-0.1-py2.py3-none-any.whl"), found
def test_wheel_over_sdist_priority(self, data: TestData) -> None:
"""
Test wheels have priority over sdists.
`test_link_sorting` also covers this at lower level
"""
req = install_req_from_line("priority")
finder = make_test_finder(find_links=[data.find_links])
found = finder.find_requirement(req, True)
assert found is not None
assert found.link.url.endswith("priority-1.0-py2.py3-none-any.whl"), found
def test_existing_over_wheel_priority(self, data: TestData) -> None:
"""
Test existing install has priority over wheels.
`test_link_sorting` also covers this at a lower level
"""
req = install_req_from_line("priority")
latest_version = "1.0"
satisfied_by = Mock(
location="/path",
version=parse_version(latest_version),
)
req.satisfied_by = satisfied_by
finder = make_test_finder(find_links=[data.find_links])
with pytest.raises(BestVersionAlreadyInstalled):
finder.find_requirement(req, True)
| TestWheel |
python | django__django | django/contrib/gis/db/models/lookups.py | {
"start": 3654,
"end": 3894
} | class ____(GISLookup):
"""
The overlaps_left operator returns true if A's bounding box overlaps or is
to the left of B's bounding box.
"""
lookup_name = "overlaps_left"
@BaseSpatialField.register_lookup
| OverlapsLeftLookup |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/cloud_composer.py | {
"start": 23678,
"end": 26401
} | class ____(GoogleCloudBaseOperator):
"""
List ImageVersions for provided location.
:param request: The request object. List ImageVersions in a project and location.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
:param gcp_conn_id:
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields = (
"project_id",
"region",
"impersonation_chain",
)
def __init__(
self,
*,
project_id: str,
region: str,
page_size: int | None = None,
page_token: str | None = None,
include_past_releases: bool = False,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.project_id = project_id
self.region = region
self.page_size = page_size
self.page_token = page_token
self.include_past_releases = include_past_releases
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = CloudComposerHook(
gcp_conn_id=self.gcp_conn_id,
impersonation_chain=self.impersonation_chain,
)
result = hook.list_image_versions(
project_id=self.project_id,
region=self.region,
page_size=self.page_size,
page_token=self.page_token,
include_past_releases=self.include_past_releases,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
return [ImageVersion.to_dict(image) for image in result]
| CloudComposerListImageVersionsOperator |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-service-now/llama_index/readers/service_now/base.py | {
"start": 855,
"end": 7984
} | class ____(BaseModel):
"""
Manager for custom file parsers with validation and file processing capabilities.
Validates that custom parsers are provided for processing different file types.
At minimum, an HTML parser must be provided for processing article bodies.
"""
custom_parsers: Dict[FileType, BaseReader] = Field(
description="Dictionary mapping FileType enum values to BaseReader instances"
)
custom_folder: str = Field(
description="Folder path for temporary files during parsing"
)
logger: logging.Logger = Field(
default_factory=lambda: logging.getLogger(__name__),
description="Logger instance",
)
model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")
@model_validator(mode="after")
def validate_model(self):
# Validate that HTML parser is provided (required for article body processing)
if FileType.HTML not in self.custom_parsers:
raise ValueError(
"HTML parser is required in custom_parsers for processing article bodies. "
"Please provide a parser for FileType.HTML."
)
# Ensure custom_folder exists and is writable
try:
os.makedirs(self.custom_folder, exist_ok=True)
# Test write permissions
test_file = os.path.join(self.custom_folder, ".test_write")
with open(test_file, "w") as f:
f.write("test")
os.remove(test_file)
except (OSError, PermissionError) as e:
raise ValueError(
f"Custom folder '{self.custom_folder}' is not accessible or writable: {e}"
)
return self
@staticmethod
def validate_recommended_parsers(
custom_parsers: Dict[FileType, BaseReader], logger=None
) -> List[str]:
"""
Validate that custom parsers are provided for recommended file types.
Args:
custom_parsers: Dictionary of file type to parser mappings
logger: Optional logger for warnings
Returns:
List of missing recommended file types
"""
recommended_types = [
FileType.PDF,
FileType.HTML,
FileType.DOCUMENT,
FileType.TEXT,
FileType.SPREADSHEET,
FileType.PRESENTATION,
]
missing_types = []
for file_type in recommended_types:
if file_type not in custom_parsers:
missing_types.append(file_type.value)
if missing_types and logger:
logger.warning(
f"Recommended custom parsers missing for file types: {', '.join(missing_types)}"
)
return missing_types
def __remove_custom_file(self, file_path: str):
try:
if os.path.exists(file_path):
os.remove(file_path)
except Exception as e:
self.logger.error(f"Error removing file {file_path}: {e}")
def process_with_custom_parser(
self, file_type: FileType, file_content: bytes, extension: str
) -> str:
"""
Process file content with a custom parser (required).
Args:
file_type: The type of file to process
file_content: The binary file content to process
extension: The file extension
Returns:
Processed markdown text
Raises:
ValueError: If no custom parser found for file type or content is empty
"""
if file_type not in self.custom_parsers:
raise ValueError(f"No custom parser found for file type: {file_type}")
if not file_content:
raise ValueError("File content cannot be empty")
file_name = f"{uuid.uuid4().hex}.{extension}"
custom_file_path = os.path.join(self.custom_folder, file_name)
try:
with open(custom_file_path, "wb") as f:
f.write(file_content)
markdown_text = ""
try:
documents = self.custom_parsers[file_type].load_data(
file_path=custom_file_path
)
if not documents:
raise ValueError(
f"Custom parser for {file_type} returned no documents"
)
markdown_text = "\n".join(doc.text for doc in documents)
if not markdown_text.strip():
raise ValueError(
f"Custom parser for {file_type} returned empty content"
)
except Exception as e:
raise ValueError(
f"Error processing file with custom parser for {file_type}: {e}"
)
finally:
self.__remove_custom_file(custom_file_path)
return markdown_text
except (OSError, PermissionError) as e:
raise ValueError(f"Error creating temporary file for parsing: {e}")
def process_text_with_custom_parser(
self, file_type: FileType, text_content: str, extension: str
) -> str:
"""
Process text content with a custom parser (required).
Args:
file_type: The type of file to process
text_content: The text content to process
extension: The file extension
Returns:
Processed markdown text
Raises:
ValueError: If no custom parser found for file type or content is empty
"""
if file_type not in self.custom_parsers:
raise ValueError(f"No custom parser found for file type: {file_type}")
if not text_content:
raise ValueError("Text content cannot be empty")
# Create a temporary file-like object
file_name = f"{uuid.uuid4().hex}.{extension}"
custom_file_path = os.path.join(self.custom_folder, file_name)
try:
with open(custom_file_path, "w", encoding="utf-8") as f:
f.write(text_content)
markdown_text = ""
try:
documents = self.custom_parsers[file_type].load_data(
file_path=custom_file_path
)
if not documents:
raise ValueError(
f"Custom parser for {file_type} returned no documents"
)
markdown_text = "\n".join(doc.text for doc in documents)
if not markdown_text.strip():
raise ValueError(
f"Custom parser for {file_type} returned empty content"
)
except Exception as e:
raise ValueError(
f"Error processing text with custom parser for {file_type}: {e}"
)
finally:
self.__remove_custom_file(custom_file_path)
return markdown_text
except (OSError, PermissionError) as e:
raise ValueError(f"Error creating temporary file for parsing: {e}")
| CustomParserManager |
python | doocs__leetcode | solution/1400-1499/1436.Destination City/Solution.py | {
"start": 0,
"end": 160
} | class ____:
def destCity(self, paths: List[List[str]]) -> str:
s = {a for a, _ in paths}
return next(b for _, b in paths if b not in s)
| Solution |
python | google__pytype | pytype/pyi/types.py | {
"start": 426,
"end": 2008
} | class ____(Exception):
"""Exceptions raised by the parser."""
def __init__(self, msg, line=None, filename=None, column=None, text=None):
super().__init__(msg)
self._line = line
self._filename = filename
self._column = column
self._text = text
@classmethod
def from_exc(cls, exc) -> "ParseError":
if isinstance(exc, cls):
return exc
elif exc.args:
return cls(exc.args[0])
else:
return cls(repr(exc))
def at(self, node, filename=None, src_code=None):
"""Add position information from `node` if it doesn't already exist."""
if not self._line:
self._line, self._column = node_position(node)
if not self._filename:
self._filename = filename
if self._line and src_code:
try:
self._text = src_code.splitlines()[self._line - 1]
except IndexError:
pass
return self
def clear_position(self):
self._line = None
@property
def line(self):
return self._line
def __str__(self):
lines = []
if self._filename or self._line is not None:
lines.append(f' File: "{self._filename}", line {self._line}')
if self._column is not None and self._text:
indent = 4
stripped = self._text.strip()
lines.append("%*s%s" % (indent, "", stripped))
# Output a pointer below the error column, adjusting for stripped spaces.
pos = indent + (self._column - 1) - (len(self._text) - len(stripped))
lines.append("%*s^" % (pos, ""))
lines.append(f"{type(self).__name__}: {self.args[0]}")
return "\n".join(lines)
| ParseError |
python | readthedocs__readthedocs.org | readthedocs/oauth/querysets.py | {
"start": 481,
"end": 1826
} | class ____(NoReprQuerySet, models.QuerySet):
"""For models with relations through :py:class:`User`."""
def api(self, user=None):
"""Return objects for user."""
if not user.is_authenticated:
return self.none()
queryset = self.filter(users=user)
# Exclude repositories/organizations from the old or new GitHub App to avoid duplicated entries.
# If the user has already started using the GitHub App,
# we shouldn't show repositories from the old GitHub integration.
# Otherwise, we should show the repositories from the old GitHub integration only,
# this is done to avoid confusion for users that haven't migrated their accounts yet,
# but still have access to some repositories from the new GitHub App integration.
using_github_app = _has_account_connected_to_github_app(user)
if using_github_app and queryset.filter(vcs_provider=GITHUB_APP).exists():
queryset = queryset.exclude(vcs_provider=GITHUB)
else:
queryset = queryset.exclude(vcs_provider=GITHUB_APP)
return queryset
def api_v2(self, *args, **kwargs):
# API v2 is the same as API v3 for .org, but it's
# different for .com, this method is overridden there.
return self.api(*args, **kwargs)
| RelatedUserQuerySet |
python | automl__auto-sklearn | autosklearn/pipeline/components/regression/adaboost.py | {
"start": 452,
"end": 3133
} | class ____(AutoSklearnRegressionAlgorithm):
def __init__(self, n_estimators, learning_rate, loss, max_depth, random_state=None):
self.n_estimators = n_estimators
self.learning_rate = learning_rate
self.loss = loss
self.random_state = random_state
self.max_depth = max_depth
self.estimator = None
def fit(self, X, y):
import sklearn.ensemble
import sklearn.tree
self.n_estimators = int(self.n_estimators)
self.learning_rate = float(self.learning_rate)
self.max_depth = int(self.max_depth)
base_estimator = sklearn.tree.DecisionTreeRegressor(max_depth=self.max_depth)
self.estimator = sklearn.ensemble.AdaBoostRegressor(
base_estimator=base_estimator,
n_estimators=self.n_estimators,
learning_rate=self.learning_rate,
loss=self.loss,
random_state=self.random_state,
)
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
self.estimator.fit(X, y)
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "AB",
"name": "AdaBoost Regressor",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, SPARSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
# base_estimator = Constant(name="base_estimator", value="None")
n_estimators = UniformIntegerHyperparameter(
name="n_estimators", lower=50, upper=500, default_value=50, log=False
)
learning_rate = UniformFloatHyperparameter(
name="learning_rate", lower=0.01, upper=2, default_value=0.1, log=True
)
loss = CategoricalHyperparameter(
name="loss",
choices=["linear", "square", "exponential"],
default_value="linear",
)
max_depth = UniformIntegerHyperparameter(
name="max_depth", lower=1, upper=10, default_value=1, log=False
)
cs.add_hyperparameters([n_estimators, learning_rate, loss, max_depth])
return cs
| AdaboostRegressor |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1046404,
"end": 1047258
} | class ____(VegaLiteSchema):
"""
Resolve schema wrapper.
Defines how scales, axes, and legends from different specs should be combined. Resolve is a
mapping from ``scale``, ``axis``, and ``legend`` to a mapping from channels to resolutions.
Scales and guides can be resolved to be ``"independent"`` or ``"shared"``.
Parameters
----------
axis : dict, :class:`AxisResolveMap`
legend : dict, :class:`LegendResolveMap`
scale : dict, :class:`ScaleResolveMap`
"""
_schema = {"$ref": "#/definitions/Resolve"}
def __init__(
self,
axis: Optional[SchemaBase | Map] = Undefined,
legend: Optional[SchemaBase | Map] = Undefined,
scale: Optional[SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(axis=axis, legend=legend, scale=scale, **kwds)
| Resolve |
python | TheAlgorithms__Python | data_structures/stacks/stack.py | {
"start": 188,
"end": 4725
} | class ____[T]:
"""A stack is an abstract data type that serves as a collection of
elements with two principal operations: push() and pop(). push() adds an
element to the top of the stack, and pop() removes an element from the top
of a stack. The order in which elements come off of a stack are
Last In, First Out (LIFO).
https://en.wikipedia.org/wiki/Stack_(abstract_data_type)
"""
def __init__(self, limit: int = 10):
self.stack: list[T] = []
self.limit = limit
def __bool__(self) -> bool:
return bool(self.stack)
def __str__(self) -> str:
return str(self.stack)
def push(self, data: T) -> None:
"""
Push an element to the top of the stack.
>>> S = Stack(2) # stack size = 2
>>> S.push(10)
>>> S.push(20)
>>> print(S)
[10, 20]
>>> S = Stack(1) # stack size = 1
>>> S.push(10)
>>> S.push(20)
Traceback (most recent call last):
...
data_structures.stacks.stack.StackOverflowError
"""
if len(self.stack) >= self.limit:
raise StackOverflowError
self.stack.append(data)
def pop(self) -> T:
"""
Pop an element off of the top of the stack.
>>> S = Stack()
>>> S.push(-5)
>>> S.push(10)
>>> S.pop()
10
>>> Stack().pop()
Traceback (most recent call last):
...
data_structures.stacks.stack.StackUnderflowError
"""
if not self.stack:
raise StackUnderflowError
return self.stack.pop()
def peek(self) -> T:
"""
Peek at the top-most element of the stack.
>>> S = Stack()
>>> S.push(-5)
>>> S.push(10)
>>> S.peek()
10
>>> Stack().peek()
Traceback (most recent call last):
...
data_structures.stacks.stack.StackUnderflowError
"""
if not self.stack:
raise StackUnderflowError
return self.stack[-1]
def is_empty(self) -> bool:
"""
Check if a stack is empty.
>>> S = Stack()
>>> S.is_empty()
True
>>> S = Stack()
>>> S.push(10)
>>> S.is_empty()
False
"""
return not bool(self.stack)
def is_full(self) -> bool:
"""
>>> S = Stack()
>>> S.is_full()
False
>>> S = Stack(1)
>>> S.push(10)
>>> S.is_full()
True
"""
return self.size() == self.limit
def size(self) -> int:
"""
Return the size of the stack.
>>> S = Stack(3)
>>> S.size()
0
>>> S = Stack(3)
>>> S.push(10)
>>> S.size()
1
>>> S = Stack(3)
>>> S.push(10)
>>> S.push(20)
>>> S.size()
2
"""
return len(self.stack)
def __contains__(self, item: T) -> bool:
"""
Check if item is in stack
>>> S = Stack(3)
>>> S.push(10)
>>> 10 in S
True
>>> S = Stack(3)
>>> S.push(10)
>>> 20 in S
False
"""
return item in self.stack
def test_stack() -> None:
"""
>>> test_stack()
"""
stack: Stack[int] = Stack(10)
assert bool(stack) is False
assert stack.is_empty() is True
assert stack.is_full() is False
assert str(stack) == "[]"
try:
_ = stack.pop()
raise AssertionError # This should not happen
except StackUnderflowError:
assert True # This should happen
try:
_ = stack.peek()
raise AssertionError # This should not happen
except StackUnderflowError:
assert True # This should happen
for i in range(10):
assert stack.size() == i
stack.push(i)
assert bool(stack)
assert not stack.is_empty()
assert stack.is_full()
assert str(stack) == str(list(range(10)))
assert stack.pop() == 9
assert stack.peek() == 8
stack.push(100)
assert str(stack) == str([0, 1, 2, 3, 4, 5, 6, 7, 8, 100])
try:
stack.push(200)
raise AssertionError # This should not happen
except StackOverflowError:
assert True # This should happen
assert not stack.is_empty()
assert stack.size() == 10
assert 5 in stack
assert 55 not in stack
if __name__ == "__main__":
test_stack()
import doctest
doctest.testmod()
| Stack |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_stateful.py | {
"start": 31964,
"end": 33429
} | class ____(RuleBasedStateMachine):
@initialize()
def init_a(self):
self.a = 0
@rule()
def inc(self):
self.a += 1
@invariant()
def check_a_positive(self):
# This will fail if run before the init_a method, but without
# @invariant(check_during_init=True) it will only run afterwards.
assert self.a >= 0
def test_invariants_are_checked_after_init_steps():
run_state_machine_as_test(TrickyInitMachine)
def test_invariants_can_be_checked_during_init_steps():
class UndefinedMachine(TrickyInitMachine):
@invariant(check_during_init=True)
def check_a_defined(self):
# This will fail because `a` is undefined before the init rule.
self.a
with pytest.raises(AttributeError):
run_state_machine_as_test(UndefinedMachine)
def test_check_during_init_must_be_boolean():
invariant(check_during_init=False)
invariant(check_during_init=True)
with pytest.raises(InvalidArgument):
invariant(check_during_init="not a bool")
def test_deprecated_target_consumes_bundle():
# It would be nicer to raise this error at runtime, but the internals make
# this sadly impractical. Most InvalidDefinition errors happen at, well,
# definition-time already anyway, so it's not *worse* than the status quo.
with validate_deprecation():
rule(target=consumes(Bundle("b")))
@Settings(stateful_step_count=5)
| TrickyInitMachine |
python | realpython__materials | arcade-platformer/arcade_platformer/04_define_player.py | {
"start": 575,
"end": 6411
} | class ____(arcade.Window):
def __init__(self) -> None:
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# These lists will hold different sets of sprites
self.coins = None
self.background = None
self.walls = None
self.ladders = None
self.goals = None
self.enemies = None
# One sprite for the player, no more is needed
self.player = None
# We need a physics engine as well
self.physics_engine = None
# Someplace to keep score
self.score = 0
# Which level are we on?
self.level = 1
# Load up our sounds here
self.coin_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "coin.wav")
)
self.jump_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "jump.wav")
)
self.victory_sound = arcade.load_sound(
str(ASSETS_PATH / "sounds" / "victory.wav")
)
def setup(self) -> None:
"""Sets up the game for the current level"""
# Get the current map based on the level
map_name = f"platform_level_{self.level:02}.tmx"
map_path = ASSETS_PATH / map_name
# What are the names of the layers?
wall_layer = "ground"
coin_layer = "coins"
goal_layer = "goal"
background_layer = "background"
ladders_layer = "ladders"
# Load the current map
game_map = arcade.tilemap.read_tmx(str(map_path))
# Load the layers
self.background = arcade.tilemap.process_layer(
game_map, layer_name=background_layer, scaling=MAP_SCALING
)
self.goals = arcade.tilemap.process_layer(
game_map, layer_name=goal_layer, scaling=MAP_SCALING
)
self.walls = arcade.tilemap.process_layer(
game_map, layer_name=wall_layer, scaling=MAP_SCALING
)
self.ladders = arcade.tilemap.process_layer(
game_map, layer_name=ladders_layer, scaling=MAP_SCALING
)
self.coins = arcade.tilemap.process_layer(
game_map, layer_name=coin_layer, scaling=MAP_SCALING
)
# Set the background color
background_color = arcade.color.FRESH_AIR
if game_map.background_color:
background_color = game_map.background_color
arcade.set_background_color(background_color)
# Create the player sprite, if they're not already setup
if not self.player:
self.player = self.create_player_sprite()
# Move the player sprite back to the beginning
self.player.center_x = PLAYER_START_X
self.player.center_y = PLAYER_START_Y
self.player.change_x = 0
self.player.change_y = 0
# Load the physics engine for this map
self.physics_engine = arcade.PhysicsEnginePlatformer(
player_sprite=self.player,
platforms=self.walls,
gravity_constant=GRAVITY,
ladders=self.ladders,
)
def create_player_sprite(self) -> arcade.AnimatedWalkingSprite:
"""Creates the animated player sprite
Returns:
The properly setup player sprite
"""
# Where are the player images stored?
texture_path = ASSETS_PATH / "images" / "player"
# Setup the appropriate textures
walking_paths = [
texture_path / f"alienGreen_walk{x}.png" for x in (1, 2)
]
climbing_paths = [
texture_path / f"alienGreen_climb{x}.png" for x in (1, 2)
]
standing_path = texture_path / "alienGreen_stand.png"
# Load them all now
walking_right_textures = [
arcade.load_texture(texture) for texture in walking_paths
]
walking_left_textures = [
arcade.load_texture(texture, mirrored=True)
for texture in walking_paths
]
walking_up_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
walking_down_textures = [
arcade.load_texture(texture) for texture in climbing_paths
]
standing_right_textures = [arcade.load_texture(standing_path)]
standing_left_textures = [
arcade.load_texture(standing_path, mirrored=True)
]
# Create the sprite
player = arcade.AnimatedWalkingSprite()
# Add the proper textures
player.stand_left_textures = standing_left_textures
player.stand_right_textures = standing_right_textures
player.walk_left_textures = walking_left_textures
player.walk_right_textures = walking_right_textures
player.walk_up_textures = walking_up_textures
player.walk_down_textures = walking_down_textures
# Set the player defaults
player.center_x = PLAYER_START_X
player.center_y = PLAYER_START_Y
player.state = arcade.FACE_RIGHT
# Set the initial texture
player.texture = player.stand_right_textures[0]
return player
def on_key_press(self, key: int, modifiers: int):
"""Arguments:
key {int} -- Which key was pressed
modifiers {int} -- Which modifiers were down at the time
"""
def on_key_release(self, key: int, modifiers: int):
"""Arguments:
key {int} -- Which key was released
modifiers {int} -- Which modifiers were down at the time
"""
def on_update(self, delta_time: float):
"""Updates the position of all game objects
Arguments:
delta_time {float} -- How much time since the last call
"""
pass
def on_draw(self):
pass
if __name__ == "__main__":
window = Platformer()
window.setup()
arcade.run()
| Platformer |
python | allegroai__clearml | clearml/model.py | {
"start": 76727,
"end": 107380
} | class ____(BaseModel):
"""
Create an output model for a Task (experiment) to store the training results.
The OutputModel object is always connected to a Task object, because it is instantiated with a Task object
as an argument. It is, therefore, automatically registered as the Task's (experiment's) output model.
The OutputModel object is read-write.
A common use case is to reuse the OutputModel object, and override the weights after storing a model snapshot.
Another use case is to create multiple OutputModel objects for a Task (experiment), and after a new high score
is found, store a model snapshot.
If the model configuration and / or the model's label enumeration
are ``None``, then the output model is initialized with the values from the Task object's input model.
.. note::
When executing a Task (experiment) remotely in a worker, you can modify the model configuration and / or model's
label enumeration using the **ClearML Web-App**.
"""
_default_output_uri = None
_offline_folder = "models"
@property
def published(self) -> bool:
"""
Get the published state of this model.
:return:
"""
if not self.id:
return False
return self._get_base_model().locked
@property
def config_text(self) -> str:
"""
Get the configuration as a string. For example, prototxt, an ini file, or Python code to evaluate.
:return: The configuration.
"""
# noinspection PyProtectedMember
return _Model._unwrap_design(self._get_model_data().design)
@config_text.setter
def config_text(self, value: str) -> None:
"""
Set the configuration. Store a blob of text for custom usage.
"""
self.update_design(config_text=value)
@property
def config_dict(self) -> dict:
"""
Get the configuration as a dictionary parsed from the ``config_text`` text. This usually represents the model
configuration. For example, from prototxt to ini file or python code to evaluate.
:return: The configuration.
"""
return self._text_to_config_dict(self.config_text)
@config_dict.setter
def config_dict(self, value: dict) -> None:
"""
Set the configuration. Saved in the model object.
:param dict value: The configuration parameters.
"""
self.update_design(config_dict=value)
@property
def labels(self) -> Dict[str, int]:
"""
Get the label enumeration as a dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
"background": 0,
"person": 1
}
:return: The label enumeration.
"""
return self._get_model_data().labels
@labels.setter
def labels(self, value: Mapping[str, int]) -> None:
"""
Set the label enumeration.
:param dict value: The label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
"background": 0,
"person": 1
}
"""
self.update_labels(labels=value)
@property
def upload_storage_uri(self) -> str:
"""
The URI of the storage destination for uploaded model weight files.
:return: The URI string
"""
return self._get_base_model().upload_storage_uri
@property
def id(self) -> str:
from clearml import Task as OfflineTask
if OfflineTask.is_offline():
if not self._base_model_id:
self._base_model_id = "offline-{}".format(str(uuid4()).replace("-", ""))
return self._base_model_id
return super(OutputModel, self).id
def __init__(
self,
task: Optional["Task"] = None,
config_text: Optional[str] = None,
config_dict: Optional[dict] = None,
label_enumeration: Optional[Mapping[str, int]] = None,
name: Optional[str] = None,
tags: Optional[List[str]] = None,
comment: Optional[str] = None,
framework: Optional[Union[str, Framework]] = None,
base_model_id: Optional[str] = None,
) -> None:
"""
Create a new model and immediately connect it to a task.
We do not allow for Model creation without a task, so we always keep track on how we created the models
In remote execution, Model parameters can be overridden by the Task
(such as model configuration & label enumerator)
:param task: The Task object with which the OutputModel object is associated.
:type task: Task
:param config_text: The configuration as a string. This is usually the content of a configuration
dictionary file. Specify ``config_text`` or ``config_dict``, but not both.
:type config_text: unconstrained text string
:param dict config_dict: The configuration as a dictionary.
Specify ``config_dict`` or ``config_text``, but not both.
:param dict label_enumeration: The label enumeration dictionary of string (label) to integer (value) pairs.
(Optional)
For example:
.. code-block:: javascript
{
"background": 0,
"person": 1
}
:param str name: The name for the newly created model. (Optional)
:param list(str) tags: A list of strings which are tags for the model. (Optional)
:param str comment: A comment / description for the model. (Optional)
:param framework: The framework of the model or a Framework object. (Optional)
:type framework: str or Framework object
:param base_model_id: optional, model ID to be reused
"""
if not task:
from .task import Task
task = Task.current_task()
if not task:
raise ValueError("task object was not provided, and no current task was found")
super(OutputModel, self).__init__(task=task)
config_text = self._resolve_config(config_text=config_text, config_dict=config_dict)
self._model_local_filename = None
self._last_uploaded_url = None
self._base_model = None
self._base_model_id = None
self._task_connect_name = None
self._name = name
self._label_enumeration = label_enumeration
# noinspection PyProtectedMember
self._floating_data = create_dummy_model(
design=_Model._wrap_design(config_text),
labels=label_enumeration or task.get_labels_enumeration(),
name=name or self._task.name,
tags=tags,
comment="{} by task id: {}".format("Created" if not base_model_id else "Overwritten", task.id)
+ ("\n" + comment if comment else ""),
framework=framework,
upload_storage_uri=task.output_uri,
)
# If we have no real model ID, we are done
if not base_model_id:
return
# noinspection PyBroadException
try:
# noinspection PyProtectedMember
_base_model = self._task._get_output_model(model_id=base_model_id)
_base_model.update(
labels=self._floating_data.labels,
design=self._floating_data.design,
task_id=self._task.id,
project_id=self._task.project,
name=self._floating_data.name or self._task.name,
comment=(
"{}\n{}".format(_base_model.comment, self._floating_data.comment)
if (
_base_model.comment
and self._floating_data.comment
and self._floating_data.comment not in _base_model.comment
)
else (_base_model.comment or self._floating_data.comment)
),
tags=self._floating_data.tags,
framework=self._floating_data.framework,
upload_storage_uri=self._floating_data.upload_storage_uri,
)
self._base_model = _base_model
self._floating_data = None
name = self._task_connect_name or Path(_base_model.uri).stem
except Exception:
pass
self.connect(task, name=name)
def connect(self, task: "Task", name: Optional[str] = None, **kwargs: Any) -> None:
"""
Connect the current model to a Task object, if the model is a preexisting model. Preexisting models include:
- Imported models.
- Models whose metadata the **ClearML Server** (backend) is already storing.
- Models from another source, such as frameworks like TensorFlow.
:param object task: A Task object.
:param str name: The model name as it would appear on the Task object.
The model object itself can have a different name,
this is designed to support multiple models used/created by a single Task.
Use examples would be GANs or model ensemble
"""
if self._task != task:
raise ValueError("Can only connect preexisting model to task, but this is a fresh model")
if name:
self._task_connect_name = name
# we should set the task input model to point to us
model = self._get_base_model()
# only copy the model design if the task has no design to begin with
# noinspection PyProtectedMember
if not self._task._get_model_config_text():
# noinspection PyProtectedMember
task._set_model_config(
config_text=model.model_design if hasattr(model, "model_design") else model.design.get("design", "")
)
if not self._task.get_labels_enumeration():
task.set_model_label_enumeration(model.data.labels if hasattr(model, "data") else model.labels)
if self._base_model:
self._base_model.update_for_task(
task_id=self._task.id,
model_id=self.id,
type_="output",
name=self._task_connect_name,
)
def set_upload_destination(self, uri: str) -> None:
"""
Set the URI of the storage destination for uploaded model weight files.
Supported storage destinations include S3, Google Cloud Storage, and file locations.
Using this method, file uploads are separate and then a link to each is stored in the model object.
.. note::
For storage requiring credentials, the credentials are stored in the ClearML configuration file,
``~/clearml.conf``.
:param str uri: The URI of the upload storage destination.
For example:
- ``s3://bucket/directory/``
- ``file:///tmp/debug/``
:return bool: The status of whether the storage destination schema is supported.
- ``True`` - The storage destination scheme is supported.
- ``False`` - The storage destination scheme is not supported.
"""
if not uri:
return
# Test if we can update the model.
self._validate_update()
# Create the storage helper
storage = StorageHelper.get(uri)
# Verify that we can upload to this destination
try:
uri = storage.verify_upload(folder_uri=uri)
except Exception:
raise ValueError("Could not set destination uri to: %s [Check write permissions]" % uri)
# store default uri
self._get_base_model().upload_storage_uri = uri
def update_weights(
self,
weights_filename: Optional[str] = None,
upload_uri: Optional[str] = None,
target_filename: Optional[str] = None,
auto_delete_file: bool = True,
register_uri: Optional[str] = None,
iteration: Optional[int] = None,
update_comment: bool = True,
is_package: bool = False,
async_enable: bool = True,
) -> str:
"""
Update the model weights from a locally stored model filename.
.. note::
Uploading the model is a background process. A call to this method returns immediately.
:param str weights_filename: The name of the locally stored weights file to upload.
Specify ``weights_filename`` or ``register_uri``, but not both.
:param str upload_uri: The URI of the storage destination for model weights upload. The default value
is the previously used URI. (Optional)
:param str target_filename: The newly created filename in the storage destination location. The default value
is the ``weights_filename`` value. (Optional)
:param bool auto_delete_file: Delete the temporary file after uploading (Optional)
- ``True`` - Delete (Default)
- ``False`` - Do not delete
:param str register_uri: The URI of an already uploaded weights file. The URI must be valid. Specify
``register_uri`` or ``weights_filename``, but not both.
:param int iteration: The iteration number.
:param bool update_comment: Update the model comment with the local weights file name (to maintain provenance) (Optional)
- ``True`` - Update model comment (Default)
- ``False`` - Do not update
:param bool is_package: Mark the weights file as compressed package, usually a zip file.
:param bool async_enable: Whether to upload model in background or to block.
Will raise an error in the main thread if the weights failed to be uploaded or not.
:return: The uploaded URI.
"""
def delete_previous_weights_file(filename: str = weights_filename) -> None:
try:
if filename:
os.remove(filename)
except OSError:
self._log.debug("Failed removing temporary file %s" % filename)
# test if we can update the model
if self.id and self.published:
raise ValueError("Model is published and cannot be changed")
if (not weights_filename and not register_uri) or (weights_filename and register_uri):
raise ValueError(
"Model update must have either local weights file to upload, "
"or pre-uploaded register_uri, never both"
)
# only upload if we are connected to a task
if not self._task:
raise Exception("Missing a task for this model")
if self._task.is_offline() and (weights_filename is None or not Path(weights_filename).is_dir()):
return self._update_weights_offline(
weights_filename=weights_filename,
upload_uri=upload_uri,
target_filename=target_filename,
register_uri=register_uri,
iteration=iteration,
update_comment=update_comment,
is_package=is_package,
)
if weights_filename is not None:
# Check if weights_filename is a folder, is package upload
if Path(weights_filename).is_dir():
return self.update_weights_package(
weights_path=weights_filename,
upload_uri=upload_uri,
target_filename=target_filename or Path(weights_filename).name,
auto_delete_file=auto_delete_file,
iteration=iteration,
async_enable=async_enable,
)
# make sure we delete the previous file, if it exists
if self._model_local_filename != weights_filename:
delete_previous_weights_file(self._model_local_filename)
# store temp filename for deletion next time, if needed
if auto_delete_file:
self._model_local_filename = weights_filename
# make sure the created model is updated:
out_model_file_name = target_filename or weights_filename or register_uri
# prefer self._task_connect_name if exists
if self._task_connect_name:
name = self._task_connect_name
elif out_model_file_name:
name = Path(out_model_file_name).stem
else:
name = "Output Model"
if not self._base_model:
model = self._get_force_base_model(task_model_entry=name)
else:
self._update_base_model(task_model_entry=name)
model = self._base_model
if not model:
raise ValueError("Failed creating internal output model")
# select the correct file extension based on the framework,
# or update the framework based on the file extension
# noinspection PyProtectedMember
framework, file_ext = Framework._get_file_ext(
framework=self._get_model_data().framework,
filename=target_filename or weights_filename or register_uri,
)
if weights_filename:
target_filename = target_filename or Path(weights_filename).name
if not target_filename.lower().endswith(file_ext):
target_filename += file_ext
# set target uri for upload (if specified)
if upload_uri:
self.set_upload_destination(upload_uri)
# let us know the iteration number, we put it in the comment section for now.
if update_comment:
comment = self.comment or ""
iteration_msg = "snapshot {} stored".format(weights_filename or register_uri)
if not comment.startswith("\n"):
comment = "\n" + comment
comment = iteration_msg + comment
else:
comment = None
# if we have no output destination, just register the local model file
if weights_filename and not self.upload_storage_uri and not self._task.storage_uri:
register_uri = weights_filename
weights_filename = None
auto_delete_file = False
self._log.info("No output storage destination defined, registering local model %s" % register_uri)
# start the upload
if weights_filename:
if not model.upload_storage_uri:
self.set_upload_destination(self.upload_storage_uri or self._task.storage_uri)
output_uri = model.update_and_upload(
model_file=weights_filename,
task_id=self._task.id,
async_enable=async_enable,
target_filename=target_filename,
framework=self.framework or framework,
comment=comment,
cb=delete_previous_weights_file if auto_delete_file else None,
iteration=iteration or self._task.get_last_iteration(),
)
elif register_uri:
register_uri = StorageHelper.conform_url(register_uri)
output_uri = model.update(
uri=register_uri,
task_id=self._task.id,
framework=framework,
comment=comment,
)
else:
output_uri = None
self._last_uploaded_url = output_uri
if is_package:
self._set_package_tag()
return output_uri
def update_weights_package(
self,
weights_filenames: Optional[Sequence[str]] = None,
weights_path: Optional[str] = None,
upload_uri: Optional[str] = None,
target_filename: Optional[str] = None,
auto_delete_file: bool = True,
iteration: Optional[int] = None,
async_enable: bool = True,
) -> str:
"""
Update the model weights from locally stored model files, or from directory containing multiple files.
.. note::
Uploading the model weights is a background process. A call to this method returns immediately.
:param weights_filenames: The file names of the locally stored model files. Specify ``weights_filenames``,
or ``weights_path``, but not both.
:type weights_filenames: list(str)
:param weights_path: The directory path to a package. All the files in the directory will be uploaded.
Specify ``weights_path`` or ``weights_filenames``, but not both.
:type weights_path: str
:param str upload_uri: The URI of the storage destination for the model weights upload. The default
is the previously used URI. (Optional)
:param str target_filename: The newly created filename in the storage destination URI location. The default
is the value specified in the ``weights_filename`` parameter. (Optional)
:param bool auto_delete_file: Delete temporary file after uploading (Optional)
- ``True`` - Delete (Default)
- ``False`` - Do not delete
:param int iteration: The iteration number.
:param bool async_enable: Whether to upload model in background or to block.
Will raise an error in the main thread if the weights failed to be uploaded or not.
:return: The uploaded URI for the weights package.
"""
# create list of files
if (not weights_filenames and not weights_path) or (weights_filenames and weights_path):
raise ValueError("Model update weights package should get either directory path to pack or a list of files")
if not weights_filenames:
weights_filenames = list(map(six.text_type, Path(weights_path).rglob("*")))
elif weights_filenames and len(weights_filenames) > 1:
weights_path = get_common_path(weights_filenames)
# create packed model from all the files
fd, zip_file = mkstemp(prefix="model_package.", suffix=".zip")
try:
with zipfile.ZipFile(zip_file, "w", allowZip64=True, compression=zipfile.ZIP_STORED) as zf:
for filename in weights_filenames:
relative_file_name = (
Path(filename).name
if not weights_path
else Path(filename).absolute().relative_to(Path(weights_path).absolute()).as_posix()
)
zf.write(filename, arcname=relative_file_name)
finally:
os.close(fd)
# now we can delete the files (or path if provided)
if auto_delete_file:
def safe_remove(path: str, is_dir: bool = False) -> None:
try:
(os.rmdir if is_dir else os.remove)(path)
except OSError:
self._log.info("Failed removing temporary {}".format(path))
for filename in weights_filenames:
safe_remove(filename)
if weights_path:
safe_remove(weights_path, is_dir=True)
if target_filename and not target_filename.lower().endswith(".zip"):
target_filename += ".zip"
# and now we should upload the file, always delete the temporary zip file
comment = self.comment or ""
iteration_msg = "snapshot {} stored".format(str(weights_filenames))
if not comment.startswith("\n"):
comment = "\n" + comment
comment = iteration_msg + comment
self.comment = comment
uploaded_uri = self.update_weights(
weights_filename=zip_file,
auto_delete_file=True,
upload_uri=upload_uri,
target_filename=target_filename or "model_package.zip",
iteration=iteration,
update_comment=False,
async_enable=async_enable,
)
# set the model tag (by now we should have a model object) so we know we have packaged file
self._set_package_tag()
return uploaded_uri
def update_design(
self,
config_text: Optional[str] = None,
config_dict: Optional[dict] = None,
) -> bool:
"""
Update the model configuration. Store a blob of text for custom usage.
.. note::
This method's behavior is lazy. The design update is only forced when the weights
are updated.
:param config_text: The configuration as a string. This is usually the content of a configuration
dictionary file. Specify ``config_text`` or ``config_dict``, but not both.
:type config_text: unconstrained text string
:param dict config_dict: The configuration as a dictionary. Specify ``config_text`` or ``config_dict``,
but not both.
:return: True, update successful. False, update not successful.
"""
if not self._validate_update():
return False
config_text = self._resolve_config(config_text=config_text, config_dict=config_dict)
if self._task and not self._task.get_model_config_text():
self._task.set_model_config(config_text=config_text)
if self.id:
# update the model object (this will happen if we resumed a training task)
result = self._get_force_base_model().edit(design=config_text)
else:
# noinspection PyProtectedMember
self._floating_data.design = _Model._wrap_design(config_text)
result = Waitable()
# you can wait on this object
return result
def update_labels(self, labels: Mapping[str, int]) -> Any:
"""
Update the label enumeration.
:param dict labels: The label enumeration dictionary of string (label) to integer (value) pairs.
For example:
.. code-block:: javascript
{
"background": 0,
"person": 1
}
:return:
"""
validate_dict(
labels,
key_types=six.string_types,
value_types=six.integer_types,
desc="label enumeration",
)
if not self._validate_update():
return
if self._task:
self._task.set_model_label_enumeration(labels)
if self.id:
# update the model object (this will happen if we resumed a training task)
result = self._get_force_base_model().edit(labels=labels)
else:
self._floating_data.labels = labels
result = Waitable()
# you can wait on this object
return result
@classmethod
def wait_for_uploads(
cls,
timeout: Optional[float] = None,
max_num_uploads: Optional[int] = None,
) -> None:
"""
Wait for any pending or in-progress model uploads to complete. If no uploads are pending or in-progress,
then the ``wait_for_uploads`` returns immediately.
:param float timeout: The timeout interval to wait for uploads (seconds). (Optional).
:param int max_num_uploads: The maximum number of uploads to wait for. (Optional).
"""
_Model.wait_for_results(timeout=timeout, max_num_uploads=max_num_uploads)
@classmethod
def set_default_upload_uri(cls, output_uri: Optional[str]) -> None:
"""
Set the default upload uri for all OutputModels
:param output_uri: URL for uploading models. examples:
https://demofiles.demo.clear.ml, s3://bucket/, gs://bucket/, azure://bucket/, file:///mnt/shared/nfs
"""
cls._default_output_uri = str(output_uri) if output_uri else None
def _update_weights_offline(
self,
weights_filename: Optional[str] = None,
upload_uri: Optional[str] = None,
target_filename: Optional[str] = None,
register_uri: Optional[str] = None,
iteration: Optional[int] = None,
update_comment: bool = True,
is_package: bool = False,
) -> str:
if (not weights_filename and not register_uri) or (weights_filename and register_uri):
raise ValueError(
"Model update must have either local weights file to upload, "
"or pre-uploaded register_uri, never both"
)
if not self._task:
raise Exception("Missing a task for this model")
weights_filename_offline = None
if weights_filename:
weights_filename_offline = (
self._task.get_offline_mode_folder() / self._offline_folder / Path(weights_filename).name
).as_posix()
os.makedirs(os.path.dirname(weights_filename_offline), exist_ok=True)
shutil.copyfile(weights_filename, weights_filename_offline)
# noinspection PyProtectedMember
self._task._offline_output_models.append(
dict(
init=dict(
config_text=self.config_text,
config_dict=self.config_dict,
label_enumeration=self._label_enumeration,
name=self.name,
tags=self.tags,
comment=self.comment,
framework=self.framework,
),
weights=dict(
weights_filename=weights_filename_offline,
upload_uri=upload_uri,
target_filename=target_filename,
register_uri=register_uri,
iteration=iteration,
update_comment=update_comment,
is_package=is_package,
),
output_uri=self._get_base_model().upload_storage_uri or self._default_output_uri,
id=self.id,
)
)
return weights_filename_offline or register_uri
def _get_base_model(self) -> Union[_Model, None]:
if self._floating_data:
return self._floating_data
return self._get_force_base_model()
def _get_model_data(self) -> Any:
if self._base_model:
return self._base_model.data
return self._floating_data
def _validate_update(self) -> bool:
# test if we can update the model
if self.id and self.published:
raise ValueError("Model is published and cannot be changed")
return True
def _get_last_uploaded_filename(self) -> Optional[str]:
if not self._last_uploaded_url and not self.url:
return None
return Path(self._last_uploaded_url or self.url).name
| OutputModel |
python | mlflow__mlflow | mlflow/store/tracking/dbmodels/models.py | {
"start": 34746,
"end": 38189
} | class ____(Base):
__tablename__ = "logged_models"
model_id = Column(String(36), nullable=False)
"""
Model ID: `String` (limit 36 characters). *Primary Key* for ``logged_models`` table.
"""
experiment_id = Column(Integer, nullable=False)
"""
Experiment ID to which this model belongs: *Foreign Key* into ``experiments`` table.
"""
name = Column(String(500), nullable=False)
"""
Model name: `String` (limit 500 characters).
"""
artifact_location = Column(String(1000), nullable=False)
"""
Artifact location: `String` (limit 1000 characters).
"""
creation_timestamp_ms = Column(BigInteger, nullable=False)
"""
Creation timestamp: `BigInteger`.
"""
last_updated_timestamp_ms = Column(BigInteger, nullable=False)
"""
Last updated timestamp: `BigInteger`.
"""
status = Column(Integer, nullable=False)
"""
Status: `Integer`.
"""
lifecycle_stage = Column(String(32), default=LifecycleStage.ACTIVE)
"""
Lifecycle Stage of model: `String` (limit 32 characters).
"""
model_type = Column(String(500), nullable=True)
"""
Model type: `String` (limit 500 characters).
"""
source_run_id = Column(String(32), nullable=True)
"""
Source run ID: `String` (limit 32 characters).
"""
status_message = Column(String(1000), nullable=True)
"""
Status message: `String` (limit 1000 characters).
"""
tags = relationship("SqlLoggedModelTag", backref="logged_model", cascade="all")
params = relationship("SqlLoggedModelParam", backref="logged_model", cascade="all")
metrics = relationship("SqlLoggedModelMetric", backref="logged_model", cascade="all")
__table_args__ = (
PrimaryKeyConstraint("model_id", name="logged_models_pk"),
CheckConstraint(
lifecycle_stage.in_(LifecycleStage.view_type_to_stages(ViewType.ALL)),
name="logged_models_lifecycle_stage_check",
),
ForeignKeyConstraint(
["experiment_id"],
["experiments.experiment_id"],
ondelete="CASCADE",
name="fk_logged_models_experiment_id",
),
)
def to_mlflow_entity(self) -> LoggedModel:
return LoggedModel(
model_id=self.model_id,
experiment_id=str(self.experiment_id),
name=self.name,
artifact_location=self.artifact_location,
creation_timestamp=self.creation_timestamp_ms,
last_updated_timestamp=self.last_updated_timestamp_ms,
status=LoggedModelStatus.from_int(self.status),
model_type=self.model_type,
source_run_id=self.source_run_id,
status_message=self.status_message,
tags={t.tag_key: t.tag_value for t in self.tags} if self.tags else None,
params={p.param_key: p.param_value for p in self.params} if self.params else None,
metrics=[m.to_mlflow_entity() for m in self.metrics] if self.metrics else None,
)
ALIASES = {
"creation_time": "creation_timestamp_ms",
"creation_timestamp": "creation_timestamp_ms",
"last_updated_timestamp": "last_updated_timestamp_ms",
}
@staticmethod
def is_numeric(s: str) -> bool:
return SqlLoggedModel.ALIASES.get(s, s) in {
"creation_timestamp_ms",
"last_updated_timestamp_ms",
}
| SqlLoggedModel |
python | sympy__sympy | sympy/functions/special/hyper.py | {
"start": 31740,
"end": 32285
} | class ____(HyperRep):
""" Represent hyper([1/2, 1/2], [3/2], z) == asin(sqrt(z))/sqrt(z). """
@classmethod
def _expr_small(cls, z):
return asin(sqrt(z))/sqrt(z)
@classmethod
def _expr_small_minus(cls, z):
return asinh(sqrt(z))/sqrt(z)
@classmethod
def _expr_big(cls, z, n):
return S.NegativeOne**n*((S.Half - n)*pi/sqrt(z) + I*acosh(sqrt(z))/sqrt(z))
@classmethod
def _expr_big_minus(cls, z, n):
return S.NegativeOne**n*(asinh(sqrt(z))/sqrt(z) + n*pi*I/sqrt(z))
| HyperRep_asin1 |
python | readthedocs__readthedocs.org | readthedocs/config/tests/test_validation.py | {
"start": 2219,
"end": 3067
} | class ____:
def test_it_accepts_relative_path(self, tmpdir):
tmpdir.mkdir("a directory")
validate_path("a directory", str(tmpdir))
def test_it_accepts_files(self, tmpdir):
tmpdir.join("file").write("content")
validate_path("file", str(tmpdir))
def test_it_accepts_absolute_path(self, tmpdir):
path = str(tmpdir.mkdir("a directory"))
validate_path(path, "does not matter")
def test_it_returns_relative_path(self, tmpdir):
tmpdir.mkdir("a directory")
path = validate_path("a directory", str(tmpdir))
assert path == "a directory"
def test_it_only_accepts_strings(self):
with raises(ConfigValidationError) as excinfo:
validate_path(None, "")
assert excinfo.value.message_id == ConfigValidationError.INVALID_STRING
| TestValidatePath |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol1.py | {
"start": 1684,
"end": 1735
} | class ____(Protocol[T_co], Generic[T_co]): ...
| Proto2 |
python | tiangolo__fastapi | tests/test_dependency_yield_scope.py | {
"start": 296,
"end": 781
} | class ____:
def __init__(self) -> None:
self.open = True
def dep_session() -> Any:
s = Session()
yield s
s.open = False
def raise_after_yield() -> Any:
yield
raise HTTPException(status_code=503, detail="Exception after yield")
SessionFuncDep = Annotated[Session, Depends(dep_session, scope="function")]
SessionRequestDep = Annotated[Session, Depends(dep_session, scope="request")]
SessionDefaultDep = Annotated[Session, Depends(dep_session)]
| Session |
python | donnemartin__interactive-coding-challenges | recursion_dynamic/fibonacci/test_fibonacci.py | {
"start": 18,
"end": 513
} | class ____(unittest.TestCase):
def test_fib(self, func):
result = []
expected = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34]
for i in range(len(expected)):
result.append(func(i))
self.assertEqual(result, expected)
print('Success: test_fib')
def main():
test = TestFib()
math = Math()
test.test_fib(math.fib_recursive)
test.test_fib(math.fib_dynamic)
test.test_fib(math.fib_iterative)
if __name__ == '__main__':
main()
| TestFib |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI059.py | {
"start": 1216,
"end": 1349
} | class ____(Sized, Generic[T]): # Generic already in last place
def __init__(self) -> None:
self._items: List[T] = []
| MyList |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 10557,
"end": 11436
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_delete_inspect_template(self, mock_hook):
mock_hook.return_value.delete_inspect_template.return_value = mock.MagicMock()
operator = CloudDLPDeleteInspectTemplateOperator(
template_id=TEMPLATE_ID, organization_id=ORGANIZATION_ID, task_id="id"
)
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.delete_inspect_template.assert_called_once_with(
template_id=TEMPLATE_ID,
organization_id=ORGANIZATION_ID,
project_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPDeleteInspectTemplateOperator |
python | scikit-learn__scikit-learn | sklearn/utils/_testing.py | {
"start": 40953,
"end": 42609
} | class ____:
"""Minimal classifier implementation without inheriting from BaseEstimator.
This estimator should be tested with:
* `check_estimator` in `test_estimator_checks.py`;
* within a `Pipeline` in `test_pipeline.py`;
* within a `SearchCV` in `test_search.py`.
"""
def __init__(self, param=None):
self.param = param
def get_params(self, deep=True):
return {"param": self.param}
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
return self
def fit(self, X, y):
X, y = check_X_y(X, y)
check_classification_targets(y)
self.classes_, counts = np.unique(y, return_counts=True)
self._most_frequent_class_idx = counts.argmax()
return self
def predict_proba(self, X):
check_is_fitted(self)
X = check_array(X)
proba_shape = (X.shape[0], self.classes_.size)
y_proba = np.zeros(shape=proba_shape, dtype=np.float64)
y_proba[:, self._most_frequent_class_idx] = 1.0
return y_proba
def predict(self, X):
y_proba = self.predict_proba(X)
y_pred = y_proba.argmax(axis=1)
return self.classes_[y_pred]
def score(self, X, y):
from sklearn.metrics import accuracy_score
return accuracy_score(y, self.predict(X))
def __sklearn_tags__(self):
return Tags(
estimator_type="classifier",
classifier_tags=ClassifierTags(),
regressor_tags=None,
transformer_tags=None,
target_tags=TargetTags(required=True),
)
| MinimalClassifier |
python | ray-project__ray | python/ray/_private/worker.py | {
"start": 48214,
"end": 130585
} | class ____(BaseContext, Mapping):
"""
Context manager for attached drivers.
"""
dashboard_url: Optional[str]
python_version: str
ray_version: str
ray_commit: str
def __init__(self, address_info: Dict[str, Optional[str]]):
super().__init__()
self.dashboard_url = get_dashboard_url()
self.python_version = "{}.{}.{}".format(*sys.version_info[:3])
self.ray_version = ray.__version__
self.ray_commit = ray.__commit__
self.address_info = address_info
def __getitem__(self, key):
if log_once("ray_context_getitem"):
warnings.warn(
f'Accessing values through ctx["{key}"] is deprecated. '
f'Use ctx.address_info["{key}"] instead.',
DeprecationWarning,
stacklevel=2,
)
return self.address_info[key]
def __len__(self):
if log_once("ray_context_len"):
warnings.warn("len(ctx) is deprecated. Use len(ctx.address_info) instead.")
return len(self.address_info)
def __iter__(self):
if log_once("ray_context_len"):
warnings.warn(
"iter(ctx) is deprecated. Use iter(ctx.address_info) instead."
)
return iter(self.address_info)
def __enter__(self) -> "RayContext":
return self
def __exit__(self, *exc):
ray.shutdown()
def disconnect(self):
# Include disconnect() to stay consistent with ClientContext
ray.shutdown()
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray._private.node.Node: The global node object that is created by ray.init()."""
def _maybe_modify_runtime_env(
runtime_env: Optional[Dict[str, Any]], _skip_env_hook: bool
) -> Dict[str, Any]:
"""
If you set RAY_ENABLE_UV_RUN_RUNTIME_ENV, which is the default, and run the driver with `uv run`,
this function sets up a runtime environment that replicates the driver's environment to the
workers. Otherwise, if a runtime environment hook is present it will modify the runtime environment.
"""
if ray_constants.RAY_ENABLE_UV_RUN_RUNTIME_ENV:
from ray._private.runtime_env.uv_runtime_env_hook import (
_get_uv_run_cmdline,
hook,
)
cmdline = _get_uv_run_cmdline()
if cmdline:
# This means the current driver is running in `uv run`, in which case we want
# to propagate the uv environment to the workers.
return hook(runtime_env)
if ray_constants.RAY_RUNTIME_ENV_HOOK in os.environ and not _skip_env_hook:
return load_class(os.environ[ray_constants.RAY_RUNTIME_ENV_HOOK])(runtime_env)
return runtime_env
@PublicAPI
@client_mode_hook
def init(
    address: Optional[str] = None,
    *,
    num_cpus: Optional[int] = None,
    num_gpus: Optional[int] = None,
    resources: Optional[Dict[str, float]] = None,
    labels: Optional[Dict[str, str]] = None,
    object_store_memory: Optional[int] = None,
    local_mode: bool = False,
    ignore_reinit_error: bool = False,
    include_dashboard: Optional[bool] = None,
    dashboard_host: str = ray_constants.DEFAULT_DASHBOARD_IP,
    dashboard_port: Optional[int] = None,
    job_config: "ray.job_config.JobConfig" = None,
    configure_logging: bool = True,
    logging_level: int = ray_constants.LOGGER_LEVEL,
    logging_format: Optional[str] = None,
    logging_config: Optional[LoggingConfig] = None,
    log_to_driver: Optional[bool] = None,
    namespace: Optional[str] = None,
    runtime_env: Optional[Union[Dict[str, Any], "RuntimeEnv"]] = None,  # noqa: F821
    enable_resource_isolation: bool = False,
    system_reserved_cpu: Optional[float] = None,
    system_reserved_memory: Optional[int] = None,
    **kwargs,
) -> BaseContext:
    """
    Connect to an existing Ray cluster or start one and connect to it.
    This method handles two cases; either a Ray cluster already exists and we
    just attach this driver to it or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.
    Note: This method overwrites the SIGTERM handler of the driver process.
    In most cases, it is enough to just call this method with no arguments.
    This will autodetect an existing Ray cluster or start a new Ray instance if
    no existing cluster is found:
    .. testcode::
        ray.init()
    To explicitly connect to an existing local cluster, use this as follows. A
    ConnectionError will be thrown if no existing local cluster is found.
    .. testcode::
        :skipif: True
        ray.init(address="auto")
    To connect to an existing remote cluster, use this as follows (substituting
    in the appropriate address). Note the addition of "ray://" at the beginning
    of the address. This requires `ray[client]`.
    .. testcode::
        :skipif: True
        ray.init(address="ray://123.45.67.89:10001")
    More details for starting and connecting to a remote cluster can be found
    here: https://docs.ray.io/en/master/cluster/getting-started.html
    You can also define an environment variable called `RAY_ADDRESS` in
    the same format as the `address` parameter to connect to an existing
    cluster with ray.init() or ray.init(address="auto").
    Args:
        address: The address of the Ray cluster to connect to. The provided
            address is resolved as follows:
            1. If a concrete address (e.g., localhost:<port>) is provided, try to
            connect to it. Concrete addresses can be prefixed with "ray://" to
            connect to a remote cluster. For example, passing in the address
            "ray://123.45.67.89:50005" will connect to the cluster at the given
            address.
            2. If no address is provided, try to find an existing Ray instance
            to connect to. This is done by first checking the environment
            variable `RAY_ADDRESS`. If this is not defined, check the address
            of the latest cluster started (found in
            /tmp/ray/ray_current_cluster) if available. If this is also empty,
            then start a new local Ray instance.
            3. If the provided address is "auto", then follow the same process
            as above. However, if there is no existing cluster found, this will
            throw a ConnectionError instead of starting a new local Ray
            instance.
            4. If the provided address is "local", start a new local Ray
            instance, even if there is already an existing local Ray instance.
        num_cpus: Number of CPUs the user wishes to assign to each
            raylet. By default, this is set based on virtual cores.
        num_gpus: Number of GPUs the user wishes to assign to each
            raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities for them available.
        labels: [Experimental] The key-value labels of the node.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with.
            By default, this is 30% of available system memory capped by
            the shm size and 200G but can be set higher.
        local_mode: Deprecated: consider using the Ray Distributed Debugger instead.
        ignore_reinit_error: If true, Ray suppresses errors from calling
            ray.init() a second time. Ray won't be restarted.
        include_dashboard: Boolean flag indicating whether or not to start the
            Ray dashboard, which displays the status of the Ray
            cluster. If this argument is None, then the UI will be started if
            the relevant dependencies are present.
        dashboard_host: The host to bind the dashboard server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port(int, None): The port to bind the dashboard server to.
            Defaults to 8265 and Ray will automatically find a free port if
            8265 is not available.
        job_config (ray.job_config.JobConfig): The job configuration.
        configure_logging: True (default) if configuration of logging is
            allowed here. Otherwise, the user may want to configure it
            separately.
        logging_level: Logging level for the "ray" logger of the driver process,
            defaults to logging.INFO. Ignored unless "configure_logging" is true.
        logging_format: Logging format for the "ray" logger of the driver process,
            defaults to a string containing a timestamp, filename, line number, and
            message. See the source file ray_constants.py for details. Ignored unless
            "configure_logging" is true.
        logging_config: [Experimental] Logging configuration will be applied to the
            root loggers for both the driver process and all worker processes belonging
            to the current job. See :class:`~ray.LoggingConfig` for details.
        log_to_driver: If true, the output from all of the worker
            processes on all nodes will be directed to the driver.
        namespace: A namespace is a logical grouping of jobs and named actors.
        runtime_env: The runtime environment to use
            for this job (see :ref:`runtime-environments` for details).
        object_spilling_directory: The path to spill objects to. The same path will
            be used as the object store fallback directory as well.
        enable_resource_isolation: Enable resource isolation through cgroupv2 by reserving
            memory and cpu resources for ray system processes. To use, only cgroupv2 (not cgroupv1)
            must be enabled with read and write permissions for the raylet. Cgroup memory and
            cpu controllers must also be enabled.
        system_reserved_cpu: The number of cpu cores to reserve for ray system processes.
            Cores can be fractional i.e. 1.5 means one and a half a cpu core.
            By default, the value will be at least 1 core, and at maximum 3 cores. The default value
            is calculated using the formula min(3.0, max(1.0, 0.05 * num_cores_on_the_system))
            This option only works if enable_resource_isolation is True.
        system_reserved_memory: The amount of memory (in bytes) to reserve for ray system processes.
            By default, the value will be at least 500MB, and at most 10GB. The default value is
            calculated using the formula min(10GB, max(500MB, 0.10 * memory_available_on_the_system))
            This option only works if enable_resource_isolation is True.
        _cgroup_path: The path for the cgroup the raylet should use to enforce resource isolation.
            By default, the cgroup used for resource isolation will be /sys/fs/cgroup.
            The process starting ray must have read/write permissions to this path.
            Cgroup memory and cpu controllers must also be enabled for this cgroup.
            This option only works if enable_resource_isolation is True.
        _enable_object_reconstruction: If True, when an object stored in
            the distributed plasma store is lost due to node failure, Ray will
            attempt to reconstruct the object by re-executing the task that
            created the object. Arguments to the task will be recursively
            reconstructed. If False, then ray.ObjectLostError will be
            thrown.
        _plasma_directory: Override the plasma mmap file directory.
        _node_ip_address: The IP address of the node that we are on.
        _driver_object_store_memory: Deprecated.
        _memory: Amount of reservable memory resource in bytes rounded
            down to the nearest integer.
        _redis_username: Prevents external clients without the username
            from connecting to Redis if provided.
        _redis_password: Prevents external clients without the password
            from connecting to Redis if provided.
        _temp_dir: If provided, specifies the root temporary
            directory for the Ray process. Must be an absolute path. Defaults to an
            OS-specific conventional location, e.g., "/tmp/ray".
        _metrics_export_port: Port number Ray exposes system metrics
            through a Prometheus endpoint. It is currently under active
            development, and the API is subject to change.
        _system_config: Configuration for overriding
            RayConfig defaults. For testing purposes ONLY.
        _tracing_startup_hook: If provided, turns on and sets up tracing
            for Ray. Must be the name of a function that takes no arguments and
            sets up a Tracer Provider, Remote Span Processors, and
            (optional) additional instruments. See more at
            docs.ray.io/tracing.html. It is currently under active development,
            and the API is subject to change.
        _node_name: User-provided node name or identifier. Defaults to
            the node IP address.
    Returns:
        If the provided address includes a protocol, for example by prepending
        "ray://" to the address to get "ray://1.2.3.4:10001", then a
        ClientContext is returned with information such as settings, server
        versions for ray and python, and the dashboard_url. Otherwise,
        a RayContext is returned with ray and python versions, and address
        information about the started processes.
    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    if log_to_driver is None:
        log_to_driver = ray_constants.RAY_LOG_TO_DRIVER
    # Configure the "ray" logger for the driver process.
    if configure_logging:
        setup_logger(logging_level, logging_format or ray_constants.LOGGER_FORMAT)
    else:
        logging.getLogger("ray").handlers.clear()
    # Configure the logging settings for the driver process.
    if logging_config or ray_constants.RAY_LOGGING_CONFIG_ENCODING:
        logging_config = logging_config or LoggingConfig(
            encoding=ray_constants.RAY_LOGGING_CONFIG_ENCODING
        )
        logging_config._apply()
    # Parse the hidden options
    _cgroup_path: str = kwargs.pop("_cgroup_path", None)
    _enable_object_reconstruction: bool = kwargs.pop(
        "_enable_object_reconstruction", False
    )
    _plasma_directory: Optional[str] = kwargs.pop("_plasma_directory", None)
    _object_spilling_directory: Optional[str] = kwargs.pop(
        "object_spilling_directory", None
    )
    _node_ip_address: str = kwargs.pop("_node_ip_address", None)
    _driver_object_store_memory: Optional[int] = kwargs.pop(
        "_driver_object_store_memory", None
    )
    _memory: Optional[int] = kwargs.pop("_memory", None)
    _redis_username: str = kwargs.pop(
        "_redis_username", ray_constants.REDIS_DEFAULT_USERNAME
    )
    _redis_password: str = kwargs.pop(
        "_redis_password", ray_constants.REDIS_DEFAULT_PASSWORD
    )
    _temp_dir: Optional[str] = kwargs.pop("_temp_dir", None)
    _metrics_export_port: Optional[int] = kwargs.pop("_metrics_export_port", None)
    _system_config: Optional[Dict[str, str]] = kwargs.pop("_system_config", None)
    _tracing_startup_hook: Optional[Callable] = kwargs.pop(
        "_tracing_startup_hook", None
    )
    _node_name: str = kwargs.pop("_node_name", None)
    # Fix for https://github.com/ray-project/ray/issues/26729
    _skip_env_hook: bool = kwargs.pop("_skip_env_hook", False)
    # Collect the cgroup-based resource isolation settings into one object.
    resource_isolation_config = ResourceIsolationConfig(
        enable_resource_isolation=enable_resource_isolation,
        cgroup_path=_cgroup_path,
        system_reserved_cpu=system_reserved_cpu,
        system_reserved_memory=system_reserved_memory,
    )
    # Install a SIGTERM handler (main thread only) before connecting the
    # driver, so a SIGTERM exits the process with the signal number.
    def sigterm_handler(signum, frame):
        sys.exit(signum)
    if threading.current_thread() is threading.main_thread():
        ray._private.utils.set_sigterm_handler(sigterm_handler)
    else:
        logger.warning(
            "SIGTERM handler is not set because current thread "
            "is not the main thread."
        )
    # If available, use RAY_ADDRESS to override if the address was left
    # unspecified, or set to "auto" in the call to init
    address_env_var = os.environ.get(ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
    if address_env_var and (address is None or address == "auto"):
        address = address_env_var
        logger.info(
            f"Using address {address_env_var} set in the environment "
            f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}"
        )
    if address is not None and "://" in address:
        # Address specified a protocol, use ray client
        builder = ray.client(address, _deprecation_warn_enabled=False)
        # Forward any keyword arguments that were changed from their default
        # values to the builder
        init_sig = inspect.signature(init)
        passed_kwargs = {}
        for argument_name, param_obj in init_sig.parameters.items():
            if argument_name in {"kwargs", "address"}:
                # kwargs and address are handled separately
                continue
            default_value = param_obj.default
            passed_value = locals()[argument_name]
            if passed_value != default_value:
                # passed value is different than default, pass to the client
                # builder
                passed_kwargs[argument_name] = passed_value
        passed_kwargs.update(kwargs)
        builder._init_args(**passed_kwargs)
        ctx = builder.connect()
        from ray._common.usage import usage_lib
        if passed_kwargs.get("allow_multiple") is True:
            with ctx:
                usage_lib.put_pre_init_usage_stats()
        else:
            usage_lib.put_pre_init_usage_stats()
        usage_lib.record_library_usage("client")
        return ctx
    # Past this point we are NOT using the ray client; reject client-only
    # or removed keyword arguments with actionable errors.
    if kwargs.get("allow_multiple"):
        raise RuntimeError(
            "`allow_multiple` argument is passed to `ray.init` when the "
            "ray client is not used ("
            f"https://docs.ray.io/en/{get_ray_doc_version()}/cluster"
            "/running-applications/job-submission"
            "/ray-client.html#connect-to-multiple-ray-clusters-experimental). "
            "Do not pass the `allow_multiple` to `ray.init` to fix the issue."
        )
    if kwargs.get("storage"):
        raise RuntimeError(
            "Cluster-wide storage configuration has been removed. "
            "The last Ray version supporting the `storage` argument is `ray==2.47`."
        )
    if kwargs:
        # User passed in extra keyword arguments but isn't connecting through
        # ray client. Raise an error, since most likely a typo in keyword
        unknown = ", ".join(kwargs)
        raise RuntimeError(f"Unknown keyword argument(s): {unknown}")
    # Try to increase the file descriptor limit, which is too low by
    # default for Ray: https://github.com/ray-project/ray/issues/11239
    try:
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < hard:
            # https://github.com/ray-project/ray/issues/12059
            soft = max(soft, min(hard, 65536))
            logger.debug(
                f"Automatically increasing RLIMIT_NOFILE to max value of {hard}"
            )
            try:
                resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
            except ValueError:
                logger.debug("Failed to raise limit.")
        soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 4096:
            logger.warning(
                "File descriptor limit {} is too low for production "
                "servers and may result in connection errors. "
                "At least 8192 is recommended. --- "
                "Fix with 'ulimit -n 8192'".format(soft)
            )
    except ImportError:
        logger.debug("Could not import resource module (on Windows)")
        pass
    if job_config is None:
        job_config = ray.job_config.JobConfig()
    if RAY_JOB_CONFIG_JSON_ENV_VAR in os.environ:
        injected_job_config_json = json.loads(
            os.environ.get(RAY_JOB_CONFIG_JSON_ENV_VAR)
        )
        injected_job_config: ray.job_config.JobConfig = (
            ray.job_config.JobConfig.from_json(injected_job_config_json)
        )
        driver_runtime_env = runtime_env
        runtime_env = _merge_runtime_env(
            injected_job_config.runtime_env,
            driver_runtime_env,
            override=os.getenv("RAY_OVERRIDE_JOB_RUNTIME_ENV") == "1",
        )
        if runtime_env is None:
            # None means there was a conflict.
            raise ValueError(
                "Failed to merge the Job's runtime env "
                f"{injected_job_config.runtime_env} with "
                f"a ray.init's runtime env {driver_runtime_env} because "
                "of a conflict. Specifying the same runtime_env fields "
                "or the same environment variable keys is not allowed. "
                "Use RAY_OVERRIDE_JOB_RUNTIME_ENV=1 to instruct Ray to "
                "combine Job and Driver's runtime environment in the event of "
                "a conflict."
            )
        runtime_env = _maybe_modify_runtime_env(runtime_env, _skip_env_hook)
        job_config.set_runtime_env(runtime_env)
        # Similarly, we prefer metadata provided via job submission API
        for key, value in injected_job_config.metadata.items():
            job_config.set_metadata(key, value)
    # RAY_JOB_CONFIG_JSON_ENV_VAR is only set at ray job manager level and has
    # higher priority in case user also provided runtime_env for ray.init()
    else:
        runtime_env = _maybe_modify_runtime_env(runtime_env, _skip_env_hook)
        if runtime_env:
            # Set runtime_env in job_config if passed in as part of ray.init()
            job_config.set_runtime_env(runtime_env)
    # Pass the logging_config to job_config to configure loggers of all worker
    # processes belonging to the job.
    if logging_config is not None:
        job_config.set_py_logging_config(logging_config)
    redis_address, gcs_address = None, None
    bootstrap_address = services.canonicalize_bootstrap_address(address, _temp_dir)
    if bootstrap_address is not None:
        gcs_address = bootstrap_address
        logger.info("Connecting to existing Ray cluster at address: %s...", gcs_address)
    if local_mode:
        driver_mode = LOCAL_MODE
        warnings.warn(
            "`local_mode` is an experimental feature that is no "
            "longer maintained and will be removed in the near future. "
            "For debugging consider using the Ray distributed debugger.",
            FutureWarning,
            stacklevel=2,
        )
    else:
        driver_mode = SCRIPT_MODE
    # Reinit handling: either return a context for the existing connection
    # or raise, depending on ignore_reinit_error.
    global _global_node
    if global_worker.connected:
        if ignore_reinit_error:
            logger.info("Calling ray.init() again after it has already been called.")
            node_id = global_worker.core_worker.get_current_node_id()
            return RayContext(dict(_global_node.address_info, node_id=node_id.hex()))
        else:
            raise RuntimeError(
                "Maybe you called ray.init twice by accident? "
                "This error can be suppressed by passing in "
                "'ignore_reinit_error=True' or by calling "
                "'ray.shutdown()' prior to 'ray.init()'."
            )
    _system_config = _system_config or {}
    if not isinstance(_system_config, dict):
        raise TypeError("The _system_config must be a dict.")
    if bootstrap_address is None:
        # In this case, we need to start a new cluster.
        # Setup and verify authentication for new cluster
        ensure_token_if_auth_enabled(_system_config, create_token_if_missing=True)
        # Don't collect usage stats in ray.init() unless it's a nightly wheel.
        from ray._common.usage import usage_lib
        if usage_lib.is_nightly_wheel():
            usage_lib.show_usage_stats_prompt(cli=False)
        else:
            usage_lib.set_usage_stats_enabled_via_env_var(False)
        # Use a random port by not specifying Redis port / GCS server port.
        ray_params = ray._private.parameter.RayParams(
            node_ip_address=_node_ip_address,
            driver_mode=driver_mode,
            redirect_output=None,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            labels=labels,
            num_redis_shards=None,
            redis_max_clients=None,
            redis_username=_redis_username,
            redis_password=_redis_password,
            plasma_directory=_plasma_directory,
            object_spilling_directory=_object_spilling_directory,
            huge_pages=None,
            include_dashboard=include_dashboard,
            dashboard_host=dashboard_host,
            dashboard_port=dashboard_port,
            memory=_memory,
            object_store_memory=object_store_memory,
            plasma_store_socket_name=None,
            temp_dir=_temp_dir,
            _system_config=_system_config,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port,
            tracing_startup_hook=_tracing_startup_hook,
            node_name=_node_name,
            resource_isolation_config=resource_isolation_config,
        )
        # Start the Ray processes. We set shutdown_at_exit=False because we
        # shutdown the node in the ray.shutdown call that happens in the atexit
        # handler. We still spawn a reaper process in case the atexit handler
        # isn't called.
        _global_node = ray._private.node.Node(
            ray_params=ray_params,
            head=True,
            shutdown_at_exit=False,
            spawn_reaper=True,
            ray_init_cluster=True,
        )
    else:
        # In this case, we are connecting to an existing cluster.
        # Cluster-level options cannot be set by a joining driver.
        if num_cpus is not None or num_gpus is not None:
            raise ValueError(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided."
            )
        if resources is not None:
            raise ValueError(
                "When connecting to an existing cluster, "
                "resources must not be provided."
            )
        if labels is not None:
            raise ValueError(
                "When connecting to an existing cluster, "
                "labels must not be provided."
            )
        if object_store_memory is not None:
            raise ValueError(
                "When connecting to an existing cluster, "
                "object_store_memory must not be provided."
            )
        if _system_config is not None and len(_system_config) != 0:
            raise ValueError(
                "When connecting to an existing cluster, "
                "_system_config must not be provided."
            )
        if _enable_object_reconstruction:
            raise ValueError(
                "When connecting to an existing cluster, "
                "_enable_object_reconstruction must not be provided."
            )
        if _node_name is not None:
            raise ValueError(
                "_node_name cannot be configured when connecting to "
                "an existing cluster."
            )
        # Setup and verify authentication for connecting to existing cluster
        ensure_token_if_auth_enabled(_system_config, create_token_if_missing=False)
        # In this case, we only need to connect the node.
        ray_params = ray._private.parameter.RayParams(
            node_ip_address=_node_ip_address,
            gcs_address=gcs_address,
            redis_address=redis_address,
            redis_username=_redis_username,
            redis_password=_redis_password,
            temp_dir=_temp_dir,
            _system_config=_system_config,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port,
        )
        try:
            _global_node = ray._private.node.Node(
                ray_params,
                head=False,
                shutdown_at_exit=False,
                spawn_reaper=False,
                connect_only=True,
            )
        except (ConnectionError, RuntimeError):
            if gcs_address == ray._private.utils.read_ray_address(_temp_dir):
                logger.info(
                    "Failed to connect to the default Ray cluster address at "
                    f"{gcs_address}. This is most likely due to a previous Ray "
                    "instance that has since crashed. To reset the default "
                    "address to connect to, run `ray stop` or restart Ray with "
                    "`ray start`."
                )
            raise ConnectionError
    # Log a message to find the Ray address that we connected to and the
    # dashboard URL.
    if ray_constants.RAY_OVERRIDE_DASHBOARD_URL in os.environ:
        dashboard_url = os.environ.get(ray_constants.RAY_OVERRIDE_DASHBOARD_URL)
    else:
        dashboard_url = _global_node.webui_url
    # Add http protocol to dashboard URL if it doesn't
    # already contain a protocol.
    if dashboard_url and not urlparse(dashboard_url).scheme:
        dashboard_url = "http://" + dashboard_url
    # We logged the address before attempting the connection, so we don't need
    # to log it again.
    info_str = "Connected to Ray cluster."
    if gcs_address is None:
        info_str = "Started a local Ray instance."
    if dashboard_url:
        logger.info(
            info_str + " View the dashboard at %s%s%s %s%s",
            colorama.Style.BRIGHT,
            colorama.Fore.GREEN,
            dashboard_url,
            colorama.Fore.RESET,
            colorama.Style.NORMAL,
        )
    else:
        logger.info(info_str)
    # Connect this driver process to the (new or existing) node.
    connect(
        _global_node,
        _global_node.session_name,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_object_store_memory=_driver_object_store_memory,
        job_id=None,
        namespace=namespace,
        job_config=job_config,
        entrypoint=ray._private.utils.get_entrypoint_name(),
    )
    if job_config and job_config.code_search_path:
        global_worker.set_load_code_from_local(True)
    else:
        # Because `ray.shutdown()` doesn't reset this flag, for multiple
        # sessions in one process, the 2nd `ray.init()` will reuse the
        # flag of last session. For example:
        # ray.init(load_code_from_local=True)
        # ray.shutdown()
        # ray.init()
        # # Here the flag `load_code_from_local` is still True if we
        # # doesn't have this `else` branch.
        # ray.shutdown()
        global_worker.set_load_code_from_local(False)
    for hook in _post_init_hooks:
        hook()
    # Check and show accelerator override warning during driver initialization
    from ray._private.ray_constants import env_bool
    override_on_zero = env_bool(
        ray._private.accelerators.RAY_ACCEL_ENV_VAR_OVERRIDE_ON_ZERO_ENV_VAR,
        True,
    )
    if override_on_zero and log_once("ray_accel_env_var_override_on_zero"):
        warnings.warn(
            "Tip: In future versions of Ray, Ray will no longer override accelerator "
            "visible devices env var if num_gpus=0 or num_gpus=None (default). To enable "
            "this behavior and turn off this error message, set RAY_ACCEL_ENV_VAR_OVERRIDE_ON_ZERO=0",
            FutureWarning,
        )
    node_id = global_worker.core_worker.get_current_node_id()
    global_node_address_info = _global_node.address_info.copy()
    global_node_address_info["webui_url"] = _remove_protocol_from_url(dashboard_url)
    return RayContext(dict(global_node_address_info, node_id=node_id.hex()))
# Functions to run as callback after a successful ray init.
# Each hook is invoked with no arguments at the end of ray.init().
_post_init_hooks = []
@PublicAPI
@client_mode_hook
@with_connect_or_shutdown_lock
def shutdown(_exiting_interpreter: bool = False):
    """Disconnect the worker, and terminate processes started by ray.init().
    This will automatically run at the end when a Python process that uses Ray
    exits. It is ok to run this twice in a row. The primary use case for this
    function is to cleanup state between tests.
    Note that this will clear any remote function definitions, actor
    definitions, and existing actors, so if you wish to use any previously
    defined remote functions or actors after calling ray.shutdown(), then you
    need to redefine them. If they were defined in an imported module, then you
    will need to reload the module.
    Args:
        _exiting_interpreter: True if this is called by the atexit hook
            and false otherwise. If we are exiting the interpreter, we will
            wait a little while to print any extra error messages.
    """
    # Make sure to clean up compiled dag node if exists.
    from ray.dag.compiled_dag_node import _shutdown_all_compiled_dags
    _shutdown_all_compiled_dags()
    global_worker.shutdown_gpu_object_manager()
    if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
        # This is a duration to sleep before shutting down everything in order
        # to make sure that log messages finish printing.
        time.sleep(0.5)
    disconnect(_exiting_interpreter)
    # disconnect internal kv
    if hasattr(global_worker, "gcs_client"):
        del global_worker.gcs_client
    _internal_kv_reset()
    # We need to destruct the core worker here because after this function,
    # we will tear down any processes spawned by ray.init() and the background
    # IO thread in the core worker doesn't currently handle that gracefully.
    if hasattr(global_worker, "core_worker"):
        if global_worker.mode == SCRIPT_MODE or global_worker.mode == LOCAL_MODE:
            global_worker.core_worker.shutdown_driver()
        del global_worker.core_worker
    # We need to reset function actor manager to clear the context
    global_worker.function_actor_manager = FunctionActorManager(global_worker)
    # Disconnect global state from GCS.
    ray._private.state.state.disconnect()
    # Shut down the Ray processes.
    global _global_node
    if _global_node is not None:
        if _global_node.is_head():
            _global_node.destroy_external_storage()
        _global_node.kill_all_processes(check_alive=False, allow_graceful=True)
        _global_node = None
    # TODO(rkn): Instead of manually resetting some of the worker fields, we
    # should simply set "global_worker" to equal "None" or something like that.
    global_worker.set_mode(None)
    global_worker.set_cached_job_id(None)
# Ensure Ray is torn down when the interpreter exits normally
# (_exiting_interpreter=True adds a short sleep for log flushing).
atexit.register(shutdown, True)
# Define a custom excepthook so that if the driver exits with an exception, we
# can push that exception to the GCS worker table.
normal_excepthook = sys.excepthook
def custom_excepthook(type, value, tb):
    """Driver excepthook: record the traceback in the GCS worker table, then
    delegate to the interpreter's original excepthook."""
    import ray.core.generated.common_pb2 as common_pb2
    # If this is a driver, push the exception to GCS worker table.
    if global_worker.mode == SCRIPT_MODE and hasattr(global_worker, "worker_id"):
        error_message = "".join(traceback.format_tb(tb))
        worker_id = global_worker.worker_id
        worker_type = common_pb2.DRIVER
        worker_info = {"exception": error_message}
        ray._private.state.state._connect_and_get_accessor()
        ray._private.state.state.add_worker(worker_id, worker_type, worker_info)
    # Call the normal excepthook.
    normal_excepthook(type, value, tb)
sys.excepthook = custom_excepthook
def print_to_stdstream(data, ignore_prefix: bool):
    """Route one worker log record to stdout or stderr.

    Records are deduplicated per-stream, except for autoscaler output which
    is always passed through verbatim.
    """
    should_dedup = data.get("pid") not in ["autoscaler"]
    if data["is_err"]:
        sink = sys.stderr
        batches = stderr_deduplicator.deduplicate(data) if should_dedup else [data]
    else:
        sink = sys.stdout
        batches = stdout_deduplicator.deduplicate(data) if should_dedup else [data]
    for batch in batches:
        print_worker_logs(batch, sink, ignore_prefix)
# Start time of this process, used for relative time logs.
t0 = time.time()
# One-shot guard so the autoscaler usage tip is yielded at most once per
# process (mutated inside filter_autoscaler_events).
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
    """Given raw log lines from the monitor, return only autoscaler events.
    For Autoscaler V1:
        Autoscaler events are denoted by the ":event_summary:" magic token.
    For Autoscaler V2:
        Autoscaler events are published from log_monitor.py which read
        them from the `event_AUTOSCALER.log`.
    """
    # Feature switch: yield nothing when autoscaler events are disabled.
    if not ray_constants.AUTOSCALER_EVENTS:
        return
    AUTOSCALER_LOG_FYI = (
        "Tip: use `ray status` to view detailed "
        "cluster status. To disable these "
        "messages, set RAY_SCHEDULER_EVENTS=0."
    )
    def autoscaler_log_fyi_needed() -> bool:
        # True exactly once per process; flips the module-level flag.
        global autoscaler_log_fyi_printed
        if not autoscaler_log_fyi_printed:
            autoscaler_log_fyi_printed = True
            return True
        return False
    from ray.autoscaler.v2.utils import is_autoscaler_v2
    if is_autoscaler_v2():
        from ray._private.event.event_logger import filter_event_by_level, parse_event
        for event_line in lines:
            if autoscaler_log_fyi_needed():
                yield AUTOSCALER_LOG_FYI
            event = parse_event(event_line)
            if not event or not event.message:
                continue
            # Skip events filtered out by RAY_LOG_TO_DRIVER_EVENT_LEVEL.
            if filter_event_by_level(
                event, ray_constants.RAY_LOG_TO_DRIVER_EVENT_LEVEL
            ):
                continue
            yield event.message
    else:
        # Print out autoscaler events only, ignoring other messages.
        for line in lines:
            if ray_constants.LOG_PREFIX_EVENT_SUMMARY in line:
                if autoscaler_log_fyi_needed():
                    yield AUTOSCALER_LOG_FYI
                # The event text immediately follows the ":event_summary:"
                # magic token.
                yield line.split(ray_constants.LOG_PREFIX_EVENT_SUMMARY)[1]
def time_string() -> str:
"""Return the relative time from the start of this job.
For example, 15m30s.
"""
delta = time.time() - t0
hours = 0
minutes = 0
while delta > 3600:
hours += 1
delta -= 3600
while delta > 60:
minutes += 1
delta -= 60
output = ""
if hours:
output += f"{hours}h"
if minutes:
output += f"{minutes}m"
output += f"{int(delta)}s"
return output
# When we enter a breakpoint, worker logs are automatically disabled via this.
# Checked at the top of print_worker_logs(); when False, log batches are
# silently dropped.
_worker_logs_enabled = True
def print_worker_logs(
    data: Dict[str, str], print_file: Any, ignore_prefix: bool = False
):
    """Print one batch of worker log lines to `print_file`.

    Unless `ignore_prefix` is set, each line gets a colored
    "(actor/task pid=..., ip=...)" prefix identifying its source process.
    Autoscaler batches are filtered down to event lines first. Lines carrying
    the tqdm magic token are routed to the distributed tqdm machinery
    instead of being printed.
    """
    if not _worker_logs_enabled:
        return
    def prefix_for(data: Dict[str, str]) -> str:
        """The PID prefix for this log line."""
        if data.get("pid") in ["autoscaler", "raylet"]:
            return ""
        else:
            res = "pid="
            if data.get("actor_name"):
                res = f"{data['actor_name']} {res}"
            elif data.get("task_name"):
                res = f"{data['task_name']} {res}"
            return res
    def message_for(data: Dict[str, str], line: str) -> str:
        """The printed message of this log line."""
        if ray_constants.LOG_PREFIX_INFO_MESSAGE in line:
            return line.split(ray_constants.LOG_PREFIX_INFO_MESSAGE)[1]
        return line
    def color_for(data: Dict[str, str], line: str) -> str:
        """The color for this log line."""
        if (
            data.get("pid") == "raylet"
            and ray_constants.LOG_PREFIX_INFO_MESSAGE not in line
        ):
            return colorama.Fore.YELLOW
        elif data.get("pid") == "autoscaler":
            if "Error:" in line or "Warning:" in line:
                return colorama.Fore.YELLOW
            else:
                return colorama.Fore.CYAN
        elif os.getenv("RAY_COLOR_PREFIX") == "1":
            # Pick a stable per-pid color from a palette of readable colors.
            colors = [
                # colorama.Fore.BLUE, # Too dark
                colorama.Fore.MAGENTA,
                colorama.Fore.CYAN,
                colorama.Fore.GREEN,
                # colorama.Fore.WHITE, # Too light
                # colorama.Fore.RED,
                colorama.Fore.LIGHTBLACK_EX,
                colorama.Fore.LIGHTBLUE_EX,
                # colorama.Fore.LIGHTCYAN_EX, # Too light
                # colorama.Fore.LIGHTGREEN_EX, # Too light
                colorama.Fore.LIGHTMAGENTA_EX,
                # colorama.Fore.LIGHTWHITE_EX, # Too light
                # colorama.Fore.LIGHTYELLOW_EX, # Too light
            ]
            pid = data.get("pid", 0)
            try:
                i = int(pid)
            except ValueError:
                i = 0
            return colors[i % len(colors)]
        else:
            return colorama.Fore.CYAN
    if data.get("pid") == "autoscaler":
        pid = "autoscaler +{}".format(time_string())
        lines = filter_autoscaler_events(data.get("lines", []))
    else:
        pid = data.get("pid")
        lines = data.get("lines", [])
    ip = data.get("ip")
    # Suppress ", ip=..." for lines that originated on the local host.
    ip_prefix = "" if ip == data.get("localhost") else f", ip={ip}"
    for line in lines:
        if RAY_TQDM_MAGIC in line:
            process_tqdm(line)
        else:
            hide_tqdm()
            # If RAY_COLOR_PREFIX=0, do not wrap with any color codes
            if os.getenv("RAY_COLOR_PREFIX") == "0":
                color_pre = ""
                color_post = ""
            else:
                color_pre = color_for(data, line)
                color_post = colorama.Style.RESET_ALL
            if ignore_prefix:
                print(
                    f"{message_for(data, line)}",
                    file=print_file,
                )
            else:
                print(
                    f"{color_pre}({prefix_for(data)}{pid}{ip_prefix}){color_post} "
                    f"{message_for(data, line)}",
                    file=print_file,
                )
    # Restore once at end of batch to avoid excess hiding/unhiding of tqdm.
    restore_tqdm()
def process_tqdm(line):
    """Experimental distributed tqdm: see ray.experimental.tqdm_ray."""
    try:
        # Both the decode and the state update are best-effort; a corrupt
        # line must never crash the log pump.
        tqdm_ray.instance().process_state_update(json.loads(line))
    except Exception:
        if log_once("tqdm_corruption"):
            logger.warning(
                f"[tqdm_ray] Failed to decode {line}, this may be due to "
                "logging too fast. This warning will not be printed again."
            )
def hide_tqdm():
    """Hide distributed tqdm bars temporarily to avoid conflicts with other logs.

    Paired with restore_tqdm(); delegates to the process-wide tqdm_ray instance.
    """
    tqdm_ray.instance().hide_bars()
def restore_tqdm():
    """Undo hide_tqdm(), re-showing any distributed tqdm bars."""
    tqdm_ray.instance().unhide_bars()
def listen_error_messages(worker, threads_stopped):
    """Listen to error messages in the background on the driver.

    This runs in a separate thread on the driver and pushes (error, time)
    tuples to be published.

    Args:
        worker: The worker class that this thread belongs to.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    # TODO: we should just subscribe to the errors for this specific job.
    worker.gcs_error_subscriber.subscribe()

    try:
        if _internal_kv_initialized():
            # Get any autoscaler errors that occurred before the call to
            # subscribe.
            error_message = _internal_kv_get(ray_constants.DEBUG_AUTOSCALING_ERROR)
            if error_message is not None:
                logger.warning(error_message.decode())

        while True:
            # Exit if received a signal that the thread should stop.
            if threads_stopped.is_set():
                return

            # poll() blocks until a message arrives; None means no payload.
            _, error_data = worker.gcs_error_subscriber.poll()
            if error_data is None:
                continue
            # Skip errors belonging to other jobs; nil job id means
            # cluster-wide and is always shown.
            if error_data["job_id"] is not None and error_data["job_id"] not in [
                worker.current_job_id.binary(),
                JobID.nil().binary(),
            ]:
                continue

            error_message = error_data["error_message"]
            # NOTE(review): "is_err" is False here, so errors are routed to
            # stdout rather than stderr — confirm this is intentional.
            print_to_stdstream(
                {
                    "lines": [error_message],
                    "pid": "raylet",
                    "is_err": False,
                },
                ignore_prefix=False,
            )
    except (OSError, ConnectionError) as e:
        logger.error(f"listen_error_messages: {e}")
@PublicAPI
@client_mode_hook
def is_initialized() -> bool:
    """Check if ray.init has been called yet.

    Returns:
        True if ray.init has already been called and false otherwise.
    """
    # Reads the global worker's connection flag; `client_mode_hook`
    # presumably reroutes this when running under the Ray Client — see the
    # decorator's definition.
    return ray._private.worker.global_worker.connected
@with_connect_or_shutdown_lock
def connect(
    node,
    session_name: str,
    mode=WORKER_MODE,
    log_to_driver: bool = False,
    worker=global_worker,
    driver_object_store_memory: Optional[int] = None,
    job_id=None,
    namespace: Optional[str] = None,
    job_config=None,
    runtime_env_hash: int = 0,
    startup_token: int = 0,
    ray_debugger_external: bool = False,
    entrypoint: str = "",
    worker_launch_time_ms: int = -1,
    worker_launched_time_ms: int = -1,
    debug_source: str = "",
):
    """Connect this worker to the raylet, to Plasma, and to GCS.

    Args:
        node (ray._private.node.Node): The node to connect.
        session_name: The current Ray session name.
        mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
        log_to_driver: If true, then output from all of the worker
            processes on all nodes will be directed to the driver.
        worker: The ray.Worker instance.
        driver_object_store_memory: Deprecated.
        job_id: The ID of job. If it's None, then we will generate one.
        namespace: Namespace to use.
        job_config (ray.job_config.JobConfig): The job configuration.
        runtime_env_hash: The hash of the runtime env for this worker.
        startup_token: The startup token of the process assigned to
            it during startup as a command line argument.
        ray_debugger_external: If True, make the debugger external to the
            node this worker is running on.
        entrypoint: The name of the entrypoint script. Ignored if the
            mode != SCRIPT_MODE
        worker_launch_time_ms: The time when the worker process for this worker
            is launched. If the worker is not launched by raylet (e.g.,
            driver), this must be -1 (default value).
        worker_launched_time_ms: The time when the worker process for this worker
            finshes launching. If the worker is not launched by raylet (e.g.,
            driver), this must be -1 (default value).
        debug_source: Source information for `CoreWorker`, used for debugging and informational purpose, rather than functional purpose.
    """
    # Do some basic checking to make sure we didn't call ray.init twice.
    error_message = "Perhaps you called ray.init twice by accident?"
    assert not worker.connected, error_message

    # Enable nice stack traces on SIGSEGV etc.
    try:
        if not faulthandler.is_enabled():
            faulthandler.enable(all_threads=False)
    except io.UnsupportedOperation:
        pass  # ignore

    # GCS connectivity must be established before anything else below.
    worker.gcs_client = node.get_gcs_client()
    assert worker.gcs_client is not None
    _initialize_internal_kv(worker.gcs_client)
    ray._private.state.state._initialize_global_state(
        ray._raylet.GcsClientOptions.create(
            node.gcs_address,
            node.cluster_id.hex(),
            allow_cluster_id_nil=False,
            fetch_cluster_id_if_nil=False,
        )
    )

    # Initialize some fields.
    if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE):
        # We should not specify the job_id if it's `WORKER_MODE`.
        assert job_id is None
        job_id = JobID.nil()
    else:
        # This is the code path of driver mode.
        if job_id is None:
            job_id = ray._private.state.next_job_id()

    # Give non-driver worker processes a descriptive process title.
    if mode is not SCRIPT_MODE and mode is not LOCAL_MODE:
        process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
        if mode is SPILL_WORKER_MODE:
            process_name = ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE
        elif mode is RESTORE_WORKER_MODE:
            process_name = ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE
        ray._raylet.setproctitle(process_name)

    if not isinstance(job_id, JobID):
        raise TypeError("The type of given job id must be JobID.")

    # All workers start out as non-actors. A worker can be turned into an actor
    # after it is created.
    worker.node = node
    worker.set_mode(mode)

    # For driver's check that the version information matches the version
    # information that the Ray cluster was started with.
    try:
        node.check_version_info()
    except Exception as e:
        if mode == SCRIPT_MODE:
            raise e
        elif mode == WORKER_MODE:
            traceback_str = traceback.format_exc()
            ray._private.utils.publish_error_to_driver(
                ray_constants.VERSION_MISMATCH_PUSH_ERROR,
                traceback_str,
                gcs_client=worker.gcs_client,
            )

    driver_name = ""
    interactive_mode = False
    if mode == SCRIPT_MODE:
        import __main__ as main

        if hasattr(main, "__file__"):
            driver_name = main.__file__
        else:
            # e.g. a REPL or notebook: no script file to report.
            interactive_mode = True
            driver_name = "INTERACTIVE MODE"
    # NOTE(review): `not LOCAL_MODE` is a constant expression (False for any
    # nonzero mode constant), so this branch looks unreachable; the intent was
    # presumably a check on `mode` — confirm against the mode constants.
    elif not LOCAL_MODE:
        raise ValueError("Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")

    gcs_options = ray._raylet.GcsClientOptions.create(
        node.gcs_address,
        node.cluster_id.hex(),
        allow_cluster_id_nil=False,
        fetch_cluster_id_if_nil=False,
    )
    if job_config is None:
        job_config = ray.job_config.JobConfig()

    if namespace is not None:
        ray._private.utils.validate_namespace(namespace)

        # The namespace field of job config may have already been set in code
        # paths such as the client.
        job_config.set_ray_namespace(namespace)

    # Make sure breakpoint() in the user's code will
    # invoke the Ray debugger if we are in a worker or actor process
    # (but not on the driver).
    if mode == WORKER_MODE:
        os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
    else:
        # Add hook to suppress worker logs during breakpoint.
        os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb._driver_set_trace"
    worker.ray_debugger_external = ray_debugger_external

    # If it's a driver and it's not coming from ray client, we'll prepare the
    # environment here. If it's ray client, the environment will be prepared
    # at the server side.
    if mode == SCRIPT_MODE and not job_config._client_job and job_config.runtime_env:
        from ray._private.ray_constants import RAY_RUNTIME_ENV_IGNORE_GITIGNORE

        scratch_dir: str = worker.node.get_runtime_env_dir_path()
        runtime_env = job_config.runtime_env or {}
        # Determine whether to respect .gitignore files based on environment variable
        # Default is True (respect .gitignore). Set to False if env var is "1".
        include_gitignore = os.environ.get(RAY_RUNTIME_ENV_IGNORE_GITIGNORE, "0") != "1"
        runtime_env = upload_py_modules_if_needed(
            runtime_env,
            include_gitignore=include_gitignore,
            scratch_dir=scratch_dir,
            logger=logger,
        )
        runtime_env = upload_working_dir_if_needed(
            runtime_env,
            include_gitignore=include_gitignore,
            scratch_dir=scratch_dir,
            logger=logger,
        )
        runtime_env = upload_worker_process_setup_hook_if_needed(
            runtime_env,
            worker,
        )
        # Remove excludes, it isn't relevant after the upload step.
        runtime_env.pop("excludes", None)
        job_config.set_runtime_env(runtime_env, validate=True)

    if mode == SCRIPT_MODE:
        # Add the directory containing the script that is running to the Python
        # paths of the workers. Also add the current directory. Note that this
        # assumes that the directory structures on the machines in the clusters
        # are the same.
        # When using an interactive shell, there is no script directory.
        # We also want to skip adding script directory when running from dashboard.
        code_paths = []
        if not interactive_mode and not (
            namespace and namespace == ray._raylet.RAY_INTERNAL_DASHBOARD_NAMESPACE
        ):
            script_directory = os.path.dirname(os.path.realpath(sys.argv[0]))
            # If driver's sys.path doesn't include the script directory
            # (e.g driver is started via `python -m`,
            # see https://peps.python.org/pep-0338/),
            # then we shouldn't add it to the workers.
            if script_directory in sys.path:
                code_paths.append(script_directory)
        # In client mode, if we use runtime envs with "working_dir", then
        # it'll be handled automatically. Otherwise, add the current dir.
        if not job_config._client_job and not job_config._runtime_env_has_working_dir():
            current_directory = os.path.abspath(os.path.curdir)
            code_paths.append(current_directory)
        if len(code_paths) != 0:
            job_config._py_driver_sys_path.extend(code_paths)

    serialized_job_config = job_config._serialize()
    if not node.should_redirect_logs():
        # Logging to stderr, so give core worker empty logs directory.
        logs_dir = ""
    else:
        logs_dir = node.get_logs_dir_path()
    # This is the C-extension core worker that owns the actual connections.
    worker.core_worker = ray._raylet.CoreWorker(
        mode,
        node.plasma_store_socket_name,
        node.raylet_socket_name,
        job_id,
        gcs_options,
        logs_dir,
        node.node_ip_address,
        node.node_manager_port,
        (mode == LOCAL_MODE),
        driver_name,
        serialized_job_config,
        node.metrics_agent_port,
        runtime_env_hash,
        startup_token,
        session_name,
        node.cluster_id.hex(),
        "" if mode != SCRIPT_MODE else entrypoint,
        worker_launch_time_ms,
        worker_launched_time_ms,
        debug_source,
    )

    if mode == SCRIPT_MODE:
        worker_id = worker.worker_id
        worker.gcs_error_subscriber = ray._raylet.GcsErrorSubscriber(
            worker_id=worker_id, address=worker.gcs_client.address
        )
        worker.gcs_log_subscriber = ray._raylet.GcsLogSubscriber(
            worker_id=worker_id, address=worker.gcs_client.address
        )

    if driver_object_store_memory is not None:
        logger.warning(
            "`driver_object_store_memory` is deprecated"
            " and will be removed in the future."
        )

    # If this is a driver running in SCRIPT_MODE, start a thread to print error
    # messages asynchronously in the background. Ideally the scheduler would
    # push messages to the driver's worker service, but we ran into bugs when
    # trying to properly shutdown the driver's worker service, so we are
    # temporarily using this implementation which constantly queries the
    # scheduler for new error messages.
    if mode == SCRIPT_MODE:
        worker.listener_thread = threading.Thread(
            target=listen_error_messages,
            name="ray_listen_error_messages",
            args=(worker, worker.threads_stopped),
        )
        worker.listener_thread.daemon = True
        worker.listener_thread.start()

    # If the job's logging config is set, don't add the prefix
    # (task/actor's name and its PID) to the logs.
    ignore_prefix = global_worker.job_logging_config is not None

    if log_to_driver:
        global_worker_stdstream_dispatcher.add_handler(
            "ray_print_logs",
            functools.partial(print_to_stdstream, ignore_prefix=ignore_prefix),
        )
        worker.logger_thread = threading.Thread(
            target=worker.print_logs, name="ray_print_logs"
        )
        worker.logger_thread.daemon = True
        worker.logger_thread.start()

    # Setup tracing here
    tracing_hook_val = worker.gcs_client.internal_kv_get(
        b"tracing_startup_hook", ray_constants.KV_NAMESPACE_TRACING
    )
    if tracing_hook_val is not None:
        ray.util.tracing.tracing_helper._enable_tracing()
        if not getattr(ray, "__traced__", False):
            _setup_tracing = _import_from_string(tracing_hook_val.decode("utf-8"))
            _setup_tracing()
            ray.__traced__ = True

    # Mark the worker as connected.
    worker.set_is_connected(True)
def disconnect(exiting_interpreter=False):
    """Disconnect this worker from the raylet and object store."""
    # NOTE(review): `exiting_interpreter` is not referenced in this body —
    # confirm whether it is still needed by callers or is vestigial.
    # Reset the list of cached remote functions and actors so that if more
    # remote functions or actors are defined and then connect is called again,
    # the remote functions will be exported. This is mostly relevant for the
    # tests.
    worker = global_worker
    if worker.connected:
        # Shutdown all of the threads that we've started. TODO(rkn): This
        # should be handled cleanly in the worker object's destructor and not
        # in this disconnect method.
        worker.threads_stopped.set()
        # Subscribers are closed before joining so poll() calls unblock.
        if hasattr(worker, "gcs_error_subscriber"):
            worker.gcs_error_subscriber.close()
        if hasattr(worker, "gcs_log_subscriber"):
            worker.gcs_log_subscriber.close()
        if hasattr(worker, "listener_thread"):
            worker.listener_thread.join()
        if hasattr(worker, "logger_thread"):
            worker.logger_thread.join()
        worker.threads_stopped.clear()

        # Ignore the prefix if the logging config is set.
        ignore_prefix = worker.job_logging_config is not None

        # Flush any deduplicated log lines still buffered for stdout/stderr.
        for leftover in stdout_deduplicator.flush():
            print_worker_logs(leftover, sys.stdout, ignore_prefix)
        for leftover in stderr_deduplicator.flush():
            print_worker_logs(leftover, sys.stderr, ignore_prefix)
        global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")

    worker.node = None  # Disconnect the worker from the node.
    worker.serialization_context_map.clear()
    try:
        ray_actor = ray.actor
    except AttributeError:
        ray_actor = None  # This can occur during program termination
    if ray_actor is not None:
        ray_actor._ActorClassMethodMetadata.reset_cache()

    # Mark the worker as disconnected.
    worker.set_is_connected(False)
@contextmanager
def _changeproctitle(title, next_title):
    """Set the process title to `title` for the duration of the block,
    then switch it to `next_title` on exit (even if the block raises).

    No-op in LOCAL_MODE, where tasks execute inside the driver process.
    """
    if _mode() is not LOCAL_MODE:
        ray._raylet.setproctitle(title)
    try:
        yield
    finally:
        if _mode() is not LOCAL_MODE:
            ray._raylet.setproctitle(next_title)
@DeveloperAPI
def show_in_dashboard(message: str, key: str = "", dtype: str = "text"):
    """Display message in dashboard.

    Shows *message* for the current task or actor in the dashboard, e.g. to
    surface the status of a long-running computation.

    Args:
        message: Message to be displayed.
        key: The key name for the message. Messages under different keys are
            shown simultaneously; a new message under an existing key
            overrides the previous one.
        dtype: The type of message for rendering. One of the
            following: text, html.
    """
    current_worker = global_worker
    current_worker.check_connected()

    acceptable_dtypes = {"text", "html"}
    assert dtype in acceptable_dtypes, f"dtype accepts only: {acceptable_dtypes}"

    payload = json.dumps({"message": message, "dtype": dtype}).encode()
    current_worker.core_worker.set_webui_display(key.encode(), payload)
# Process-wide flag ensuring the "blocking ray.get inside async actor"
# warning is emitted at most once.
blocking_get_inside_async_warned = False
# Typed overloads for ray.get: a single ref returns one value, a sequence of
# refs returns a list; CompiledDAGRef variants mirror the same shape. These
# stubs exist only for static type checkers — the implementation follows.
@overload
def get(
    object_refs: "Sequence[ObjectRef[Any]]", *, timeout: Optional[float] = None
) -> List[Any]:
    ...


@overload
def get(
    object_refs: "Sequence[ObjectRef[R]]", *, timeout: Optional[float] = None
) -> List[R]:
    ...


@overload
def get(object_refs: "ObjectRef[R]", *, timeout: Optional[float] = None) -> R:
    ...


@overload
def get(
    object_refs: Sequence[CompiledDAGRef], *, timeout: Optional[float] = None
) -> List[Any]:
    ...


@overload
def get(object_refs: CompiledDAGRef, *, timeout: Optional[float] = None) -> Any:
    ...
@PublicAPI
@client_mode_hook
def get(
    object_refs: Union[
        "ObjectRef[Any]",
        Sequence["ObjectRef[Any]"],
        CompiledDAGRef,
        Sequence[CompiledDAGRef],
    ],
    *,
    timeout: Optional[float] = None,
    _tensor_transport: Optional[str] = None,
) -> Union[Any, List[Any]]:
    """Get a remote object or a list of remote objects from the object store.

    This method blocks until the object corresponding to the object ref is
    available in the local object store. If this object is not in the local
    object store, it will be shipped from an object store that has it (once the
    object has been created). If object_refs is a list, then the objects
    corresponding to each object in the list will be returned.

    Ordering for an input list of object refs is preserved for each object
    returned. That is, if an object ref to A precedes an object ref to B in the
    input list, then A will precede B in the returned list.

    This method will issue a warning if it's running inside async context,
    you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
    a list of object refs, you can use ``await asyncio.gather(*object_refs)``.

    Passing :class:`~ObjectRefGenerator` is not allowed.

    Related patterns and anti-patterns:

    - :doc:`/ray-core/patterns/ray-get-loop`
    - :doc:`/ray-core/patterns/unnecessary-ray-get`
    - :doc:`/ray-core/patterns/ray-get-submission-order`
    - :doc:`/ray-core/patterns/ray-get-too-many-objects`

    Args:
        object_refs: Object ref of the object to get or a list of object refs
            to get.
        timeout (Optional[float]): The maximum amount of time in seconds to
            wait before returning. Set this to None will block until the
            corresponding object becomes available. Setting ``timeout=0`` will
            return the object immediately if it's available, else raise
            GetTimeoutError in accordance with the above docstring.
        _tensor_transport: [Alpha] The tensor transport to use to fetch `torch.Tensors` found in the Ray Direct Transport object. Currently, this supports "object_store" and "nixl".

    Returns:
        A Python object or a list of Python objects.

    Raises:
        GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
            the get takes longer than timeout to return.
        Exception: An exception is raised immediately if any task that created
            the object or that created one of the objects raised an exception,
            without waiting for the remaining ones to finish.
    """
    worker = global_worker
    worker.check_connected()

    # Warn (once per process) when a blocking get would stall an async actor's
    # event loop; suppressible via the env var.
    if hasattr(worker, "core_worker") and worker.core_worker.current_actor_is_asyncio():
        global blocking_get_inside_async_warned
        if not blocking_get_inside_async_warned:
            if ray_constants.env_bool(
                RAY_WARN_BLOCKING_GET_INSIDE_ASYNC_ENV_VAR,
                True,
            ):
                logger.warning(
                    "Using blocking ray.get inside async actor. "
                    "This blocks the event loop. Please use `await` "
                    "on object ref with asyncio.gather if you want to "
                    "yield execution to the event loop instead."
                )
            blocking_get_inside_async_warned = True

    with profiling.profile("ray.get"):
        # TODO(sang): Should make ObjectRefGenerator
        # compatible to ray.get for dataset.
        if isinstance(object_refs, ObjectRefGenerator):
            # Generators are returned unchanged (see TODO above).
            return object_refs

        # Compiled-DAG refs resolve through their own get(); a mixed list of
        # DAG and regular refs is rejected below.
        if isinstance(object_refs, CompiledDAGRef):
            return object_refs.get(timeout=timeout)

        if isinstance(object_refs, list):
            all_compiled_dag_refs = True
            any_compiled_dag_refs = False
            for object_ref in object_refs:
                is_dag_ref = isinstance(object_ref, CompiledDAGRef)
                all_compiled_dag_refs = all_compiled_dag_refs and is_dag_ref
                any_compiled_dag_refs = any_compiled_dag_refs or is_dag_ref
            if all_compiled_dag_refs:
                # NOTE: an empty list also takes this path and returns [].
                return [object_ref.get(timeout=timeout) for object_ref in object_refs]
            elif any_compiled_dag_refs:
                raise ValueError(
                    "Invalid type of object refs. 'object_refs' must be a list of "
                    "CompiledDAGRefs if there is any CompiledDAGRef within it. "
                )

        # Normalize a single ref to a one-element list; unwrapped on return.
        is_individual_id = isinstance(object_refs, ray.ObjectRef)
        if is_individual_id:
            object_refs = [object_refs]

        if not isinstance(object_refs, list):
            raise ValueError(
                f"Invalid type of object refs, {type(object_refs)}, is given. "
                "'object_refs' must either be an ObjectRef or a list of ObjectRefs. "
            )

        values, debugger_breakpoint = worker.get_objects(
            object_refs, timeout=timeout, _tensor_transport=_tensor_transport
        )
        for i, value in enumerate(values):
            if isinstance(value, RayError):
                # If the object was lost and it wasn't due to owner death, it may be
                # because the object store is full and objects needed to be evicted.
                if isinstance(value, ray.exceptions.ObjectLostError) and not isinstance(
                    value, ray.exceptions.OwnerDiedError
                ):
                    worker.core_worker.log_plasma_usage()
                if isinstance(value, RayTaskError):
                    raise value.as_instanceof_cause()
                else:
                    raise value

        if is_individual_id:
            values = values[0]

        # A non-empty breakpoint id means a remote task hit breakpoint();
        # attach the Ray debugger at the caller's frame.
        if debugger_breakpoint != b"":
            frame = sys._getframe().f_back
            rdb = ray.util.pdb._connect_ray_pdb(
                host=None,
                port=None,
                patch_stdstreams=False,
                quiet=None,
                breakpoint_uuid=(
                    debugger_breakpoint.decode() if debugger_breakpoint else None
                ),
                debugger_external=worker.ray_debugger_external,
            )
            rdb.set_trace(frame=frame)

        return values
@PublicAPI
@client_mode_hook
def put(
    value: Any,
    *,
    _owner: Optional["ray.actor.ActorHandle"] = None,
    _tensor_transport: str = "object_store",
) -> "ray.ObjectRef":
    """Store an object in the object store.

    The object may not be evicted while a reference to the returned ID exists.

    Related patterns and anti-patterns:

    - :doc:`/ray-core/patterns/return-ray-put`
    - :doc:`/ray-core/patterns/pass-large-arg-by-value`
    - :doc:`/ray-core/patterns/closure-capture-large-objects`

    Args:
        value: The Python object to be stored.
        _owner [Experimental]: The actor that should own this object. This
            allows creating objects with lifetimes decoupled from that of the
            creating process. The owner actor must be passed a reference to the
            object prior to the object creator exiting, otherwise the reference
            will still be lost. *Note that this argument is an experimental API
            and should be avoided if possible.*
        _tensor_transport: [Alpha] The tensor transport to use for the GPU object. Currently, this supports "object_store" and "nixl" for tensor transport in ray.put().

    Returns:
        The object ref assigned to this value.
    """
    worker = global_worker
    worker.check_connected()

    # Resolve the serialized owner address up front; any failure here happens
    # before the value is copied into the object store.
    if _owner is None:
        serialize_owner_address = None
    elif isinstance(_owner, ray.actor.ActorHandle):
        # Ensure GlobalState is connected
        ray._private.state.state._connect_and_get_accessor()
        serialize_owner_address = (
            ray._raylet._get_actor_serialized_owner_address_or_none(
                ray._private.state.state.get_actor_info(_owner._actor_id)
            )
        )
        if not serialize_owner_address:
            raise RuntimeError(f"{_owner} is not alive, it's worker_id is empty!")
    else:
        raise TypeError(f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")

    with profiling.profile("ray.put"):
        try:
            object_ref = worker.put_object(
                value,
                owner_address=serialize_owner_address,
                _tensor_transport=_tensor_transport,
            )
        except ObjectStoreFullError:
            logger.info(
                "Put failed since the value was either too large or the "
                "store was full of pinned objects."
            )
            raise
        return object_ref
# Process-wide flag ensuring the "blocking ray.wait inside async method"
# warning is emitted at most once.
blocking_wait_inside_async_warned = False
@PublicAPI
@client_mode_hook
def wait(
    ray_waitables: List[Union[ObjectRef, ObjectRefGenerator]],
    *,
    num_returns: int = 1,
    timeout: Optional[float] = None,
    fetch_local: bool = True,
) -> Tuple[
    List[Union[ObjectRef, ObjectRefGenerator]],
    List[Union[ObjectRef, ObjectRefGenerator]],
]:
    """Return a list of IDs that are ready and a list of IDs that are not.

    If timeout is set, the function returns either when the requested number of
    IDs are ready or when the timeout is reached, whichever occurs first. If it
    is not set, the function simply waits until that number of objects is ready
    and returns that exact number of object refs.

    `ray_waitables` is a list of :class:`~ray.ObjectRef` and
    :class:`~ray.ObjectRefGenerator`.

    The method returns two lists, ready and unready `ray_waitables`.

    ObjectRef:
        object refs that correspond to objects that are available
        in the object store are in the first list.
        The rest of the object refs are in the second list.

    ObjectRefGenerator:
        Generators whose next reference (that will be obtained
        via `next(generator)`) has a corresponding object available
        in the object store are in the first list.
        All other generators are placed in the second list.

    Ordering of the input list of ray_waitables is preserved. That is, if A
    precedes B in the input list, and both are in the ready list, then A will
    precede B in the ready list. This also holds true if A and B are both in
    the remaining list.

    This method will issue a warning if it's running inside an async context.
    Instead of ``ray.wait(ray_waitables)``, you can use
    ``await asyncio.wait(ray_waitables)``.

    Related patterns and anti-patterns:

    - :doc:`/ray-core/patterns/limit-pending-tasks`
    - :doc:`/ray-core/patterns/ray-get-submission-order`

    Args:
        ray_waitables: List of :class:`~ObjectRef` or
            :class:`~ObjectRefGenerator` for objects that may or may
            not be ready. Note that these must be unique.
        num_returns: The number of ray_waitables that should be returned.
        timeout: The maximum amount of time in seconds to wait before
            returning.
        fetch_local: If True, wait for the object to be downloaded onto
            the local node before returning it as ready. If the `ray_waitable`
            is a generator, it will wait until the next object in the generator
            is downloaded. If False, ray.wait() will not trigger fetching of
            objects to the local node and will return immediately once the
            object is available anywhere in the cluster.

    Returns:
        A list of object refs that are ready and a list of the remaining object
        IDs.
    """
    worker = global_worker
    worker.check_connected()

    # Warn (once per process, at debug level) when a blocking wait would stall
    # an async actor's event loop; timeout=0 is a non-blocking poll, so it is
    # exempt.
    if (
        hasattr(worker, "core_worker")
        and worker.core_worker.current_actor_is_asyncio()
        and timeout != 0
    ):
        global blocking_wait_inside_async_warned
        if not blocking_wait_inside_async_warned:
            logger.debug(
                "Using blocking ray.wait inside async method. "
                "This blocks the event loop. Please use `await` "
                "on object ref with asyncio.wait. "
            )
            blocking_wait_inside_async_warned = True

    # Validate the argument shape before touching the core worker.
    if isinstance(ray_waitables, ObjectRef) or isinstance(
        ray_waitables, ObjectRefGenerator
    ):
        raise TypeError(
            "wait() expected a list of ray.ObjectRef or ray.ObjectRefGenerator"
            ", got a single ray.ObjectRef or ray.ObjectRefGenerator "
            f"{ray_waitables}"
        )

    if not isinstance(ray_waitables, list):
        raise TypeError(
            "wait() expected a list of ray.ObjectRef or "
            "ray.ObjectRefGenerator, "
            f"got {type(ray_waitables)}"
        )

    if timeout is not None and timeout < 0:
        raise ValueError(
            "The 'timeout' argument must be nonnegative. " f"Received {timeout}"
        )

    for ray_waitable in ray_waitables:
        if not isinstance(ray_waitable, ObjectRef) and not isinstance(
            ray_waitable, ObjectRefGenerator
        ):
            raise TypeError(
                "wait() expected a list of ray.ObjectRef or "
                "ray.ObjectRefGenerator, "
                f"got list containing {type(ray_waitable)}"
            )
    # NOTE: a second check_connected() call used to sit here; it duplicated
    # the one at the top of this function with no intervening disconnect, so
    # it was removed.

    # TODO(swang): Check main thread.
    with profiling.profile("ray.wait"):
        # TODO(rkn): This is a temporary workaround for
        # https://github.com/ray-project/ray/issues/997. However, it should be
        # fixed in Arrow instead of here.
        if len(ray_waitables) == 0:
            return [], []

        if len(ray_waitables) != len(set(ray_waitables)):
            raise ValueError("Wait requires a list of unique ray_waitables.")
        if num_returns <= 0:
            raise ValueError("Invalid number of objects to return %d." % num_returns)
        if num_returns > len(ray_waitables):
            raise ValueError(
                "num_returns cannot be greater than the number "
                "of ray_waitables provided to ray.wait."
            )

        # The core worker takes a millisecond timeout; None means "forever",
        # approximated by a very large value.
        timeout = timeout if timeout is not None else 10**6
        timeout_milliseconds = int(timeout * 1000)
        ready_ids, remaining_ids = worker.core_worker.wait(
            ray_waitables,
            num_returns,
            timeout_milliseconds,
            fetch_local,
        )
        return ready_ids, remaining_ids
@PublicAPI
@client_mode_hook
def get_actor(name: str, namespace: Optional[str] = None) -> "ray.actor.ActorHandle":
    """Get a handle to a named actor.

    Looks up an actor previously created with
    ``Actor.options(name="name").remote()``. Works for detached and
    non-detached actors alike.

    This is a synchronous call that times out after 60s; the timeout can be
    changed by setting the OS environment variable
    ``RAY_gcs_server_request_timeout_seconds`` before starting the cluster.

    Args:
        name: The name of the actor.
        namespace: The namespace of the actor, or None to specify the current
            namespace.

    Returns:
        ActorHandle to the actor.

    Raises:
        ValueError: if the named actor does not exist.
    """
    # Validate the inputs before touching the cluster.
    if not name:
        raise ValueError("Please supply a non-empty value to get_actor")
    if namespace is not None:
        ray._private.utils.validate_namespace(namespace)

    current_worker = global_worker
    current_worker.check_connected()
    return current_worker.core_worker.get_named_actor_handle(name, namespace or "")
@PublicAPI
@client_mode_hook
def kill(actor: "ray.actor.ActorHandle", *, no_restart: bool = True):
    """Kill an actor forcefully.

    Interrupts any running tasks on the actor, failing them immediately;
    ``atexit`` handlers installed in the actor will not be run.

    To kill the actor but let pending tasks finish, queue a termination task
    with ``actor.__ray_terminate__.remote()`` instead — in that case any
    ``atexit`` handlers installed in the actor *will* run.

    If the actor is detached, subsequent ray.get_actor lookups for its handle
    will fail.

    Args:
        actor: Handle to the actor to kill.
        no_restart: Whether or not this actor should be restarted if
            it's a restartable actor.
    """
    current_worker = global_worker
    current_worker.check_connected()
    # Only actor handles can be killed; tasks go through ray.cancel().
    if not isinstance(actor, ray.actor.ActorHandle):
        raise ValueError(
            "ray.kill() only supported for actors. For tasks, try ray.cancel(). "
            "Got: {}.".format(type(actor))
        )
    current_worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
@PublicAPI
@client_mode_hook
def cancel(
    ray_waitable: Union["ObjectRef[R]", "ObjectRefGenerator[R]"],
    *,
    force: bool = False,
    recursive: bool = True,
) -> None:
    """Cancels a task.

    Cancel API has a different behavior depending on if it is a remote function
    (Task) or a remote Actor method (Actor Task).

    Task:
        If the specified Task is pending execution, it is cancelled and not
        executed. If the Task is currently executing, the behavior depends
        on the `force` flag. When `force=False`, a KeyboardInterrupt is
        raised in Python and when `force=True`, the executing Task
        immediately exits. If the Task is already finished, nothing happens.

        Cancelled Tasks aren't retried. `max_task_retries` aren't respected.

        Calling ray.get on a cancelled Task raises a TaskCancelledError
        if the Task has been scheduled or interrupted.
        It raises a WorkerCrashedError if `force=True`.

        If `recursive=True`, all the child Tasks and Actor Tasks
        are cancelled. If `force=True` and `recursive=True`, `force=True`
        is ignored for child Actor Tasks.

    Actor Task:
        If the specified Task is pending execution, it is cancelled and not
        executed. If the Task is currently executing, the behavior depends
        on the execution model of an Actor. If it is a regular Actor
        or a threaded Actor, the execution isn't cancelled.
        Actor Tasks cannot be interrupted because Actors have
        states. If it is an async Actor, Ray cancels a `asyncio.Task`.
        The semantic of cancellation is equivalent to asyncio's cancellation.
        https://docs.python.org/3/library/asyncio-task.html#task-cancellation
        If the Task has finished, nothing happens.

        Only `force=False` is allowed for an Actor Task. Otherwise, it raises
        `ValueError`. Use `ray.kill(actor)` instead to kill an Actor.

        Cancelled Tasks aren't retried. `max_task_retries` aren't respected.

        Calling ray.get on a cancelled Task raises a TaskCancelledError
        if the Task has been scheduled or interrupted. Also note that
        only async actor tasks can be interrupted.

        If `recursive=True`, all the child Tasks and actor Tasks
        are cancelled.

    Args:
        ray_waitable: :class:`~ObjectRef` and
            :class:`~ObjectRefGenerator`
            returned by the task that should be canceled.
        force: Whether to force-kill a running task by killing
            the worker that is running the task.
        recursive: Whether to try to cancel tasks submitted by the
            task specified.
    """
    worker = ray._private.worker.global_worker
    worker.check_connected()

    # A generator is cancelled through the ref of the task that produced it.
    if isinstance(ray_waitable, ray._raylet.ObjectRefGenerator):
        assert hasattr(ray_waitable, "_generator_ref")
        ray_waitable = ray_waitable._generator_ref

    if not isinstance(ray_waitable, ray.ObjectRef):
        raise TypeError(
            "ray.cancel() only supported for object refs. "
            f"For actors, try ray.kill(). Got: {type(ray_waitable)}."
        )
    return worker.core_worker.cancel_task(ray_waitable, force, recursive)
def _mode(worker=global_worker):
"""This is a wrapper around worker.mode.
We use this wrapper so that in the remote decorator, we can call _mode()
instead of worker.mode. The difference is that when we attempt to
serialize remote functions, we don't attempt to serialize the worker
object, which cannot be serialized.
"""
return worker.mode
def _make_remote(function_or_class, options):
if not function_or_class.__module__:
function_or_class.__module__ = "global"
if inspect.isfunction(function_or_class) or is_cython(function_or_class):
ray_option_utils.validate_task_options(options, in_options=False)
return ray.remote_function.RemoteFunction(
Language.PYTHON,
function_or_class,
None,
options,
)
if inspect.isclass(function_or_class):
ray_option_utils.validate_actor_options(options, in_options=False)
return ray.actor._make_actor(function_or_class, options)
raise TypeError(
"The @ray.remote decorator must be applied to either a function or a class."
)
| RayContext |
python | allegroai__clearml | clearml/backend_api/services/v2_13/tasks.py | {
"start": 188639,
"end": 191077
} | class ____(Request):
"""
Remove a task from its queue.
Fails if task status is not queued.
:param task: Task ID
:type task: str
:param status_reason: Reason for status change
:type status_reason: str
:param status_message: Extra information regarding status change
:type status_message: str
"""
_service = "tasks"
_action = "dequeue"
_version = "2.13"
_schema = {
"definitions": {},
"properties": {
"status_message": {
"description": "Extra information regarding status change",
"type": "string",
},
"status_reason": {
"description": "Reason for status change",
"type": "string",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task"],
"type": "object",
}
def __init__(
self, task: str, status_reason: Optional[str] = None, status_message: Optional[str] = None, **kwargs: Any
) -> None:
super(DequeueRequest, self).__init__(**kwargs)
self.task = task
self.status_reason = status_reason
self.status_message = status_message
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("status_reason")
def status_reason(self) -> Optional[str]:
return self._property_status_reason
@status_reason.setter
def status_reason(self, value: Optional[str]) -> None:
if value is None:
self._property_status_reason = None
return
self.assert_isinstance(value, "status_reason", six.string_types)
self._property_status_reason = value
@schema_property("status_message")
def status_message(self) -> Optional[str]:
return self._property_status_message
@status_message.setter
def status_message(self, value: Optional[str]) -> None:
if value is None:
self._property_status_message = None
return
self.assert_isinstance(value, "status_message", six.string_types)
self._property_status_message = value
| DequeueRequest |
python | google__pytype | build_scripts/build_utils.py | {
"start": 751,
"end": 5329
} | class ____:
"""Utility class to create and manage the build config cache."""
BUILD_CONFIG_CACHE = os.path.join(OUT_DIR, ".build_config.json")
def __init__(self, **kwargs):
self.py_version = kwargs.get("py_version")
self.build_type = kwargs.get("build_type")
def save_to_cache_file(self):
with open(self.BUILD_CONFIG_CACHE, "w") as f:
json.dump(
{"py_version": self.py_version, "build_type": self.build_type}, f
)
def __eq__(self, other):
return (
self.py_version == other.py_version
and self.build_type == other.build_type
)
@classmethod
def current_build_config(cls, debug):
return BuildConfig(**{
"py_version": current_py_version(),
"build_type": "debug" if debug else "None",
})
@classmethod
def read_cached_config(cls):
if os.path.exists(cls.BUILD_CONFIG_CACHE):
with open(cls.BUILD_CONFIG_CACHE) as f:
return BuildConfig(**json.load(f))
else:
# There is no python version cache file during the very first run.
return BuildConfig(**{})
def clean_dir(dir_path, exclude_file_list=None):
exclude_list = exclude_file_list or []
for item in os.listdir(dir_path):
path = os.path.join(dir_path, item)
if os.path.isdir(path):
shutil.rmtree(path)
elif item not in exclude_list:
os.remove(path)
def _clean_out_dir(msg):
print(msg)
clean_dir(OUT_DIR, ["README.md", ".gitignore"])
def parse_ninja_output_line(line):
if line.startswith(NINJA_FAILURE_PREFIX):
return _NINJA_FAILURE_MSG, None, None
elif line.startswith(FAILURE_MSG_PREFIX):
components = line.split(RESULT_MSG_SEP)
log_file = components[2] if len(components) == 3 else None
return _TEST_MODULE_FAIL_MSG, components[1], log_file
elif line.startswith(PASS_MSG_PREFIX):
_, mod_name = line.split(RESULT_MSG_SEP)
return _TEST_MODULE_PASS_MSG, mod_name, None
else:
return _NOT_A_MSG, None, None
def failure_msg(mod_name, log_file):
components = [FAILURE_MSG_PREFIX, mod_name]
if log_file:
components.append(log_file)
return RESULT_MSG_SEP.join(components)
def pass_msg(mod_name):
return RESULT_MSG_SEP.join([PASS_MSG_PREFIX, mod_name])
def run_cmd(cmd, cwd=None, pipe=True):
process_options = {}
if pipe:
process_options = {
"stdout": subprocess.PIPE,
"stderr": subprocess.STDOUT,
"text": True,
}
if cwd:
process_options["cwd"] = cwd
print("+", shlex.join(cmd), flush=True)
with subprocess.Popen(cmd, **process_options) as process:
stdout, _ = process.communicate()
return process.returncode, stdout
def run_cmake(force_clean=False, log_output=False, debug_build=False):
"""Run cmake in the 'out' directory."""
current_config = BuildConfig.current_build_config(debug_build)
if force_clean:
_clean_out_dir("Force-cleaning 'out' directory.")
elif BuildConfig.read_cached_config() != current_config:
_clean_out_dir(
"Previous build config was different; cleaning 'out' directory.\n"
)
else:
print(
"Running with build config same as cached build config; "
"not cleaning 'out' directory.\n"
)
if os.path.exists(os.path.join(OUT_DIR, "build.ninja")):
# Run CMake if it was not already run. If CMake was already run, it
# generates a build.ninja file in the "out" directory.
msg = "Running CMake skipped as the build.ninja file is present ...\n"
print(msg)
if log_output:
with open(CMAKE_LOG, "w") as cmake_log:
cmake_log.write(msg)
return True
print("Running CMake ...\n")
cmd = [
"cmake",
PYTYPE_SRC_ROOT,
"-G",
"Ninja",
f"-DPython_ADDITIONAL_VERSIONS={current_config.py_version}",
"-DCMAKE_POLICY_VERSION_MINIMUM=3.16",
]
if debug_build:
cmd.append("-DCMAKE_BUILD_TYPE=Debug")
else:
if os.name == "nt":
cmd.append("-DCMAKE_BUILD_TYPE=Release")
returncode, stdout = run_cmd(cmd, cwd=OUT_DIR)
# Print the full CMake output to stdout. It is not a lot that it would
# clutter the output, and also gives information about the Python version
# found etc.
print(stdout)
if log_output:
with open(CMAKE_LOG, "w") as cmake_log:
cmake_log.write(stdout)
if returncode != 0:
print(f">>> FAILED: CMake command '{shlex.join(cmd)}'")
if log_output:
print(f">>> Full CMake output is available in '{CMAKE_LOG}'.")
return False
# Cache the config for which the build files have been generated.
current_config.save_to_cache_file()
return True
| BuildConfig |
python | lazyprogrammer__machine_learning_examples | rl3/a2c/atari_wrappers.py | {
"start": 1868,
"end": 3335
} | class ____(gym.Wrapper):
def __init__(self, env):
"""Make end-of-life == end-of-episode, but only reset on true game over.
Done by DeepMind for the DQN and co. since it helps value estimation.
"""
gym.Wrapper.__init__(self, env)
self.lives = 0
self.was_real_done = True
def step(self, action):
obs, reward, done, info = self.env.step(action)
self.was_real_done = done
# check current lives, make loss of life terminal,
# then update lives to handle bonus lives
lives = self.env.unwrapped.ale.lives()
if lives < self.lives and lives > 0:
# for Qbert sometimes we stay in lives == 0 condtion for a few frames
# so its important to keep lives > 0, so that we only reset once
# the environment advertises done.
done = True
self.lives = lives
return obs, reward, done, info
def reset(self, **kwargs):
"""Reset only when lives are exhausted.
This way all states are still reachable even though lives are episodic,
and the learner need not know about any of this behind-the-scenes.
"""
if self.was_real_done:
obs = self.env.reset(**kwargs)
else:
# no-op step to advance from terminal/lost life state
obs, _, _, _ = self.env.step(0)
self.lives = self.env.unwrapped.ale.lives()
return obs
| EpisodicLifeEnv |
python | SmileyChris__easy-thumbnails | easy_thumbnails/tests/test_files.py | {
"start": 17756,
"end": 18916
} | class ____(TestCase):
def setUp(self):
self.source = BytesIO(b'file-contents')
def test_single_fail(self):
source_generators = [FakeSourceGenerator(fail=True)]
self.assertRaises(
ValueError, engine.generate_source_image,
self.source, {}, source_generators, fail_silently=False)
def test_single_silent_fail(self):
source_generators = [FakeSourceGenerator(fail=True)]
image = engine.generate_source_image(
self.source, {}, source_generators)
self.assertEqual(image, None)
def test_multiple_fail(self):
source_generators = [
FakeSourceGenerator(fail=True), FakeSourceGenerator(fail=True)]
self.assertRaises(
engine.NoSourceGenerator, engine.generate_source_image,
self.source, {}, source_generators, fail_silently=False)
def test_multiple_silent_fail(self):
source_generators = [
FakeSourceGenerator(fail=True), FakeSourceGenerator(fail=True)]
image = engine.generate_source_image(
self.source, {}, source_generators)
self.assertEqual(image, None)
| EngineTest |
python | apache__airflow | devel-common/src/tests_common/test_utils/terraform.py | {
"start": 889,
"end": 1669
} | class ____(SystemTest):
"""Base class for Terraform tests."""
TERRAFORM_DIR: str
def setup_method(self) -> None:
self.execute_cmd(["terraform", "init", "-input=false", self.TERRAFORM_DIR])
self.execute_cmd(["terraform", "plan", "-input=false", self.TERRAFORM_DIR])
self.execute_cmd(["terraform", "apply", "-input=false", "-auto-approve", self.TERRAFORM_DIR])
def get_tf_output(self, name):
return "".join(self.check_output(["terraform", "output", name]).decode("utf-8").splitlines())
def teardown_method(self) -> None:
self.execute_cmd(["terraform", "plan", "-destroy", "-input=false", self.TERRAFORM_DIR])
self.execute_cmd(["terraform", "destroy", "-input=false", "-auto-approve", self.TERRAFORM_DIR])
| Terraform |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 23857,
"end": 43833
} | class ____(test_util.TensorFlowTestCase):
def testTraceback(self):
g = ops.Graph()
op1 = ops.Operation.from_node_def(
ops._NodeDef("None", "op1"), g, [], [dtypes.float32_ref, dtypes.float32]
)
self.assertIn("testTraceback", op1.traceback[-2])
@test_util.run_deprecated_v1
def testNoInputs(self):
op = test_ops.float_output_string_output(name="myop").a.op
self.assertEqual(2, len(op.values()))
self.assertEqual(0, len(op.inputs))
self.assertEqual("myop", op.name)
float_t, label_str_t = op.values()
self.assertEqual(dtypes.float32, float_t.dtype)
self.assertEqual(op, float_t.op)
self.assertEqual(0, float_t.value_index)
self.assertEqual(0, len(float_t.consumers()))
self.assertEqual("myop", float_t._as_node_def_input())
self.assertEqual(dtypes.string, label_str_t.dtype)
self.assertEqual(op, label_str_t.op)
self.assertEqual(1, label_str_t.value_index)
self.assertEqual(0, len(label_str_t.consumers()))
self.assertEqual("myop:1", label_str_t._as_node_def_input())
self.assertProtoEquals("op:'FloatOutputStringOutput' name:'myop'",
op.node_def)
@test_util.run_deprecated_v1
def testNoOutputs(self):
op1 = test_ops.float_output(name="myop1").op
float_t, = op1.values()
op2 = test_ops.float_input(float_t, name="myop2")
self.assertEqual(0, len(op2.values()))
self.assertEqual(1, len(op2.inputs))
self.assertIs(float_t, op2.inputs[0])
self.assertEqual(1, len(float_t.consumers()))
self.assertEqual(op2, float_t.consumers()[0])
self.assertProtoEquals("op:'FloatOutput' name:'myop1'", op1.node_def)
self.assertProtoEquals("op:'FloatInput' name:'myop2' input:'myop1'",
op2.node_def)
@test_util.run_deprecated_v1
def testInputsAndOutputs(self):
op1 = test_ops.float_output(name="myop1").op
self.assertEqual(1, len(op1.values()))
float1_t, = op1.values()
op2 = test_ops.float_output_string_output(name="myop2").a.op
self.assertEqual(2, len(op2.values()))
float2_t, label2_str_t = op2.values()
# Note that we consume label2_str_t twice here.
op3 = test_ops.foo2(float1_t, label2_str_t, label2_str_t, name="myop3").d.op
self.assertEqual(2, len(op3.values()))
self.assertEqual(1, len(float1_t.consumers()))
self.assertEqual(op3, float1_t.consumers()[0])
self.assertEqual(0, len(float2_t.consumers()))
self.assertEqual(2, len(label2_str_t.consumers()))
self.assertEqual(op3, label2_str_t.consumers()[0])
self.assertEqual(op3, label2_str_t.consumers()[1])
self.assertProtoEquals("""
op:'Foo2' name:'myop3'
input:'myop1' input:'myop2:1' input:'myop2:1'
""", op3.node_def)
def testDeviceObject(self):
op = ops.Operation.from_node_def(
ops._NodeDef("None", "myop"), ops.Graph(), [], []
)
op._set_device("/job:goo/device:GPU:0")
self.assertProtoEquals(
"op:'None' name:'myop' device:'/job:goo/device:GPU:0' ", op.node_def)
op = ops.Operation.from_node_def(
ops._NodeDef("None", "op2"), ops.Graph(), [], []
)
op._set_device(
pydev.DeviceSpec(
job="muu", device_type="CPU", device_index=0))
self.assertProtoEquals(
"op:'None' name:'op2' device:'/job:muu/device:CPU:0'", op.node_def)
def testReferenceInput(self):
g = ops.Graph()
op1 = ops.Operation.from_node_def(
ops._NodeDef("RefOutputFloatOutput", "op1"),
g,
[],
[dtypes.float32_ref, dtypes.float32],
)
self.assertProtoEquals("op:'RefOutputFloatOutput' name:'op1'", op1.node_def)
self.assertEqual([], list(op1.inputs))
ref_t, nonref_t = op1.values()
# NOTE(mrry): Must specify input_types to preserve ref-typed input.
op2 = ops.Operation.from_node_def(
ops._NodeDef("RefInputFloatInput", "op2"),
g,
[ref_t, nonref_t],
[],
input_types=[dtypes.float32_ref, dtypes.float32],
)
self.assertProtoEquals(
"op:'RefInputFloatInput' name:'op2' input:'op1' input:'op1:1'",
op2.node_def)
self.assertEqual([ref_t, nonref_t], list(op2.inputs))
op3 = ops.Operation.from_node_def(
ops._NodeDef("TwoFloatInputs", "op3"), g, [ref_t, nonref_t], []
)
self.assertProtoEquals(
"op:'TwoFloatInputs' name:'op3' input:'op1' input:'op1:1'",
op3.node_def)
def testInvalidNames(self):
g = ops.Graph()
with self.assertRaises(ValueError):
ops.Operation.from_node_def(ops._NodeDef("op", ""), g)
with self.assertRaises(ValueError):
ops.Operation.from_node_def(ops._NodeDef("op", "_invalid"), g)
with self.assertRaises(ValueError):
ops.Operation.from_node_def(ops._NodeDef("op", "-invalid"), g)
with self.assertRaises(ValueError):
ops.Operation.from_node_def(ops._NodeDef("op", "/invalid"), g)
with self.assertRaises(ValueError):
ops.Operation.from_node_def(ops._NodeDef("op", "invalid:0"), g)
@test_util.run_deprecated_v1
def testNoShapeFunction(self):
op = test_ops.a()
self.assertEqual(tensor_shape.unknown_shape(), op.get_shape())
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedArray(self):
values = [[2], [3], [5], [7]]
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
def testShapeTuple(self):
with self.cached_session():
c = constant_op.constant(1)
self.assertEqual(c._shape_tuple(), ()) # pylint: disable=protected-access
def testConvertToTensorEager(self):
with context.eager_mode():
t = constant_op.constant(1)
self.assertTrue(isinstance(t, ops.EagerTensor))
converted = ops.convert_to_tensor(t)
self.assertTrue(isinstance(converted, ops.EagerTensor))
converted = ops.convert_to_tensor(1)
self.assertTrue(isinstance(converted, ops.EagerTensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTuple(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(ops.convert_to_tensor(values)))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedTensors(self):
values = ((2,), (3,), (5,), (7,))
tensor = ops.convert_to_tensor(
[constant_op.constant(row) for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
tensor = ops.convert_to_tensor(
[[constant_op.constant(v) for v in row] for row in values])
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(values, self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorNestedMix(self):
values = ([2], (3,), [constant_op.constant(5)], constant_op.constant([7]))
tensor = ops.convert_to_tensor(values)
self.assertAllEqual((4, 1), tensor.get_shape().as_list())
self.assertAllEqual(((2,), (3,), (5,), (7,)), self.evaluate(tensor))
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorPreferred(self):
values = [2, 3, 5, 7]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.float32)
self.assertEqual(dtypes.float32, tensor.dtype)
# Convert empty tensor to anything.
values = []
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
# The preferred dtype is a type error and will convert to
# float32 instead.
values = [1.23]
tensor = ops.convert_to_tensor(values, preferred_dtype=dtypes.int64)
self.assertEqual(dtypes.float32, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToInvalidTensorType(self):
with self.assertRaises(TypeError):
# Forcing an invalid dtype should fail with a type error.
values = [1.23]
ops.convert_to_tensor(values, dtype=dtypes.int64)
@test_util.run_in_graph_and_eager_modes
def testConvertToLongLongTensorType(self):
tensor = ops.convert_to_tensor(
# Get a numpy array of dtype NPY_LONGLONG
np.prod(constant_op.constant([1])._shape_tuple()),
dtype=dtypes.int64)
self.assertEqual(dtypes.int64, tensor.dtype)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorFromValidTensor(self):
tensor = constant_op.constant(413, dtype=dtypes.int64)
converted = ops.convert_to_tensor(tensor, dtype=dtypes.int64)
# If dtype is compatible, the returned tensor should be the same instance.
self.assertEqual(tensor, converted)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorFromInvalidTensor(self):
tensor = constant_op.constant(42.0, dtype=dtypes.float32)
with self.assertRaises(ValueError):
ops.convert_to_tensor(tensor, dtype=dtypes.int32)
@test_util.run_in_graph_and_eager_modes
def testConvertToTensorProtocol(self):
class TensorCompatible:
def __tf_tensor__(self, dtype=None, name=None):
return constant_op.constant((1, 2, 3), dtype=dtype, name=name)
tc = TensorCompatible()
tensor = ops.convert_to_tensor(tc, dtype=dtypes.int32)
self.assertEqual(tensor.dtype, dtypes.int32)
self.assertAllEqual((1, 2, 3), self.evaluate(tensor))
@test_util.run_deprecated_v1
def testNoConvert(self):
# Operation cannot be converted to Tensor.
op = gen_control_flow_ops.no_op()
with self.assertRaisesRegex(TypeError,
"can't convert Operation '.+' to Tensor"):
ops.convert_to_tensor(op)
def testStr(self):
node_def = ops._NodeDef("None", "op1")
op = ops.Operation.from_node_def(
node_def, ops.Graph(), [], [dtypes.float32]
)
self.assertEqual(str(node_def), str(op))
def testRepr(self):
op = ops.Operation.from_node_def(
ops._NodeDef("None", "op1"), ops.Graph(), [], [dtypes.float32]
)
self.assertEqual("<tf.Operation 'op1' type=None>", repr(op))
@test_util.run_deprecated_v1
def testGetAttr(self):
op = test_ops.default_attrs()
self.assertEqual(op.get_attr("string_val"), b"abc")
self.assertEqual(op.get_attr("string_list_val"), [b"abc", b""])
self.assertEqual(op.get_attr("int_val"), 123)
self.assertEqual(op.get_attr("int_list_val"), [1, 2, 3])
self.assertEqual(op.get_attr("float_val"), 10.0)
self.assertEqual(op.get_attr("float_list_val"), [10.0])
self.assertEqual(op.get_attr("bool_val"), True)
self.assertEqual(op.get_attr("bool_list_val"), [True, False])
self.assertEqual(op.get_attr("shape_val"),
tensor_shape.as_shape([2, 1]).as_proto())
self.assertEqual(op.get_attr("shape_list_val"),
[tensor_shape.as_shape([]).as_proto(),
tensor_shape.as_shape([1]).as_proto()])
self.assertEqual(op.get_attr("tensor_val"),
tensor_util.make_tensor_proto(1, dtypes.int32))
self.assertEqual(op.get_attr("tensor_list_val"),
[tensor_util.make_tensor_proto(1, dtypes.int32)])
type_val = op.get_attr("type_val")
# First check that type_val is a DType, because the assertEqual will work
# no matter what since DType overrides __eq__
self.assertIsInstance(type_val, dtypes.DType)
self.assertEqual(type_val, dtypes.int32)
type_list_val = op.get_attr("type_list_val")
self.assertTrue(all(isinstance(x, dtypes.DType) for x in type_list_val))
self.assertEqual(type_list_val, [dtypes.int32, dtypes.float32])
@function.Defun(dtypes.float32, func_name="MyFunc")
def func(x):
return x
op = test_ops.func_attr(func)
self.assertEqual(op.get_attr("f"),
attr_value_pb2.NameAttrList(name="MyFunc"))
# Try fetching missing attr
with self.assertRaisesRegex(
ValueError, "Operation 'FuncAttr' has no attr named 'FakeAttr'."):
op.get_attr("FakeAttr")
# TODO(b/65162920): remove this test when users who are directly mutating the
# node_def have been updated to proper usage.
@test_util.run_deprecated_v1
def testSetAttr(self):
op = test_ops.int_attr().op
op._set_attr("foo", attr_value_pb2.AttrValue(i=2))
# TODO(skyewm): add node_def check
self.assertEqual(op.get_attr("foo"), 2)
@test_util.run_v2_only
def testSetFullType(self):
@def_function.function
def test_fn():
ds = dataset_ops.Dataset.range(3)._variant_tensor
ds.op.experimental_set_type(
full_type_pb2.FullTypeDef(type_id=full_type_pb2.TFT_PRODUCT))
self.assertEqual(ds.op.node_def.experimental_type.type_id,
full_type_pb2.TFT_PRODUCT)
test_fn()
# TODO(nolivia): test all error cases
def testAddControlInput(self):
with ops.Graph().as_default():
x = constant_op.constant(1).op
y = constant_op.constant(2).op
z = constant_op.constant(3).op
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_input(x) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x])
z._add_control_inputs([x, y, y]) # pylint: disable=protected-access
self.assertEqual(z.control_inputs, [x, y])
self.assertEqual(x._control_outputs, [z])
@test_util.run_deprecated_v1
def testRemoveAllControlInputs(self):
a = constant_op.constant(1)
with ops.control_dependencies([a]):
b = constant_op.constant(2)
c = constant_op.constant(3)
d = constant_op.constant(4)
e = constant_op.constant(5)
with ops.control_dependencies([a, c]):
f = d + e
self.assertEqual(a.op.control_inputs, [])
self.assertEqual(b.op.control_inputs, [a.op])
self.assertEqual(f.op.control_inputs, [a.op, c.op])
a.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(a.op.control_inputs, [])
b.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(b.op.control_inputs, [])
f.op._remove_all_control_inputs() # pylint: disable=protected-access
self.assertEqual(f.op.control_inputs, [])
self.assertEqual(list(f.op.inputs), [d, e])
@test_util.run_deprecated_v1
def testControlInputCycle(self):
graph = ops.Graph()
with graph.as_default():
z = constant_op.constant(0)
x = constant_op.constant(1)
y = constant_op.constant(2)
y.op._add_control_input(z.op) # pylint: disable=protected-access
y.op._add_control_input(x.op) # pylint: disable=protected-access
x.op._add_control_input(y.op) # pylint: disable=protected-access
with self.session(graph=graph) as sess:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Graph is invalid, contains a cycle with 2 nodes"):
self.evaluate(x)
def testUpdateInput(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = x + y
z.op._update_input(0, y) # pylint: disable=protected-access
self.assertEqual(list(z.op.inputs), [y, y])
self.assertEqual(x.consumers(), [])
self.assertEqual(y.consumers(), [z.op, z.op])
with session.Session(graph=g) as sess:
self.assertEqual(self.evaluate(z), 4)
z.op._update_input(0, x) # pylint: disable=protected-access
self.assertEqual(list(z.op.inputs), [x, y])
self.assertEqual(x.consumers(), [z.op])
self.assertEqual(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEqual(self.evaluate(z), 3)
z.op._update_input(1, y) # pylint: disable=protected-access
self.assertEqual(list(z.op.inputs), [x, y])
self.assertEqual(x.consumers(), [z.op])
self.assertEqual(y.consumers(), [z.op])
with session.Session(graph=g) as sess:
self.assertEqual(self.evaluate(z), 3)
def testUpdateInputGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
z = y * 2
with self.assertRaisesRegex(ValueError, "must be from the same graph"):
z.op._update_input(0, x) # pylint: disable=protected-access
def testUpdateInputTypeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(0)
x = constant_op.constant("")
y = constant_op.constant(1)
z = y + w
z.op._update_input(0, x) # pylint: disable=protected-access
with session.Session(graph=g) as sess:
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"Input 0 of node add was passed string from Const_1:0 incompatible "
"with expected int32"):
self.evaluate(z)
def testUpdateInputShapeError(self):
g = ops.Graph()
with g.as_default():
w = constant_op.constant(2, shape=[3, 1])
x = constant_op.constant(0, shape=[3, 1])
y = constant_op.constant(1, shape=[2, 2])
z = w + x
with self.assertRaisesRegex(
errors.InvalidArgumentError,
r"Cannot update edge, incompatible shapes: \[2,2\] and \[3,1\]"):
z.op._update_input(0, y) # pylint: disable=protected-access
def testUpdateInputOutOfRange(self):
g = ops.Graph()
with g.as_default():
x = constant_op.constant(1)
with self.assertRaisesRegex(
errors.OutOfRangeError,
r"Cannot update edge. Input index \[1\] is greater than the number of "
r"total inputs \[0\]."):
x.op._update_input(1, x) # pylint: disable=protected-access
@test_util.enable_control_flow_v2
@test_util.run_v1_only("b/120545219")
def testAddWhileInput(self):
@def_function.function
def test():
output = while_loop.while_loop(lambda x: x < 3, lambda x: x + 1, [1])
while_op = output.op
self.assertEqual(while_op.type, "StatelessWhile")
orig_num_inputs = len(while_op.inputs)
# Make sure we can handle the while op having a control input.
while_op._add_control_input(constant_op.constant(0).op)
new_input1 = constant_op.constant(1.0)
new_input2 = constant_op.constant(True)
# Clear output shapes to bypass shape checking.
while_op._set_shape_list_attr("output_shapes", [])
while_op._set_type_list_attr("T", [t.dtype for t in while_op.inputs] +
[new_input1.dtype, new_input2.dtype])
while_op._add_while_inputs([new_input1, new_input2])
# Can't add an edge beyond what's specified by "T"
with self.assertRaises(errors.OutOfRangeError):
while_op._add_while_inputs([new_input2])
self.assertLen(while_op.inputs, orig_num_inputs + 2) # pylint: disable=g-deprecated-assert
test()
@test_util.run_deprecated_v1
def testOpDef(self):
x = constant_op.constant(0)
y = constant_op.constant(1)
z = x + y
self.assertEqual(x.op.op_def.name, "Const")
self.assertLen(x.op.op_def.input_arg, 0)
self.assertLen(x.op.op_def.output_arg, 1)
self.assertRegex(z.op.op_def.name, "Add(V2)?")
self.assertLen(z.op.op_def.input_arg, 2)
self.assertLen(z.op.op_def.output_arg, 1)
def testInputFromDifferentGraphError(self):
g_0 = ops.Graph()
g_1 = ops.Graph()
with g_0.as_default():
x = constant_op.constant(1)
with g_1.as_default():
y = constant_op.constant(2)
with self.assertRaisesRegex(ValueError, "must be from the same graph"):
y * x # pylint: disable=pointless-statement
def testInputsAreImmutable(self):
g = ops.Graph()
with g.as_default():
x = test_ops.int_output()
op = test_ops.int_input_int_output(x, name="myop").op
with self.assertRaisesRegex(AttributeError,
"'tuple' object has no attribute 'append'"):
op.inputs.append(None)
| OperationTest |
python | getsentry__sentry | src/sentry/api/serializers/models/organization.py | {
"start": 31098,
"end": 31300
} | class ____(
DetailedOrganizationSerializerResponse
):
teams: list[TeamSerializerResponse]
projects: list[OrganizationProjectResponse]
| DetailedOrganizationSerializerWithProjectsAndTeamsResponse |
python | kamyu104__LeetCode-Solutions | Python/find-the-k-th-character-in-string-game-ii.py | {
"start": 50,
"end": 427
} | class ____(object):
def kthCharacter(self, k, operations):
"""
:type k: int
:type operations: List[int]
:rtype: str
"""
result = 0
k -= 1
for i in xrange(min(len(operations), k.bit_length())):
if k&(1<<i):
result = (result+operations[i])%26
return chr(ord('a')+result)
| Solution |
python | pydantic__pydantic | pydantic/networks.py | {
"start": 1683,
"end": 4147
} | class ____:
"""Url constraints.
Attributes:
max_length: The maximum length of the url. Defaults to `None`.
allowed_schemes: The allowed schemes. Defaults to `None`.
host_required: Whether the host is required. Defaults to `None`.
default_host: The default host. Defaults to `None`.
default_port: The default port. Defaults to `None`.
default_path: The default path. Defaults to `None`.
preserve_empty_path: Whether to preserve empty URL paths. Defaults to `None`.
"""
max_length: int | None = None
allowed_schemes: list[str] | None = None
host_required: bool | None = None
default_host: str | None = None
default_port: int | None = None
default_path: str | None = None
preserve_empty_path: bool | None = None
def __hash__(self) -> int:
return hash(
(
self.max_length,
tuple(self.allowed_schemes) if self.allowed_schemes is not None else None,
self.host_required,
self.default_host,
self.default_port,
self.default_path,
self.preserve_empty_path,
)
)
@property
def defined_constraints(self) -> dict[str, Any]:
"""Fetch a key / value mapping of constraints to values that are not None. Used for core schema updates."""
return {field.name: value for field in fields(self) if (value := getattr(self, field.name)) is not None}
def __get_pydantic_core_schema__(self, source: Any, handler: GetCoreSchemaHandler) -> core_schema.CoreSchema:
schema = handler(source)
# for function-wrap schemas, url constraints is applied to the inner schema
# because when we generate schemas for urls, we wrap a core_schema.url_schema() with a function-wrap schema
# that helps with validation on initialization, see _BaseUrl and _BaseMultiHostUrl below.
schema_to_mutate = schema['schema'] if schema['type'] == 'function-wrap' else schema
if annotated_type := schema_to_mutate['type'] not in ('url', 'multi-host-url'):
raise PydanticUserError(
f"'UrlConstraints' cannot annotate '{annotated_type}'.", code='invalid-annotated-type'
)
for constraint_key, constraint_value in self.defined_constraints.items():
schema_to_mutate[constraint_key] = constraint_value
return schema
| UrlConstraints |
python | PrefectHQ__prefect | src/integrations/prefect-gcp/prefect_gcp/workers/vertex.py | {
"start": 1908,
"end": 8693
} | class ____(BaseVariables):
"""
Default variables for the Vertex AI worker.
The schema for this class is used to populate the `variables` section of the default
base job template.
"""
region: str = Field(
description="The region where the Vertex AI Job resides.",
examples=["us-central1"],
)
image: str = Field(
title="Image Name",
description=(
"The URI of a container image in the Container or Artifact Registry, "
"used to run your Vertex AI Job. Note that Vertex AI will need access"
"to the project and region where the container image is stored. See "
"https://cloud.google.com/vertex-ai/docs/training/create-custom-container"
),
examples=["gcr.io/your-project/your-repo:latest"],
)
credentials: Optional[GcpCredentials] = Field(
title="GCP Credentials",
default_factory=GcpCredentials,
description="The GCP Credentials used to initiate the "
"Vertex AI Job. If not provided credentials will be "
"inferred from the local environment.",
)
machine_type: str = Field(
title="Machine Type",
description=(
"The machine type to use for the run, which controls "
"the available CPU and memory. "
"See https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec"
),
default="n1-standard-4",
)
accelerator_type: Optional[str] = Field(
title="Accelerator Type",
description=(
"The type of accelerator to attach to the machine. "
"See https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec"
),
examples=["NVIDIA_TESLA_K80"],
default=None,
)
accelerator_count: Optional[int] = Field(
title="Accelerator Count",
description=(
"The number of accelerators to attach to the machine. "
"See https://cloud.google.com/vertex-ai/docs/reference/rest/v1/MachineSpec"
),
examples=[1],
default=None,
)
boot_disk_type: str = Field(
title="Boot Disk Type",
description="The type of boot disk to attach to the machine.",
default="pd-ssd",
)
boot_disk_size_gb: int = Field(
title="Boot Disk Size (GB)",
description="The size of the boot disk to attach to the machine, in gigabytes.",
default=100,
)
maximum_run_time_hours: int = Field(
default=1,
title="Maximum Run Time (Hours)",
description="The maximum job running time, in hours",
)
network: Optional[str] = Field(
default=None,
title="Network",
description="The full name of the Compute Engine network"
"to which the Job should be peered. Private services access must "
"already be configured for the network. If left unspecified, the job "
"is not peered with any network. "
"For example: projects/12345/global/networks/myVPC",
)
reserved_ip_ranges: Optional[List[str]] = Field(
default=None,
title="Reserved IP Ranges",
description="A list of names for the reserved ip ranges under the VPC "
"network that can be used for this job. If set, we will deploy the job "
"within the provided ip ranges. Otherwise, the job will be deployed to "
"any ip ranges under the provided VPC network.",
)
scheduling: Optional[dict[str, Any]] = Field(
default=None,
title="Scheduling Options",
description=(
"A dictionary with scheduling options for a CustomJob, "
"these are parameters related to queuing, and scheduling custom jobs. "
"If unspecified default scheduling options are used. "
"The 'maximum_run_time_hours' variable sets the job timeout "
"field 'scheduling.timeout' for backward compatibility. "
"See SDK: https://cloud.google.com/python/docs/reference/aiplatform/latest/google.cloud.aiplatform_v1.types.Scheduling "
"See REST: https://cloud.google.com/vertex-ai/docs/reference/rest/v1/CustomJobSpec#Scheduling"
),
examples=[
{"scheduling": {"strategy": "FLEX_START", "max_wait_duration": "1800s"}}
],
)
service_account_name: Optional[str] = Field(
default=None,
title="Service Account Name",
description=(
"Specifies the service account to use "
"as the run-as account in Vertex AI. The worker submitting jobs must have "
"act-as permission on this run-as account. If unspecified, the AI "
"Platform Custom Code Service Agent for the CustomJob's project is "
"used. Takes precedence over the service account found in GCP credentials, "
"and required if a service account cannot be detected in GCP credentials."
),
)
enable_web_access: bool = Field(
default=False,
title="Enable Web Access",
description=(
"Whether you want Vertex AI to enable `interactive shell access` "
"See https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell for how to access your job via interactive console when running."
),
examples=[True],
)
enable_dashboard_access: bool = Field(
default=False,
title="Enable Dashboard Access",
description=(
"Whether to enable access to the customized dashboard in the training chief container. "
"See https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell#get-uri for access instructions."
),
examples=[True],
)
job_watch_poll_interval: float = Field(
default=5.0,
title="Poll Interval (Seconds)",
description=(
"The amount of time to wait between GCP API calls while monitoring the "
"state of a Vertex AI Job."
),
)
def _get_base_job_spec() -> Dict[str, Any]:
"""Returns a base job body to use for job spec validation.
Note that the values are stubbed and are not used for the actual job."""
return {
"maximum_run_time_hours": "1",
"worker_pool_specs": [
{
"replica_count": 1,
"container_spec": {
"image_uri": "gcr.io/your-project/your-repo:latest",
},
"machine_spec": {
"machine_type": "n1-standard-4",
},
"disk_spec": {
"boot_disk_type": "pd-ssd",
"boot_disk_size_gb": "100",
},
}
],
}
| VertexAIWorkerVariables |
python | davidhalter__parso | parso/python/pep8.py | {
"start": 1143,
"end": 1621
} | class ____(object):
type = IndentationTypes.SUITE
def __init__(self, config, indentation, parent=None):
self.bracket_indentation = self.indentation = indentation
self.parent = parent
def __repr__(self):
return '<%s>' % self.__class__.__name__
def get_latest_suite_node(self):
n = self
while n is not None:
if n.type == IndentationTypes.SUITE:
return n
n = n.parent
| IndentationNode |
python | kamyu104__LeetCode-Solutions | Python/parse-lisp-expression.py | {
"start": 33,
"end": 1212
} | class ____(object):
def evaluate(self, expression):
"""
:type expression: str
:rtype: int
"""
def getval(lookup, x):
return lookup.get(x, x)
def evaluate(tokens, lookup):
if tokens[0] in ('add', 'mult'):
a, b = map(int, map(lambda x: getval(lookup, x), tokens[1:]))
return str(a+b if tokens[0] == 'add' else a*b)
for i in xrange(1, len(tokens)-1, 2):
if tokens[i+1]:
lookup[tokens[i]] = getval(lookup, tokens[i+1])
return getval(lookup, tokens[-1])
tokens, lookup, stk = [''], {}, []
for c in expression:
if c == '(':
if tokens[0] == 'let':
evaluate(tokens, lookup)
stk.append((tokens, dict(lookup)))
tokens = ['']
elif c == ' ':
tokens.append('')
elif c == ')':
val = evaluate(tokens, lookup)
tokens, lookup = stk.pop()
tokens[-1] += val
else:
tokens[-1] += c
return int(tokens[0])
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.