language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pallets__markupsafe | src/markupsafe/__init__.py | {
"start": 264,
"end": 332
} | class ____(t.Protocol):
def __html__(self, /) -> str: ...
| _HasHTML |
python | huggingface__transformers | src/transformers/models/dpt/modeling_dpt.py | {
"start": 20320,
"end": 25609
} | class ____(nn.Module):
"""
This class reassembles the hidden states of the backbone into image-like feature representations at various
resolutions.
This happens in 3 stages:
1. Map the N + 1 tokens to a set of N tokens, by taking into account the readout ([CLS]) token according to
`config.readout_type`.
2. Project the channel dimension of the hidden states according to `config.neck_hidden_sizes`.
3. Resizing the spatial dimensions (height, width).
Args:
config (`[DPTConfig]`):
Model configuration class defining the model architecture.
"""
def __init__(self, config):
super().__init__()
self.config = config
self.layers = nn.ModuleList()
if config.is_hybrid:
self._init_reassemble_dpt_hybrid(config)
else:
self._init_reassemble_dpt(config)
self.neck_ignore_stages = config.neck_ignore_stages
def _init_reassemble_dpt_hybrid(self, config):
r""" "
For DPT-Hybrid the first 2 reassemble layers are set to `nn.Identity()`, please check the official
implementation: https://github.com/isl-org/DPT/blob/f43ef9e08d70a752195028a51be5e1aff227b913/dpt/vit.py#L438
for more details.
"""
for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors):
if i <= 1:
self.layers.append(nn.Identity())
elif i > 1:
self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor))
if config.readout_type != "project":
raise ValueError(f"Readout type {config.readout_type} is not supported for DPT-Hybrid.")
# When using DPT-Hybrid the readout type is set to "project". The sanity check is done on the config file
self.readout_projects = nn.ModuleList()
hidden_size = _get_backbone_hidden_size(config)
for i in range(len(config.neck_hidden_sizes)):
if i <= 1:
self.readout_projects.append(nn.Sequential(nn.Identity()))
elif i > 1:
self.readout_projects.append(
nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act])
)
def _init_reassemble_dpt(self, config):
for i, factor in zip(range(len(config.neck_hidden_sizes)), config.reassemble_factors):
self.layers.append(DPTReassembleLayer(config, channels=config.neck_hidden_sizes[i], factor=factor))
if config.readout_type == "project":
self.readout_projects = nn.ModuleList()
hidden_size = _get_backbone_hidden_size(config)
for _ in range(len(config.neck_hidden_sizes)):
self.readout_projects.append(
nn.Sequential(nn.Linear(2 * hidden_size, hidden_size), ACT2FN[config.hidden_act])
)
def forward(self, hidden_states: list[torch.Tensor], patch_height=None, patch_width=None) -> list[torch.Tensor]:
"""
Args:
hidden_states (`list[torch.FloatTensor]`, each of shape `(batch_size, sequence_length + 1, hidden_size)`):
List of hidden states from the backbone.
"""
out = []
for i, hidden_state in enumerate(hidden_states):
if i not in self.neck_ignore_stages:
# reshape to (batch_size, num_channels, height, width)
cls_token, hidden_state = hidden_state[:, 0], hidden_state[:, 1:]
batch_size, sequence_length, num_channels = hidden_state.shape
if patch_height is not None and patch_width is not None:
hidden_state = hidden_state.reshape(batch_size, patch_height, patch_width, num_channels)
else:
size = torch_int(sequence_length**0.5)
hidden_state = hidden_state.reshape(batch_size, size, size, num_channels)
hidden_state = hidden_state.permute(0, 3, 1, 2).contiguous()
feature_shape = hidden_state.shape
if self.config.readout_type == "project":
# reshape to (batch_size, height*width, num_channels)
hidden_state = hidden_state.flatten(2).permute((0, 2, 1))
readout = cls_token.unsqueeze(1).expand_as(hidden_state)
# concatenate the readout token to the hidden states and project
hidden_state = self.readout_projects[i](torch.cat((hidden_state, readout), -1))
# reshape back to (batch_size, num_channels, height, width)
hidden_state = hidden_state.permute(0, 2, 1).reshape(feature_shape)
elif self.config.readout_type == "add":
hidden_state = hidden_state.flatten(2) + cls_token.unsqueeze(-1)
hidden_state = hidden_state.reshape(feature_shape)
hidden_state = self.layers[i](hidden_state)
out.append(hidden_state)
return out
def _get_backbone_hidden_size(config):
if config.backbone_config is not None and config.is_hybrid is False:
return config.backbone_config.hidden_size
else:
return config.hidden_size
| DPTReassembleStage |
python | coleifer__peewee | peewee.py | {
"start": 152328,
"end": 152914
} | class ____(object):
def __init__(self, cursor_wrapper):
self.cursor_wrapper = cursor_wrapper
self.index = 0
def __iter__(self):
return self
def next(self):
if self.index < self.cursor_wrapper.count:
obj = self.cursor_wrapper.row_cache[self.index]
elif not self.cursor_wrapper.populated:
self.cursor_wrapper.iterate()
obj = self.cursor_wrapper.row_cache[self.index]
else:
raise StopIteration
self.index += 1
return obj
__next__ = next
# FIELDS
| ResultIterator |
python | dagster-io__dagster | python_modules/libraries/dagster-tableau/dagster_tableau/resources.py | {
"start": 36799,
"end": 37485
} | class ____(BaseTableauWorkspace):
"""Represents a workspace in Tableau Server and provides utilities
to interact with Tableau APIs.
"""
server_name: str = Field(..., description="The server name of the Tableau Server workspace.")
def build_client(self) -> None:
self._client = TableauServerClient(
connected_app_client_id=self.connected_app_client_id,
connected_app_secret_id=self.connected_app_secret_id,
connected_app_secret_value=self.connected_app_secret_value,
username=self.username,
site_name=self.site_name,
server_name=self.server_name,
)
@record
| TableauServerWorkspace |
python | fluentpython__example-code-2e | 21-async/domains/asyncio/domainlib.py | {
"start": 123,
"end": 835
} | class ____(NamedTuple): # <1>
domain: str
found: bool
OptionalLoop = Optional[asyncio.AbstractEventLoop] # <2>
async def probe(domain: str, loop: OptionalLoop = None) -> Result: # <3>
if loop is None:
loop = asyncio.get_running_loop()
try:
await loop.getaddrinfo(domain, None)
except socket.gaierror:
return Result(domain, False)
return Result(domain, True)
async def multi_probe(domains: Iterable[str]) -> AsyncIterator[Result]: # <4>
loop = asyncio.get_running_loop()
coros = [probe(domain, loop) for domain in domains] # <5>
for coro in asyncio.as_completed(coros): # <6>
result = await coro # <7>
yield result # <8>
| Result |
python | Netflix__metaflow | metaflow/plugins/azure/azure_exceptions.py | {
"start": 267,
"end": 433
} | class ____(MetaflowException):
headline = "Missing required packages 'azure-identity' and 'azure-storage-blob' and 'azure-keyvault-secrets'"
| MetaflowAzurePackageError |
python | huggingface__transformers | src/transformers/models/dinov2_with_registers/modeling_dinov2_with_registers.py | {
"start": 21790,
"end": 23992
} | class ____(Dinov2WithRegistersPreTrainedModel):
def __init__(self, config: Dinov2WithRegistersConfig) -> None:
super().__init__(config)
self.num_labels = config.num_labels
self.dinov2_with_registers = Dinov2WithRegistersModel(config)
# Classifier head
self.classifier = (
nn.Linear(config.hidden_size * 2, config.num_labels) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@can_return_tuple
@auto_docstring
def forward(
self,
pixel_values: Optional[torch.Tensor] = None,
labels: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> ImageClassifierOutput:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
outputs: BaseModelOutputWithPooling = self.dinov2_with_registers(pixel_values, **kwargs)
sequence_output = outputs.last_hidden_state # batch_size, sequence_length, hidden_size
cls_token = sequence_output[:, 0]
# cls and register tokens should not be included in patch tokens variable
patch_tokens = sequence_output[:, 1 + self.config.num_register_tokens :]
linear_input = torch.cat([cls_token, patch_tokens.mean(dim=1)], dim=1)
logits = self.classifier(linear_input)
loss = None
if labels is not None:
loss = self.loss_function(labels, logits, self.config, **kwargs)
return ImageClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@auto_docstring(
custom_intro="""
Dinov2WithRegisters backbone, to be used with frameworks like DETR and MaskFormer.
"""
)
| Dinov2WithRegistersForImageClassification |
python | getsentry__sentry | src/sentry/models/organizationslugreservationreplica.py | {
"start": 446,
"end": 1496
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization_slug_reservation_id = HybridCloudForeignKey(
"sentry.organizationslugreservation",
on_delete="CASCADE",
unique=True,
)
slug = models.SlugField(unique=True, db_index=True)
organization_id = BoundedBigIntegerField(db_index=True)
user_id = BoundedBigIntegerField(db_index=True, null=True)
region_name = models.CharField(max_length=REGION_NAME_LENGTH, null=False)
reservation_type = BoundedBigIntegerField(
choices=OrganizationSlugReservationType.as_choices(),
null=False,
default=OrganizationSlugReservationType.PRIMARY.value,
)
date_added = models.DateTimeField(null=False, default=timezone.now, editable=False)
class Meta:
app_label = "hybridcloud"
db_table = "hybridcloud_organizationslugreservationreplica"
unique_together = (("organization_id", "reservation_type"),)
__repr__ = sane_repr("organization_id", "slug")
| OrganizationSlugReservationReplica |
python | facebookresearch__faiss | tests/test_fast_scan.py | {
"start": 10935,
"end": 11778
} | class ____(TestImplems):
def build_fast_scan_index(self, index, params):
qbs, bbs = params
index2 = faiss.IndexPQFastScan(index, bbs)
index2.qbs = qbs
index2.implem = 14
return index2
def test_1_32(self):
self.do_with_params(32, (1, 32))
def test_1_64(self):
self.do_with_params(32, (1, 64))
def test_2_32(self):
self.do_with_params(32, (2, 32))
def test_2_64(self):
self.do_with_params(32, (2, 64))
def test_qbs_1_32_k1(self):
self.k = 1
self.do_with_params(32, (1, 32))
def test_qbs_1_64_k1(self):
self.k = 1
self.do_with_params(32, (1, 64))
def test_1_32_odd_dim(self):
self.do_with_params(30, (1, 32))
def test_1_64_odd_dim(self):
self.do_with_params(30, (1, 64))
| TestImplem14 |
python | getsentry__sentry | src/sentry/incidents/subscription_processor.py | {
"start": 1787,
"end": 1988
} | class ____(TypedDict):
"""
Schema for Metric Issue Detector.config.
"""
comparison_delta: int | None
detection_type: Literal["static", "percent", "dynamic"]
| MetricIssueDetectorConfig |
python | getsentry__sentry | src/sentry_plugins/gitlab/plugin.py | {
"start": 417,
"end": 7182
} | class ____(CorePluginMixin, IssuePlugin2):
description = "Integrate GitLab issues by linking a repository to a project"
slug = "gitlab"
title = "GitLab"
conf_title = title
conf_key = "gitlab"
required_field = "gitlab_url"
feature_descriptions = [
FeatureDescription(
"""
Create GitLab issues from Sentry
""",
IntegrationFeatures.ISSUE_BASIC,
),
FeatureDescription(
"""
Link Sentry issues to existing GitLab issues
""",
IntegrationFeatures.ISSUE_BASIC,
),
]
def is_configured(self, project) -> bool:
return bool(
self.get_option("gitlab_repo", project)
and self.get_option("gitlab_token", project)
and self.get_option("gitlab_url", project)
)
def get_new_issue_fields(self, request: Request, group, event, **kwargs):
fields = super().get_new_issue_fields(request, group, event, **kwargs)
return [
{
"name": "repo",
"label": "Repository",
"default": self.get_option("gitlab_repo", group.project),
"type": "text",
"readonly": True,
},
*fields,
{
"name": "assignee",
"label": "Assignee",
"default": "",
"type": "select",
"required": False,
"choices": self.get_allowed_assignees(request, group),
},
{
"name": "labels",
"label": "Labels",
"default": self.get_option("gitlab_labels", group.project),
"type": "text",
"placeholder": "e.g. high, bug",
"required": False,
},
]
def get_link_existing_issue_fields(self, request: Request, group, event, **kwargs):
return [
{
"name": "issue_id",
"label": "Issue #",
"default": "",
"placeholder": "e.g. 1543",
"type": "text",
},
{
"name": "comment",
"label": "Comment",
"default": absolute_uri(
group.get_absolute_url(params={"referrer": "gitlab_plugin"})
),
"type": "textarea",
"help": ("Leave blank if you don't want to " "add a comment to the GitLab issue."),
"required": False,
},
]
def get_allowed_assignees(self, request: Request, group):
repo = self.get_option("gitlab_repo", group.project)
client = self.get_client(group.project)
try:
response = client.list_project_members(repo)
except ApiError as e:
self.raise_error(e)
users = tuple((u["id"], u["username"]) for u in response)
return (("", "(Unassigned)"),) + users
def get_new_issue_title(self, **kwargs) -> str:
return "Create GitLab Issue"
def get_client(self, project):
url = self.get_option("gitlab_url", project).rstrip("/")
token = self.get_option("gitlab_token", project)
return GitLabClient(url, token)
def create_issue(self, request: Request, group, form_data):
repo = self.get_option("gitlab_repo", group.project)
client = self.get_client(group.project)
try:
response = client.create_issue(
repo,
{
"title": form_data["title"],
"description": form_data["description"],
"labels": form_data.get("labels"),
"assignee_id": form_data.get("assignee"),
},
)
except Exception as e:
self.raise_error(e)
return response["iid"]
def link_issue(self, request: Request, group, form_data, **kwargs):
client = self.get_client(group.project)
repo = self.get_option("gitlab_repo", group.project)
try:
issue = client.get_issue(repo=repo, issue_id=form_data["issue_id"])
except Exception as e:
self.raise_error(e)
comment = form_data.get("comment")
if comment:
try:
client.create_note(repo=repo, issue_iid=issue["iid"], data={"body": comment})
except Exception as e:
self.raise_error(e)
return {"title": issue["title"]}
def get_issue_label(self, group, issue_id: str) -> str:
return f"GL-{issue_id}"
def get_issue_url(self, group, issue_id: str) -> str:
url = self.get_option("gitlab_url", group.project).rstrip("/")
repo = self.get_option("gitlab_repo", group.project)
return f"{url}/{repo}/issues/{issue_id}"
def get_configure_plugin_fields(self, project, **kwargs):
gitlab_token = self.get_option("gitlab_token", project)
secret_field = get_secret_field_config(
gitlab_token, "Enter your GitLab API token.", include_prefix=True
)
secret_field.update(
{
"name": "gitlab_token",
"label": "Access Token",
"placeholder": "e.g. g5DWFtLzaztgYFrqhVfE",
}
)
return [
{
"name": "gitlab_url",
"label": "GitLab URL",
"type": "url",
"default": "https://gitlab.com",
"placeholder": "e.g. https://gitlab.example.com",
"required": True,
"help": "Enter the URL for your GitLab server.",
},
secret_field,
{
"name": "gitlab_repo",
"label": "Repository Name",
"type": "text",
"placeholder": "e.g. getsentry/sentry",
"required": True,
"help": "Enter your repository name, including the owner.",
},
{
"name": "gitlab_labels",
"label": "Issue Labels",
"type": "text",
"placeholder": "e.g. high, bug",
"required": False,
"help": "Enter the labels you want to auto assign to new issues.",
},
]
def validate_config(self, project, config, actor=None):
url = config["gitlab_url"].rstrip("/")
token = config["gitlab_token"]
repo = config["gitlab_repo"]
client = GitLabClient(url, token)
try:
client.get_project(repo)
except Exception as e:
self.raise_error(e)
return config
| GitLabPlugin |
python | apache__airflow | airflow-core/tests/unit/models/test_deadline.py | {
"start": 18687,
"end": 22236
} | class ____:
"""DeadlineReference lives in definitions/deadlines.py but properly testing them requires DB access."""
DEFAULT_INTERVAL = timedelta(hours=1)
DEFAULT_ARGS = {"interval": DEFAULT_INTERVAL}
@pytest.mark.parametrize("reference", REFERENCE_TYPES)
@pytest.mark.db_test
def test_deadline_evaluate_with(self, reference, session):
"""Test that all deadline types evaluate correctly with their required conditions."""
conditions = {
"dag_id": DAG_ID,
"run_id": "dagrun_1",
"unexpected": "param", # Add an unexpected parameter.
"extra": "kwarg", # Add another unexpected parameter.
}
with mock.patch.object(reference, "_evaluate_with") as mock_evaluate:
mock_evaluate.return_value = DEFAULT_DATE
if reference.required_kwargs:
result = reference.evaluate_with(**self.DEFAULT_ARGS, session=session, **conditions)
else:
result = reference.evaluate_with(**self.DEFAULT_ARGS, session=session)
# Verify only expected kwargs are passed through.
expected_kwargs = {k: conditions[k] for k in reference.required_kwargs if k in conditions}
expected_kwargs["session"] = session
mock_evaluate.assert_called_once_with(**expected_kwargs)
assert result == DEFAULT_DATE + self.DEFAULT_INTERVAL
@pytest.mark.parametrize("reference", REFERENCE_TYPES)
@pytest.mark.db_test
def test_deadline_missing_required_kwargs(self, reference, session):
"""Test that deadlines raise appropriate errors for missing required parameters."""
if reference.required_kwargs:
with pytest.raises(
ValueError, match=re.escape(f"{reference.__class__.__name__} is missing required parameters:")
) as raised_exception:
reference.evaluate_with(session=session, **self.DEFAULT_ARGS)
assert all(substring in str(raised_exception.value) for substring in reference.required_kwargs)
else:
# Let the lack of an exception here effectively assert that no exception is raised.
reference.evaluate_with(session=session, **self.DEFAULT_ARGS)
for required_param in reference.required_kwargs:
assert required_param in str(raised_exception.value)
def test_deadline_reference_creation(self):
"""Test that DeadlineReference provides consistent interface and types."""
fixed_reference = DeadlineReference.FIXED_DATETIME(DEFAULT_DATE)
assert isinstance(fixed_reference, ReferenceModels.FixedDatetimeDeadline)
assert fixed_reference._datetime == DEFAULT_DATE
logical_date_reference = DeadlineReference.DAGRUN_LOGICAL_DATE
assert isinstance(logical_date_reference, ReferenceModels.DagRunLogicalDateDeadline)
queued_reference = DeadlineReference.DAGRUN_QUEUED_AT
assert isinstance(queued_reference, ReferenceModels.DagRunQueuedAtDeadline)
average_runtime_reference = DeadlineReference.AVERAGE_RUNTIME()
assert isinstance(average_runtime_reference, ReferenceModels.AverageRuntimeDeadline)
assert average_runtime_reference.max_runs == 10
assert average_runtime_reference.min_runs == 10
# Test with custom parameters
custom_reference = DeadlineReference.AVERAGE_RUNTIME(max_runs=5, min_runs=3)
assert custom_reference.max_runs == 5
assert custom_reference.min_runs == 3
| TestDeadlineReference |
python | mlflow__mlflow | mlflow/pyspark/optuna/study.py | {
"start": 638,
"end": 4354
} | class ____:
is_resumed: bool
study_name: str | None = None
existing_trials: int | None = None
completed_trials: int | None = None
best_value: float | None = None
best_params: dict[str, Any] | None = None
def is_spark_connect_mode() -> bool:
"""Check if the current Spark session is running in client mode."""
try:
from pyspark.sql.utils import is_remote
except ImportError:
return False
return is_remote()
def _optimize_sequential(
study: "optuna.Study",
func: "optuna.study.study.ObjectiveFuncType",
mlflow_client: MlflowClient,
n_trials: int = 1,
timeout: float | None = None,
catch: Iterable[type[Exception]] = (),
callbacks: Iterable[Callable[[Study, FrozenTrial], None]] | None = None,
) -> None:
"""
Run optimization sequentially. It is modified from _optimize_sequential in optuna
(https://github.com/optuna/optuna/blob/e1e30e7150047e5f582b8fef1eeb65386cb1c4c1/optuna/study/_optimize.py#L121)
Convert the nested call to one function and log the error messages to mlflow.
"""
i_trial = 0
time_start = datetime.datetime.now()
while True:
if study._stop_flag:
break
if i_trial >= n_trials:
break
i_trial += 1
if timeout is not None:
elapsed_seconds = (datetime.datetime.now() - time_start).total_seconds()
if elapsed_seconds >= timeout:
break
state = None
value_or_values = None
func_err = None
func_err_fail_exc_info = None
trial = study.ask()
try:
value_or_values = func(trial)
except exceptions.TrialPruned as e:
state = TrialState.PRUNED
func_err = e
except (Exception, KeyboardInterrupt) as e:
state = TrialState.FAIL
func_err = e
func_err_fail_exc_info = traceback.format_exc()
try:
frozen_trial, warning_message = optuna.study._tell._tell_with_warning(
study=study,
trial=trial,
value_or_values=value_or_values,
state=state,
suppress_warning=True,
)
except Exception:
frozen_trial = study._storage.get_trial(trial._trial_id)
warning_message = None
if frozen_trial.state == TrialState.COMPLETE:
_logger.info(f"Trial {trial.number} finished with parameters: {trial.params}.")
elif frozen_trial.state == TrialState.PRUNED:
_logger.info("Trial {} pruned. {}".format(frozen_trial._trial_id, str(func_err)))
mlflow_client.set_terminated(frozen_trial._trial_id, status="KILLED")
elif frozen_trial.state == TrialState.FAIL:
error_message = None
if func_err is not None:
error_message = func_err_fail_exc_info
elif warning_message is not None:
error_message = warning_message
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir, "error_message.txt")
path.write_text(error_message)
# Log the file as an artifact in the active MLflow run
mlflow_client.log_artifact(frozen_trial._trial_id, path)
mlflow_client.set_terminated(frozen_trial._trial_id, status="FAILED")
if (
frozen_trial.state == TrialState.FAIL
and func_err is not None
and not isinstance(func_err, catch)
):
raise func_err
if callbacks is not None:
for callback in callbacks:
callback(study, frozen_trial)
| ResumeInfo |
python | automl__auto-sklearn | test/test_metalearning/pyMetaLearn/test_optimizer_base.py | {
"start": 119,
"end": 1518
} | class ____(unittest.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.hyperparameters = OrderedDict()
self.hyperparameters["x"] = [-5, 0, 5, 10]
self.hyperparameters["y"] = [0, 5, 10, 15]
def test_parse_hyperopt_string(self):
hyperparameter_string = "x {-5, 0, 5, 10}\ny {0, 5, 10, 15}"
expected = OrderedDict(
[["x", ["-5", "0", "5", "10"]], ["y", ["0", "5", "10", "15"]]]
)
ret = optimizer_base.parse_hyperparameter_string(hyperparameter_string)
self.assertEqual(ret, expected)
hyperparameter_string = "x {-5, 0, 5, 10} [5]\ny {0, 5, 10, 15}"
ret = optimizer_base.parse_hyperparameter_string(hyperparameter_string)
self.assertEqual(ret, expected)
hyperparameter_string = "x {-5, 0, 5, 10}\ny {0, 5, 10, 15} [5]"
ret = optimizer_base.parse_hyperparameter_string(hyperparameter_string)
self.assertEqual(ret, expected)
hyperparameter_string = "x {-5, 0, 5, 10}\ny 0, 5, 10, 15} [5]"
self.assertRaises(
ValueError,
optimizer_base.parse_hyperparameter_string,
hyperparameter_string,
)
def test_construct_cli_call(self):
cli_call = optimizer_base.construct_cli_call("cv.py", {"x": -5, "y": 0})
self.assertEqual(cli_call, "cv.py -x \"'-5'\" -y \"'0'\"")
| OptimizerBaseTest |
python | pytorch__pytorch | torch/_dynamo/utils.py | {
"start": 42636,
"end": 61120
} | class ____:
compile_id: Optional[str] = None
frame_key: Optional[str] = None
co_name: Optional[str] = None
co_filename: Optional[str] = None
co_firstlineno: Optional[int] = None
cache_size: Optional[int] = None
accumulated_cache_size: Optional[int] = None
guard_count: Optional[int] = None
shape_env_guard_count: Optional[int] = None
graph_op_count: Optional[int] = None
graph_node_count: Optional[int] = None
graph_input_count: Optional[int] = None
start_time: Optional[float] = None
entire_frame_compile_time_s: Optional[float] = None
backend_compile_time_s: Optional[float] = None
inductor_compile_time_s: Optional[float] = None
code_gen_time_s: Optional[float] = None
fail_type: Optional[str] = None
fail_reason: Optional[str] = None
fail_user_frame_filename: Optional[str] = None
fail_user_frame_lineno: Optional[int] = None
non_compliant_ops: Optional[set[str]] = None
compliant_custom_ops: Optional[set[str]] = None
restart_reasons: Optional[set[str]] = None
dynamo_time_before_restart_s: Optional[float] = None
stack_trace: Optional[list[str]] = None
exception_stack_trace: Optional[list[str]] = None
graph_node_shapes: Optional[str] = None
# Sometimes, we will finish analyzing a frame but conclude we don't want
# to install any guarded code. True means we actually decided to install
# a compiled frame
has_guarded_code: Optional[bool] = None
remote_cache_time_saved_s: Optional[float] = None
structured_logging_overhead_s: Optional[float] = None
config_suppress_errors: Optional[bool] = None
config_inline_inbuilt_nn_modules: Optional[bool] = None
specialize_float: Optional[bool] = None
dynamo_config: Optional[str] = None
compiler_config: Optional[str] = None
is_forward: Optional[bool] = None
num_triton_bundles: Optional[int] = None
remote_fx_graph_cache_get_time_ms: Optional[int] = None
remote_fx_graph_cache_put_time_ms: Optional[int] = None
start_time_us: Optional[int] = None
duration_us: Optional[int] = None
dynamo_cumulative_compile_time_us: Optional[int] = None
aot_autograd_cumulative_compile_time_us: Optional[int] = None
inductor_cumulative_compile_time_us: Optional[int] = None
inductor_code_gen_cumulative_compile_time_us: Optional[int] = None
triton_compile_time_us: Optional[int] = None
runtime_cudagraphify_time_us: Optional[int] = None
runtime_triton_autotune_time_us: Optional[int] = None
dynamo_compile_time_before_restart_us: Optional[int] = None
distributed_ephemeral_timeout_us: Optional[int] = None
structured_logging_overhead_us: Optional[int] = None
remote_fx_graph_cache_get_time_us: Optional[int] = None
remote_fx_graph_cache_put_time_us: Optional[int] = None
backward_cumulative_compile_time_us: Optional[int] = None
end_time_us: Optional[int] = None
pre_grad_pass_time_us: Optional[int] = None
post_grad_pass_time_us: Optional[int] = None
joint_graph_pass_time_us: Optional[int] = None
log_format_version: int = LOG_FORMAT_VERSION
inductor_config: Optional[str] = None
remote_cache_version: Optional[int] = None
inductor_fx_remote_cache_hit_count: Optional[int] = None
inductor_fx_remote_cache_miss_count: Optional[int] = None
inductor_fx_remote_cache_backend_type: Optional[str] = None
inductor_fx_remote_cache_hit_keys: Optional[str] = None
inductor_fx_remote_cache_miss_keys: Optional[str] = None
cuda_version: Optional[str] = None
triton_version: Optional[str] = None
feature_usage: Optional[dict[str, bool]] = None
compile_time_autotune_time_us: Optional[int] = None
is_runtime: Optional[bool] = False
gc_time_us: Optional[int] = None
tensorify_float_attempt: Optional[bool] = None
tensorify_float_success: Optional[bool] = None
tensorify_float_failure: Optional[set[str]] = None
guard_latency_us: Optional[float] = None
recompile_reason: Optional[str] = None
num_graph_breaks: Optional[int] = None
triton_kernel_compile_times_us: Optional[str] = None
ir_count: Optional[int] = None
cudagraph_skip_reason: Optional[str] = None
python_version: Optional[str] = None
pgo_put_remote_code_state_time_us: Optional[int] = None
pgo_get_remote_code_state_time_us: Optional[int] = None
# The number of elements within parameters. This is classically what people
# think of when they think of parameters in a ML model.
param_numel: Optional[int] = None
# The number of elements counted by bytes - i.e. a float32 is 4 bytes
# per element.
param_bytes: Optional[int] = None
# The number of parameters counted by fields. This is mostly a proxy for
# the number of distinct type of params.
param_count: Optional[int] = None
recompile_user_contexts: Optional[set[str]] = None
inline_inbuilt_nn_modules_candidate: Optional[bool] = False
pytorch_version: Optional[str] = None
inductor_provenance: Optional[set[str]] = None
@classmethod
def create(cls, metrics: dict[str, Any]) -> CompilationMetrics:
"""
Factory method to create a CompilationMetrics from a dict of fields.
Includes the logic to add legacy fields and any pre-processing, e.g.,
we transform some fields to comma-separated strings for scuba logging.
"""
def us_to_s(metric: Optional[int]) -> Optional[float]:
return metric / 1e6 if metric is not None else None
def us_to_ms(metric: Optional[int]) -> Optional[int]:
return metric // 1000 if metric is not None else None
def collection_to_str(metric: Optional[Any]) -> Optional[str]:
def safe_str(item: Any) -> str:
try:
return str(item)
except Exception:
return "<unknown>"
if metric is None:
return None
if not isinstance(metric, (set, list)):
return "<unknown>"
return ",".join(safe_str(item) for item in sorted(metric))
def collection_to_json_str(metric: Optional[Any]) -> Optional[str]:
if metric is None:
return None
try:
return json.dumps(list(metric))
except Exception:
return "<unknown>"
# TODO: The following are legacy fields, populated from the fields that replace
# them. Remove these when we decide we can really deprecate them.
legacy_metrics = {
"start_time": us_to_s(metrics.get("start_time_us")),
"entire_frame_compile_time_s": us_to_s(
metrics.get("dynamo_cumulative_compile_time_us")
),
"backend_compile_time_s": us_to_s(
metrics.get("aot_autograd_cumulative_compile_time_us")
),
"inductor_compile_time_s": us_to_s(
metrics.get("inductor_cumulative_compile_time_us")
),
"code_gen_time_s": us_to_s(
metrics.get("inductor_code_gen_cumulative_compile_time_us")
),
"remote_cache_time_saved_s": us_to_s(
metrics.get("distributed_ephemeral_timeout_us")
),
"remote_fx_graph_cache_get_time_ms": us_to_ms(
metrics.get("remote_fx_graph_cache_get_time_us")
),
"remote_fx_graph_cache_put_time_ms": us_to_ms(
metrics.get("remote_fx_graph_cache_put_time_us")
),
"structured_logging_overhead_s": us_to_s(
metrics.get("structured_logging_overhead_us")
),
}
all_metrics = {**legacy_metrics, **metrics}
# Processing before logging:
all_metrics["inductor_fx_remote_cache_hit_keys"] = collection_to_str(
all_metrics.get("inductor_fx_remote_cache_hit_keys")
)
all_metrics["inductor_fx_remote_cache_miss_keys"] = collection_to_str(
all_metrics.get("inductor_fx_remote_cache_miss_keys")
)
all_metrics["triton_kernel_compile_times_us"] = collection_to_json_str(
all_metrics.get("triton_kernel_compile_times_us")
)
compile_id = all_metrics.get("compile_id")
all_metrics["compile_id"] = str(compile_id) if compile_id else None
# pyrefly: ignore [bad-argument-type]
return cls(**all_metrics)
DEFAULT_COMPILATION_METRICS_LIMIT = 64
_compilation_metrics: collections.deque[CompilationMetrics] = collections.deque(
maxlen=DEFAULT_COMPILATION_METRICS_LIMIT
)
def add_compilation_metrics_to_chromium(c: CompilationMetrics) -> None:
"""
These are the common fields in CompilationMetrics that existed before
metrics_context, and aren't set by MetricsContext.set(). We add the subset
of them that make sense in `dynamo`/toplevel events in PT2 Compile Events
directly.
If you're tempted to add to this list, consider using CompileEventLogger.compilation_metric()
instead, which will automatically also add it to tlparse and PT2 Compile Events.
TODO: Get rid of this function and replace it with CompileEventLogger directly instead.
"""
event_logger = get_chromium_event_logger()
event_name = event_logger.get_outermost_event()
if not event_name:
return
event_logger.add_event_data(
event_name=event_name,
frame_key=c.frame_key,
co_name=c.co_name,
co_filename=c.co_filename,
co_firstlineno=c.co_firstlineno,
cache_size=c.cache_size,
accumulated_cache_size=c.accumulated_cache_size,
guard_count=c.guard_count,
shape_env_guard_count=c.shape_env_guard_count,
graph_op_count=c.graph_op_count,
graph_node_count=c.graph_node_count,
graph_input_count=c.graph_input_count,
fail_type=c.fail_type,
fail_reason=c.fail_reason,
fail_user_frame_filename=c.fail_user_frame_filename,
fail_user_frame_lineno=c.fail_user_frame_lineno,
# Sets aren't JSON serializable
non_compliant_ops=(
list(c.non_compliant_ops) if c.non_compliant_ops is not None else None
),
compliant_custom_ops=(
list(c.compliant_custom_ops) if c.compliant_custom_ops is not None else None
),
restart_reasons=(
list(c.restart_reasons) if c.restart_reasons is not None else None
),
dynamo_time_before_restart_s=c.dynamo_time_before_restart_s,
has_guarded_code=c.has_guarded_code,
dynamo_config=c.dynamo_config,
)
def _get_dynamo_config_for_logging() -> Optional[str]:
    """Serialize the dynamo config as a sorted JSON string for logging.

    Noisy, environment-specific, or non-JSON-serializable entries are
    dropped, and set-valued entries are converted to sorted lists so the
    output is deterministic across runs.
    """
    # Config entries deliberately excluded from the logged snapshot.
    blocklist = frozenset(
        (
            "TYPE_CHECKING",
            "log_file_name",
            "verbose",
            "repro_after",
            "repro_level",
            "repro_forward_only",
            "repro_tolerance",
            "repro_ignore_non_fp",
            "same_two_models_use_fp64",
            "base_dir",
            "debug_dir_root",
            "_save_config_ignore",
            "log_compilation_metrics",
            "inject_BUILD_SET_unimplemented_TESTING_ONLY",
            "_autograd_backward_strict_mode_banned_ops",
            "reorderable_logging_functions",
            "ignore_logger_methods",
            "traceable_tensor_subclasses",
            "nontraceable_tensor_subclasses",
            "_custom_ops_profile",
        )
    )
    sanitized: dict[str, Any] = {}
    for key, value in config.get_config_copy().items():
        if key in blocklist:
            continue
        sanitized[key] = sorted(value) if isinstance(value, set) else value
    return json.dumps(sanitized, sort_keys=True)
def _compiler_config_for_logging() -> Optional[str]:
def clean_for_json(d: dict[str, Any]) -> dict[str, Any]:
blocklist = {
"TYPE_CHECKING",
}
return {
key: sorted(value) if isinstance(value, set) else value
for key, value in d.items()
if key not in blocklist
}
if not torch.compiler.config:
return None
try:
compiler_config_copy = torch.compiler.config.get_config_copy() # type: ignore[attr-defined]
except (TypeError, AttributeError):
return "Compiler Config cannot be pickled"
config_dict = clean_for_json(compiler_config_copy)
return json.dumps(config_dict, sort_keys=True)
def _scrubbed_inductor_config_for_logging() -> Optional[str]:
"""
Method to parse and scrub uninteresting configs from inductor config
"""
# TypeSafeSerializer for json.dumps()
# Skips complex types as values in config dict
class TypeSafeSerializer(json.JSONEncoder):
def default(self, o: Any) -> Any:
try:
return super().default(o)
except Exception:
return "Value is not JSON serializable"
keys_to_scrub: set[Any] = set()
inductor_conf_str = None
inductor_config_copy = None
if torch._inductor.config:
try:
inductor_config_copy = torch._inductor.config.get_config_copy()
except (TypeError, AttributeError, RuntimeError, AssertionError):
inductor_conf_str = "Inductor Config cannot be pickled"
if inductor_config_copy is not None:
try:
for key, val in inductor_config_copy.items():
if not isinstance(key, str):
keys_to_scrub.add(key)
# Convert set() to list for json.dumps()
if isinstance(val, set):
inductor_config_copy[key] = list(val)
# Evict unwanted keys
for key in keys_to_scrub:
del inductor_config_copy[key]
# Stringify Inductor config
inductor_conf_str = json.dumps(
inductor_config_copy,
cls=TypeSafeSerializer,
skipkeys=True,
sort_keys=True,
)
except Exception:
# Don't crash because of runtime logging errors
inductor_conf_str = "Inductor Config is not JSON serializable"
return inductor_conf_str
def record_compilation_metrics(
    start_time_ns: int,
    end_time_ns: int,
    metrics: dict[str, Any],
    exc_type: Optional[type[BaseException]],
    exc_value: Optional[BaseException],
) -> None:
    """Assemble a CompilationMetrics record and fan it out to every sink.

    Combines the caller-provided ``metrics`` with fields derived here
    (timings, failure info, config snapshots, environment versions), then:
    appends the record to the in-memory ring buffer, emits it via structured
    trace logging, attaches it to any in-flight chromium event, and finally
    hands it to log_compilation_event when metrics logging is enabled.

    Invoked as the on_exit handler of the singleton MetricsContext /
    RuntimeMetricsContext.
    """
    remote_cache_version = None
    remote_cache_backend = None
    if torch._inductor.utils.should_use_remote_fx_graph_cache():
        try:
            from torch._inductor.fb.remote_cache import REMOTE_CACHE_VERSION

            remote_cache_version = REMOTE_CACHE_VERSION
            remote_cache_backend = "_ManifoldCache"
        except ModuleNotFoundError:
            # Not an internal build; leave both fields unset.
            pass

    # Prefer the compile_id recorded in the metrics context; otherwise fall
    # back to the one from the current compile context, if any.
    compile_id = metrics.get("compile_id")
    if not compile_id:
        compile_id = torch._guards.CompileContext.current_compile_id()

    derived_metrics = {
        "compile_id": compile_id,
        "start_time_us": start_time_ns // 1000,
        "end_time_us": end_time_ns // 1000,
        "fail_type": exc_type.__qualname__ if exc_type else None,
        "fail_reason": str(exc_value) if exc_value else None,
        "structured_logging_overhead_us": to_int_us(
            torch._logging.get_structured_logging_overhead()
        ),
        "dynamo_config": _get_dynamo_config_for_logging(),
        "config_suppress_errors": config.suppress_errors,
        "config_inline_inbuilt_nn_modules": config.inline_inbuilt_nn_modules,
        "inductor_config": _scrubbed_inductor_config_for_logging(),
        "compiler_config": _compiler_config_for_logging(),
        "cuda_version": torch.version.cuda,
        "triton_version": triton.__version__ if has_triton() else "",
        "remote_cache_version": remote_cache_version,
        "inductor_fx_remote_cache_backend_type": remote_cache_backend,
        "python_version": sys.version,
        "pytorch_version": torch.__version__,
    }
    # Caller-supplied metrics win over the derived defaults on key collisions.
    record = CompilationMetrics.create({**derived_metrics, **metrics})
    _compilation_metrics.append(record)

    # The structured-log event name encodes direction (fwd/bwd) and whether
    # this is a compile-time or runtime record.
    name = "compilation_metrics"
    if record.is_forward is False:
        name = "bwd_" + name
    if record.is_runtime is True:
        name += "_runtime"

    def _payload() -> dict[str, Any]:
        # Sets aren't JSON serializable, so convert them to lists.
        return {
            k: list(v) if isinstance(v, set) else v
            for k, v in dataclasses.asdict(record).items()
        }

    torch._logging.trace_structured(
        name,
        _payload,
        # NB: Because compilation metrics *includes* the logging overhead time,
        # we can't both *measure* the logging overhead of compilation metrics
        # without making it inconsistent with compilation metrics itself, so
        # we ignore the (hopefully small) time spent logging compilation metrics
        record_logging_overhead=False,
        # These may be runtime logs, e.g., runtime autotuning, so we provide
        # the CompileId from the compilation metrics in case it's not available
        # in the current trace.
        compile_id=compile_id,
    )

    # If there's a chromium event in flight, add the CompilationMetrics to it.
    add_compilation_metrics_to_chromium(record)

    # Finally log the compilation metrics.
    if config.log_compilation_metrics:
        log_compilation_event(record)
# Singleton metrics contexts. record_compilation_metrics runs as the on_exit
# handler of each, so every completed compile-time (or runtime) metrics scope
# yields one CompilationMetrics record fanned out to the logging sinks.
_METRICS_CONTEXT = MetricsContext(on_exit=record_compilation_metrics)
_RUNTIME_METRICS_CONTEXT = RuntimeMetricsContext(on_exit=record_compilation_metrics)
def set_compilation_metrics_limit(new_size: int) -> None:
    """Resize the in-memory CompilationMetrics ring buffer to ``new_size``.

    If the buffer currently holds more than ``new_size`` records, the oldest
    (leftmost) entries are discarded; the most recent records are always kept.
    Raises ValueError if ``new_size`` is negative.
    """
    global _compilation_metrics
    # deque(iterable, maxlen=n) keeps only the n rightmost (newest) items,
    # so an explicit popleft() loop beforehand is unnecessary.
    _compilation_metrics = collections.deque(_compilation_metrics, maxlen=new_size)
def clear_compilation_metrics() -> None:
    """Drop every record currently held in the CompilationMetrics buffer."""
    # .clear() mutates the deque in place; the module-level binding itself
    # is never reassigned, so no `global` declaration is needed.
    _compilation_metrics.clear()
def get_compilation_metrics() -> list[CompilationMetrics]:
    """Return a snapshot (shallow copy) of the recorded CompilationMetrics.

    Mutating the returned list does not affect the internal buffer.
    """
    return [*_compilation_metrics]
| CompilationMetrics |
python | readthedocs__readthedocs.org | readthedocs/integrations/models.py | {
"start": 5205,
"end": 7262
} | class ____(models.Model):
"""HTTP request/response exchange."""
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
related_object = GenericForeignKey("content_type", "object_id")
date = models.DateTimeField(_("Date"), auto_now_add=True)
request_headers = models.JSONField(
_("Request headers"),
# Delete after deploy
null=True,
blank=True,
)
request_body = models.TextField(_("Request body"))
response_headers = models.JSONField(
_("Request headers"),
# Delete after deploy
null=True,
blank=True,
)
response_body = models.TextField(_("Response body"))
status_code = models.IntegerField(
_("Status code"),
default=status.HTTP_200_OK,
)
objects = HttpExchangeManager()
class Meta:
ordering = ["-date"]
indexes = [models.Index(fields=["content_type", "object_id", "date"])]
def __str__(self):
return _("Exchange {0}").format(self.pk)
@property
def failed(self):
# Assume anything that isn't 2xx level status code is an error
return not (200 <= self.status_code < 300)
def formatted_json(self, field):
"""Try to return pretty printed and Pygment highlighted code."""
value = getattr(self, field) or ""
try:
if not isinstance(value, dict):
value = json.loads(value)
json_value = json.dumps(value, sort_keys=True, indent=2)
formatter = HtmlFormatter()
html = highlight(json_value, JsonLexer(), formatter)
return mark_safe(html)
except (ValueError, TypeError):
return value
@property
def formatted_request_body(self):
return self.formatted_json("request_body")
@property
def formatted_response_body(self):
return self.formatted_json("response_body")
| HttpExchange |
python | doocs__leetcode | solution/2300-2399/2309.Greatest English Letter in Upper and Lower Case/Solution.py | {
"start": 0,
"end": 208
} | class ____:
def greatestLetter(self, s: str) -> str:
ss = set(s)
for c in ascii_uppercase[::-1]:
if c in ss and c.lower() in ss:
return c
return ''
| Solution |
python | scipy__scipy | scipy/stats/tests/test_correlation.py | {
"start": 293,
"end": 4387
} | class ____:
@pytest.mark.parametrize('case', [
dict(y_cont=True, statistic=-0.303030303030303, pvalue=0.9351329808526656),
dict(y_cont=False, statistic=0.07407407407407396, pvalue=0.3709859367123997)])
@pytest.mark.parametrize('dtype', ['float32', 'float64', None])
def test_against_R_XICOR(self, case, dtype, xp):
# Test against R package XICOR, e.g.
# library(XICOR)
# options(digits=16)
# x = c(0.11027287231363914, 0.8154770102474279, 0.7073943466920335,
# 0.6651317324378386, 0.6905752850115503, 0.06115250587536558,
# 0.5209906494474178, 0.3155763519785274, 0.18405731803625924,
# 0.8613557911541495)
# y = c(0.8402081904493103, 0.5946972833914318, 0.23481606164114155,
# 0.49754786197715384, 0.9146460831206026, 0.5848057749217579,
# 0.7620801065573549, 0.31410063302647495, 0.7935620302236199,
# 0.5423085761365468)
# xicor(x, y, ties=FALSE, pvalue=TRUE)
if dtype == 'float32' and np.__version__ < "2":
pytest.skip("Scalar dtypes only respected after NEP 50.")
dtype = xp_default_dtype(xp) if dtype is None else getattr(xp, dtype)
rng = np.random.default_rng(25982435982346983)
x = rng.random(size=10)
y = (rng.random(size=10) if case['y_cont']
else rng.integers(0, 5, size=10))
x, y = xp.asarray(x, dtype=dtype), xp.asarray(y, dtype=dtype)
res = stats.chatterjeexi(x, y, y_continuous=case['y_cont'])
xp_assert_close(res.statistic, xp.asarray(case['statistic'], dtype=dtype))
xp_assert_close(res.pvalue, xp.asarray(case['pvalue'], dtype=dtype))
@pytest.mark.parametrize('y_continuous', (False, True))
def test_permutation_asymptotic(self, y_continuous):
# XICOR doesn't seem to perform the permutation test as advertised, so
# compare the result of a permutation test against an asymptotic test.
rng = np.random.default_rng(2524579827426)
n = np.floor(rng.uniform(100, 150)).astype(int)
shape = (2, n)
x = rng.random(size=shape)
y = (rng.random(size=shape) if y_continuous
else rng.integers(0, 10, size=shape))
method = stats.PermutationMethod(rng=rng)
res = stats.chatterjeexi(x, y, method=method,
y_continuous=y_continuous, axis=-1)
ref = stats.chatterjeexi(x, y, y_continuous=y_continuous, axis=-1)
np.testing.assert_allclose(res.statistic, ref.statistic, rtol=1e-15)
np.testing.assert_allclose(res.pvalue, ref.pvalue, rtol=2e-2)
def test_input_validation(self, xp):
rng = np.random.default_rng(25932435798274926)
x, y = rng.random(size=(2, 10))
x, y = xp.asarray(x), xp.asarray(y)
message = 'Array shapes are incompatible for broadcasting.|Incompatible shapes'
with pytest.raises((ValueError, TypeError), match=message):
stats.chatterjeexi(x, y[:-1])
if not is_jax(xp):
# jax misses out on some input validation from _axis_nan_policy decorator
message = '...axis 10 is out of bounds for array...|out of range'
with pytest.raises((ValueError, IndexError), match=message):
stats.chatterjeexi(x, y, axis=10)
message = '`y_continuous` must be boolean.'
with pytest.raises(ValueError, match=message):
stats.chatterjeexi(x, y, y_continuous='a herring')
message = "`method` must be 'asymptotic' or"
with pytest.raises(ValueError, match=message):
stats.chatterjeexi(x, y, method='ekki ekii')
@pytest.mark.skip_xp_backends('jax.numpy', reason='no SmallSampleWarning (lazy)')
def test_special_cases(self, xp):
message = 'One or more sample arguments is too small...'
with pytest.warns(SmallSampleWarning, match=message):
res = stats.chatterjeexi(xp.asarray([1]), xp.asarray([2]))
assert xp.isnan(res.statistic)
assert xp.isnan(res.pvalue)
| TestChatterjeeXi |
python | anthropics__anthropic-sdk-python | src/anthropic/_client.py | {
"start": 21875,
"end": 23030
} | class ____:
_client: AsyncAnthropic
def __init__(self, client: AsyncAnthropic) -> None:
self._client = client
@cached_property
def completions(self) -> completions.AsyncCompletionsWithStreamingResponse:
from .resources.completions import AsyncCompletionsWithStreamingResponse
return AsyncCompletionsWithStreamingResponse(self._client.completions)
@cached_property
def messages(self) -> messages.AsyncMessagesWithStreamingResponse:
from .resources.messages import AsyncMessagesWithStreamingResponse
return AsyncMessagesWithStreamingResponse(self._client.messages)
@cached_property
def models(self) -> models.AsyncModelsWithStreamingResponse:
from .resources.models import AsyncModelsWithStreamingResponse
return AsyncModelsWithStreamingResponse(self._client.models)
@cached_property
def beta(self) -> beta.AsyncBetaWithStreamingResponse:
from .resources.beta import AsyncBetaWithStreamingResponse
return AsyncBetaWithStreamingResponse(self._client.beta)
Client = Anthropic
AsyncClient = AsyncAnthropic
| AsyncAnthropicWithStreamedResponse |
python | apache__airflow | task-sdk/tests/task_sdk/bases/test_operator.py | {
"start": 3162,
"end": 3361
} | class ____(BaseOperator):
def __init__(self, **kwargs):
warnings.warn("This operator is deprecated.", DeprecationWarning, stacklevel=2)
super().__init__(**kwargs)
| DeprecatedOperator |
python | langchain-ai__langchain | libs/partners/ollama/langchain_ollama/llms.py | {
"start": 615,
"end": 19875
} | class ____(BaseLLM):
"""Ollama large language models.
Setup:
Install `langchain-ollama` and install/run the Ollama server locally:
```bash
pip install -U langchain-ollama
# Visit https://ollama.com/download to download and install Ollama
# (Linux users): start the server with `ollama serve`
```
Download a model to use:
```bash
ollama pull llama3.1
```
Key init args — generation params:
model: str
Name of the Ollama model to use (e.g. `'llama4'`).
temperature: float | None
Sampling temperature. Higher values make output more creative.
num_predict: int | None
Maximum number of tokens to predict.
top_k: int | None
Limits the next token selection to the K most probable tokens.
top_p: float | None
Nucleus sampling parameter. Higher values lead to more diverse text.
mirostat: int | None
Enable Mirostat sampling for controlling perplexity.
seed: int | None
Random number seed for generation reproducibility.
Key init args — client params:
base_url:
Base URL where Ollama server is hosted.
keep_alive:
How long the model stays loaded into memory.
format:
Specify the format of the output.
See full list of supported init args and their descriptions in the params section.
Instantiate:
```python
from langchain_ollama import OllamaLLM
model = OllamaLLM(
model="llama3.1",
temperature=0.7,
num_predict=256,
# base_url="http://localhost:11434",
# other params...
)
```
Invoke:
```python
input_text = "The meaning of life is "
response = model.invoke(input_text)
print(response)
```
```txt
"a philosophical question that has been contemplated by humans for
centuries..."
```
Stream:
```python
for chunk in model.stream(input_text):
print(chunk, end="")
```
```txt
a philosophical question that has been contemplated by humans for
centuries...
```
Async:
```python
response = await model.ainvoke(input_text)
# stream:
# async for chunk in model.astream(input_text):
# print(chunk, end="")
```
"""
model: str
"""Model name to use."""
reasoning: bool | None = None
"""Controls the reasoning/thinking mode for
[supported models](https://ollama.com/search?c=thinking).
- `True`: Enables reasoning mode. The model's reasoning process will be
captured and returned separately in the `additional_kwargs` of the
response message, under `reasoning_content`. The main response
content will not include the reasoning tags.
- `False`: Disables reasoning mode. The model will not perform any reasoning,
and the response will not include any reasoning content.
- `None` (Default): The model will use its default reasoning behavior. If
the model performs reasoning, the `<think>` and `</think>` tags will
be present directly within the main response content."""
validate_model_on_init: bool = False
"""Whether to validate the model exists in ollama locally on initialization.
!!! version-added "Added in `langchain-ollama` 0.3.4"
"""
mirostat: int | None = None
"""Enable Mirostat sampling for controlling perplexity.
(default: `0`, `0` = disabled, `1` = Mirostat, `2` = Mirostat 2.0)"""
mirostat_eta: float | None = None
"""Influences how quickly the algorithm responds to feedback
from the generated text. A lower learning rate will result in
slower adjustments, while a higher learning rate will make
the algorithm more responsive. (Default: `0.1`)"""
mirostat_tau: float | None = None
"""Controls the balance between coherence and diversity
of the output. A lower value will result in more focused and
coherent text. (Default: `5.0`)"""
num_ctx: int | None = None
"""Sets the size of the context window used to generate the
next token. (Default: `2048`)"""
num_gpu: int | None = None
"""The number of GPUs to use. On macOS it defaults to `1` to
enable metal support, `0` to disable."""
num_thread: int | None = None
"""Sets the number of threads to use during computation.
By default, Ollama will detect this for optimal performance.
It is recommended to set this value to the number of physical
CPU cores your system has (as opposed to the logical number of cores)."""
num_predict: int | None = None
"""Maximum number of tokens to predict when generating text.
(Default: `128`, `-1` = infinite generation, `-2` = fill context)"""
repeat_last_n: int | None = None
"""Sets how far back for the model to look back to prevent
repetition. (Default: `64`, `0` = disabled, `-1` = `num_ctx`)"""
repeat_penalty: float | None = None
"""Sets how strongly to penalize repetitions. A higher value (e.g., `1.5`)
will penalize repetitions more strongly, while a lower value (e.g., `0.9`)
will be more lenient. (Default: `1.1`)"""
temperature: float | None = None
"""The temperature of the model. Increasing the temperature will
make the model answer more creatively. (Default: `0.8`)"""
seed: int | None = None
"""Sets the random number seed to use for generation. Setting this
to a specific number will make the model generate the same text for
the same prompt."""
stop: list[str] | None = None
"""Sets the stop tokens to use."""
tfs_z: float | None = None
"""Tail free sampling is used to reduce the impact of less probable
tokens from the output. A higher value (e.g., `2.0`) will reduce the
impact more, while a value of 1.0 disables this setting. (default: `1`)"""
top_k: int | None = None
"""Reduces the probability of generating nonsense. A higher value (e.g. `100`)
will give more diverse answers, while a lower value (e.g. `10`)
will be more conservative. (Default: `40`)"""
top_p: float | None = None
"""Works together with top-k. A higher value (e.g., `0.95`) will lead
to more diverse text, while a lower value (e.g., `0.5`) will
generate more focused and conservative text. (Default: `0.9`)"""
format: Literal["", "json"] = ""
"""Specify the format of the output (options: `'json'`)"""
keep_alive: int | str | None = None
"""How long the model will stay loaded into memory."""
base_url: str | None = None
"""Base url the model is hosted under.
If none, defaults to the Ollama client default.
Supports `userinfo` auth in the format `http://username:password@localhost:11434`.
Useful if your Ollama server is behind a proxy.
!!! warning
`userinfo` is not secure and should only be used for local testing or
in secure environments. Avoid using it in production or over unsecured
networks.
!!! note
If using `userinfo`, ensure that the Ollama server is configured to
accept and validate these credentials.
!!! note
`userinfo` headers are passed to both sync and async clients.
"""
client_kwargs: dict | None = {}
"""Additional kwargs to pass to the httpx clients. Pass headers in here.
These arguments are passed to both synchronous and async clients.
Use `sync_client_kwargs` and `async_client_kwargs` to pass different arguments
to synchronous and asynchronous clients.
"""
async_client_kwargs: dict | None = {}
"""Additional kwargs to merge with `client_kwargs` before passing to httpx client.
These are clients unique to the async client; for shared args use `client_kwargs`.
For a full list of the params, see the [httpx documentation](https://www.python-httpx.org/api/#asyncclient).
"""
sync_client_kwargs: dict | None = {}
"""Additional kwargs to merge with `client_kwargs` before passing to httpx client.
These are clients unique to the sync client; for shared args use `client_kwargs`.
For a full list of the params, see the [httpx documentation](https://www.python-httpx.org/api/#client).
"""
_client: Client | None = PrivateAttr(default=None)
"""The client to use for making requests."""
_async_client: AsyncClient | None = PrivateAttr(default=None)
"""The async client to use for making requests."""
def _generate_params(
self,
prompt: str,
stop: list[str] | None = None,
**kwargs: Any,
) -> dict[str, Any]:
if self.stop is not None and stop is not None:
msg = "`stop` found in both the input and default params."
raise ValueError(msg)
if self.stop is not None:
stop = self.stop
options_dict = kwargs.pop(
"options",
{
"mirostat": self.mirostat,
"mirostat_eta": self.mirostat_eta,
"mirostat_tau": self.mirostat_tau,
"num_ctx": self.num_ctx,
"num_gpu": self.num_gpu,
"num_thread": self.num_thread,
"num_predict": self.num_predict,
"repeat_last_n": self.repeat_last_n,
"repeat_penalty": self.repeat_penalty,
"temperature": self.temperature,
"seed": self.seed,
"stop": self.stop if stop is None else stop,
"tfs_z": self.tfs_z,
"top_k": self.top_k,
"top_p": self.top_p,
},
)
return {
"prompt": prompt,
"stream": kwargs.pop("stream", True),
"model": kwargs.pop("model", self.model),
"think": kwargs.pop("reasoning", self.reasoning),
"format": kwargs.pop("format", self.format),
"options": Options(**options_dict),
"keep_alive": kwargs.pop("keep_alive", self.keep_alive),
**kwargs,
}
@property
def _llm_type(self) -> str:
"""Return type of LLM."""
return "ollama-llm"
def _get_ls_params(
self, stop: list[str] | None = None, **kwargs: Any
) -> LangSmithParams:
"""Get standard params for tracing."""
params = super()._get_ls_params(stop=stop, **kwargs)
if max_tokens := kwargs.get("num_predict", self.num_predict):
params["ls_max_tokens"] = max_tokens
return params
@model_validator(mode="after")
def _set_clients(self) -> Self:
"""Set clients to use for ollama."""
client_kwargs = self.client_kwargs or {}
cleaned_url, auth_headers = parse_url_with_auth(self.base_url)
merge_auth_headers(client_kwargs, auth_headers)
sync_client_kwargs = client_kwargs
if self.sync_client_kwargs:
sync_client_kwargs = {**sync_client_kwargs, **self.sync_client_kwargs}
async_client_kwargs = client_kwargs
if self.async_client_kwargs:
async_client_kwargs = {**async_client_kwargs, **self.async_client_kwargs}
self._client = Client(host=cleaned_url, **sync_client_kwargs)
self._async_client = AsyncClient(host=cleaned_url, **async_client_kwargs)
if self.validate_model_on_init:
validate_model(self._client, self.model)
return self
async def _acreate_generate_stream(
self,
prompt: str,
stop: list[str] | None = None,
**kwargs: Any,
) -> AsyncIterator[Mapping[str, Any] | str]:
if self._async_client:
async for part in await self._async_client.generate(
**self._generate_params(prompt, stop=stop, **kwargs)
):
yield part
def _create_generate_stream(
self,
prompt: str,
stop: list[str] | None = None,
**kwargs: Any,
) -> Iterator[Mapping[str, Any] | str]:
if self._client:
yield from self._client.generate(
**self._generate_params(prompt, stop=stop, **kwargs)
)
async def _astream_with_aggregation(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
verbose: bool = False, # noqa: FBT002
**kwargs: Any,
) -> GenerationChunk:
final_chunk = None
thinking_content = ""
async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
if not isinstance(stream_resp, str):
if stream_resp.get("thinking"):
thinking_content += stream_resp["thinking"]
chunk = GenerationChunk(
text=stream_resp.get("response", ""),
generation_info=(
dict(stream_resp) if stream_resp.get("done") is True else None
),
)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=verbose,
)
if final_chunk is None:
msg = "No data received from Ollama stream."
raise ValueError(msg)
if thinking_content:
if final_chunk.generation_info:
final_chunk.generation_info["thinking"] = thinking_content
else:
final_chunk.generation_info = {"thinking": thinking_content}
return final_chunk
def _stream_with_aggregation(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
verbose: bool = False, # noqa: FBT002
**kwargs: Any,
) -> GenerationChunk:
final_chunk = None
thinking_content = ""
for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
if not isinstance(stream_resp, str):
if stream_resp.get("thinking"):
thinking_content += stream_resp["thinking"]
chunk = GenerationChunk(
text=stream_resp.get("response", ""),
generation_info=(
dict(stream_resp) if stream_resp.get("done") is True else None
),
)
if final_chunk is None:
final_chunk = chunk
else:
final_chunk += chunk
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
chunk=chunk,
verbose=verbose,
)
if final_chunk is None:
msg = "No data received from Ollama stream."
raise ValueError(msg)
if thinking_content:
if final_chunk.generation_info:
final_chunk.generation_info["thinking"] = thinking_content
else:
final_chunk.generation_info = {"thinking": thinking_content}
return final_chunk
def _generate(
self,
prompts: list[str],
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> LLMResult:
generations = []
for prompt in prompts:
final_chunk = self._stream_with_aggregation(
prompt,
stop=stop,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
generations.append([final_chunk])
return LLMResult(generations=generations) # type: ignore[arg-type]
async def _agenerate(
self,
prompts: list[str],
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> LLMResult:
generations = []
for prompt in prompts:
final_chunk = await self._astream_with_aggregation(
prompt,
stop=stop,
run_manager=run_manager,
verbose=self.verbose,
**kwargs,
)
generations.append([final_chunk])
return LLMResult(generations=generations) # type: ignore[arg-type]
def _stream(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: CallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> Iterator[GenerationChunk]:
reasoning = kwargs.get("reasoning", self.reasoning)
for stream_resp in self._create_generate_stream(prompt, stop, **kwargs):
if not isinstance(stream_resp, str):
additional_kwargs = {}
if reasoning and (thinking_content := stream_resp.get("thinking")):
additional_kwargs["reasoning_content"] = thinking_content
chunk = GenerationChunk(
text=(stream_resp.get("response", "")),
generation_info={
"finish_reason": self.stop,
**additional_kwargs,
**(
dict(stream_resp) if stream_resp.get("done") is True else {}
),
},
)
if run_manager:
run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
yield chunk
async def _astream(
self,
prompt: str,
stop: list[str] | None = None,
run_manager: AsyncCallbackManagerForLLMRun | None = None,
**kwargs: Any,
) -> AsyncIterator[GenerationChunk]:
reasoning = kwargs.get("reasoning", self.reasoning)
async for stream_resp in self._acreate_generate_stream(prompt, stop, **kwargs):
if not isinstance(stream_resp, str):
additional_kwargs = {}
if reasoning and (thinking_content := stream_resp.get("thinking")):
additional_kwargs["reasoning_content"] = thinking_content
chunk = GenerationChunk(
text=(stream_resp.get("response", "")),
generation_info={
"finish_reason": self.stop,
**additional_kwargs,
**(
dict(stream_resp) if stream_resp.get("done") is True else {}
),
},
)
if run_manager:
await run_manager.on_llm_new_token(
chunk.text,
verbose=self.verbose,
)
yield chunk
| OllamaLLM |
python | joke2k__faker | faker/providers/date_time/ro_RO/__init__.py | {
"start": 46,
"end": 781
} | class ____(DateTimeProvider):
DAY_NAMES = {
"0": "duminica",
"1": "luni",
"2": "marti",
"3": "miercuri",
"4": "joi",
"5": "vineri",
"6": "sambata",
}
MONTH_NAMES = {
"01": "ianuarie",
"02": "februarie",
"03": "martie",
"04": "aprilie",
"05": "mai",
"06": "iunie",
"07": "iulie",
"08": "august",
"09": "septembrie",
"10": "octombrie",
"11": "noiembrie",
"12": "decembrie",
}
def day_of_week(self):
day = self.date("%w")
return self.DAY_NAMES[day]
def month_name(self):
month = self.month()
return self.MONTH_NAMES[month]
| Provider |
python | kubernetes-client__python | kubernetes/client/models/v1_service_cidr.py | {
"start": 383,
"end": 7226
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'kind': 'str',
'metadata': 'V1ObjectMeta',
'spec': 'V1ServiceCIDRSpec',
'status': 'V1ServiceCIDRStatus'
}
attribute_map = {
'api_version': 'apiVersion',
'kind': 'kind',
'metadata': 'metadata',
'spec': 'spec',
'status': 'status'
}
def __init__(self, api_version=None, kind=None, metadata=None, spec=None, status=None, local_vars_configuration=None): # noqa: E501
"""V1ServiceCIDR - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._kind = None
self._metadata = None
self._spec = None
self._status = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
if spec is not None:
self.spec = spec
if status is not None:
self.status = status
@property
def api_version(self):
"""Gets the api_version of this V1ServiceCIDR. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1ServiceCIDR. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1ServiceCIDR.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1ServiceCIDR. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def kind(self):
"""Gets the kind of this V1ServiceCIDR. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1ServiceCIDR. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1ServiceCIDR.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1ServiceCIDR. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1ServiceCIDR. # noqa: E501
:return: The metadata of this V1ServiceCIDR. # noqa: E501
:rtype: V1ObjectMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1ServiceCIDR.
:param metadata: The metadata of this V1ServiceCIDR. # noqa: E501
:type: V1ObjectMeta
"""
self._metadata = metadata
@property
def spec(self):
"""Gets the spec of this V1ServiceCIDR. # noqa: E501
:return: The spec of this V1ServiceCIDR. # noqa: E501
:rtype: V1ServiceCIDRSpec
"""
return self._spec
@spec.setter
def spec(self, spec):
"""Sets the spec of this V1ServiceCIDR.
:param spec: The spec of this V1ServiceCIDR. # noqa: E501
:type: V1ServiceCIDRSpec
"""
self._spec = spec
@property
def status(self):
"""Gets the status of this V1ServiceCIDR. # noqa: E501
:return: The status of this V1ServiceCIDR. # noqa: E501
:rtype: V1ServiceCIDRStatus
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this V1ServiceCIDR.
:param status: The status of this V1ServiceCIDR. # noqa: E501
:type: V1ServiceCIDRStatus
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ServiceCIDR):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ServiceCIDR):
return True
return self.to_dict() != other.to_dict()
| V1ServiceCIDR |
python | mwaskom__seaborn | seaborn/_statistics.py | {
"start": 20817,
"end": 24946
} | class ____:
def __init__(self, k_depth, outlier_prop, trust_alpha):
"""
Compute percentiles of a distribution using various tail stopping rules.
Parameters
----------
k_depth: "tukey", "proportion", "trustworthy", or "full"
Stopping rule for choosing tail percentiled to show:
- tukey: Show a similar number of outliers as in a conventional boxplot.
- proportion: Show approximately `outlier_prop` outliers.
- trust_alpha: Use `trust_alpha` level for most extreme tail percentile.
outlier_prop: float
Parameter for `k_depth="proportion"` setting the expected outlier rate.
trust_alpha: float
Parameter for `k_depth="trustworthy"` setting the confidence threshold.
Notes
-----
Based on the proposal in this paper:
https://vita.had.co.nz/papers/letter-value-plot.pdf
"""
k_options = ["tukey", "proportion", "trustworthy", "full"]
if isinstance(k_depth, str):
_check_argument("k_depth", k_options, k_depth)
elif not isinstance(k_depth, int):
err = (
"The `k_depth` parameter must be either an integer or string "
f"(one of {k_options}), not {k_depth!r}."
)
raise TypeError(err)
self.k_depth = k_depth
self.outlier_prop = outlier_prop
self.trust_alpha = trust_alpha
def _compute_k(self, n):
# Select the depth, i.e. number of boxes to draw, based on the method
if self.k_depth == "full":
# extend boxes to 100% of the data
k = int(np.log2(n)) + 1
elif self.k_depth == "tukey":
# This results with 5-8 points in each tail
k = int(np.log2(n)) - 3
elif self.k_depth == "proportion":
k = int(np.log2(n)) - int(np.log2(n * self.outlier_prop)) + 1
elif self.k_depth == "trustworthy":
normal_quantile_func = np.vectorize(NormalDist().inv_cdf)
point_conf = 2 * normal_quantile_func(1 - self.trust_alpha / 2) ** 2
k = int(np.log2(n / point_conf)) + 1
else:
# Allow having k directly specified as input
k = int(self.k_depth)
return max(k, 1)
def __call__(self, x):
"""Evaluate the letter values."""
k = self._compute_k(len(x))
exp = np.arange(k + 1, 1, -1), np.arange(2, k + 2)
levels = k + 1 - np.concatenate([exp[0], exp[1][1:]])
percentiles = 100 * np.concatenate([0.5 ** exp[0], 1 - 0.5 ** exp[1]])
if self.k_depth == "full":
percentiles[0] = 0
percentiles[-1] = 100
values = np.percentile(x, percentiles)
fliers = np.asarray(x[(x < values.min()) | (x > values.max())])
median = np.percentile(x, 50)
return {
"k": k,
"levels": levels,
"percs": percentiles,
"values": values,
"fliers": fliers,
"median": median,
}
def _percentile_interval(data, width):
"""Return a percentile interval from data of a given width."""
edge = (100 - width) / 2
percentiles = edge, 100 - edge
return np.nanpercentile(data, percentiles)
def _validate_errorbar_arg(arg):
"""Check type and value of errorbar argument and assign default level."""
DEFAULT_LEVELS = {
"ci": 95,
"pi": 95,
"se": 1,
"sd": 1,
}
usage = "`errorbar` must be a callable, string, or (string, number) tuple"
if arg is None:
return None, None
elif callable(arg):
return arg, None
elif isinstance(arg, str):
method = arg
level = DEFAULT_LEVELS.get(method, None)
else:
try:
method, level = arg
except (ValueError, TypeError) as err:
raise err.__class__(usage) from err
_check_argument("errorbar", list(DEFAULT_LEVELS), method)
if level is not None and not isinstance(level, Number):
raise TypeError(usage)
return method, level
| LetterValues |
python | pytorch__pytorch | torch/_dynamo/testing.py | {
"start": 9277,
"end": 10216
} | class ____:
def __init__(self) -> None:
self.graphs: list[torch.fx.GraphModule] = []
self.fw_graphs: list[torch.fx.GraphModule] = []
self.bw_graphs: list[torch.fx.GraphModule] = []
def __call__(
self, gm: torch.fx.GraphModule, example_inputs: list[torch.Tensor]
) -> Callable[..., Any]:
self.graphs.append(gm)
def fw_compiler(
gm: torch.fx.GraphModule, example_inputs: list[torch.Tensor]
) -> Callable[..., Any]:
self.fw_graphs.append(gm)
return gm.forward
def bw_compiler(
gm: torch.fx.GraphModule, example_inputs: list[torch.Tensor]
) -> Callable[..., Any]:
self.bw_graphs.append(gm)
return gm.forward
return aot_eager(
gm,
example_inputs,
fw_compiler=fw_compiler,
bw_compiler=bw_compiler,
)
| AotEagerAndRecordGraphs |
python | great-expectations__great_expectations | contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_zip9.py | {
"start": 629,
"end": 1610
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_zip9"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_zip9(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidZip9 |
python | squidfunk__mkdocs-material | material/extensions/preview.py | {
"start": 5611,
"end": 8283
} | class ____(Extension):
"""
A Markdown extension to enable instant previews on links.
This extensions allows to automatically add the `data-preview` attribute to
internal links matching specific criteria, so Material for MkDocs renders a
nice preview on hover as part of a tooltip. It is the recommended way to
add previews to links in a programmatic way.
"""
def __init__(self, *args, **kwargs):
"""
"""
self.config = {
"configurations": [[], "Filter configurations"],
"sources": [{}, "Link sources"],
"targets": [{}, "Link targets"]
}
super().__init__(*args, **kwargs)
def extendMarkdown(self, md: Markdown):
"""
Register Markdown extension.
Arguments:
md: The Markdown instance.
"""
md.registerExtension(self)
# Create and register treeprocessor - we use the same priority as the
# `relpath` treeprocessor, the latter of which is guaranteed to run
# after our treeprocessor, so we can check the original Markdown URIs
# before they are resolved to URLs.
processor = PreviewProcessor(md, self.getConfigs())
md.treeprocessors.register(processor, "preview", 0)
# -----------------------------------------------------------------------------
# Functions
# -----------------------------------------------------------------------------
def get_filter(settings: dict, key: str):
"""
Get file filter from settings.
Arguments:
settings: The settings.
key: The key in the settings.
Returns:
The file filter.
"""
config = FilterConfig()
config.load_dict(settings.get(key) or {})
# Validate filter configuration
errors, warnings = config.validate()
for _, w in warnings:
log.warning(
f"Error reading filter configuration in '{key}':\n"
f"{w}"
)
for _, e in errors:
raise ConfigurationError(
f"Error reading filter configuration in '{key}':\n"
f"{e}"
)
# Return file filter
return FileFilter(config = config) # type: ignore
def makeExtension(**kwargs):
"""
Register Markdown extension.
Arguments:
**kwargs: Configuration options.
Returns:
The Markdown extension.
"""
return PreviewExtension(**kwargs)
# -----------------------------------------------------------------------------
# Data
# -----------------------------------------------------------------------------
# Set up logging
log = logging.getLogger("mkdocs.material.extensions.preview")
| PreviewExtension |
python | kamyu104__LeetCode-Solutions | Python/choose-k-elements-with-maximum-sum.py | {
"start": 83,
"end": 805
} | class ____(object):
def findMaxSum(self, nums1, nums2, k):
"""
:type nums1: List[int]
:type nums2: List[int]
:type k: int
:rtype: List[int]
"""
result = [0]*len(nums1)
min_heap = []
idxs = range(len(nums1))
idxs.sort(key=lambda x: nums1[x])
total = j = 0
for i in xrange(len(idxs)):
while nums1[idxs[j]] < nums1[idxs[i]]:
total += nums2[idxs[j]]
heapq.heappush(min_heap, nums2[idxs[j]])
if len(min_heap) == k+1:
total -= heapq.heappop(min_heap)
j += 1
result[idxs[i]] = total
return result
| Solution |
python | keras-team__keras | keras/src/metrics/confusion_metrics_test.py | {
"start": 6520,
"end": 9549
} | class ____(testing.TestCase):
def test_config(self):
tn_obj = metrics.TrueNegatives(name="my_tn", thresholds=[0.4, 0.9])
self.assertEqual(tn_obj.name, "my_tn")
self.assertLen(tn_obj.variables, 1)
self.assertEqual(tn_obj.thresholds, [0.4, 0.9])
# Check save and restore config
tn_obj2 = metrics.TrueNegatives.from_config(tn_obj.get_config())
self.assertEqual(tn_obj2.name, "my_tn")
self.assertLen(tn_obj2.variables, 1)
self.assertEqual(tn_obj2.thresholds, [0.4, 0.9])
def test_unweighted(self):
tn_obj = metrics.TrueNegatives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
tn_obj.update_state(y_true, y_pred)
self.assertAllClose(3.0, tn_obj.result())
def test_weighted(self):
tn_obj = metrics.TrueNegatives()
y_true = np.array(
((0, 1, 0, 1, 0), (0, 0, 1, 1, 1), (1, 1, 1, 1, 0), (0, 0, 0, 0, 1))
)
y_pred = np.array(
((0, 0, 1, 1, 0), (1, 1, 1, 1, 1), (0, 1, 0, 1, 0), (1, 1, 1, 1, 1))
)
sample_weight = np.array((1.0, 1.5, 2.0, 2.5))
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose(4.0, result)
def test_unweighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
tn_obj.update_state(y_true, y_pred)
self.assertAllClose([2.0, 5.0, 7.0], tn_obj.result())
def test_weighted_with_thresholds(self):
tn_obj = metrics.TrueNegatives(thresholds=[0.15, 0.5, 0.85])
y_pred = np.array(
(
(0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3),
(0, 1, 0.7, 0.3),
)
)
y_true = np.array(
((0, 1, 1, 0), (1, 0, 0, 0), (0, 0, 0, 0), (1, 1, 1, 1))
)
sample_weight = ((0.0, 2.0, 3.0, 5.0),)
result = tn_obj(y_true, y_pred, sample_weight=sample_weight)
self.assertAllClose([5.0, 15.0, 23.0], result)
def test_threshold_limit(self):
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[-1, 2\]",
):
metrics.TrueNegatives(thresholds=[-1, 0.5, 2])
with self.assertRaisesRegex(
ValueError,
r"Threshold values must be in \[0, 1\]. Received: \[None\]",
):
metrics.TrueNegatives(thresholds=[None])
| TrueNegativesTest |
python | bokeh__bokeh | src/bokeh/models/tickers.py | {
"start": 4896,
"end": 5648
} | class ____(Ticker):
''' A base class for non-categorical ticker types.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
num_minor_ticks = Int(5, help="""
The number of minor tick positions to generate between
adjacent major tick values.
""")
desired_num_ticks = Int(6, help="""
A desired target number of major tick positions to generate across
the plot range.
.. note:
This value is a suggestion, and ticker subclasses may ignore
it entirely, or use it only as an ideal goal to approach as well
as can be, in the context of a specific ticking strategy.
""")
| ContinuousTicker |
python | astropy__astropy | astropy/units/errors.py | {
"start": 418,
"end": 607
} | class ____(UnitsError, ValueError):
"""
Used specifically for errors related to converting between units or
interpreting units in terms of other units.
"""
| UnitConversionError |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_optimize05.py | {
"start": 315,
"end": 1426
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("optimize05.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(
self.got_filename, {"constant_memory": True, "in_memory": False}
)
worksheet = workbook.add_worksheet()
bold = workbook.add_format({"bold": 1})
italic = workbook.add_format({"italic": 1})
worksheet.write("A1", "Foo", bold)
worksheet.write("A2", "Bar", italic)
worksheet.write_rich_string("A3", "a", bold, "bc", "defg")
worksheet.write_rich_string("B4", "abc", italic, "de", "fg")
worksheet.write_rich_string("C5", "a", bold, "bc", "defg")
worksheet.write_rich_string("D6", "abc", italic, "de", "fg")
worksheet.write_rich_string("E7", "a", bold, "bcdef", "g")
worksheet.write_rich_string("F8", italic, "abcd", "efg")
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | skorch-dev__skorch | skorch/tests/test_dataset.py | {
"start": 12581,
"end": 14982
} | class ____:
"""Huggingface tokenizers should work without special adjustments"""
@pytest.fixture(scope='session')
def tokenizer(self):
transformers = pytest.importorskip('transformers')
tokenizer = transformers.AutoTokenizer.from_pretrained('bert-base-uncased')
return tokenizer
@pytest.fixture(scope='session')
def data(self, tokenizer):
"""A simple dataset that the model should be able to learn (or overfit)
on
"""
X = [paragraph for paragraph in unittest.__doc__.split('\n') if paragraph]
Xt = tokenizer(
X,
max_length=12,
padding='max_length',
truncation=True,
return_token_type_ids=False,
return_tensors='pt',
)
y = np.array(['test' in x.lower() for x in X], dtype=np.int64)
return Xt, y
@pytest.fixture(scope='session')
def module_cls(self, tokenizer):
"""Return a simple module using embedding + linear + softmax instead of
a full-fledged BERT module.
"""
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.emb = nn.Embedding(tokenizer.vocab_size, 6)
self.dense = nn.Linear(6, 2)
self.sm = nn.Softmax(dim=-1)
# pylint: disable=arguments-differ
def forward(self, input_ids, attention_mask):
assert input_ids.shape == attention_mask.shape
X = self.emb(input_ids).mean(1)
return self.sm(self.dense(X))
return MyModule
@pytest.fixture(scope='session')
def net_cls(self):
from skorch import NeuralNetClassifier
return NeuralNetClassifier
@pytest.fixture(scope='module')
def net(self, net_cls, module_cls):
return net_cls(
module_cls,
optimizer=torch.optim.Adam,
max_epochs=5,
batch_size=8,
lr=0.1,
)
def test_fit_predict_proba(self, net, data):
X, y = data
net.fit(X, y)
y_proba = net.predict_proba(X)
assert np.allclose(y_proba.sum(1), 1)
train_losses = net.history[:, 'train_loss']
# make sure the network trained successfully with an arbitrary wide margin
assert train_losses[0] > 5 * train_losses[-1]
| TestNetWithTokenizers |
python | doocs__leetcode | solution/2400-2499/2465.Number of Distinct Averages/Solution2.py | {
"start": 0,
"end": 307
} | class ____:
def distinctAverages(self, nums: List[int]) -> int:
nums.sort()
ans = 0
cnt = Counter()
for i in range(len(nums) >> 1):
x = nums[i] + nums[-i - 1]
cnt[x] += 1
if cnt[x] == 1:
ans += 1
return ans
| Solution |
python | run-llama__llama_index | llama-index-integrations/embeddings/llama-index-embeddings-google/llama_index/embeddings/google/palm.py | {
"start": 587,
"end": 2727
} | class ____(BaseEmbedding):
"""
Class for Google PaLM embeddings.
Args:
model_name (str): Model for embedding.
Defaults to "models/embedding-gecko-001".
api_key (Optional[str]): API key to access the model. Defaults to None.
"""
_model: Any = PrivateAttr()
def __init__(
self,
model_name: str = "models/embedding-gecko-001",
api_key: Optional[str] = None,
embed_batch_size: int = DEFAULT_EMBED_BATCH_SIZE,
callback_manager: Optional[CallbackManager] = None,
**kwargs: Any,
):
super().__init__(
model_name=model_name,
embed_batch_size=embed_batch_size,
callback_manager=callback_manager,
**kwargs,
)
palm.configure(api_key=api_key)
self._model = palm
@classmethod
def class_name(cls) -> str:
return "PaLMEmbedding"
def _get_query_embedding(self, query: str) -> List[float]:
"""Get query embedding."""
return self._model.generate_embeddings(model=self.model_name, text=query)[
"embedding"
]
async def _aget_query_embedding(self, query: str) -> List[float]:
"""The asynchronous version of _get_query_embedding."""
return await self._model.aget_embedding(query)
def _get_text_embedding(self, text: str) -> List[float]:
"""Get text embedding."""
return self._model.generate_embeddings(model=self.model_name, text=text)[
"embedding"
]
async def _aget_text_embedding(self, text: str) -> List[float]:
"""Asynchronously get text embedding."""
return self._model._get_text_embedding(text)
def _get_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Get text embeddings."""
return self._model.generate_embeddings(model=self.model_name, text=texts)[
"embedding"
]
async def _aget_text_embeddings(self, texts: List[str]) -> List[List[float]]:
"""Asynchronously get text embeddings."""
return await self._model._get_embeddings(texts)
| GooglePaLMEmbedding |
python | pytorch__pytorch | test/ao/sparsity/test_scheduler.py | {
"start": 2792,
"end": 7248
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.model_sparse_config = [
{"tensor_fqn": "0.weight", "sparsity_level": 0.8},
{"tensor_fqn": "2.weight", "sparsity_level": 0.4},
]
self.sorted_sparse_levels = [
conf["sparsity_level"] for conf in self.model_sparse_config
]
self.initial_sparsity = 0.1
self.initial_step = 3
def _make_model(self, **kwargs):
model = nn.Sequential(
nn.Linear(13, 17),
nn.Dropout(0.5),
nn.Linear(17, 3),
)
return model
def _make_scheduler(self, model, **kwargs):
sparsifier = WeightNormSparsifier()
sparsifier.prepare(model, config=self.model_sparse_config)
scheduler_args = {
"init_sl": self.initial_sparsity,
"init_t": self.initial_step,
}
scheduler_args.update(kwargs)
scheduler = CubicSL(sparsifier, **scheduler_args)
return sparsifier, scheduler
@staticmethod
def _get_sparsity_levels(sparsifier, precision=32):
r"""Gets the current levels of sparsity in a sparsifier."""
return [
round(group["sparsity_level"], precision) for group in sparsifier.groups
]
def test_constructor(self):
model = self._make_model()
sparsifier, scheduler = self._make_scheduler(model=model, initially_zero=True)
self.assertIs(
scheduler.sparsifier, sparsifier, msg="Sparsifier is not properly attached"
)
self.assertEqual(
scheduler._step_count,
1,
msg="Scheduler is initialized with incorrect step count",
)
self.assertEqual(
scheduler.base_sl,
self.sorted_sparse_levels,
msg="Scheduler did not store the target sparsity levels correctly",
)
# Value before t_0 is 0
self.assertEqual(
self._get_sparsity_levels(sparsifier),
scheduler._make_sure_a_list(0.0),
msg="Sparsifier is not reset correctly after attaching to the Scheduler",
)
# Value before t_0 is s_0
model = self._make_model()
sparsifier, scheduler = self._make_scheduler(model=model, initially_zero=False)
self.assertEqual(
self._get_sparsity_levels(sparsifier),
scheduler._make_sure_a_list(self.initial_sparsity),
msg="Sparsifier is not reset correctly after attaching to the Scheduler",
)
def test_step(self):
# For n=5, dt=2, there will be totally 10 steps between s_0 and s_f, starting from t_0
model = self._make_model()
sparsifier, scheduler = self._make_scheduler(
model=model, initially_zero=True, init_t=3, delta_t=2, total_t=5
)
scheduler.step()
scheduler.step()
self.assertEqual(
scheduler._step_count,
3,
msg="Scheduler step_count is expected to increment",
)
# Value before t_0 is supposed to be 0
self.assertEqual(
self._get_sparsity_levels(sparsifier),
scheduler._make_sure_a_list(0.0),
msg="Scheduler step updating the sparsity level before t_0",
)
scheduler.step() # Step = 3 => sparsity = initial_sparsity
self.assertEqual(
self._get_sparsity_levels(sparsifier),
scheduler._make_sure_a_list(self.initial_sparsity),
msg="Sparsifier is not reset to initial sparsity at the first step",
)
scheduler.step() # Step = 4 => sparsity ~ [0.3, 0.2]
self.assertEqual(
self._get_sparsity_levels(sparsifier, 1),
[0.3, 0.2],
msg="Sparsity level is not set correctly after the first step",
)
current_step = scheduler._step_count - scheduler.init_t[0] - 1
more_steps_needed = scheduler.delta_t[0] * scheduler.total_t[0] - current_step
for _ in range(more_steps_needed): # More steps needed to final sparsity level
scheduler.step()
self.assertEqual(
self._get_sparsity_levels(sparsifier),
self.sorted_sparse_levels,
msg="Sparsity level is not reaching the target level after delta_t * n steps ",
)
if __name__ == "__main__":
raise_on_run_directly("test/test_ao_sparsity.py")
| TestCubicScheduler |
python | facelessuser__soupsieve | soupsieve/css_match.py | {
"start": 1625,
"end": 2087
} | class ____:
"""
Fake parent class.
When we have a fragment with no `BeautifulSoup` document object,
we can't evaluate `nth` selectors properly. Create a temporary
fake parent so we can traverse the root element as a child.
"""
def __init__(self, element: bs4.Tag) -> None:
"""Initialize."""
self.contents = [element]
def __len__(self) -> int:
"""Length."""
return len(self.contents)
| _FakeParent |
python | django__django | tests/contenttypes_tests/test_models.py | {
"start": 12789,
"end": 13256
} | class ____(TestCase):
databases = {"default", "other"}
def test_multidb(self):
"""
When using multiple databases, ContentType.objects.get_for_model() uses
db_for_read().
"""
ContentType.objects.clear_cache()
with (
self.assertNumQueries(0, using="default"),
self.assertNumQueries(1, using="other"),
):
ContentType.objects.get_for_model(Author)
| ContentTypesMultidbTests |
python | ray-project__ray | python/ray/tests/test_autoscaler.py | {
"start": 11367,
"end": 146911
} | class ____(unittest.TestCase):
def setUp(self):
_NODE_PROVIDERS["mock"] = lambda config: self.create_provider
_DEFAULT_CONFIGS["mock"] = _DEFAULT_CONFIGS["aws"]
self.provider = None
self.tmpdir = tempfile.mkdtemp()
def tearDown(self):
self.provider = None
del _NODE_PROVIDERS["mock"]
_clear_provider_cache()
shutil.rmtree(self.tmpdir)
ray.shutdown()
def waitFor(self, condition, num_retries=50, fail_msg=None):
for _ in range(num_retries):
if condition():
return
time.sleep(0.1)
fail_msg = fail_msg or "Timed out waiting for {}".format(condition)
raise TimeoutError(fail_msg)
def waitForUpdatersToFinish(self, autoscaler):
self.waitFor(
lambda: all(
not updater.is_alive() for updater in autoscaler.updaters.values()
),
num_retries=500,
fail_msg="Last round of updaters didn't complete on time.",
)
def num_nodes(self, tag_filters=None):
if tag_filters is None:
tag_filters = {}
return len(self.provider.non_terminated_nodes(tag_filters))
def waitForNodes(self, expected, comparison=None, tag_filters=None):
if comparison is None:
comparison = self.assertEqual
MAX_ITER = 50
for i in range(MAX_ITER):
n = self.num_nodes(tag_filters)
try:
comparison(n, expected, msg="Unexpected node quantity.")
return
except Exception:
if i == MAX_ITER - 1:
print(self.provider.non_terminated_nodes(tag_filters))
raise
time.sleep(0.1)
def create_provider(self, config, cluster_name):
assert self.provider
return self.provider
def write_config(self, config, call_prepare_config=True):
new_config = copy.deepcopy(config)
if call_prepare_config:
new_config = prepare_config(new_config)
path = os.path.join(self.tmpdir, "simple.yaml")
with open(path, "w") as f:
f.write(yaml.dump(new_config))
return path
def worker_node_thread_check(self, foreground_node_launcher: bool):
"""Confirms that worker nodes were launched in the main thread if foreground
node launch is enabled, in a subthread otherwise.
Args:
foreground_node_launcher: Whether workers nodes are expected to be
launched in the foreground.
"""
worker_ids = self.provider.non_terminated_nodes(tag_filters=WORKER_FILTER)
worker_nodes = [self.provider.mock_nodes[worker_id] for worker_id in worker_ids]
if foreground_node_launcher:
# All workers were created in the main thread.
assert all(
worker_node.created_in_main_thread for worker_node in worker_nodes
)
else:
# All workers were created in a background thread.
assert not any(
worker_node.created_in_main_thread for worker_node in worker_nodes
)
def testAutoscalerConfigValidationFailNotFatal(self):
invalid_config = {**SMALL_CLUSTER, "invalid_property_12345": "test"}
# First check that this config is actually invalid
with pytest.raises(ValidationError):
validate_config(invalid_config)
config_path = self.write_config(invalid_config)
self.provider = MockProvider()
runner = MockProcessRunner()
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
assert len(self.provider.non_terminated_nodes({})) == 0
autoscaler.update()
self.waitForNodes(1)
autoscaler.update()
self.waitForNodes(1)
def testValidation(self):
"""Ensures that schema validation is working."""
config = copy.deepcopy(SMALL_CLUSTER)
try:
validate_config(config)
except Exception:
self.fail("Test config did not pass validation test!")
config["blah"] = "blah"
with pytest.raises(ValidationError):
validate_config(config)
del config["blah"]
del config["provider"]
with pytest.raises(ValidationError):
validate_config(config)
def testValidateDefaultConfig(self):
config = {}
config["provider"] = {
"type": "aws",
"region": "us-east-1",
"availability_zone": "us-east-1a",
}
config = prepare_config(config)
try:
validate_config(config)
except ValidationError:
self.fail("Default config did not pass validation test!")
def testGetOrCreateHeadNode(self):
config = copy.deepcopy(SMALL_CLUSTER)
head_run_option = "--kernel-memory=10g"
standard_run_option = "--memory-swap=5g"
config["docker"]["head_run_options"] = [head_run_option]
config["docker"]["run_options"] = [standard_run_option]
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Mounts", ["[]"])
# Two initial calls to rsync, + 2 more calls during run_init
runner.respond_to_call(".State.Running", ["false", "false", "false", "false"])
runner.respond_to_call("json .Config.Env", ["[]"])
def _create_node(node_config, tags, count, _skip_wait=False):
assert tags[TAG_RAY_NODE_STATUS] == STATUS_UNINITIALIZED
if not _skip_wait:
self.provider.ready_to_create.wait()
if self.provider.fail_creates:
return
with self.provider.lock:
if self.provider.cache_stopped:
for node in self.provider.mock_nodes.values():
if node.state == "stopped" and count > 0:
count -= 1
node.state = "pending"
node.tags.update(tags)
for _ in range(count):
self.provider.mock_nodes[str(self.provider.next_id)] = MockNode(
str(self.provider.next_id),
tags.copy(),
node_config,
tags.get(TAG_RAY_USER_NODE_TYPE),
unique_ips=self.provider.unique_ips,
)
self.provider.next_id += 1
self.provider.create_node = _create_node
commands.get_or_create_head_node(
config,
printable_config_file=config_path,
no_restart=False,
restart_only=False,
yes=True,
override_cluster_name=None,
_provider=self.provider,
_runner=runner,
)
self.waitForNodes(1)
runner.assert_has_call("1.2.3.4", "init_cmd")
runner.assert_has_call("1.2.3.4", "head_setup_cmd")
runner.assert_has_call("1.2.3.4", "start_ray_head")
self.assertEqual(self.provider.mock_nodes["0"].node_type, "head")
runner.assert_has_call("1.2.3.4", pattern="docker run")
runner.assert_has_call("1.2.3.4", pattern=head_run_option)
runner.assert_has_call("1.2.3.4", pattern=standard_run_option)
docker_mount_prefix = get_docker_host_mount_location(
SMALL_CLUSTER["cluster_name"]
)
runner.assert_not_has_call(
"1.2.3.4", pattern=f"-v {docker_mount_prefix}/~/ray_bootstrap_config"
)
common_container_copy = f"rsync -e.*docker exec -i.*{docker_mount_prefix}/~/"
runner.assert_has_call(
"1.2.3.4", pattern=common_container_copy + "ray_bootstrap_key.pem"
)
runner.assert_has_call(
"1.2.3.4", pattern=common_container_copy + "ray_bootstrap_config.yaml"
)
return config
def testNodeTypeNameChange(self):
    """
    Tests that cluster launcher and autoscaler have correct behavior under
    changes and deletions of node type keys.

    Specifically if we change the key from "old-type" to "new-type", nodes
    of type "old-type" are deleted and (if required by the config) replaced
    by nodes of type "new-type".

    Strategy:
        1. launch a test cluster with a head and one `min_worker`
        2. change node type keys for both head and worker in cluster yaml
        3. update cluster with new yaml
        4. verify graceful replacement of the two nodes with old node types
           with two nodes with new node types.
    """
    # Default config with renamed node types, min_worker 1, docker off.
    config = copy.deepcopy(MOCK_DEFAULT_CONFIG)
    config["docker"] = {}
    node_types = config["available_node_types"]
    node_types["ray.head.old"] = node_types.pop("ray.head.default")
    node_types["ray.worker.old"] = node_types.pop("ray.worker.default")
    config["head_node_type"] = "ray.head.old"
    node_types["ray.worker.old"]["min_workers"] = 1

    # Create head and launch autoscaler
    runner = MockProcessRunner()
    self.provider = MockProvider()

    config_path = self.write_config(config)
    commands.get_or_create_head_node(
        config,
        printable_config_file=config_path,
        no_restart=False,
        restart_only=False,
        yes=True,
        override_cluster_name=None,
        _provider=self.provider,
        _runner=runner,
    )
    self.waitForNodes(1)
    lm = LoadMetrics()
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    autoscaler.update()
    self.waitForNodes(2)

    head_list = self.provider.non_terminated_nodes(
        {TAG_RAY_NODE_KIND: NODE_KIND_HEAD}
    )
    worker_list = self.provider.non_terminated_nodes(
        {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
    )
    # One head (as always)
    # One worker (min_workers 1 with no resource demands)
    assert len(head_list) == 1 and len(worker_list) == 1
    worker, head = worker_list.pop(), head_list.pop()
    # Confirm node type tags
    assert (
        self.provider.node_tags(head).get(TAG_RAY_USER_NODE_TYPE) == "ray.head.old"
    )
    assert (
        self.provider.node_tags(worker).get(TAG_RAY_USER_NODE_TYPE)
        == "ray.worker.old"
    )

    # Rename head and worker types
    new_config = copy.deepcopy(config)
    node_types = new_config["available_node_types"]
    node_types["ray.head.new"] = node_types.pop("ray.head.old")
    node_types["ray.worker.new"] = node_types.pop("ray.worker.old")
    new_config["head_node_type"] = "ray.head.new"
    config_path = self.write_config(new_config)

    # Expect this to delete "ray.head.old" head and create "ray.head.new"
    # head.
    commands.get_or_create_head_node(
        new_config,
        printable_config_file=config_path,
        no_restart=False,
        restart_only=False,
        yes=True,
        override_cluster_name=None,
        _provider=self.provider,
        _runner=runner,
    )
    self.waitForNodes(2)

    head_list = self.provider.non_terminated_nodes(
        {TAG_RAY_NODE_KIND: NODE_KIND_HEAD}
    )
    worker_list = self.provider.non_terminated_nodes(
        {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
    )
    # One head (as always)
    # One worker (maintained from previous autoscaler update)
    assert len(head_list) == 1 and len(worker_list) == 1
    worker, head = worker_list.pop(), head_list.pop()
    # Confirm new head
    assert (
        self.provider.node_tags(head).get(TAG_RAY_USER_NODE_TYPE) == "ray.head.new"
    )
    # Still old worker, as we haven't made an autoscaler update yet.
    assert (
        self.provider.node_tags(worker).get(TAG_RAY_USER_NODE_TYPE)
        == "ray.worker.old"
    )

    # The autoscaler update should replace the old-type worker with a
    # new-type one.
    fill_in_node_ids(self.provider, lm)
    autoscaler.update()
    self.waitForNodes(2)

    events = autoscaler.event_summarizer.summary()
    # Just one node (node_id 1) terminated in the last update.
    # Validates that we didn't try to double-terminate node 0.
    assert sorted(events) == [
        "Adding 1 node(s) of type ray.worker.new.",
        "Adding 1 node(s) of type ray.worker.old.",
        "Removing 1 nodes of type ray.worker.old (not "
        "in available_node_types: ['ray.head.new', 'ray.worker.new']).",
    ]

    head_list = self.provider.non_terminated_nodes(
        {TAG_RAY_NODE_KIND: NODE_KIND_HEAD}
    )
    worker_list = self.provider.non_terminated_nodes(
        {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
    )
    # One head (as always)
    # One worker (min_workers 1 with no resource demands)
    assert len(head_list) == 1 and len(worker_list) == 1
    worker, head = worker_list.pop(), head_list.pop()
    # After the autoscaler update, new head and new worker.
    assert (
        self.provider.node_tags(head).get(TAG_RAY_USER_NODE_TYPE) == "ray.head.new"
    )
    assert (
        self.provider.node_tags(worker).get(TAG_RAY_USER_NODE_TYPE)
        == "ray.worker.new"
    )
def testGetOrCreateHeadNodePodman(self):
    """Head node launch with `use_podman: True`.

    All container operations must be issued through podman (run / exec /
    inspect), and no `docker` invocation may appear in the command history.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["docker"]["use_podman"] = True
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    runner.respond_to_call("json .Mounts", ["[]"])
    # Two initial calls to rsync, + 2 more calls during run_init
    runner.respond_to_call(".State.Running", ["false", "false", "false", "false"])
    runner.respond_to_call("json .Config.Env", ["[]"])
    commands.get_or_create_head_node(
        config,
        printable_config_file=config_path,
        no_restart=False,
        restart_only=False,
        yes=True,
        override_cluster_name=None,
        _provider=self.provider,
        _runner=runner,
    )
    self.waitForNodes(1)
    # Setup commands run inside the container via `podman exec`.
    runner.assert_has_call("1.2.3.4", "init_cmd")
    runner.assert_has_call("1.2.3.4", "podman exec .*head_setup_cmd.*")
    runner.assert_has_call("1.2.3.4", "podman exec .*start_ray_head.*")
    self.assertEqual(self.provider.mock_nodes["0"].node_type, "head")
    runner.assert_has_call("1.2.3.4", pattern="podman run")

    docker_mount_prefix = get_docker_host_mount_location(
        SMALL_CLUSTER["cluster_name"]
    )
    runner.assert_not_has_call(
        "1.2.3.4", pattern=f"-v {docker_mount_prefix}/~/ray_bootstrap_config"
    )
    # Bootstrap files are copied into the container via rsync over
    # `podman exec`.
    common_container_copy = f"rsync -e.*podman exec -i.*{docker_mount_prefix}/~/"
    runner.assert_has_call(
        "1.2.3.4", pattern=common_container_copy + "ray_bootstrap_key.pem"
    )
    runner.assert_has_call(
        "1.2.3.4", pattern=common_container_copy + "ray_bootstrap_config.yaml"
    )
    # Since podman is configured, docker must never be invoked.
    for cmd in runner.command_history():
        assert "docker" not in cmd, "Docker (not podman) found in call: " f"{cmd}"
    runner.assert_has_call("1.2.3.4", "podman inspect")
    runner.assert_has_call("1.2.3.4", "podman exec")
def testGetOrCreateHeadNodeFromStopped(self):
    """Relaunch a stopped head node with Docker and verify bootstrap order.

    Reuses testGetOrCreateHeadNode() to create a head node, terminates it
    with cache_stopped=True so the provider marks it "stopped", then
    relaunches. Verifies init/setup commands re-run and that, for each
    bootstrap file, commands are executed in this order:
        1. mkdir -p {docker_mount_prefix}  (before `docker run`, since
           Docker would auto-create the folder with wrong permissions)
        2. rsync of the file to the host (over ssh)
        3. rsync of the file into the container
    """
    config = self.testGetOrCreateHeadNode()
    self.provider.cache_stopped = True
    existing_nodes = self.provider.non_terminated_nodes({})
    assert len(existing_nodes) == 1
    self.provider.terminate_node(existing_nodes[0])
    config_path = self.write_config(config)
    runner = MockProcessRunner()
    runner.respond_to_call("json .Mounts", ["[]"])
    # Two initial calls to rsync, + 2 more calls during run_init
    runner.respond_to_call(".State.Running", ["false", "false", "false", "false"])
    runner.respond_to_call("json .Config.Env", ["[]"])
    commands.get_or_create_head_node(
        config,
        printable_config_file=config_path,
        no_restart=False,
        restart_only=False,
        yes=True,
        override_cluster_name=None,
        _provider=self.provider,
        _runner=runner,
    )
    self.waitForNodes(1)
    # Init & Setup commands must be run for Docker!
    runner.assert_has_call("1.2.3.4", "init_cmd")
    runner.assert_has_call("1.2.3.4", "head_setup_cmd")
    runner.assert_has_call("1.2.3.4", "start_ray_head")
    self.assertEqual(self.provider.mock_nodes["0"].node_type, "head")
    runner.assert_has_call("1.2.3.4", pattern="docker run")

    docker_mount_prefix = get_docker_host_mount_location(
        SMALL_CLUSTER["cluster_name"]
    )
    runner.assert_not_has_call(
        "1.2.3.4", pattern=f"-v {docker_mount_prefix}/~/ray_bootstrap_config"
    )
    common_container_copy = f"rsync -e.*docker exec -i.*{docker_mount_prefix}/~/"
    runner.assert_has_call(
        "1.2.3.4", pattern=common_container_copy + "ray_bootstrap_key.pem"
    )
    runner.assert_has_call(
        "1.2.3.4", pattern=common_container_copy + "ray_bootstrap_config.yaml"
    )

    # This next section of code ensures that the following order of
    # commands are executed:
    # 1. mkdir -p {docker_mount_prefix}
    # 2. rsync bootstrap files (over ssh)
    # 3. rsync bootstrap files into container
    commands_with_mount = [
        (i, cmd)
        for i, cmd in enumerate(runner.command_history())
        if docker_mount_prefix in cmd
    ]
    rsync_commands = [x for x in commands_with_mount if "rsync --rsh" in x[1]]
    copy_into_container = [
        x
        for x in commands_with_mount
        if re.search("rsync -e.*docker exec -i", x[1])
    ]
    first_mkdir = min(x[0] for x in commands_with_mount if "mkdir" in x[1])
    docker_run_cmd_indx = [
        i for i, cmd in enumerate(runner.command_history()) if "docker run" in cmd
    ][0]
    for file_to_check in ["ray_bootstrap_config.yaml", "ray_bootstrap_key.pem"]:
        # BUG FIX: filter the ssh-rsync commands by the file currently under
        # test. Previously this was hard-coded to "ray_bootstrap_config.yaml",
        # so the rsync/copy ordering for ray_bootstrap_key.pem was never
        # actually verified.
        first_rsync = min(x[0] for x in rsync_commands if file_to_check in x[1])
        first_cp = min(x[0] for x in copy_into_container if file_to_check in x[1])
        # Ensures that `mkdir -p` precedes `docker run` because Docker
        # will auto-create the folder with wrong permissions.
        assert first_mkdir < docker_run_cmd_indx
        # Ensures that the folder is created before running rsync.
        assert first_mkdir < first_rsync
        # Checks that the file is present before copying into the container
        assert first_rsync < first_cp
def testGetOrCreateHeadNodeFromStoppedRestartOnly(self):
    """Relaunch a stopped head node with `restart_only=True`.

    Even in restart-only mode, the Docker init and setup commands must
    still run because the stopped node's container is gone.
    """
    config = self.testGetOrCreateHeadNode()
    self.provider.cache_stopped = True
    existing_nodes = self.provider.non_terminated_nodes({})
    assert len(existing_nodes) == 1
    self.provider.terminate_node(existing_nodes[0])
    config_path = self.write_config(config)
    runner = MockProcessRunner()
    runner.respond_to_call("json .Mounts", ["[]"])
    # Two initial calls to rsync, + 2 more calls during run_init
    runner.respond_to_call(".State.Running", ["false", "false", "false", "false"])
    runner.respond_to_call("json .Config.Env", ["[]"])
    commands.get_or_create_head_node(
        config,
        printable_config_file=config_path,
        no_restart=False,
        restart_only=True,
        yes=True,
        override_cluster_name=None,
        _provider=self.provider,
        _runner=runner,
    )
    self.waitForNodes(1)
    # Init & Setup commands must be run for Docker!
    runner.assert_has_call("1.2.3.4", "init_cmd")
    runner.assert_has_call("1.2.3.4", "head_setup_cmd")
    runner.assert_has_call("1.2.3.4", "start_ray_head")
def testDockerFileMountsAdded(self):
    """Adding a file mount to the YAML must restart the running container.

    The mock container reports only the /sys bind mount, while the config
    now requests an extra file mount, so the launcher should `docker stop`
    and re-`docker run` with the new mounts.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["file_mounts"] = {"source": "/dev/null"}
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    # Existing container mounts as reported by `docker inspect`.
    mounts = [
        {
            "Type": "bind",
            "Source": "/sys",
            "Destination": "/sys",
            "Mode": "ro",
            "RW": False,
            "Propagation": "rprivate",
        }
    ]
    runner.respond_to_call("json .Mounts", [json.dumps(mounts)])
    # Two initial calls to rsync, +1 more call during run_init
    runner.respond_to_call(".State.Running", ["false", "false", "true", "true"])
    runner.respond_to_call("json .Config.Env", ["[]"])
    commands.get_or_create_head_node(
        config,
        printable_config_file=config_path,
        no_restart=False,
        restart_only=False,
        yes=True,
        override_cluster_name=None,
        _provider=self.provider,
        _runner=runner,
    )
    self.waitForNodes(1)
    runner.assert_has_call("1.2.3.4", "init_cmd")
    runner.assert_has_call("1.2.3.4", "head_setup_cmd")
    runner.assert_has_call("1.2.3.4", "start_ray_head")
    self.assertEqual(self.provider.mock_nodes["0"].node_type, "head")
    # Mount mismatch forces a container restart.
    runner.assert_has_call("1.2.3.4", pattern="docker stop")
    runner.assert_has_call("1.2.3.4", pattern="docker run")

    docker_mount_prefix = get_docker_host_mount_location(
        SMALL_CLUSTER["cluster_name"]
    )
    runner.assert_not_has_call(
        "1.2.3.4", pattern=f"-v {docker_mount_prefix}/~/ray_bootstrap_config"
    )
    common_container_copy = f"rsync -e.*docker exec -i.*{docker_mount_prefix}/~/"
    runner.assert_has_call(
        "1.2.3.4", pattern=common_container_copy + "ray_bootstrap_key.pem"
    )
    runner.assert_has_call(
        "1.2.3.4", pattern=common_container_copy + "ray_bootstrap_config.yaml"
    )
def testDockerFileMountsRemoved(self):
    """Removing a file mount from the YAML must NOT restart the container.

    The container keeps its existing mounts; dropping a mount from the
    config alone is not grounds for a `docker stop` / `docker run` cycle.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["file_mounts"] = {}
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    # Existing container mounts as reported by `docker inspect`.
    mounts = [
        {
            "Type": "bind",
            "Source": "/sys",
            "Destination": "/sys",
            "Mode": "ro",
            "RW": False,
            "Propagation": "rprivate",
        }
    ]
    runner.respond_to_call("json .Mounts", [json.dumps(mounts)])
    # Two initial calls to rsync, +1 more call during run_init
    runner.respond_to_call(".State.Running", ["false", "false", "true", "true"])
    runner.respond_to_call("json .Config.Env", ["[]"])
    commands.get_or_create_head_node(
        config,
        printable_config_file=config_path,
        no_restart=False,
        restart_only=False,
        yes=True,
        override_cluster_name=None,
        _provider=self.provider,
        _runner=runner,
    )
    self.waitForNodes(1)
    runner.assert_has_call("1.2.3.4", "init_cmd")
    runner.assert_has_call("1.2.3.4", "head_setup_cmd")
    runner.assert_has_call("1.2.3.4", "start_ray_head")
    self.assertEqual(self.provider.mock_nodes["0"].node_type, "head")
    # We only removed a mount from the YAML, no changes should happen.
    runner.assert_not_has_call("1.2.3.4", pattern="docker stop")
    runner.assert_not_has_call("1.2.3.4", pattern="docker run")

    docker_mount_prefix = get_docker_host_mount_location(
        SMALL_CLUSTER["cluster_name"]
    )
    runner.assert_not_has_call(
        "1.2.3.4", pattern=f"-v {docker_mount_prefix}/~/ray_bootstrap_config"
    )
    common_container_copy = f"rsync -e.*docker exec -i.*{docker_mount_prefix}/~/"
    runner.assert_has_call(
        "1.2.3.4", pattern=common_container_copy + "ray_bootstrap_key.pem"
    )
    runner.assert_has_call(
        "1.2.3.4", pattern=common_container_copy + "ray_bootstrap_config.yaml"
    )
def testRsyncCommandWithDocker(self):
    """commands.rsync must wrap transfers in `docker exec` when a container
    is configured.

    Checked for three targeting modes: default (head node), explicit
    external ip, and explicit internal ip.
    """
    assert SMALL_CLUSTER["docker"]["container_name"]
    config_path = self.write_config(SMALL_CLUSTER)
    self.provider = MockProvider(unique_ips=True)
    self.provider.create_node(
        {}, {TAG_RAY_NODE_KIND: "head", TAG_RAY_NODE_STATUS: "up-to-date"}, 1
    )
    self.provider.create_node(
        {}, {TAG_RAY_NODE_KIND: "worker", TAG_RAY_NODE_STATUS: "up-to-date"}, 10
    )
    self.provider.finish_starting_nodes()
    # Patch provider/config resolution so commands.rsync uses our mocks.
    ray.autoscaler.node_provider._get_node_provider = Mock(
        return_value=self.provider
    )
    ray.autoscaler._private.commands._bootstrap_config = Mock(
        return_value=SMALL_CLUSTER
    )
    runner = MockProcessRunner()

    # Default target: the head node (first unique ip, 1.2.3.0).
    commands.rsync(
        config_path,
        source=config_path,
        target="/tmp/test_path",
        override_cluster_name=None,
        down=True,
        _runner=runner,
    )
    runner.assert_has_call("1.2.3.0", pattern="rsync -e.*docker exec -i")
    runner.assert_has_call("1.2.3.0", pattern="rsync --rsh")
    runner.clear_history()

    # Explicit external ip.
    commands.rsync(
        config_path,
        source=config_path,
        target="/tmp/test_path",
        override_cluster_name=None,
        down=True,
        ip_address="1.2.3.5",
        _runner=runner,
    )
    runner.assert_has_call("1.2.3.5", pattern="rsync -e.*docker exec -i")
    runner.assert_has_call("1.2.3.5", pattern="rsync --rsh")
    runner.clear_history()

    # Explicit internal ip.
    commands.rsync(
        config_path,
        source=config_path,
        target="/tmp/test_path",
        ip_address="172.0.0.4",
        override_cluster_name=None,
        down=True,
        use_internal_ip=True,
        _runner=runner,
    )
    runner.assert_has_call("172.0.0.4", pattern="rsync -e.*docker exec -i")
    runner.assert_has_call("172.0.0.4", pattern="rsync --rsh")
def testRsyncCommandWithoutDocker(self):
    """commands.rsync with docker disabled: plain rsync, no `docker exec`.

    Same three targeting modes as testRsyncCommandWithDocker, but only a
    bare rsync call is expected for each.
    """
    cluster_cfg = copy.deepcopy(SMALL_CLUSTER)
    cluster_cfg["docker"] = {}
    config_path = self.write_config(cluster_cfg)
    self.provider = MockProvider(unique_ips=True)
    self.provider.create_node(
        {}, {TAG_RAY_NODE_KIND: "head", TAG_RAY_NODE_STATUS: "up-to-date"}, 1
    )
    self.provider.create_node(
        {}, {TAG_RAY_NODE_KIND: "worker", TAG_RAY_NODE_STATUS: "up-to-date"}, 10
    )
    self.provider.finish_starting_nodes()
    runner = MockProcessRunner()
    # Patch provider/config resolution so commands.rsync uses our mocks.
    ray.autoscaler.node_provider._get_node_provider = Mock(
        return_value=self.provider
    )
    ray.autoscaler._private.commands._bootstrap_config = Mock(
        return_value=cluster_cfg
    )
    # Default target: the head node (first unique ip, 1.2.3.0).
    commands.rsync(
        config_path,
        source=config_path,
        target="/tmp/test_path",
        override_cluster_name=None,
        down=True,
        _runner=runner,
    )
    runner.assert_has_call("1.2.3.0", pattern="rsync")

    # Explicit external ip.
    commands.rsync(
        config_path,
        source=config_path,
        target="/tmp/test_path",
        override_cluster_name=None,
        down=True,
        ip_address="1.2.3.5",
        _runner=runner,
    )
    runner.assert_has_call("1.2.3.5", pattern="rsync")
    runner.clear_history()

    # Explicit internal ip.
    commands.rsync(
        config_path,
        source=config_path,
        target="/tmp/test_path",
        override_cluster_name=None,
        down=True,
        ip_address="172.0.0.4",
        use_internal_ip=True,
        _runner=runner,
    )
    runner.assert_has_call("172.0.0.4", pattern="rsync")
    runner.clear_history()
def testSummarizerFailedCreate(self):
    """Checks that event summarizer reports failed node creation."""
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 2
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    # Every subsequent create_node call will raise this exception.
    self.provider.creation_error = Exception(":(")
    autoscaler = MockAutoscaler(
        config_path,
        LoadMetrics(),
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    assert len(self.provider.non_terminated_nodes(WORKER_FILTER)) == 0
    autoscaler.update()

    # Expect the next two messages in the logs.
    msg = "Failed to launch 2 node(s) of type worker."

    def expected_message_logged():
        return msg in autoscaler.event_summarizer.summary()

    self.waitFor(expected_message_logged)
def testSummarizerFailedCreateStructuredError(self):
    """Checks that event summarizer reports failed node creation with
    additional details when the node provider throws a
    NodeLaunchException."""
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 2
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    # Structured launch failure with an underlying exception attached.
    self.provider.creation_error = NodeLaunchException(
        "didn't work", "never did", exc_info
    )
    autoscaler = MockAutoscaler(
        config_path,
        LoadMetrics(),
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    assert len(self.provider.non_terminated_nodes(WORKER_FILTER)) == 0
    autoscaler.update()

    # Expect the next message in the logs.
    msg = "Failed to launch 2 node(s) of type worker. (didn't work): never did."

    def expected_message_logged():
        print(autoscaler.event_summarizer.summary())
        return msg in autoscaler.event_summarizer.summary()

    self.waitFor(expected_message_logged)
def testSummarizerFailedCreateStructuredErrorNoUnderlyingException(self):
    """Checks that event summarizer reports failed node creation with
    additional details when the node provider throws a
    NodeLaunchException."""
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 2
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    # Structured launch failure WITHOUT an underlying exception: the
    # summary message must be the same as with one.
    self.provider.creation_error = NodeLaunchException(
        "didn't work", "never did", src_exc_info=None
    )
    autoscaler = MockAutoscaler(
        config_path,
        LoadMetrics(),
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    assert len(self.provider.non_terminated_nodes(WORKER_FILTER)) == 0
    autoscaler.update()

    # Expect the next message in the logs.
    msg = "Failed to launch 2 node(s) of type worker. (didn't work): never did."

    def expected_message_logged():
        print(autoscaler.event_summarizer.summary())
        return msg in autoscaler.event_summarizer.summary()

    self.waitFor(expected_message_logged)
def testReadonlyNodeProvider(self):
    """With a ReadOnlyNodeProvider the autoscaler must never mutate nodes.

    It should neither launch nor stop nodes nor run any commands; it only
    reflects the node set the provider reports.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 2
    config_path = self.write_config(config)
    self.provider = ReadOnlyNodeProvider(config_path, "readonly")
    runner = MockProcessRunner()
    mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
    autoscaler = StandardAutoscaler(
        config_path,
        LoadMetrics(),
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    assert len(self.provider.non_terminated_nodes({})) == 0
    # No updates in read-only mode.
    autoscaler.update()
    self.waitForNodes(0)
    assert mock_metrics.started_nodes.inc.call_count == 0
    assert len(runner.calls) == 0

    # Reflect updates to the readonly provider.
    self.provider._set_nodes(
        [
            ("foo1", "1.1.1.1"),
            ("foo2", "1.1.1.1"),
            ("foo3", "1.1.1.1"),
        ]
    )
    # No updates in read-only mode.
    autoscaler.update()
    self.waitForNodes(3)
    assert mock_metrics.started_nodes.inc.call_count == 0
    assert mock_metrics.stopped_nodes.inc.call_count == 0
    assert mock_metrics.drain_node_exceptions.inc.call_count == 0
    assert len(runner.calls) == 0
    events = autoscaler.event_summarizer.summary()
    assert not events, events
def ScaleUpHelper(self, disable_node_updaters):
    """Scale up to 2 workers and check metrics and node-updater behavior.

    Args:
        disable_node_updaters: If True, node updater threads must never be
            created (enforced by NoUpdaterMockAutoscaler) and workers stay
            in STATUS_UNINITIALIZED; otherwise updaters run (and fail,
            since the mock runner does no real setup work).
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 2
    config["provider"]["disable_node_updaters"] = disable_node_updaters
    # Write the config once, after all mutations. (Previously the config
    # was also written before setting disable_node_updaters; that first
    # write was immediately overwritten and served no purpose.)
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    if disable_node_updaters:
        # This class raises an assertion error if we try to create
        # a node updater thread.
        autoscaler_class = NoUpdaterMockAutoscaler
    else:
        autoscaler_class = MockAutoscaler
    autoscaler = autoscaler_class(
        config_path,
        LoadMetrics(),
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    assert len(self.provider.non_terminated_nodes(WORKER_FILTER)) == 0
    autoscaler.update()
    self.waitForNodes(2, tag_filters=WORKER_FILTER)

    # started_nodes metric should have been incremented by 2
    assert mock_metrics.started_nodes.inc.call_count == 1
    mock_metrics.started_nodes.inc.assert_called_with(2)
    assert mock_metrics.worker_create_node_time.observe.call_count == 2
    autoscaler.update()
    # The two autoscaler update iterations in this test led to two
    # observations of the update time.
    assert mock_metrics.update_time.observe.call_count == 2
    self.waitForNodes(2, tag_filters=WORKER_FILTER)

    # running_workers metric should be set to 2
    mock_metrics.running_workers.set.assert_called_with(2)

    if disable_node_updaters:
        # Node Updaters have NOT been invoked because they were explicitly
        # disabled.
        assert len(runner.calls) == 0
        # Nodes were created in uninitialized state and not updated.
        self.waitForNodes(
            2,
            tag_filters={
                TAG_RAY_NODE_STATUS: STATUS_UNINITIALIZED,
                **WORKER_FILTER,
            },
        )
    else:
        # Node Updaters have been invoked.
        self.waitFor(lambda: len(runner.calls) > 0)
        # The updates failed. Key thing is that the updates completed.
        self.waitForNodes(
            2,
            tag_filters={
                TAG_RAY_NODE_STATUS: STATUS_UPDATE_FAILED,
                **WORKER_FILTER,
            },
        )
    assert mock_metrics.drain_node_exceptions.inc.call_count == 0
def testScaleUp(self):
    """Scale-up with node updaters enabled (see ScaleUpHelper)."""
    self.ScaleUpHelper(disable_node_updaters=False)
def testScaleUpNoUpdaters(self):
    """Scale-up with node updaters disabled (see ScaleUpHelper)."""
    self.ScaleUpHelper(disable_node_updaters=True)
def testTerminateOutdatedNodesGracefully(self):
    """10 outdated workers scale down gracefully to the 5-node target.

    The autoscaler must replace outdated nodes incrementally — never
    dipping below 4 workers nor exceeding 5 during the transition — and
    emit a single "outdated" removal event for the 10 replaced nodes.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 5
    config["max_workers"] = 5
    config["available_node_types"]["worker"]["max_workers"] = 5
    config_path = self.write_config(config)
    self.provider = MockProvider()
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    # 10 pre-existing workers, all of which are outdated relative to the
    # current config (launched outside the autoscaler).
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: "worker",
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "worker",
        },
        10,
    )
    runner = MockProcessRunner()
    runner.respond_to_call("json .Config.Env", ["[]" for i in range(10)])
    mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
    lm = LoadMetrics()
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    self.waitForNodes(10, tag_filters=WORKER_FILTER)
    fill_in_node_ids(self.provider, lm)

    # Gradually scales down to meet target size, never going too low
    for _ in range(10):
        autoscaler.update()
        self.waitForNodes(
            5, comparison=self.assertLessEqual, tag_filters=WORKER_FILTER
        )
        self.waitForNodes(
            4, comparison=self.assertGreaterEqual, tag_filters=WORKER_FILTER
        )

    # Eventually reaches steady state
    self.waitForNodes(5, tag_filters=WORKER_FILTER)

    # Check the outdated node removal event is generated.
    autoscaler.update()
    events = autoscaler.event_summarizer.summary()
    assert "Removing 10 nodes of type worker (outdated)." in events, events
    assert mock_metrics.stopped_nodes.inc.call_count == 10
    mock_metrics.started_nodes.inc.assert_called_with(5)
    assert mock_metrics.worker_create_node_time.observe.call_count == 5
    assert mock_metrics.drain_node_exceptions.inc.call_count == 0
# Parameterization functionality in the unittest module is not great.
# To test scale-down behavior, we parameterize the DynamicScaling test
# manually over outcomes for the DrainNode RPC call.
def testDynamicScaling1(self):
    """Dynamic scaling with every DrainNode RPC succeeding."""
    self.helperDynamicScaling(DrainNodeOutcome.Succeeded)
def testDynamicScaling2(self):
    """Dynamic scaling when DrainNode drains only some nodes."""
    self.helperDynamicScaling(DrainNodeOutcome.NotAllDrained)
def testDynamicScaling3(self):
    """Dynamic scaling when DrainNode is unimplemented on the server."""
    self.helperDynamicScaling(DrainNodeOutcome.Unimplemented)
def testDynamicScaling4(self):
    """Dynamic scaling when DrainNode fails with a generic RPC error."""
    self.helperDynamicScaling(DrainNodeOutcome.GenericRpcError)
def testDynamicScaling5(self):
    """Dynamic scaling when DrainNode raises a generic exception."""
    self.helperDynamicScaling(DrainNodeOutcome.GenericException)
def testDynamicScaling6(self):
    """Dynamic scaling when the node ip cannot be resolved for draining."""
    self.helperDynamicScaling(DrainNodeOutcome.FailedToFindIp)
def testDynamicScalingForegroundLauncher(self):
    """Test autoscaling with node launcher in the foreground."""
    # Uses the default drain outcome (Succeeded).
    self.helperDynamicScaling(foreground_node_launcher=True)
def testDynamicScalingBatchingNodeProvider(self):
    """Test autoscaling with BatchingNodeProvider"""
    # BatchingNodeProvider requires foreground node launch (asserted in
    # _helperDynamicScaling).
    self.helperDynamicScaling(
        foreground_node_launcher=True, batching_node_provider=True
    )
def helperDynamicScaling(
    self,
    drain_node_outcome: DrainNodeOutcome = DrainNodeOutcome.Succeeded,
    foreground_node_launcher: bool = False,
    batching_node_provider: bool = False,
):
    """Run the dynamic-scaling scenario and verify DrainNode error handling.

    Delegates the scaling logic to _helperDynamicScaling, then asserts
    that the DrainNode call counts / exception metrics match the
    configured outcome.
    """
    mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
    mock_gcs_client = MockGcsClient(drain_node_outcome)

    # Run the core of the test logic.
    self._helperDynamicScaling(
        mock_metrics,
        mock_gcs_client,
        foreground_node_launcher=foreground_node_launcher,
        batching_node_provider=batching_node_provider,
    )

    # Make assertions about DrainNode error handling during scale-down.
    if drain_node_outcome == DrainNodeOutcome.Succeeded:
        # DrainNode call was made.
        assert mock_gcs_client.drain_node_call_count > 0
        # No drain node exceptions.
        assert mock_metrics.drain_node_exceptions.inc.call_count == 0
        # Each drain node call succeeded.
        assert (
            mock_gcs_client.drain_node_reply_success
            == mock_gcs_client.drain_node_call_count
        )
    elif drain_node_outcome == DrainNodeOutcome.Unimplemented:
        # DrainNode call was made.
        assert mock_gcs_client.drain_node_call_count > 0
        # All errors were suppressed.
        assert mock_metrics.drain_node_exceptions.inc.call_count == 0
        # Every call failed.
        assert mock_gcs_client.drain_node_reply_success == 0
    elif drain_node_outcome in (
        DrainNodeOutcome.GenericRpcError,
        DrainNodeOutcome.GenericException,
    ):
        # DrainNode call was made.
        assert mock_gcs_client.drain_node_call_count > 0
        # We encountered an exception.
        assert mock_metrics.drain_node_exceptions.inc.call_count > 0
        # Every call failed.
        assert (
            mock_metrics.drain_node_exceptions.inc.call_count
            == mock_gcs_client.drain_node_call_count
        )
        assert mock_gcs_client.drain_node_reply_success == 0
    elif drain_node_outcome == DrainNodeOutcome.FailedToFindIp:
        # We never called the drain node api because we were unable to
        # fetch ips
        assert mock_gcs_client.drain_node_call_count == 0
        # We encountered an exception fetching ip.
        assert mock_metrics.drain_node_exceptions.inc.call_count > 0
def _helperDynamicScaling(
    self,
    mock_metrics,
    mock_gcs_client,
    foreground_node_launcher=False,
    batching_node_provider=False,
):
    """Core dynamic-scaling scenario: scale 0 -> 2 -> 1 -> 10 workers.

    Launches min_workers=2, shrinks to max_workers=1 (checking the
    scale-down event and stopped_nodes metric), then grows to
    min_workers=10, exercising either the background or foreground node
    launcher, and optionally the batching node provider (which also tests
    aborting an update via safe_to_scale_flag).
    """
    if batching_node_provider:
        assert (
            foreground_node_launcher
        ), "BatchingNodeProvider requires foreground node launch."
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 2
    if foreground_node_launcher:
        config["provider"][FOREGROUND_NODE_LAUNCH_KEY] = True
    if batching_node_provider:
        config["provider"][FOREGROUND_NODE_LAUNCH_KEY] = True
        config["provider"][DISABLE_LAUNCH_CONFIG_CHECK_KEY] = True
        config["provider"][DISABLE_NODE_UPDATERS_KEY] = True
    config_path = self.write_config(config)
    if batching_node_provider:
        self.provider = MockBatchingNodeProvider(
            provider_config={
                DISABLE_LAUNCH_CONFIG_CHECK_KEY: True,
                DISABLE_NODE_UPDATERS_KEY: True,
                FOREGROUND_NODE_LAUNCH_KEY: True,
            },
            cluster_name="test-cluster",
        )
    else:
        self.provider = MockProvider()
    runner = MockProcessRunner()
    runner.respond_to_call("json .Config.Env", ["[]" for i in range(12)])
    lm = LoadMetrics()

    # As part of setup for this test, ensure there is a head node.
    if batching_node_provider:
        # MockBatchingNodeProvider creates a head node in the __init__ method.
        pass
    else:
        # MockProvider needs to create a head node with create_node.
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
    lm.update("172.0.0.0", mock_node_id(), {"CPU": 1}, {"CPU": 0}, 0)
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        mock_gcs_client,
        max_launch_batch=5,
        max_concurrent_launches=5,
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    if mock_gcs_client.drain_node_outcome == DrainNodeOutcome.FailedToFindIp:
        autoscaler.fail_to_find_ip_during_drain = True
    self.waitForNodes(0, tag_filters=WORKER_FILTER)

    # Test aborting an autoscaler update with the batching NodeProvider.
    if batching_node_provider:
        self.provider.safe_to_scale_flag = False
        autoscaler.update()
        # The autoscaler update was aborted, so there's no change in worker count.
        assert self.num_nodes(tag_filters=WORKER_FILTER) == 0
        self.provider.safe_to_scale_flag = True

    autoscaler.update()
    if foreground_node_launcher:
        # If we launched in the foreground, shouldn't need to wait for nodes
        # to be available. (Node creation should block.)
        assert self.num_nodes(tag_filters=WORKER_FILTER) == 2, (
            self.provider.non_terminated_nodes(tag_filters=WORKER_FILTER),
            self.provider.non_terminated_nodes(tag_filters={}),
        )
    else:
        self.waitForNodes(2, tag_filters=WORKER_FILTER)

    # Update the config to reduce the cluster size
    new_config = copy.deepcopy(SMALL_CLUSTER)
    new_config["max_workers"] = 1
    new_config["available_node_types"]["worker"]["max_workers"] = 1
    new_config["available_node_types"]["worker"]["min_workers"] = 1
    self.write_config(new_config)
    fill_in_node_ids(self.provider, lm)
    autoscaler.update()
    self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})

    # Check the scale-down event is generated.
    events = autoscaler.event_summarizer.summary()
    assert "Removing 1 nodes of type worker (max_workers_per_type)." in events
    assert mock_metrics.stopped_nodes.inc.call_count == 1

    # Update the config to increase the cluster size
    new_config["available_node_types"]["worker"]["min_workers"] = 10
    new_config["available_node_types"]["worker"]["max_workers"] = 10
    new_config["max_workers"] = 10
    self.write_config(new_config)
    autoscaler.update()
    # TODO(rickyx): This is a hack to avoid running into race conditions
    # within v1 autoscaler. These should no longer be relevant in v2.
    time.sleep(3)

    # Because one worker already started, the scheduler waits for its
    # resources to be updated before it launches the remaining min_workers.
    worker_ip = self.provider.non_terminated_node_ips(
        tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER},
    )[0]
    lm.update(
        worker_ip, mock_node_id(), {"CPU": 1}, {"CPU": 1}, DUMMY_IDLE_DURATION_S
    )
    autoscaler.update()
    # TODO(rickyx): This is a hack to avoid running into race conditions
    # within v1 autoscaler. These should no longer be relevant in v2.
    time.sleep(3)
    if foreground_node_launcher:
        # If we launched in the foreground, shouldn't need to wait for nodes
        # to be available. (Node creation should block.)
        assert self.num_nodes(tag_filters=WORKER_FILTER) == 10
    else:
        self.waitForNodes(10, tag_filters=WORKER_FILTER)

    # Awkward and unnecessary to repeat the following check for BatchingNodeProvider.
    if not batching_node_provider:
        # Verify that worker nodes were launched in the main thread if foreground
        # node launch is enabled, in a subthread otherwise.
        self.worker_node_thread_check(foreground_node_launcher)

    autoscaler.update()
    assert mock_metrics.running_workers.set.call_args_list[-1][0][0] >= 10
# def testAggressiveAutoscaling(self):
# self._aggressiveAutoscalingHelper()
# def testAggressiveAutoscalingWithForegroundLauncher(self):
# self._aggressiveAutoscalingHelper(foreground_node_launcher=True)
# def _aggressiveAutoscalingHelper(self, foreground_node_launcher: bool = False):
# config = copy.deepcopy(SMALL_CLUSTER)
# config["available_node_types"]["worker"]["min_workers"] = 0
# config["available_node_types"]["worker"]["max_workers"] = 10
# config["max_workers"] = 10
# config["idle_timeout_minutes"] = 0
# config["upscaling_speed"] = config["available_node_types"]["worker"][
# "max_workers"
# ]
# if foreground_node_launcher:
# config["provider"][FOREGROUND_NODE_LAUNCH_KEY] = True
# config_path = self.write_config(config)
# self.provider = MockProvider()
# self.provider.create_node(
# {},
# {
# TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
# TAG_RAY_USER_NODE_TYPE: "head",
# },
# 1,
# )
# head_ip = self.provider.non_terminated_node_ips(
# tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_HEAD},
# )[0]
# runner = MockProcessRunner()
# runner.respond_to_call("json .Config.Env", ["[]" for i in range(11)])
# lm = LoadMetrics()
# autoscaler = MockAutoscaler(
# config_path,
# lm,
# MockGcsClient(),
# max_launch_batch=5,
# max_concurrent_launches=5,
# max_failures=0,
# process_runner=runner,
# update_interval_s=0,
# )
# self.waitForNodes(1)
# lm.update(
# head_ip,
# mock_node_id(),
# {"CPU": 1},
# {"CPU": 0},
# waiting_bundles=[{"CPU": 1}] * 7,
# infeasible_bundles=[{"CPU": 1}] * 3,
# )
# autoscaler.update()
# if foreground_node_launcher:
# # No wait if node launch is blocking and happens in the foreground.
# assert self.num_nodes() == 11
# else:
# self.waitForNodes(11)
# self.worker_node_thread_check(foreground_node_launcher)
# worker_ips = self.provider.non_terminated_node_ips(
# tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER},
# )
# for ip in worker_ips:
# # Mark workers inactive.
# lm.last_used_time_by_ip[ip] = 0
# # Clear the resource demands.
# # Otherwise in "foreground launcher" mode, workers would be deleted
# # for being idle and instantly re-created due to resource demand!
# lm.update(
# head_ip,
# mock_node_id(),
# {},
# {},
# waiting_bundles=[],
# infeasible_bundles=[],
# )
# autoscaler.update()
# self.waitForNodes(1) # only the head node
# # Make sure they don't get overwritten.
# assert autoscaler.resource_demand_scheduler.node_types["head"]["resources"]
# == {
# "CPU": 1
# }
# assert autoscaler.resource_demand_scheduler.node_types["worker"][
# "resources"
# ] == {"CPU": 1}
    def testUnmanagedNodes(self):
        """Nodes the autoscaler does not manage are tolerated, not touched.

        Creates a head node plus a node tagged "unmanaged", then checks:
        the unmanaged node is neither terminated nor counted as a worker,
        and once real resource demand appears a worker is still launched.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 0
        config["available_node_types"]["worker"]["max_workers"] = 20
        config["max_workers"] = 20
        # Idle nodes become eligible for termination immediately.
        config["idle_timeout_minutes"] = 0
        config["upscaling_speed"] = 9999
        config_path = self.write_config(config)
        self.provider = MockProvider()
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: "head",
                TAG_RAY_USER_NODE_TYPE: "head",
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            },
            1,
        )
        head_ip = self.provider.non_terminated_node_ips(
            tag_filters={TAG_RAY_NODE_KIND: "head"},
        )[0]
        # A node whose kind tag is neither head nor worker.
        self.provider.create_node({}, {TAG_RAY_NODE_KIND: "unmanaged"}, 1)
        unmanaged_ip = self.provider.non_terminated_node_ips(
            tag_filters={TAG_RAY_NODE_KIND: "unmanaged"},
        )[0]
        runner = MockProcessRunner()
        lm = LoadMetrics()
        lm.local_ip = head_ip
        autoscaler = MockAutoscaler(
            config_path,
            lm,
            MockGcsClient(),
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
        )
        autoscaler.update()
        # Head + unmanaged node; no workers yet.
        self.waitForNodes(2)
        # This node has num_cpus=0
        lm.update(head_ip, mock_node_id(), {"CPU": 1}, {"CPU": 0}, 0)
        lm.update(
            unmanaged_ip,
            mock_node_id(),
            {"CPU": 0},
            {"CPU": 0},
            DUMMY_IDLE_DURATION_S,
        )
        autoscaler.update()
        # Still no demand, so the node count is unchanged.
        self.waitForNodes(2)
        # 1 CPU task cannot be scheduled.
        lm.update(
            unmanaged_ip,
            mock_node_id(),
            {"CPU": 0},
            {"CPU": 0},
            DUMMY_IDLE_DURATION_S,
            waiting_bundles=[{"CPU": 1}],
        )
        autoscaler.update()
        # A worker is launched to satisfy the waiting {"CPU": 1} bundle.
        self.waitForNodes(3)
    def testUnmanagedNodes2(self):
        """No scale-up occurs while the resource demand vector is empty.

        Same head + unmanaged-node setup as testUnmanagedNodes, but the
        load reports carry no waiting bundles, so the autoscaler must
        leave the cluster at exactly two nodes.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 0
        config["available_node_types"]["worker"]["max_workers"] = 20
        config["max_workers"] = 20
        config["idle_timeout_minutes"] = 0
        config["upscaling_speed"] = 9999
        config_path = self.write_config(config)
        self.provider = MockProvider()
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: "head",
                TAG_RAY_USER_NODE_TYPE: "head",
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            },
            1,
        )
        head_ip = self.provider.non_terminated_node_ips(
            tag_filters={TAG_RAY_NODE_KIND: "head"},
        )[0]
        self.provider.create_node({}, {TAG_RAY_NODE_KIND: "unmanaged"}, 1)
        unmanaged_ip = self.provider.non_terminated_node_ips(
            tag_filters={TAG_RAY_NODE_KIND: "unmanaged"},
        )[0]
        runner = MockProcessRunner()
        lm = LoadMetrics()
        lm.local_ip = head_ip
        autoscaler = MockAutoscaler(
            config_path,
            lm,
            MockGcsClient(),
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
        )
        lm.update(head_ip, mock_node_id(), {"CPU": 1}, {"CPU": 0}, 0)
        lm.update(
            unmanaged_ip,
            mock_node_id(),
            {"CPU": 0},
            {"CPU": 0},
            DUMMY_IDLE_DURATION_S,
        )
        # Note that we shouldn't autoscale here because the resource demand
        # vector is not set and target utilization fraction = 1.
        autoscaler.update()
        # If the autoscaler was behaving incorrectly, it needs time to start
        # the new node, otherwise it could scale up after this check.
        time.sleep(0.2)
        self.waitForNodes(2)
    def testDelayedLaunch(self):
        """Blocked launches count as pending_launches, not as live nodes.

        While the provider's ready_to_create event is cleared, requested
        workers only appear in pending_launches; once the event is set
        they materialize. Finally, lowering max_workers scales back down.
        """
        config_path = self.write_config(SMALL_CLUSTER)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
        lm = LoadMetrics()
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        head_ip = self.provider.non_terminated_node_ips(
            tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_HEAD},
        )[0]
        autoscaler = MockAutoscaler(
            config_path,
            lm,
            MockGcsClient(),
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
        )
        # No workers exist before the first update.
        assert (
            len(
                self.provider.non_terminated_nodes(
                    {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
                )
            )
            == 0
        )
        # Update will try to create, but will block until we set the flag
        self.provider.ready_to_create.clear()
        lm.update(
            head_ip,
            mock_node_id(),
            {"CPU": 1},
            {"CPU": 0},
            0,
            waiting_bundles=[{"CPU": 1}] * 2,
        )
        autoscaler.update()
        # Launches are queued but blocked: still no worker nodes.
        assert (
            len(
                self.provider.non_terminated_nodes(
                    {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
                )
            )
            == 0
        )
        assert autoscaler.pending_launches.value == 2
        # Set the flag, check it updates
        self.provider.ready_to_create.set()
        self.waitForNodes(2, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
        assert autoscaler.pending_launches.value == 0
        # Update the config to reduce the cluster size
        new_config = copy.deepcopy(SMALL_CLUSTER)
        new_config["available_node_types"]["worker"]["max_workers"] = 1
        self.write_config(new_config)
        fill_in_node_ids(self.provider, lm)
        autoscaler.update()
        # One worker is terminated to respect the new max_workers=1.
        assert (
            len(
                self.provider.non_terminated_nodes(
                    {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
                )
            )
            == 1
        )
    def testDelayedLaunchWithMinWorkers(self):
        """min_workers launches all go to pending while creation is blocked.

        With min_workers=10, max_launch_batch=5 and max_concurrent_launches=8,
        a single update() queues all 10 launches; further updates do not
        stack more on top of the pending count. Once the provider is
        unblocked, exactly 10 workers appear.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 10
        config["available_node_types"]["worker"]["max_workers"] = 10
        config["max_workers"] = 10
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(10)])
        mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path,
            LoadMetrics(),
            MockGcsClient(),
            max_launch_batch=5,
            max_concurrent_launches=8,
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
            prom_metrics=mock_metrics,
        )
        assert (
            len(
                self.provider.non_terminated_nodes(
                    {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
                )
            )
            == 0
        )
        # update() should launch a wave of 5 nodes (max_launch_batch)
        # Force this first wave to block.
        rtc1 = self.provider.ready_to_create
        rtc1.clear()
        autoscaler.update()
        # Synchronization: wait for launchy thread to be blocked on rtc1
        # (reaches into threading.Event internals to observe the waiters).
        waiters = rtc1._cond._waiters
        self.waitFor(lambda: len(waiters) == 2)
        assert autoscaler.pending_launches.value == 10
        assert (
            len(
                self.provider.non_terminated_nodes(
                    {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
                )
            )
            == 0
        )
        autoscaler.update()
        self.waitForNodes(
            0, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
        )  # Nodes are not added on top of pending.
        rtc1.set()
        self.waitFor(lambda: autoscaler.pending_launches.value == 0)
        assert (
            len(
                self.provider.non_terminated_nodes(
                    {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
                )
            )
            == 10
        )
        self.waitForNodes(10, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
        assert autoscaler.pending_launches.value == 0
        # A further update must not launch or terminate anything.
        autoscaler.update()
        self.waitForNodes(10, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
        assert autoscaler.pending_launches.value == 0
        assert mock_metrics.drain_node_exceptions.inc.call_count == 0
    def testUpdateThrottling(self):
        """Config changes are not applied within update_interval_s.

        With update_interval_s=10, the second update() call is throttled,
        so reducing max_workers does not terminate workers immediately.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 2
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path,
            LoadMetrics(),
            MockGcsClient(),
            max_launch_batch=5,
            max_concurrent_launches=5,
            max_failures=0,
            process_runner=runner,
            update_interval_s=10,
        )
        autoscaler.update()
        self.waitForNodes(2, tag_filters=WORKER_FILTER)
        assert autoscaler.pending_launches.value == 0
        new_config = copy.deepcopy(SMALL_CLUSTER)
        new_config["max_workers"] = 1
        self.write_config(new_config)
        autoscaler.update()
        # not updated yet
        # note that node termination happens in the main thread, so
        # we do not need to add any delay here before checking
        assert len(self.provider.non_terminated_nodes(WORKER_FILTER)) == 2
        assert autoscaler.pending_launches.value == 0
    def testLaunchConfigChange(self):
        """Workers with an outdated launch config are replaced.

        Changing the worker InstanceType invalidates the existing workers:
        they are terminated (observed while provider creation is blocked)
        and recreated once creation is allowed again.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 2
        config_path = self.write_config(config)
        self.provider = MockProvider()
        lm = LoadMetrics()
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path, lm, MockGcsClient(), max_failures=0, update_interval_s=0
        )
        autoscaler.update()
        self.waitForNodes(2, tag_filters=WORKER_FILTER)
        # Update the config to change the node type
        new_config = copy.deepcopy(config)
        new_config["available_node_types"]["worker"]["node_config"][
            "InstanceType"
        ] = "updated"
        self.write_config(new_config)
        # Block creation so terminations are visible before replacements.
        self.provider.ready_to_create.clear()
        fill_in_node_ids(self.provider, lm)
        for _ in range(5):
            autoscaler.update()
        self.waitForNodes(0, tag_filters=WORKER_FILTER)
        self.provider.ready_to_create.set()
        self.waitForNodes(2, tag_filters=WORKER_FILTER)
    def testIgnoresCorruptedConfig(self):
        """A corrupted config file is ignored; the last good one stays live.

        Each failed reload increments the config_validation_exceptions
        metric, but the autoscaler keeps managing the cluster with its
        previously loaded configuration, and recovers when a valid config
        is written again.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 2
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(11)])
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        lm = LoadMetrics()
        lm.update("172.0.0.0", mock_node_id(), {"CPU": 1}, {"CPU": 0}, 0)
        mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
        autoscaler = MockAutoscaler(
            config_path,
            lm,
            MockGcsClient(),
            max_launch_batch=10,
            max_concurrent_launches=10,
            process_runner=runner,
            max_failures=0,
            update_interval_s=0,
            prom_metrics=mock_metrics,
        )
        autoscaler.update()
        assert mock_metrics.config_validation_exceptions.inc.call_count == 0
        self.waitForNodes(2, tag_filters=WORKER_FILTER)
        # Write a corrupted config
        self.write_config("asdf", call_prepare_config=False)
        for _ in range(10):
            autoscaler.update()
        # config validation exceptions metrics should be incremented 10 times
        assert mock_metrics.config_validation_exceptions.inc.call_count == 10
        time.sleep(0.1)
        # Cluster is unchanged despite the bad config.
        assert autoscaler.pending_launches.value == 0
        assert (
            len(
                self.provider.non_terminated_nodes(
                    {TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
                )
            )
            == 2
        )
        # Write a good config again
        new_config = copy.deepcopy(SMALL_CLUSTER)
        new_config["available_node_types"]["worker"]["min_workers"] = 10
        new_config["max_workers"] = 10
        new_config["available_node_types"]["worker"]["max_workers"] = 10
        self.write_config(new_config)
        worker_ip = self.provider.non_terminated_node_ips(
            tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER},
        )[0]
        # Because one worker already started, the scheduler waits for its
        # resources to be updated before it launches the remaining min_workers.
        lm.update(
            worker_ip, mock_node_id(), {"CPU": 1}, {"CPU": 1}, DUMMY_IDLE_DURATION_S
        )
        autoscaler.update()
        self.waitForNodes(10, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
        assert mock_metrics.drain_node_exceptions.inc.call_count == 0
def testMaxFailures(self):
config_path = self.write_config(SMALL_CLUSTER)
self.provider = MockProvider()
self.provider.throw = True
runner = MockProcessRunner()
mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=2,
process_runner=runner,
update_interval_s=0,
prom_metrics=mock_metrics,
)
autoscaler.update()
assert autoscaler.summary() is None
assert mock_metrics.update_loop_exceptions.inc.call_count == 1
autoscaler.update()
assert mock_metrics.update_loop_exceptions.inc.call_count == 2
with pytest.raises(Exception):
autoscaler.update()
assert mock_metrics.drain_node_exceptions.inc.call_count == 0
    def testLaunchNewNodeOnOutOfBandTerminate(self):
        """Workers terminated outside the autoscaler get replaced.

        Flips the provider-side state of both workers to "terminated"
        directly (simulating an out-of-band termination) and verifies the
        next update() launches replacements back up to min_workers.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 2
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(4)])
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        head_ip = self.provider.non_terminated_node_ips(
            tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_HEAD},
        )[0]
        autoscaler = MockAutoscaler(
            config_path,
            LoadMetrics(),
            MockGcsClient(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
        )
        autoscaler.update()
        autoscaler.update()
        self.waitForNodes(2, tag_filters=WORKER_FILTER)
        # Terminate all workers behind the autoscaler's back.
        for node in self.provider.mock_nodes.values():
            if node.internal_ip == head_ip:
                continue
            node.state = "terminated"
        assert len(self.provider.non_terminated_nodes(WORKER_FILTER)) == 0
        autoscaler.update()
        self.waitForNodes(2, tag_filters=WORKER_FILTER)
    def testConfiguresNewNodes(self):
        """Freshly launched workers are driven to STATUS_UP_TO_DATE.

        After the provider reports the nodes as started, the autoscaler's
        updaters run and the worker status tag converges to up-to-date.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 1
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path,
            LoadMetrics(),
            MockGcsClient(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
        )
        autoscaler.update()
        # TODO(rueian): This is a hack to avoid running into race conditions
        # within v1 autoscaler. These should no longer be relevant in v2.
        self.waitForNodes(2)
        autoscaler.update()
        self.waitForNodes(2)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
def testReportsConfigFailures(self):
config = copy.deepcopy(SMALL_CLUSTER)
config["available_node_types"]["worker"]["min_workers"] = 2
config_path = self.write_config(config)
config["provider"]["type"] = "mock"
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner(fail_cmds=["setup_cmd"])
runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
lm = LoadMetrics()
self.provider.create_node(
{},
{
TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
TAG_RAY_USER_NODE_TYPE: "head",
},
1,
)
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
process_runner=runner,
update_interval_s=0,
)
autoscaler.update()
self.waitForNodes(2, tag_filters=WORKER_FILTER)
self.provider.finish_starting_nodes()
fill_in_node_ids(self.provider, lm)
autoscaler.update()
try:
self.waitForNodes(
2,
tag_filters={
TAG_RAY_NODE_STATUS: STATUS_UPDATE_FAILED,
**WORKER_FILTER,
},
)
except AssertionError:
# The failed nodes might have been already terminated by autoscaler
assert len(self.provider.non_terminated_nodes({})) < 2
# Check the launch failure event is generated.
autoscaler.update()
events = autoscaler.event_summarizer.summary()
assert "Removing 2 nodes of type worker (launch failed)." in events, events
    def testConfiguresOutdatedNodes(self):
        """Changing setup commands triggers re-configuration of live workers.

        After the cluster reaches up-to-date, rewriting the config with new
        worker_setup_commands must cause the autoscaler to run commands on
        the existing worker again (observed via runner.calls).
        """
        from ray.autoscaler._private.cli_logger import cli_logger

        def do_nothing(*args, **kwargs):
            pass

        # Silence cli_logger output for this test.
        cli_logger._print = type(cli_logger._print)(do_nothing, type(cli_logger))
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 1
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(4)])
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path,
            LoadMetrics(),
            MockGcsClient(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
        )
        autoscaler.update()
        # TODO(rickyx): This is a hack to avoid running into race conditions
        # within v1 autoscaler. These should no longer be relevant in v2.
        time.sleep(3)
        autoscaler.update()
        self.waitForNodes(2)
        self.provider.finish_starting_nodes()
        time.sleep(3)
        autoscaler.update()
        time.sleep(3)
        self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
        # Reset call history so new calls are attributable to the new config.
        runner.calls = []
        new_config = copy.deepcopy(SMALL_CLUSTER)
        new_config["worker_setup_commands"] = ["cmdX", "cmdY"]
        self.write_config(new_config)
        autoscaler.update()
        autoscaler.update()
        self.waitFor(lambda: len(runner.calls) > 0)
def testScaleDownMaxWorkers(self):
"""Tests terminating nodes due to max_nodes per type."""
config = copy.deepcopy(MULTI_WORKER_CLUSTER)
config["available_node_types"]["m4.large"]["min_workers"] = 3
config["available_node_types"]["m4.large"]["max_workers"] = 3
config["available_node_types"]["m4.large"]["resources"] = {}
config["available_node_types"]["m4.16xlarge"]["resources"] = {}
config["available_node_types"]["p2.xlarge"]["min_workers"] = 5
config["available_node_types"]["p2.xlarge"]["max_workers"] = 8
config["available_node_types"]["p2.xlarge"]["resources"] = {}
config["available_node_types"]["p2.8xlarge"]["min_workers"] = 2
config["available_node_types"]["p2.8xlarge"]["max_workers"] = 4
config["available_node_types"]["p2.8xlarge"]["resources"] = {}
config["max_workers"] = 13
config_path = self.write_config(config)
self.provider = MockProvider()
runner = MockProcessRunner()
runner.respond_to_call("json .Config.Env", ["[]" for i in range(15)])
lm = LoadMetrics()
get_or_create_head_node(
config,
printable_config_file=config_path,
no_restart=False,
restart_only=False,
yes=True,
override_cluster_name=None,
_provider=self.provider,
_runner=runner,
)
self.waitForNodes(1)
autoscaler = MockAutoscaler(
config_path,
lm,
MockGcsClient(),
max_failures=0,
max_concurrent_launches=13,
max_launch_batch=13,
process_runner=runner,
update_interval_s=0,
)
autoscaler.update()
self.waitForNodes(11)
assert autoscaler.pending_launches.value == 0
assert (
len(
self.provider.non_terminated_nodes(
{TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
)
)
== 10
)
# Terminate some nodes
config["available_node_types"]["m4.large"]["min_workers"] = 2 # 3
config["available_node_types"]["m4.large"]["max_workers"] = 2
config["available_node_types"]["p2.8xlarge"]["min_workers"] = 0 # 2
config["available_node_types"]["p2.8xlarge"]["max_workers"] = 0
# And spawn one.
config["available_node_types"]["p2.xlarge"]["min_workers"] = 6 # 5
config["available_node_types"]["p2.xlarge"]["max_workers"] = 6
self.write_config(config)
fill_in_node_ids(self.provider, lm)
autoscaler.update()
events = autoscaler.event_summarizer.summary()
self.waitFor(lambda: autoscaler.pending_launches.value == 0)
self.waitForNodes(8, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
assert autoscaler.pending_launches.value == 0
events = autoscaler.event_summarizer.summary()
assert "Removing 1 nodes of type m4.large (max_workers_per_type)." in events
assert "Removing 2 nodes of type p2.8xlarge (max_workers_per_type)." in events
# We should not be starting/stopping empty_node at all.
for event in events:
assert "empty_node" not in event
node_type_counts = defaultdict(int)
for node_id in NonTerminatedNodes(self.provider).worker_ids:
tags = self.provider.node_tags(node_id)
if TAG_RAY_USER_NODE_TYPE in tags:
node_type = tags[TAG_RAY_USER_NODE_TYPE]
node_type_counts[node_type] += 1
assert node_type_counts == {"m4.large": 2, "p2.xlarge": 6}
def testFalseyLoadMetrics(self):
lm = LoadMetrics()
assert not lm
lm.update("172.0.0.0", mock_node_id(), {"CPU": 1}, {"CPU": 0}, 0)
assert lm
    def testRecoverUnhealthyWorkers(self):
        """A worker that stops heartbeating is restarted, not removed.

        With node updaters enabled, zeroing a worker's last heartbeat time
        makes the autoscaler kick off a recovery update on it, emit a
        "Restarting ... (lost contact with raylet)" event, and bump the
        recovering_nodes metric.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 2
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(3)])
        lm = LoadMetrics()
        mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path,
            lm,
            MockGcsClient(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
            prom_metrics=mock_metrics,
        )
        autoscaler.update()
        self.waitForNodes(2, tag_filters=WORKER_FILTER)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
        )
        # Drain any in-flight updaters before marking a node unhealthy.
        for _ in range(5):
            if autoscaler.updaters:
                time.sleep(0.05)
                autoscaler.update()
        assert not autoscaler.updaters
        mock_metrics.recovering_nodes.set.assert_called_with(0)
        num_calls = len(runner.calls)
        # Mark a node as unhealthy by zeroing its last heartbeat time.
        lm.last_heartbeat_time_by_ip["172.0.0.1"] = 0
        autoscaler.update()
        mock_metrics.recovering_nodes.set.assert_called_with(1)
        # Recovery runs commands on the node; wait for new runner calls.
        self.waitFor(lambda: len(runner.calls) > num_calls, num_retries=150)
        # Check the node removal event is generated.
        autoscaler.update()
        events = autoscaler.event_summarizer.summary()
        assert (
            "Restarting 1 nodes of type worker (lost contact with raylet)." in events
        ), events
        assert mock_metrics.drain_node_exceptions.inc.call_count == 0
def testTerminateUnhealthyWorkers(self):
"""Test termination of unhealthy workers, when
autoscaler.disable_node_updaters == True.
Similar to testRecoverUnhealthyWorkers.
"""
self.unhealthyWorkerHelper(disable_liveness_check=False)
def testDontTerminateUnhealthyWorkers(self):
"""Test that the autoscaler leaves unhealthy workers alone when the worker
liveness check is disabled.
"""
self.unhealthyWorkerHelper(disable_liveness_check=True)
    def unhealthyWorkerHelper(self, disable_liveness_check: bool) -> None:
        """Helper used to test the autoscaler's handling of unhealthy worker nodes.

        If disable liveness check is False, the default code path is tested and we
        expect to see workers terminated.

        If disable liveness check is True, we expect the autoscaler not to take action
        on unhealthy nodes, instead delegating node management to another component.

        Args:
            disable_liveness_check: Whether to set WORKER_LIVENESS_CHECK_KEY
                to False in the provider config.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 2
        # Make it clear we're not timing out idle nodes here.
        config["idle_timeout_minutes"] = 1000000000
        if disable_liveness_check:
            config["provider"][WORKER_LIVENESS_CHECK_KEY] = False
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(3)])
        lm = LoadMetrics()
        mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path,
            lm,
            MockGcsClient(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
            prom_metrics=mock_metrics,
        )
        # Bring both workers up to date.
        autoscaler.update()
        self.waitForNodes(2, tag_filters=WORKER_FILTER)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
        )
        # Clear out updaters.
        for _ in range(5):
            if autoscaler.updaters:
                time.sleep(0.05)
                autoscaler.update()
        assert not autoscaler.updaters
        num_calls = len(runner.calls)
        # Mark a node as unhealthy
        lm.last_heartbeat_time_by_ip["172.0.0.1"] = 0
        # Turn off updaters.
        autoscaler.disable_node_updaters = True
        # Reduce min_workers to 1
        autoscaler.config["available_node_types"]["worker"]["min_workers"] = 1
        fill_in_node_ids(self.provider, lm)
        if disable_liveness_check:
            # We've disabled the liveness check, so the unhealthy node should stick
            # around until someone else takes care of it.
            # Do several autoscaler updates, to reinforce the fact that the
            # autoscaler will never take down the unhealthy nodes.
            for _ in range(10):
                autoscaler.update()
            # The nodes are still there.
            assert self.num_nodes(tag_filters=WORKER_FILTER) == 2
            # There's no synchronization required to make the last assertion valid:
            # The autoscaler's node termination is synchronous and blocking, as is
            # the terminate_node method of the mock node provider used in this test.
            # No events generated indicating that we are removing nodes.
            for event in autoscaler.event_summarizer.summary():
                assert "Removing" not in event
        else:
            # We expect the unhealthy node to be cleared out with a single
            # autoscaler update.
            autoscaler.update()
            # Stopped node metric incremented.
            mock_metrics.stopped_nodes.inc.assert_called_once_with()
            # One node left.
            self.waitForNodes(1, tag_filters=WORKER_FILTER)
            # Check the node removal event is generated.
            autoscaler.update()
            events = autoscaler.event_summarizer.summary()
            assert (
                "Removing 1 nodes of type "
                "worker (lost contact with raylet)." in events
            ), events
            # No additional runner calls, since updaters were disabled.
            assert len(runner.calls) == num_calls
        assert mock_metrics.drain_node_exceptions.inc.call_count == 0
    def testTerminateUnhealthyWorkers2(self):
        """Tests finer details of termination of unhealthy workers when
        node updaters are disabled.

        Specifically, test that newly up-to-date nodes which haven't sent a
        heartbeat are marked active, and that heartbeat bookkeeping for
        terminated nodes is pruned.
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["provider"]["disable_node_updaters"] = True
        config["available_node_types"]["worker"]["min_workers"] = 2
        config_path = self.write_config(config)
        self.provider = MockProvider()
        runner = MockProcessRunner()
        mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
        lm = LoadMetrics()
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path,
            lm,
            MockGcsClient(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
            prom_metrics=mock_metrics,
        )
        assert len(self.provider.non_terminated_nodes(WORKER_FILTER)) == 0
        for _ in range(10):
            autoscaler.update()
            # Nodes stay in uninitialized state because no one has finished
            # updating them.
            self.waitForNodes(
                2,
                tag_filters={
                    TAG_RAY_NODE_STATUS: STATUS_UNINITIALIZED,
                    **WORKER_FILTER,
                },
            )
        nodes = self.provider.non_terminated_nodes(WORKER_FILTER)
        ips = [self.provider.internal_ip(node) for node in nodes]
        # No heartbeats recorded yet.
        assert not any(ip in lm.last_heartbeat_time_by_ip for ip in ips)
        # Simulate an external component marking the nodes up-to-date.
        for node in nodes:
            self.provider.set_node_tags(
                node, {TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
            )
        autoscaler.update()
        # Nodes marked active after up-to-date status detected.
        assert all(ip in lm.last_heartbeat_time_by_ip for ip in ips)
        # Nodes are kept.
        self.waitForNodes(
            2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
        )
        # Mark nodes unhealthy.
        for ip in ips:
            lm.last_heartbeat_time_by_ip[ip] = 0
        fill_in_node_ids(self.provider, lm)
        autoscaler.update()
        # Unhealthy nodes are gone.
        self.waitForNodes(0, tag_filters=WORKER_FILTER)
        autoscaler.update()
        # IPs pruned
        assert lm.last_heartbeat_time_by_ip == {}
        assert mock_metrics.drain_node_exceptions.inc.call_count == 0
def testExternalNodeScaler(self):
config = copy.deepcopy(SMALL_CLUSTER)
config["provider"] = {
"type": "external",
"module": "ray.autoscaler.node_provider.NodeProvider",
}
config_path = self.write_config(config)
autoscaler = MockAutoscaler(
config_path,
LoadMetrics(),
MockGcsClient(),
max_failures=0,
update_interval_s=0,
)
assert isinstance(autoscaler.provider, NodeProvider)
def testExternalNodeScalerWrongImport(self):
config = copy.deepcopy(SMALL_CLUSTER)
config["provider"] = {
"type": "external",
"module": "mymodule.provider_class",
}
invalid_provider = self.write_config(config)
with pytest.raises(ImportError):
MockAutoscaler(
invalid_provider, LoadMetrics(), MockGcsClient(), update_interval_s=0
)
def testExternalNodeScalerWrongModuleFormat(self):
config = copy.deepcopy(SMALL_CLUSTER)
config["provider"] = {
"type": "external",
"module": "does-not-exist",
}
invalid_provider = self.write_config(config, call_prepare_config=False)
with pytest.raises(ValueError):
MockAutoscaler(
invalid_provider, LoadMetrics(), MockGcsClient(), update_interval_s=0
)
    def testSetupCommandsWithNoNodeCaching(self):
        """Without stopped-node caching, replacement workers are brand new.

        With cache_stopped=False, terminating the worker and scaling back
        up yields a node with a different IP that re-runs the full command
        sequence (init, setup, worker setup, ray start).
        """
        config = copy.deepcopy(SMALL_CLUSTER)
        config["available_node_types"]["worker"]["min_workers"] = 1
        config["available_node_types"]["worker"]["max_workers"] = 1
        config["max_workers"] = 1
        config_path = self.write_config(config)
        self.provider = MockProvider(cache_stopped=False)
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
        lm = LoadMetrics()
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path,
            lm,
            MockGcsClient(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
        )
        autoscaler.update()
        self.waitForNodes(1, tag_filters=WORKER_FILTER)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
        )
        # The full command sequence ran on the first worker.
        worker_ip = self.provider.non_terminated_node_ips(WORKER_FILTER)[0]
        runner.assert_has_call(worker_ip, "init_cmd")
        runner.assert_has_call(worker_ip, "setup_cmd")
        runner.assert_has_call(worker_ip, "worker_setup_cmd")
        runner.assert_has_call(worker_ip, "start_ray_worker")
        # Check the node was not reused
        self.provider.terminate_node("1")
        autoscaler.update()
        runner.clear_history()
        self.waitForNodes(1, tag_filters=WORKER_FILTER)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
        )
        # The replacement re-ran everything under a new IP.
        new_worker_ip = self.provider.non_terminated_node_ips(WORKER_FILTER)[0]
        runner.assert_has_call(new_worker_ip, "init_cmd")
        runner.assert_has_call(new_worker_ip, "setup_cmd")
        runner.assert_has_call(new_worker_ip, "worker_setup_cmd")
        runner.assert_has_call(new_worker_ip, "start_ray_worker")
        assert worker_ip != new_worker_ip
    def testSetupCommandsWithStoppedNodeCachingNoDocker(self):
        """A reused stopped node (no docker) skips init/setup commands.

        With cache_stopped=True and docker removed from the config,
        terminating the worker and scaling back up reuses the stopped node:
        only the ray start command runs again, not init/setup.
        """
        file_mount_dir = tempfile.mkdtemp()
        config = copy.deepcopy(SMALL_CLUSTER)
        del config["docker"]
        config["file_mounts"] = {"/root/test-folder": file_mount_dir}
        config["file_mounts_sync_continuously"] = True
        config["available_node_types"]["worker"]["min_workers"] = 1
        config["available_node_types"]["worker"]["max_workers"] = 1
        config["max_workers"] = 1
        config_path = self.write_config(config)
        self.provider = MockProvider(cache_stopped=True)
        runner = MockProcessRunner()
        runner.respond_to_call("json .Config.Env", ["[]" for i in range(3)])
        lm = LoadMetrics()
        self.provider.create_node(
            {},
            {
                TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
                TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
                TAG_RAY_USER_NODE_TYPE: "head",
            },
            1,
        )
        autoscaler = MockAutoscaler(
            config_path,
            lm,
            MockGcsClient(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
        )
        autoscaler.update()
        self.waitForNodes(1, tag_filters=WORKER_FILTER)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
        )
        # Full command sequence on first boot.
        worker_ip = self.provider.non_terminated_node_ips(WORKER_FILTER)[0]
        runner.assert_has_call(worker_ip, "init_cmd")
        runner.assert_has_call(worker_ip, "setup_cmd")
        runner.assert_has_call(worker_ip, "worker_setup_cmd")
        runner.assert_has_call(worker_ip, "start_ray_worker")
        # Check the node was indeed reused
        self.provider.terminate_node("1")
        runner.clear_history()
        autoscaler.update()
        self.waitForNodes(1, tag_filters=WORKER_FILTER)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
        )
        # Only ray start re-ran on the reused node.
        runner.assert_not_has_call(worker_ip, "init_cmd")
        runner.assert_not_has_call(worker_ip, "setup_cmd")
        runner.assert_not_has_call(worker_ip, "worker_setup_cmd")
        runner.assert_has_call(worker_ip, "start_ray_worker")
        with open(f"{file_mount_dir}/new_file", "w") as f:
            f.write("abcdefgh")
        # Check that run_init happens when file_mounts have updated
        # NOTE(review): the assertions below still expect init/setup NOT to
        # re-run after the file mount change — confirm the comment above
        # against the intended behavior.
        self.provider.terminate_node("1")
        autoscaler.update()
        runner.clear_history()
        self.waitForNodes(1, tag_filters=WORKER_FILTER)
        self.provider.finish_starting_nodes()
        autoscaler.update()
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
        )
        runner.assert_not_has_call(worker_ip, "init_cmd")
        runner.assert_not_has_call(worker_ip, "setup_cmd")
        runner.assert_not_has_call(worker_ip, "worker_setup_cmd")
        runner.assert_has_call(worker_ip, "start_ray_worker")
        autoscaler.update()
        runner.assert_not_has_call(worker_ip, "setup_cmd")
        # We did not start any other nodes
        self.waitForNodes(
            1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
        )
def testSetupCommandsWithStoppedNodeCachingDocker(self):
    """Init/setup commands re-run when a stopped node is reused under Docker.

    With ``cache_stopped=True`` and a Docker config present, restarting a
    terminated worker must re-run ``init_cmd``/``setup_cmd``/
    ``worker_setup_cmd`` and ``docker run`` (unlike the non-Docker caching
    case, where setup is skipped on reuse).
    """
    # NOTE(ilr) Setup & Init commands **should** run with stopped nodes
    # when Docker is in use.
    file_mount_dir = tempfile.mkdtemp()
    config = copy.deepcopy(SMALL_CLUSTER)
    config["file_mounts"] = {"/root/test-folder": file_mount_dir}
    config["file_mounts_sync_continuously"] = True
    # Exactly one worker so the reuse cycle is deterministic.
    config["available_node_types"]["worker"]["min_workers"] = 1
    config["available_node_types"]["worker"]["max_workers"] = 1
    config["max_workers"] = 1
    config_path = self.write_config(config)
    self.provider = MockProvider(cache_stopped=True)
    runner = MockProcessRunner()
    runner.respond_to_call("json .Config.Env", ["[]" for i in range(3)])
    lm = LoadMetrics()
    # Pre-create the head node; the autoscaler only launches workers.
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    # First bring-up: full init/setup/start sequence plus `docker run`.
    autoscaler.update()
    self.waitForNodes(1, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(
        1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    worker_ip = self.provider.non_terminated_node_ips(WORKER_FILTER)[0]
    runner.assert_has_call(worker_ip, "init_cmd")
    runner.assert_has_call(worker_ip, "setup_cmd")
    runner.assert_has_call(worker_ip, "worker_setup_cmd")
    runner.assert_has_call(worker_ip, "start_ray_worker")
    runner.assert_has_call(worker_ip, "docker run")
    # Check the node was indeed reused
    self.provider.terminate_node("1")
    runner.clear_history()
    autoscaler.update()
    self.waitForNodes(1, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(
        1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    print(runner.command_history())
    # These all must happen when the node is stopped and reused
    runner.assert_has_call(worker_ip, "init_cmd")
    runner.assert_has_call(worker_ip, "setup_cmd")
    runner.assert_has_call(worker_ip, "worker_setup_cmd")
    runner.assert_has_call(worker_ip, "start_ray_worker")
    runner.assert_has_call(worker_ip, "docker run")
    with open(f"{file_mount_dir}/new_file", "w") as f:
        f.write("abcdefgh")
    # Check that run_init happens when file_mounts have updated
    # NOTE(review): terminates id "0" here (the earlier reuse cycle used
    # "1"); the assertions below are driven by the file-mount change —
    # confirm the intended node id against MockProvider's id assignment.
    self.provider.terminate_node("0")
    runner.clear_history()
    autoscaler.update()
    self.waitForNodes(1, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(
        1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    runner.assert_has_call(worker_ip, "init_cmd")
    runner.assert_has_call(worker_ip, "setup_cmd")
    runner.assert_has_call(worker_ip, "worker_setup_cmd")
    runner.assert_has_call(worker_ip, "start_ray_worker")
    runner.assert_has_call(worker_ip, "docker run")
    # The host-side mount directory must exist before the container starts.
    docker_run_cmd_indx = [
        i for i, cmd in enumerate(runner.command_history()) if "docker run" in cmd
    ][0]
    mkdir_cmd_indx = [
        i for i, cmd in enumerate(runner.command_history()) if "mkdir -p" in cmd
    ][0]
    assert mkdir_cmd_indx < docker_run_cmd_indx
    runner.clear_history()
    autoscaler.update()
    runner.assert_not_has_call(worker_ip, "setup_cmd")
    # We did not start any other nodes
    runner.assert_not_has_call("172.0.0.2", " ")
def testMultiNodeReuse(self):
    """Stopped nodes are reused first when scaling back up (no Docker).

    Without Docker, reused (cached) nodes skip setup_cmd but still run the
    Ray start command; freshly launched nodes run both.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    # Docker re-runs setup commands when nodes are reused.
    del config["docker"]
    config["available_node_types"]["worker"]["min_workers"] = 3
    config["available_node_types"]["worker"]["max_workers"] = 3
    config["max_workers"] = 3
    config_path = self.write_config(config)
    self.provider = MockProvider(cache_stopped=True)
    runner = MockProcessRunner()
    lm = LoadMetrics()
    # Pre-create the head node; the autoscaler only launches workers.
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    autoscaler.update()
    self.waitForNodes(3, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(
        3, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    # Stop (cache) all three workers.
    self.provider.terminate_node("1")
    self.provider.terminate_node("2")
    self.provider.terminate_node("3")
    runner.clear_history()
    # Scale up to 8 workers, check we reuse the first 3 and add 5 more.
    config["available_node_types"]["worker"]["min_workers"] = 8
    config["available_node_types"]["worker"]["max_workers"] = 8
    config["max_workers"] = 8
    self.write_config(config)
    autoscaler.update()
    autoscaler.update()
    self.waitForNodes(8, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(
        8, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    autoscaler.update()
    # Reused nodes: no setup, but Ray is (re)started.
    for i in [1, 2, 3]:
        runner.assert_not_has_call("172.0.0.{}".format(i), "setup_cmd")
        runner.assert_has_call("172.0.0.{}".format(i), "start_ray_worker")
    # New nodes: full setup plus Ray start.
    for i in range(4, 9):
        runner.assert_has_call("172.0.0.{}".format(i), "setup_cmd")
        runner.assert_has_call("172.0.0.{}".format(i), "start_ray_worker")
def testContinuousFileMounts(self):
    """With file_mounts_sync_continuously, mount changes re-sync on update.

    After the initial bring-up, modifying the mounted directory triggers a
    fresh rsync to each worker on the next autoscaler update, without
    re-running setup commands.
    """
    file_mount_dir = tempfile.mkdtemp()
    self.provider = MockProvider()
    config = copy.deepcopy(SMALL_CLUSTER)
    config["file_mounts"] = {"/home/test-folder": file_mount_dir}
    config["file_mounts_sync_continuously"] = True
    config["available_node_types"]["worker"]["min_workers"] = 2
    config_path = self.write_config(config)
    runner = MockProcessRunner()
    # Canned responses for the DockerCommandRunner's probing commands.
    runner.respond_to_call("json .Config.Env", ["[]" for i in range(4)])
    runner.respond_to_call("command -v docker", ["docker" for _ in range(4)])
    lm = LoadMetrics()
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    # Initial bring-up: head + 2 workers, each gets setup and a file sync.
    autoscaler.update()
    self.waitForNodes(3)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(3, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
    autoscaler.update()
    docker_mount_prefix = get_docker_host_mount_location(config["cluster_name"])
    for i in self.provider.non_terminated_nodes(
        tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
    ):
        runner.assert_has_call(f"172.0.0.{i}", "setup_cmd")
        runner.assert_has_call(
            f"172.0.0.{i}",
            f"{file_mount_dir}/ ubuntu@172.0.0.{i}:"
            f"{docker_mount_prefix}/home/test-folder/",
        )
    runner.clear_history()
    # Mutate the mounted directory so its content hash changes.
    with open(os.path.join(file_mount_dir, "test.txt"), "wb") as temp_file:
        temp_file.write("hello".encode())
    runner.respond_to_call(".Config.Image", ["example" for _ in range(4)])
    runner.respond_to_call(".State.Running", ["true" for _ in range(4)])
    autoscaler.update()
    self.waitForNodes(3)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(3, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
    autoscaler.update()
    # Continuous sync re-runs the rsync but not the setup commands.
    for i in self.provider.non_terminated_nodes(
        tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER}
    ):
        runner.assert_not_has_call(f"172.0.0.{i}", "setup_cmd")
        runner.assert_has_call(
            f"172.0.0.{i}",
            f"{file_mount_dir}/ ubuntu@172.0.0.{i}:"
            f"{docker_mount_prefix}/home/test-folder/",
        )
def testFileMountsNonContinuous(self):
    """Without continuous sync, mount changes do NOT re-sync automatically.

    A change to the mounted directory is only picked up when the autoscaler
    is re-created (simulating a second ``ray up``), which re-hashes the
    file mounts and re-runs setup + sync.
    """
    file_mount_dir = tempfile.mkdtemp()
    self.provider = MockProvider()
    config = copy.deepcopy(SMALL_CLUSTER)
    config["file_mounts"] = {"/home/test-folder": file_mount_dir}
    config["available_node_types"]["worker"]["min_workers"] = 2
    config_path = self.write_config(config)
    runner = MockProcessRunner()
    runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
    lm = LoadMetrics()
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    # Initial bring-up: both workers get setup and the file-mount sync.
    autoscaler.update()
    self.waitForNodes(2, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(
        2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    autoscaler.update()
    docker_mount_prefix = get_docker_host_mount_location(config["cluster_name"])
    for i in self.provider.non_terminated_nodes(WORKER_FILTER):
        runner.assert_has_call(f"172.0.0.{i}", "setup_cmd")
        runner.assert_has_call(
            f"172.0.0.{i}",
            f"{file_mount_dir}/ ubuntu@172.0.0.{i}:"
            f"{docker_mount_prefix}/home/test-folder/",
        )
    runner.clear_history()
    # Mutate the mounted directory; without continuous sync nothing happens.
    with open(os.path.join(file_mount_dir, "test.txt"), "wb") as temp_file:
        temp_file.write("hello".encode())
    autoscaler.update()
    self.waitForNodes(2, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    self.waitForNodes(
        2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    for i in self.provider.non_terminated_nodes(WORKER_FILTER):
        runner.assert_not_has_call(f"172.0.0.{i}", "setup_cmd")
        runner.assert_not_has_call(
            f"172.0.0.{i}",
            f"{file_mount_dir}/ ubuntu@172.0.0.{i}:"
            f"{docker_mount_prefix}/home/test-folder/",
        )
    # Simulate a second `ray up` call
    from ray.autoscaler._private import util

    # Clearing the private hash cache forces the file mounts to be
    # re-hashed by the new autoscaler instance.
    util._hash_cache = {}
    runner = MockProcessRunner()
    runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
    lm = LoadMetrics()
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    autoscaler.update()
    self.waitForNodes(2, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    self.waitForNodes(
        2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    autoscaler.update()
    # The new autoscaler sees the changed hash: setup and sync re-run.
    for i in self.provider.non_terminated_nodes(WORKER_FILTER):
        runner.assert_has_call(f"172.0.0.{i}", "setup_cmd")
        runner.assert_has_call(
            f"172.0.0.{i}",
            f"{file_mount_dir}/ ubuntu@172.0.0.{i}:"
            f"{docker_mount_prefix}/home/test-folder/",
        )
def testDockerImageExistsBeforeInspect(self):
    """`docker pull` must precede the first targeted `docker inspect -f`.

    Regression test for the bug described in #13128, where the image was
    inspected before it had been pulled. With ``pull_before_run=False``,
    the pull still has to happen before any targeted inspect.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 1
    config["available_node_types"]["worker"]["max_workers"] = 1
    config["max_workers"] = 1
    config["docker"]["pull_before_run"] = False
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    runner.respond_to_call("json .Config.Env", ["[]" for i in range(1)])
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    autoscaler = MockAutoscaler(
        config_path,
        LoadMetrics(),
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    autoscaler.update()
    autoscaler.update()
    self.waitForNodes(1, tag_filters={TAG_RAY_NODE_KIND: NODE_KIND_WORKER})
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(
        1,
        tag_filters={
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_NODE_KIND: NODE_KIND_WORKER,
        },
    )
    # Indices into the command history of the first pull / first inspect.
    first_pull = [
        (i, cmd)
        for i, cmd in enumerate(runner.command_history())
        if "docker pull" in cmd
    ]
    first_targeted_inspect = [
        (i, cmd)
        for i, cmd in enumerate(runner.command_history())
        if "docker inspect -f" in cmd
    ]
    # This checks for the bug mentioned #13128 where the image is inspected
    # before the image is present.
    assert min(x[0] for x in first_pull) < min(x[0] for x in first_targeted_inspect)
def testGetRunningHeadNode(self):
    """_get_running_head_node honors `_allow_uninitialized_state`.

    A head node in the ``update-failed`` state is only returned when
    ``_allow_uninitialized_state=True``, and an ``up-to-date`` head is
    always preferred over a failed one.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    self.provider = MockProvider()
    # Node 0 is failed.
    self.provider.create_node(
        {},
        {
            TAG_RAY_CLUSTER_NAME: "default",
            TAG_RAY_NODE_KIND: "head",
            TAG_RAY_NODE_STATUS: "update-failed",
        },
        1,
    )
    # `_allow_uninitialized_state` should return the head node
    # in the `update-failed` state.
    allow_failed = commands._get_running_head_node(
        config,
        "/fake/path",
        override_cluster_name=None,
        create_if_needed=False,
        _provider=self.provider,
        _allow_uninitialized_state=True,
    )
    assert allow_failed == "0"
    # Node 1 is okay.
    self.provider.create_node(
        {},
        {
            TAG_RAY_CLUSTER_NAME: "default",
            TAG_RAY_NODE_KIND: "head",
            TAG_RAY_NODE_STATUS: "up-to-date",
        },
        1,
    )
    node = commands._get_running_head_node(
        config,
        "/fake/path",
        override_cluster_name=None,
        create_if_needed=False,
        _provider=self.provider,
    )
    assert node == "1"
    # `_allow_uninitialized_state` should return the up-to-date head node
    # if it is present.
    optionally_failed = commands._get_running_head_node(
        config,
        "/fake/path",
        override_cluster_name=None,
        create_if_needed=False,
        _provider=self.provider,
        _allow_uninitialized_state=True,
    )
    assert optionally_failed == "1"
def testNodeTerminatedDuringUpdate(self):
    """
    Tests autoscaler handling a node getting terminated during an update
    triggered by the node missing a heartbeat.

    Extension of testRecoverUnhealthyWorkers.

    In this test, two nodes miss a heartbeat.
    One of them (node 0) is terminated during its recovery update.
    The other (node 1) just fails its update.

    When processing completed updates, the autoscaler terminates node 1
    but does not try to terminate node 0 again.
    """
    cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG)
    cluster_config["available_node_types"]["ray.worker.default"]["min_workers"] = 2
    cluster_config["worker_start_ray_commands"] = ["ray_start_cmd"]
    # Don't need the extra node type or a docker config.
    cluster_config["head_node_type"] = ["ray.worker.default"]
    del cluster_config["available_node_types"]["ray.head.default"]
    del cluster_config["docker"]
    config_path = self.write_config(cluster_config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    lm = LoadMetrics()
    mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    # Scale up to two up-to-date workers
    autoscaler.update()
    self.waitForNodes(2)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(2, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE})
    # Mark both nodes as unhealthy
    for _ in range(5):
        if autoscaler.updaters:
            time.sleep(0.05)
            autoscaler.update()
    lm.last_heartbeat_time_by_ip["172.0.0.0"] = 0
    lm.last_heartbeat_time_by_ip["172.0.0.1"] = 0
    # Expect both updates to be successful, no nodes in updating state
    assert mock_metrics.successful_updates.inc.call_count == 2
    assert mock_metrics.worker_update_time.observe.call_count == 2
    mock_metrics.updating_nodes.set.assert_called_with(0)
    assert not autoscaler.updaters

    # Set up process runner to terminate worker 0 during missed heartbeat
    # recovery and also cause the updater to fail.
    def terminate_worker_zero():
        self.provider.terminate_node("0")

    autoscaler.process_runner = MockProcessRunner(
        fail_cmds=["ray_start_cmd"],
        cmd_to_callback={"ray_start_cmd": terminate_worker_zero},
    )
    # ensures that no updates are completed until after the next call
    # to update()
    autoscaler.process_runner.ready_to_run.clear()
    num_calls = len(autoscaler.process_runner.calls)
    autoscaler.update()
    mock_metrics.updating_nodes.set.assert_called_with(2)
    mock_metrics.recovering_nodes.set.assert_called_with(2)
    autoscaler.process_runner.ready_to_run.set()
    # Wait for updaters spawned by last autoscaler update to finish.
    self.waitForUpdatersToFinish(autoscaler)
    # Check that updaters processed some commands in the last autoscaler
    # update.
    assert (
        len(autoscaler.process_runner.calls) > num_calls
    ), "Did not get additional process runner calls on last autoscaler update."
    # Missed heartbeat triggered recovery for both nodes.
    events = autoscaler.event_summarizer.summary()
    assert (
        "Restarting 2 nodes of type "
        "ray.worker.default (lost contact with raylet)." in events
    ), events
    # Node 0 was terminated during the last update.
    # Node 1's updater failed, but node 1 won't be terminated until the
    # next autoscaler update.
    assert (
        "0" not in NonTerminatedNodes(self.provider).worker_ids
    ), "Node zero still non-terminated."
    assert not self.provider.is_terminated("1"), "Node one terminated prematurely."
    fill_in_node_ids(self.provider, lm)
    autoscaler.update()
    # Failed updates are now processed.
    assert (
        autoscaler.num_failed_updates["0"] == 1
    ), "Node zero update failure not registered"
    assert (
        autoscaler.num_failed_updates["1"] == 1
    ), "Node one update failure not registered"
    assert mock_metrics.failed_updates.inc.call_count == 2
    assert mock_metrics.failed_recoveries.inc.call_count == 2
    assert mock_metrics.successful_recoveries.inc.call_count == 0
    # Completed-update-processing logic should have terminated node 1.
    assert self.provider.is_terminated("1"), "Node 1 not terminated on time."
    events = autoscaler.event_summarizer.summary()
    # Just one node (node_id 1) terminated in the last update.
    # Validates that we didn't try to double-terminate node 0.
    assert (
        "Removing 1 nodes of type ray.worker.default (launch failed)." in events
    ), events
    # To be more explicit,
    assert (
        "Removing 2 nodes of type "
        "ray.worker.default (launch failed)." not in events
    ), events
    # Should get two new nodes after the next update.
    fill_in_node_ids(self.provider, lm)
    autoscaler.update()
    self.waitForNodes(2)
    assert set(NonTerminatedNodes(self.provider).worker_ids) == {
        "2",
        "3",
    }, "Unexpected node_ids"
    assert mock_metrics.stopped_nodes.inc.call_count == 1
    assert mock_metrics.drain_node_exceptions.inc.call_count == 0
def testProviderException(self):
    """Node-creation failures in the provider surface via Prometheus metrics.

    The provider is configured to raise on node creation; the autoscaler
    should record one launch exception and a failed-create increment of 2
    (one per requested worker), without any drain-node exceptions.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 2
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    # All subsequent create_node calls will raise.
    self.provider.creation_error = Exception(":(")
    autoscaler = MockAutoscaler(
        config_path,
        LoadMetrics(),
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    autoscaler.update()

    # Polled until true: metric updates happen asynchronously in the
    # launcher thread, so we can't assert them immediately after update().
    def metrics_incremented():
        exceptions = mock_metrics.node_launch_exceptions.inc.call_count == 1
        create_failures = mock_metrics.failed_create_nodes.inc.call_count == 1
        create_arg = False
        if create_failures:
            # number of failed creations should be incremented by 2
            create_arg = mock_metrics.failed_create_nodes.inc.call_args[0] == (2,)
        return exceptions and create_failures and create_arg

    self.waitFor(metrics_incremented, fail_msg="Expected metrics to update")
    assert mock_metrics.drain_node_exceptions.inc.call_count == 0
def testDefaultMinMaxWorkers(self):
    """prepare_config defaults the head node type to min/max workers of 0."""
    prepared = prepare_config(copy.deepcopy(MOCK_DEFAULT_CONFIG))
    head_type = prepared["available_node_types"]["ray.head.default"]
    for bound in ("min_workers", "max_workers"):
        assert head_type[bound] == 0
def testAutoscalerInitFailure(self):
    """Validates error handling for failed autoscaler initialization in the
    Monitor.

    A StandardAutoscaler replacement whose constructor raises is patched
    into the monitor module; Monitor.run() must propagate the exception
    and publish the error to the driver exactly once.
    """

    class AutoscalerInitFailException(Exception):
        pass

    class FaultyAutoscaler:
        def __init__(self, *args, **kwargs):
            raise AutoscalerInitFailException

    prev_port = os.environ.get("RAY_GCS_SERVER_PORT")
    os.environ["RAY_GCS_SERVER_PORT"] = "12345"
    try:
        ray.init()
        with patch("ray._private.utils.publish_error_to_driver") as mock_publish:
            with patch.multiple(
                "ray.autoscaler._private.monitor",
                StandardAutoscaler=FaultyAutoscaler,
                _internal_kv_initialized=Mock(return_value=False),
            ):
                monitor = Monitor(
                    address="localhost:12345",
                    autoscaling_config="",
                    log_dir=self.tmpdir,
                )
                with pytest.raises(AutoscalerInitFailException):
                    monitor.run()
                mock_publish.assert_called_once()
    finally:
        # Fix: restore RAY_GCS_SERVER_PORT even if the body raises, so a
        # failure here cannot leak the env var into later tests.
        if prev_port is not None:
            os.environ["RAY_GCS_SERVER_PORT"] = prev_port
        else:
            del os.environ["RAY_GCS_SERVER_PORT"]
def testInitializeSDKArguments(self):
    """request_resources rejects arguments of the wrong type.

    Regression test for https://github.com/ray-project/ray/issues/23166.
    """
    from ray.autoscaler.sdk import request_resources

    # num_cpus must be an int.
    with self.assertRaises(TypeError):
        request_resources(num_cpus="bar")
    # bundles must be a list of dicts mapping resource name -> int count.
    for bad_bundles in (
        "bar",
        ["foo"],
        [{"foo": "bar"}],
        [{"foo": 1}, {"bar": "baz"}],
    ):
        with self.assertRaises(TypeError):
            request_resources(bundles=bad_bundles)
def test_autoscaler_status_log(self):
    """Run the status-log scenario with logging both enabled and disabled."""
    for enabled_env in (1, 0):
        self._test_autoscaler_status_log(status_log_enabled_env=enabled_env)
def _test_autoscaler_status_log(self, status_log_enabled_env: int):
    """Check AUTOSCALER_STATUS_LOG gates the " Autoscaler status: " log line.

    Args:
        status_log_enabled_env: value patched into AUTOSCALER_STATUS_LOG;
            truthy means the status line must be logged, falsy means it
            must not appear.
    """
    mock_logger = Mock(spec=logging.Logger(""))
    with patch.multiple(
        "ray.autoscaler._private.autoscaler",
        logger=mock_logger,
        AUTOSCALER_STATUS_LOG=status_log_enabled_env,
    ):
        config = copy.deepcopy(SMALL_CLUSTER)
        config_path = self.write_config(config)
        runner = MockProcessRunner()
        mock_metrics = Mock(spec=AutoscalerPrometheusMetrics())
        self.provider = MockProvider()
        autoscaler = MockAutoscaler(
            config_path,
            LoadMetrics(),
            MockGcsClient(),
            max_failures=0,
            process_runner=runner,
            update_interval_s=0,
            prom_metrics=mock_metrics,
        )
        autoscaler.update()
        # Scan every logger.info call for the status line.
        status_log_found = False
        for call in mock_logger.info.call_args_list:
            args, _ = call
            arg = args[0]
            if " Autoscaler status: " in arg:
                status_log_found = True
                break

        assert status_log_found is bool(status_log_enabled_env)
def testScaleDownIdleTimeOut(self):
    """An idle worker beyond idle_timeout_minutes is drained and removed.

    min_workers is dropped to 0 and idle_timeout_minutes to 0.1; once the
    worker reports idle longer than the timeout, the autoscaler emits a
    removal event and issues one GCS drain-node call.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["worker"]["min_workers"] = 1
    config_path = self.write_config(config)
    self.provider = MockProvider()
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    runner = MockProcessRunner()
    lm = LoadMetrics()
    mock_gcs_client = MockGcsClient()
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        mock_gcs_client,
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    autoscaler.update()
    self.waitForNodes(1, tag_filters=WORKER_FILTER)
    # Reduce cluster size to 1
    new_config = copy.deepcopy(SMALL_CLUSTER)
    new_config["available_node_types"]["worker"]["min_workers"] = 0
    new_config["idle_timeout_minutes"] = 0.1
    self.write_config(new_config)
    autoscaler.update()
    worker_ip = self.provider.non_terminated_node_ips(WORKER_FILTER)[0]
    # Mark the node as idle (idle for 20s, which exceeds the 0.1 minute
    # timeout configured above).
    lm.update(worker_ip, mock_node_id(), {"CPU": 1}, {"CPU": 1}, 20)
    autoscaler.update()
    assert self.provider.internal_ip("1") == worker_ip
    events = autoscaler.event_summarizer.summary()
    assert "Removing 1 nodes of type worker (idle)." in events, events
    autoscaler.update()
    # Removal goes through the GCS drain-node protocol exactly once.
    assert mock_gcs_client.drain_node_call_count == 1
def testDontScaleDownIdleTimeOutForPlacementGroups(self):
    """Idle workers needed by a pending placement group are not scaled down.

    A SPREAD placement group with two CPU bundles requires two workers; the
    existing idle worker must be kept (despite exceeding the idle timeout)
    and a second worker added.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    config["available_node_types"]["head"]["resources"][
        "CPU"
    ] = 0  # make the head node not consume any resources.
    config["available_node_types"]["worker"][
        "min_workers"
    ] = 1  # prepare 1 worker upfront.
    config["idle_timeout_minutes"] = 0.1
    config_path = self.write_config(config)
    self.provider = MockProvider()
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    runner = MockProcessRunner()
    # Avoid the "Unable to deserialize `image_env` to Python object" error in the DockerCommandRunner.
    runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
    lm = LoadMetrics()
    mock_gcs_client = MockGcsClient()
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        mock_gcs_client,
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    autoscaler.update()
    # 1 worker is ready upfront.
    self.waitForNodes(1, tag_filters=WORKER_FILTER)
    # clear the summary for later check.
    autoscaler.event_summarizer.clear()
    # Restore min_workers to allow scaling down to 0.
    config["available_node_types"]["worker"]["min_workers"] = 0
    self.write_config(config)
    # Create a placement group with 2 bundles that require 2 workers.
    placement_group_table_data = gcs_pb2.PlacementGroupTableData(
        placement_group_id=b"\000",
        strategy=common_pb2.PlacementStrategy.SPREAD,
    )
    for i in range(2):
        bundle = common_pb2.Bundle()
        bundle.bundle_id.placement_group_id = (
            placement_group_table_data.placement_group_id
        )
        bundle.bundle_id.bundle_index = i
        bundle.unit_resources["CPU"] = 1
        placement_group_table_data.bundles.append(bundle)
    # Mark the first worker as idle, but it should not be scaled down by the autoscaler because it will be used by the placement group.
    worker_ip = self.provider.non_terminated_node_ips(WORKER_FILTER)[0]
    lm.update(
        worker_ip,
        mock_node_id(),
        {"CPU": 1},
        {"CPU": 1},
        20,  # idle for 20 seconds, which is longer than the idle_timeout_minutes.
        None,
        None,
        [placement_group_table_data],
    )
    autoscaler.update()
    # TODO(rueian): This is a hack to avoid running into race conditions
    # within v1 autoscaler. These should no longer be relevant in v2.
    self.waitForNodes(2, tag_filters=WORKER_FILTER)
    events = autoscaler.event_summarizer.summary()
    assert "Removing 1 nodes of type worker (idle)." not in events, events
    assert "Adding 1 node(s) of type worker." in events, events
    autoscaler.update()
    self.waitForNodes(2, tag_filters=WORKER_FILTER)
def testRecoverUnhealthyWorkersWithNodeSpecificDocker(self):
    """Test that recovery uses node-specific docker configuration.

    This test verifies that when a worker node becomes unhealthy and needs
    recovery, the autoscaler uses the node-specific docker configuration
    rather than the global docker configuration.
    """
    config = copy.deepcopy(SMALL_CLUSTER)
    # Top-level global docker config (should be overridden by node-specific config)
    config["docker"]["image"] = "global-image:latest"
    config["docker"]["worker_image"] = "global-worker-image:latest"
    # Add node-specific docker configuration
    config["available_node_types"]["worker"]["docker"] = {
        "worker_image": "node-specific-worker-image:latest",
        "worker_run_options": ["--gpus=all"],
    }
    config["available_node_types"]["worker"]["min_workers"] = 1
    config_path = self.write_config(config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    runner.respond_to_call("json .Config.Env", ["[]" for i in range(2)])
    lm = LoadMetrics()
    mock_metrics = Mock()
    # Create head node
    self.provider.create_node(
        {},
        {
            TAG_RAY_NODE_KIND: NODE_KIND_HEAD,
            TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE,
            TAG_RAY_USER_NODE_TYPE: "head",
        },
        1,
    )
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        MockGcsClient(),
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
        prom_metrics=mock_metrics,
    )
    # Initial bring-up of the single worker.
    autoscaler.update()
    self.waitForNodes(1, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(
        1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    # Wait for initial updaters to finish
    self.waitForUpdatersToFinish(autoscaler)
    autoscaler.update()
    # Ensure initial updaters are cleared after they finish
    assert not autoscaler.updaters
    # Clear command history before triggering recovery to ensure we only check
    # commands from the recovery process, not the initial node creation
    runner.clear_history()
    # Trigger node recovery by setting the last heartbeat time to be before the timeout
    worker_ip = "172.0.0.1"  # Expected IP of the first worker node
    lm.last_heartbeat_time_by_ip[worker_ip] = (
        time.time() - AUTOSCALER_HEARTBEAT_TIMEOUT_S - 1
    )
    autoscaler.update()
    # Wait for recovery to start and finish
    self.waitFor(lambda: len(autoscaler.updaters) > 0, num_retries=150)
    self.waitForUpdatersToFinish(autoscaler)
    # Verify that recovery has started by checking multiple indicators:
    # 1. Check that an updater was created for recovery
    assert len(autoscaler.updaters) == 1
    node_id = list(autoscaler.updaters.keys())[0]
    updater = autoscaler.updaters[node_id]
    # 2. Verify the updater is marked as a recovery updater
    assert updater.for_recovery is True
    # 3. Verify the recovery event was logged
    events = autoscaler.event_summarizer.summary()
    assert any(
        "Restarting" in event and "lost contact with raylet" in event
        for event in events
    )
    # 4. Verify that the recovery process uses the node-specific docker image
    # instead of the global docker image
    runner.assert_has_call(worker_ip, pattern="node-specific-worker-image:latest")
    # 5. Verify that the recovery process uses the node-specific run options
    runner.assert_has_call(worker_ip, pattern="--gpus=all")
    # 6. Verify that the recovery updater has the correct docker config
    # by checking that it uses the node-specific docker configuration
    assert (
        updater.docker_config.get("worker_image")
        == "node-specific-worker-image:latest"
    )
    assert "--gpus=all" in updater.docker_config.get("worker_run_options")
def test_node_becomes_inactive_after_heartbeat_timeout(self):
    """A worker whose heartbeat is older than the timeout shows as failed.

    After back-dating the worker's last heartbeat past
    AUTOSCALER_HEARTBEAT_TIMEOUT_S, the autoscaler summary must list it
    under failed_nodes.
    """
    cluster_config = copy.deepcopy(MOCK_DEFAULT_CONFIG)
    cluster_config["available_node_types"]["ray.worker.default"]["min_workers"] = 1
    cluster_config["worker_start_ray_commands"] = ["ray_start_cmd"]
    # Only the worker node type is needed; no docker config.
    cluster_config["head_node_type"] = ["ray.worker.default"]
    del cluster_config["available_node_types"]["ray.head.default"]
    del cluster_config["docker"]
    config_path = self.write_config(cluster_config)
    self.provider = MockProvider()
    runner = MockProcessRunner()
    lm = LoadMetrics()
    mock_gcs_client = MockGcsClient()
    autoscaler = MockAutoscaler(
        config_path,
        lm,
        mock_gcs_client,
        max_failures=0,
        process_runner=runner,
        update_interval_s=0,
    )
    # Bring the single worker fully up to date.
    autoscaler.update()
    self.waitForNodes(1, tag_filters=WORKER_FILTER)
    self.provider.finish_starting_nodes()
    autoscaler.update()
    self.waitForNodes(
        1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    self.waitForUpdatersToFinish(autoscaler)
    autoscaler.update()
    assert not autoscaler.updaters
    worker_ip = self.provider.non_terminated_node_ips(WORKER_FILTER)[0]
    # Back-date the heartbeat so the node is considered unhealthy.
    now = time.time()
    past_heartbeat = now - AUTOSCALER_HEARTBEAT_TIMEOUT_S - 1
    lm.last_heartbeat_time_by_ip[worker_ip] = past_heartbeat
    autoscaler.update()
    self.waitForNodes(
        1, tag_filters={TAG_RAY_NODE_STATUS: STATUS_UP_TO_DATE, **WORKER_FILTER}
    )
    events = autoscaler.summary()
    assert events.failed_nodes == [("172.0.0.0", "ray.worker.default")]
def test_import():
    """This test ensures that all the autoscaler imports work as expected to
    prevent errors such as #19840.

    Each statement is intentionally a bare import/attribute access; the
    test passes iff none of them raises.
    """
    import ray  # noqa

    ray.autoscaler.sdk.request_resources  # noqa
    import ray.autoscaler  # noqa
    import ray.autoscaler.sdk  # noqa
    from ray.autoscaler.sdk import request_resources  # noqa
def test_prom_null_metric_inc_fix():
    """Verify the bug fix https://github.com/ray-project/ray/pull/27532
    for NullMetric's signature.
    Check that NullMetric can be called with or without an argument.
    """
    # Both call shapes must be accepted without raising.
    NullMetric().inc()
    NullMetric().inc(5)
if __name__ == "__main__":
    # Allow running this test module directly; forward pytest's exit code.
    sys.exit(pytest.main(["-sv", __file__]))
| AutoscalingTest |
python | kamyu104__LeetCode-Solutions | Python/longest-substring-with-at-most-two-distinct-characters.py | {
"start": 29,
"end": 724
} | class ____(object):
# @param s, a string
# @return an integer
def lengthOfLongestSubstringTwoDistinct(self, s):
longest, start, distinct_count, visited = 0, 0, 0, [0 for _ in xrange(256)]
for i, char in enumerate(s):
if visited[ord(char)] == 0:
distinct_count += 1
visited[ord(char)] += 1
while distinct_count > 2:
visited[ord(s[start])] -= 1
if visited[ord(s[start])] == 0:
distinct_count -= 1
start += 1
longest = max(longest, i - start + 1)
return longest
# Time: O(n)
# Space: O(1)
from collections import Counter
| Solution |
python | getsentry__sentry | fixtures/page_objects/transaction_summary.py | {
"start": 29,
"end": 252
} | class ____(BasePage):
def wait_until_loaded(self):
self.browser.wait_until_not('[data-test-id="loading-indicator"]')
self.browser.wait_until_not('[data-test-id="loading-placeholder"]')
| TransactionSummaryPage |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/dialects/mysql/json.py | {
"start": 541,
"end": 1250
} | class ____(sqltypes.JSON):
"""MySQL JSON type.
MySQL supports JSON as of version 5.7.
MariaDB supports JSON (as an alias for LONGTEXT) as of version 10.2.
:class:`_mysql.JSON` is used automatically whenever the base
:class:`_types.JSON` datatype is used against a MySQL or MariaDB backend.
.. seealso::
:class:`_types.JSON` - main documentation for the generic
cross-platform JSON datatype.
The :class:`.mysql.JSON` type supports persistence of JSON values
as well as the core index operations provided by :class:`_types.JSON`
datatype, by adapting the operations to render the ``JSON_EXTRACT``
function at the database level.
"""
pass
| JSON |
python | tensorflow__tensorflow | tensorflow/python/framework/convert_to_constants_test.py | {
"start": 19419,
"end": 33710
} | class ____(test.TestCase):
def _freezeModel(self, func):
"""Freezes the function.
Args:
func: Function.
Returns:
root: AutoTrackable object with original ConcreteFunction.
output_func: frozen ConcreteFunction.
"""
root = autotrackable.AutoTrackable()
root.f = func
input_func = root.f.get_concrete_function()
output_func = convert_to_constants.convert_var_to_const_function_in_v1(
input_func, lower_control_flow=False)
return root, output_func
def _testConvertedFunction(self, sess, obj, func, converted_concrete_func,
input_data):
# Ensure the converted graph has no variables and no function calls.
constant_graph_def = converted_concrete_func.graph.as_graph_def()
self.assertEqual(0, get_num_variables(constant_graph_def))
self.assertFalse(has_stateful_partitioned_call_op(constant_graph_def))
# Check that the converted ConcreteFunction produces the same result as the
# original Function.
expected_value = nest.flatten(func(**input_data))
actual_value = nest.flatten(converted_concrete_func(**input_data))
for expected, actual in zip(expected_value, actual_value):
np.testing.assert_almost_equal(sess.run(expected), sess.run(actual))
# Ensure the shape is retained.
for tensor in converted_concrete_func.inputs:
actual_shape = input_data[tensor.name.split(":")[0]].shape
self.assertEqual(tensor.shape, actual_shape)
# Save the converted ConcreteFunction as a signature.
save_dir = os.path.join(self.get_temp_dir(), "frozen_saved_model")
root = autotrackable.AutoTrackable()
root.f = converted_concrete_func
save(root, save_dir, {"mykey": converted_concrete_func})
# Load it back and make sure it works.
loaded_obj = load(save_dir)
actual_value = nest.flatten(loaded_obj.signatures["mykey"](**input_data))
for expected, actual in zip(expected_value, actual_value):
np.testing.assert_almost_equal(sess.run(expected), sess.run(actual))
def testRaiseErrorInEagerMode(self):
"""Test the raised exception in Eager mode."""
input_data = {"x": constant_op.constant(1., shape=[1])}
root = autotrackable.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
input_func = root.f.get_concrete_function(input_data["x"])
with self.assertRaisesRegex(RuntimeError,
"must be carried out in a Session"):
convert_to_constants.convert_var_to_const_function_in_v1(
input_func)
def testConvertVariables(self):
"""Test a basic model with Variables."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {"x": constant_op.constant(1., shape=[1])}
root = autotrackable.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
input_func = root.f.get_concrete_function(input_data["x"])
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(2, get_num_variables(variable_graph_def))
output_func = convert_to_constants.convert_var_to_const_function_in_v1(
input_func)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
def testConvertVariablesWithAssignments(self):
"""Test a basic model with Variables and assignment ops."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {"x": constant_op.constant(1., shape=[1])}
root = autotrackable.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
input_func = root.f.get_concrete_function(input_data["x"])
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(2, get_num_variables(variable_graph_def))
assign_op_1 = root.v1.assign(1.5)
assign_op_2 = root.v2.assign(3.0)
assign_op_3 = root.v1.assign(4.0)
ops.get_default_graph().add_to_collection(
convert_to_constants.VAR_ASSIGN_COLLECTION, assign_op_1)
ops.get_default_graph().add_to_collection(
convert_to_constants.VAR_ASSIGN_COLLECTION, assign_op_2)
ops.get_default_graph().add_to_collection(
convert_to_constants.VAR_ASSIGN_COLLECTION, assign_op_3)
output_func = convert_to_constants.convert_var_to_const_function_in_v1(
input_func)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
def testConstSavedModel(self):
"""Test a basic model with constants while saving/loading the SavedModel."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {"x": constant_op.constant(1., shape=[1])}
root = autotrackable.AutoTrackable()
root.f = def_function.function(lambda x: 2. * x)
to_save = root.f.get_concrete_function(input_data["x"])
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save(root, save_dir, to_save)
saved_model = load(save_dir)
input_func = saved_model.signatures["serving_default"]
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(0, get_num_variables(variable_graph_def))
self.assertTrue(variable_graph_def.library.function)
output_func = convert_to_constants.convert_var_to_const_function_in_v1(
input_func)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
def testVariableSavedModel(self):
"""Test a basic model with Variables with saving/loading the SavedModel."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {"x": constant_op.constant(1., shape=[1])}
root = autotrackable.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
to_save = root.f.get_concrete_function(input_data["x"])
sess.run(variables.global_variables_initializer())
save_dir = os.path.join(self.get_temp_dir(), "saved_model")
save(root, save_dir, to_save)
saved_model = load(save_dir)
input_func = saved_model.signatures["serving_default"]
variable_graph_def = input_func.graph.as_graph_def()
self.assertTrue(has_stateful_partitioned_call_op(variable_graph_def))
output_func = convert_to_constants.convert_var_to_const_function_in_v1(
input_func)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
def testMultiFunctionModel(self):
"""Test a basic model with multiple tf.functions."""
class BasicModel(autotrackable.AutoTrackable):
def __init__(self):
self.y = None
self.z = None
@def_function.function
def add(self, x):
if self.y is None:
self.y = variables.Variable(2.)
return x + self.y
@def_function.function
def sub(self, x):
if self.z is None:
self.z = variables.Variable(3.)
return x - self.z
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {"x": constant_op.constant(1., shape=[1])}
root = BasicModel()
input_func = root.add.get_concrete_function(input_data["x"])
variable_graph_def = input_func.graph.as_graph_def()
self.assertEqual(1, get_num_variables(variable_graph_def))
output_func = convert_to_constants.convert_var_to_const_function_in_v1(
input_func)
self._testConvertedFunction(sess, root, root.add, output_func,
input_data)
def testIf(self):
"""Test a model with the If op."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {
"x": constant_op.constant([1., 2.], shape=[1, 2]),
"b": constant_op.constant(True)
}
weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]],
dtype=dtypes.float32)
def true_fn(x):
return math_ops.matmul(x, weights)
def false_fn(x):
return math_ops.add(x, weights)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[1, 2], dtype=dtypes.float32),
tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)
])
def model(x, b):
return cond.cond(
b, true_fn=lambda: true_fn(x), false_fn=lambda: false_fn(x))
root, output_func = self._freezeModel(model)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
def testStatelessIf(self):
"""Test a model with the StatelessIf op."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {"b": constant_op.constant(True)}
x = constant_op.constant([1., 2.], shape=[1, 2], name="x")
def true_fn():
return x
def false_fn():
return x + 2
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(), dtype=dtypes.bool)
])
def model(b):
return cond_v2.cond_v2(b, true_fn, false_fn)
root, output_func = self._freezeModel(model)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
def testStaticRnn(self):
"""Test a StaticRnn containing If ops."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {
"x":
constant_op.constant(
np.array(
np.random.random_sample((3, 10)), dtype=np.float32))
}
cell = rnn_cell_impl.LSTMCell(10)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[3, 10], dtype=dtypes.float32)
])
def model(x):
seq = array_ops.split(x, 3, 0)
return rnn.static_rnn(
cell, seq, dtype=dtypes.float32, sequence_length=[1])
root, output_func = self._freezeModel(model)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
def testWhile(self):
"""Test a While loop."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {"x": constant_op.constant([1., 2., 3., 4.], shape=[2, 2])}
weights = variables.Variable([[0.1, 0.2], [0.3, 0.4]],
dtype=dtypes.float32)
def condition(x):
return math_ops.reduce_sum(x) < 100
def body(x):
return math_ops.add(x, weights)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[2, 2], dtype=dtypes.float32)
])
def model(x):
return while_loop.while_loop(condition, body, [x])
root, output_func = self._freezeModel(model)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
def testStatelessWhile(self):
"""Test a StatelessWhile loop."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {"x": constant_op.constant(2.)}
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=(), dtype=dtypes.float32)
])
def model(x):
return while_v2.while_loop(
lambda v: v < 4.,
lambda v: v * v, [x],
return_same_structure=False,
name="while_1") # x**2
root, output_func = self._freezeModel(model)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
def testDynamicRnn(self):
"""Test a DynamicRnn containing While loops."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {
"x":
constant_op.constant(
np.array(
np.random.random_sample((3, 10, 10)), dtype=np.float32))
}
cell = rnn_cell_impl.LSTMCell(10)
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[3, 10, 10], dtype=dtypes.float32)
])
def model(x):
return rnn.dynamic_rnn(cell, x, dtype=dtypes.float32)
root, output_func = self._freezeModel(model)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
@test_util.disable_tfrt("b/180451239")
def testSwitchCase(self):
"""Test a switch_case statement."""
with ops.Graph().as_default():
with session_lib.Session() as sess:
input_data = {
"i":
constant_op.constant(np.random.randint(0, 3, dtype=np.int32)),
"x":
constant_op.constant(
np.asarray(
np.random.random_sample((10, 3)), dtype=np.float32)),
}
w0 = variables.Variable(
np.random.random_sample((3, 4)), dtype=np.float32)
w1 = variables.Variable(
np.random.random_sample((3, 4)), dtype=np.float32)
w2 = variables.Variable(np.random.random_sample((4,)), dtype=np.float32)
def branch0(x):
return math_ops.matmul(x, w0)
def branch1(x):
return math_ops.matmul(x, w1)
def branch2(x):
x = array_ops.pad(x, [[0, 0], [0, 1]])
return x + w2
@def_function.function(input_signature=[
tensor_spec.TensorSpec(shape=[], dtype=dtypes.int32),
tensor_spec.TensorSpec(shape=[10, 3], dtype=dtypes.float32),
])
def model(i, x):
return control_flow_switch_case.switch_case(
i, [lambda: branch0(x), lambda: branch1(x), lambda: branch2(x)])
root, output_func = self._freezeModel(model)
self._testConvertedFunction(sess, root, root.f, output_func, input_data)
| ConvertVariablesToConstantsV2SessionTest |
python | spack__spack | lib/spack/spack/binary_distribution.py | {
"start": 98993,
"end": 100632
} | class ____(IndexFetcher):
"""Fetcher for index.json, using ETags headers as cache invalidation strategy"""
def __init__(self, url, etag, urlopen=web_util.urlopen):
self.url = url
self.etag = etag
self.urlopen = urlopen
def conditional_fetch(self) -> FetchIndexResult:
# Just do a conditional fetch immediately
url = url_util.join(self.url, "build_cache", spack.database.INDEX_JSON_FILE)
headers = {"User-Agent": web_util.SPACK_USER_AGENT, "If-None-Match": f'"{self.etag}"'}
try:
response = self.urlopen(urllib.request.Request(url, headers=headers))
except urllib.error.HTTPError as e:
if e.getcode() == 304:
# Not modified; that means fresh.
return FetchIndexResult(etag=None, hash=None, data=None, fresh=True)
raise FetchIndexError(f"Could not fetch index {url}", e) from e
except OSError as e: # URLError, socket.timeout, etc.
raise FetchIndexError(f"Could not fetch index {url}", e) from e
try:
result = codecs.getreader("utf-8")(response).read()
except (ValueError, OSError) as e:
raise FetchIndexError(f"Remote index {url} is invalid", e) from e
warn_v2_layout(self.url, "Fetching an index")
headers = response.headers
etag_header_value = headers.get("Etag", None) or headers.get("etag", None)
return FetchIndexResult(
etag=web_util.parse_etag(etag_header_value),
hash=compute_hash(result),
data=result,
fresh=False,
)
| EtagIndexFetcherV2 |
python | openai__openai-python | src/openai/_models.py | {
"start": 24308,
"end": 30717
} | class ____:
field_name: str
"""The name of the discriminator field in the variant class, e.g.
```py
class Foo(BaseModel):
type: Literal['foo']
```
Will result in field_name='type'
"""
field_alias_from: str | None
"""The name of the discriminator field in the API response, e.g.
```py
class Foo(BaseModel):
type: Literal['foo'] = Field(alias='type_from_api')
```
Will result in field_alias_from='type_from_api'
"""
mapping: dict[str, type]
"""Mapping of discriminator value to variant type, e.g.
{'foo': FooVariant, 'bar': BarVariant}
"""
def __init__(
self,
*,
mapping: dict[str, type],
discriminator_field: str,
discriminator_alias: str | None,
) -> None:
self.mapping = mapping
self.field_name = discriminator_field
self.field_alias_from = discriminator_alias
def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None:
cached = DISCRIMINATOR_CACHE.get(union)
if cached is not None:
return cached
discriminator_field_name: str | None = None
for annotation in meta_annotations:
if isinstance(annotation, PropertyInfo) and annotation.discriminator is not None:
discriminator_field_name = annotation.discriminator
break
if not discriminator_field_name:
return None
mapping: dict[str, type] = {}
discriminator_alias: str | None = None
for variant in get_args(union):
variant = strip_annotated_type(variant)
if is_basemodel_type(variant):
if PYDANTIC_V1:
field_info = cast("dict[str, FieldInfo]", variant.__fields__).get(discriminator_field_name) # pyright: ignore[reportDeprecated, reportUnnecessaryCast]
if not field_info:
continue
# Note: if one variant defines an alias then they all should
discriminator_alias = field_info.alias
if (annotation := getattr(field_info, "annotation", None)) and is_literal_type(annotation):
for entry in get_args(annotation):
if isinstance(entry, str):
mapping[entry] = variant
else:
field = _extract_field_schema_pv2(variant, discriminator_field_name)
if not field:
continue
# Note: if one variant defines an alias then they all should
discriminator_alias = field.get("serialization_alias")
field_schema = field["schema"]
if field_schema["type"] == "literal":
for entry in cast("LiteralSchema", field_schema)["expected"]:
if isinstance(entry, str):
mapping[entry] = variant
if not mapping:
return None
details = DiscriminatorDetails(
mapping=mapping,
discriminator_field=discriminator_field_name,
discriminator_alias=discriminator_alias,
)
DISCRIMINATOR_CACHE.setdefault(union, details)
return details
def _extract_field_schema_pv2(model: type[BaseModel], field_name: str) -> ModelField | None:
schema = model.__pydantic_core_schema__
if schema["type"] == "definitions":
schema = schema["schema"]
if schema["type"] != "model":
return None
schema = cast("ModelSchema", schema)
fields_schema = schema["schema"]
if fields_schema["type"] != "model-fields":
return None
fields_schema = cast("ModelFieldsSchema", fields_schema)
field = fields_schema["fields"].get(field_name)
if not field:
return None
return cast("ModelField", field) # pyright: ignore[reportUnnecessaryCast]
def validate_type(*, type_: type[_T], value: object) -> _T:
"""Strict validation that the given value matches the expected type"""
if inspect.isclass(type_) and issubclass(type_, pydantic.BaseModel):
return cast(_T, parse_obj(type_, value))
return cast(_T, _validate_non_model_type(type_=type_, value=value))
def set_pydantic_config(typ: Any, config: pydantic.ConfigDict) -> None:
"""Add a pydantic config for the given type.
Note: this is a no-op on Pydantic v1.
"""
setattr(typ, "__pydantic_config__", config) # noqa: B010
def add_request_id(obj: BaseModel, request_id: str | None) -> None:
obj._request_id = request_id
# in Pydantic v1, using setattr like we do above causes the attribute
# to be included when serializing the model which we don't want in this
# case so we need to explicitly exclude it
if PYDANTIC_V1:
try:
exclude_fields = obj.__exclude_fields__ # type: ignore
except AttributeError:
cast(Any, obj).__exclude_fields__ = {"_request_id", "__exclude_fields__"}
else:
cast(Any, obj).__exclude_fields__ = {*(exclude_fields or {}), "_request_id", "__exclude_fields__"}
# our use of subclassing here causes weirdness for type checkers,
# so we just pretend that we don't subclass
if TYPE_CHECKING:
GenericModel = BaseModel
else:
class GenericModel(BaseGenericModel, BaseModel):
pass
if not PYDANTIC_V1:
from pydantic import TypeAdapter as _TypeAdapter
_CachedTypeAdapter = cast("TypeAdapter[object]", lru_cache(maxsize=None)(_TypeAdapter))
if TYPE_CHECKING:
from pydantic import TypeAdapter
else:
TypeAdapter = _CachedTypeAdapter
def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
return TypeAdapter(type_).validate_python(value)
elif not TYPE_CHECKING: # TODO: condition is weird
class RootModel(GenericModel, Generic[_T]):
"""Used as a placeholder to easily convert runtime types to a Pydantic format
to provide validation.
For example:
```py
validated = RootModel[int](__root__="5").__root__
# validated: 5
```
"""
__root__: _T
def _validate_non_model_type(*, type_: type[_T], value: object) -> _T:
model = _create_pydantic_model(type_).validate(value)
return cast(_T, model.__root__)
def _create_pydantic_model(type_: _T) -> Type[RootModel[_T]]:
return RootModel[type_] # type: ignore
| DiscriminatorDetails |
python | pallets__werkzeug | src/werkzeug/routing/rules.py | {
"start": 5719,
"end": 6563
} | class ____(RuleFactory):
"""Prefixes all endpoints (which must be strings for this factory) with
another string. This can be useful for sub applications::
url_map = Map([
Rule('/', endpoint='index'),
EndpointPrefix('blog/', [Submount('/blog', [
Rule('/', endpoint='index'),
Rule('/entry/<entry_slug>', endpoint='show')
])])
])
"""
def __init__(self, prefix: str, rules: t.Iterable[RuleFactory]) -> None:
self.prefix = prefix
self.rules = rules
def get_rules(self, map: Map) -> t.Iterator[Rule]:
for rulefactory in self.rules:
for rule in rulefactory.get_rules(map):
rule = rule.empty()
rule.endpoint = self.prefix + rule.endpoint
yield rule
| EndpointPrefix |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/utils/utils.py | {
"start": 33214,
"end": 40234
} | class ____(InfoJsonEncodable):
"""Defines encoding TaskGroup object to JSON."""
renames = {
"_group_id": "group_id",
}
includes = [
"downstream_group_ids",
"downstream_task_ids",
"prefix_group_id",
"tooltip",
"upstream_group_ids",
"upstream_task_ids",
]
def get_airflow_dag_run_facet(dag_run: DagRun) -> dict[str, RunFacet]:
if not dag_run.dag:
return {}
return {
"airflowDagRun": AirflowDagRunFacet(
dag=DagInfo(dag_run.dag),
dagRun=DagRunInfo(dag_run),
)
}
@conf.cache
def _get_all_packages_installed() -> dict[str, str]:
"""
Retrieve a dictionary of all installed packages and their versions.
This operation involves scanning the system's installed packages, which can be a heavy operation.
It is recommended to cache the result to avoid repeated, expensive lookups.
"""
return {dist.metadata["Name"]: dist.version for dist in metadata.distributions()}
def get_processing_engine_facet() -> dict[str, processing_engine_run.ProcessingEngineRunFacet]:
from openlineage.client.facet_v2 import processing_engine_run
return {
"processing_engine": processing_engine_run.ProcessingEngineRunFacet(
version=AIRFLOW_VERSION,
name="Airflow",
openlineageAdapterVersion=OPENLINEAGE_PROVIDER_VERSION,
)
}
def get_airflow_debug_facet() -> dict[str, AirflowDebugRunFacet]:
if not conf.debug_mode():
return {}
log.warning("OpenLineage debug_mode is enabled. Be aware that this may log and emit extensive details.")
return {
"debug": AirflowDebugRunFacet(
packages=_get_all_packages_installed(),
)
}
def get_airflow_run_facet(
dag_run: DagRun,
dag: DAG,
task_instance: TaskInstance,
task: BaseOperator,
task_uuid: str,
) -> dict[str, AirflowRunFacet]:
return {
"airflow": AirflowRunFacet(
dag=DagInfo(dag),
dagRun=DagRunInfo(dag_run),
taskInstance=TaskInstanceInfo(task_instance),
task=TaskInfoComplete(task) if conf.include_full_task_info() else TaskInfo(task),
taskUuid=task_uuid,
)
}
def get_airflow_job_facet(dag_run: DagRun) -> dict[str, AirflowJobFacet]:
if not dag_run.dag:
return {}
return {
"airflow": AirflowJobFacet(
taskTree={}, # caused OOM errors, to be removed, see #41587
taskGroups=_get_task_groups_details(dag_run.dag),
tasks=_get_tasks_details(dag_run.dag),
)
}
def get_airflow_state_run_facet(
dag_id: str, run_id: str, task_ids: list[str], dag_run_state: DagRunState
) -> dict[str, AirflowStateRunFacet]:
tis = DagRun.fetch_task_instances(dag_id=dag_id, run_id=run_id, task_ids=task_ids)
def get_task_duration(ti):
if ti.duration is not None:
return ti.duration
if ti.end_date is not None and ti.start_date is not None:
return (ti.end_date - ti.start_date).total_seconds()
# Fallback to 0.0 for tasks with missing timestamps (e.g., skipped/terminated tasks)
return 0.0
return {
"airflowState": AirflowStateRunFacet(
dagRunState=dag_run_state,
tasksState={ti.task_id: ti.state for ti in tis},
tasksDuration={ti.task_id: get_task_duration(ti) for ti in tis},
)
}
def _get_tasks_details(dag: DAG | SerializedDAG) -> dict:
tasks = {
single_task.task_id: {
"operator": get_fully_qualified_class_name(single_task),
"task_group": single_task.task_group.group_id if single_task.task_group else None,
"emits_ol_events": _emits_ol_events(single_task),
"ui_color": single_task.ui_color,
"ui_fgcolor": single_task.ui_fgcolor,
"ui_label": single_task.label,
"is_setup": single_task.is_setup,
"is_teardown": single_task.is_teardown,
"downstream_task_ids": sorted(single_task.downstream_task_ids),
}
for single_task in sorted(dag.tasks, key=lambda x: x.task_id)
}
return tasks
def _get_task_groups_details(dag: DAG | SerializedDAG) -> dict:
return {
tg_id: {
"parent_group": tg.parent_group.group_id,
"ui_color": tg.ui_color,
"ui_fgcolor": tg.ui_fgcolor,
"ui_label": tg.label,
}
for tg_id, tg in dag.task_group_dict.items()
}
def _emits_ol_events(task: AnyOperator) -> bool:
config_selective_enabled = is_selective_lineage_enabled(task)
config_disabled_for_operators = is_operator_disabled(task)
is_task_schedulable_method = getattr(TaskInstance, "is_task_schedulable", None) # Added in 3.2.0 #56039
if is_task_schedulable_method and callable(is_task_schedulable_method):
is_skipped_as_empty_operator = not is_task_schedulable_method(task)
else:
# For older Airflow versions, re-create Airflow core internal logic as
# empty operators without callbacks/outlets are skipped for optimization by Airflow
# in airflow.models.taskinstance.TaskInstance._schedule_downstream_tasks or
# airflow.models.dagrun.DagRun.schedule_tis, depending on Airflow version
is_skipped_as_empty_operator = all(
(
task.inherits_from_empty_operator,
not getattr(task, "on_execute_callback", None),
not getattr(task, "on_success_callback", None),
not task.outlets,
not (task.inlets and get_base_airflow_version_tuple() >= (3, 0, 2)), # Added in 3.0.2 #50773
not (
getattr(task, "has_on_execute_callback", None) # Added in 3.1.0 #54569
and get_base_airflow_version_tuple() >= (3, 1, 0)
),
not (
getattr(task, "has_on_success_callback", None) # Added in 3.1.0 #54569
and get_base_airflow_version_tuple() >= (3, 1, 0)
),
)
)
emits_ol_events = all(
(
config_selective_enabled,
not config_disabled_for_operators,
not is_skipped_as_empty_operator,
)
)
return emits_ol_events
def get_unknown_source_attribute_run_facet(task: BaseOperator, name: str | None = None):
if not name:
name = get_operator_class(task).__name__
log.debug(
"UnknownOperatorAttributeRunFacet is deprecated and will be removed. "
"Use information from AirflowRunFacet instead."
)
return {
"unknownSourceAttribute": attrs.asdict(
UnknownOperatorAttributeRunFacet(
unknownItems=[
UnknownOperatorInstance(
name=name,
properties=TaskInfo(task),
)
]
)
)
}
| TaskGroupInfo |
python | pytorch__pytorch | test/distributed/tensor/test_dtensor_compile.py | {
"start": 1874,
"end": 3144
} | class ____:
"""
Tuple-like values that are treated as leaves of a PyTree.
"""
def __init__(self, *values):
self._values = tuple(values)
def __repr__(self):
pr = repr(self._values)[1:-1]
return f"{type(self).__name__}({pr})"
def __getitem__(self, i):
return self._values[i]
def __iter__(self):
return iter(self._values)
def __len__(self):
return len(self._values)
def __eq__(self, other: object) -> bool:
if isinstance(other, self.__class__):
return self._values == other._values
elif isinstance(other, tuple):
return self._values == other
return False
def __hash__(self) -> int:
return hash(self._values)
def __add__(self, other):
if isinstance(other, (self.__class__, tuple)):
return self.__class__(*self, *other)
raise NotImplementedError(type(other))
def __radd__(self, other):
if isinstance(other, (self.__class__, tuple)):
return self.__class__(*other, *self)
raise NotImplementedError(type(other))
def index(self, value):
return self._values.index(value)
def count(self, value):
return self._values.count(value)
| PytreeTuple |
python | django__django | tests/migrations/models.py | {
"start": 205,
"end": 561
} | class ____(models.Model):
title = models.CharField("ÚÑÍ¢ÓÐÉ", max_length=20, default="“Ðjáñgó”")
class Meta:
# Disable auto loading of this model as we load it on our own
apps = Apps()
verbose_name = "úñí©óðé µóðéø"
verbose_name_plural = "úñí©óðé µóðéøß"
def __str__(self):
return self.title
| UnicodeModel |
python | getsentry__sentry | tests/sentry/sentry_apps/test_webhooks.py | {
"start": 335,
"end": 23159
} | class ____(TestCase):
def setUp(self) -> None:
self.organization = self.create_organization()
self.project = self.create_project(organization=self.organization)
# Create sentry apps with different event subscriptions
self.sentry_app_1 = self.create_sentry_app(
name="App1",
organization=self.organization,
events=["seer.root_cause_started", "issue.created"],
)
self.sentry_app_2 = self.create_sentry_app(
name="App2", organization=self.organization, events=["error.created", "issue.assigned"]
)
self.sentry_app_3 = self.create_sentry_app(
name="App3", organization=self.organization, events=["metric_alert.open"]
)
# Create installations
self.installation_1 = self.create_sentry_app_installation(
organization=self.organization, slug=self.sentry_app_1.slug
)
self.installation_2 = self.create_sentry_app_installation(
organization=self.organization, slug=self.sentry_app_2.slug
)
self.installation_3 = self.create_sentry_app_installation(
organization=self.organization, slug=self.sentry_app_3.slug
)
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_valid_event_to_relevant_installations(
self, mock_installations, mock_send_webhook
):
"""Test that webhooks are sent to relevant installations for valid events."""
mock_installations.return_value = [
self.installation_1
] # Only installation_1 subscribes to issue events
payload = {"test": "data"}
broadcast_webhooks_for_organization(
resource_name="issue",
event_name="created",
organization_id=self.organization.id,
payload=payload,
)
# Verify installations were fetched
mock_installations.assert_called_once_with(organization_id=self.organization.id)
# Verify webhook task was queued for installation_1 (subscribed to issue events)
mock_send_webhook.delay.assert_called_once_with(
self.installation_1.id, "issue.created", payload
)
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_sends_to_multiple_relevant_installations(
self, mock_installations, mock_send_webhook
):
"""Test that webhooks are sent to all relevant installations."""
# Both apps subscribe to issue events
sentry_app_4 = self.create_sentry_app(
name="App4", organization=self.organization, events=["issue.created", "issue.assigned"]
)
installation_4 = self.create_sentry_app_installation(
organization=self.organization, slug=sentry_app_4.slug
)
mock_installations.return_value = [self.installation_1, installation_4]
payload = {"event": "data"}
broadcast_webhooks_for_organization(
resource_name="issue",
event_name="created",
organization_id=self.organization.id,
payload=payload,
)
# Verify both installations had webhook tasks queued
assert mock_send_webhook.delay.call_count == 2
mock_send_webhook.delay.assert_any_call(self.installation_1.id, "issue.created", payload)
mock_send_webhook.delay.assert_any_call(installation_4.id, "issue.created", payload)
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
@patch("sentry.sentry_apps.tasks.sentry_apps.logger")
def test_broadcast_no_relevant_installations(
self, mock_logger, mock_installations, mock_send_webhook
):
"""Test that no webhooks are sent when no installations subscribe to the event."""
mock_installations.return_value = [
self.installation_3
] # Only subscribes to metric_alert events
payload = {"event": "data"}
broadcast_webhooks_for_organization(
resource_name="issue",
event_name="created",
organization_id=self.organization.id,
payload=payload,
)
# Verify no webhook tasks were queued
mock_send_webhook.delay.assert_not_called()
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_invalid_event_type_raises_error(self, mock_installations):
"""Test that invalid event types raise SentryAppSentryError."""
from sentry.sentry_apps.utils.errors import SentryAppSentryError
mock_installations.return_value = []
payload = {"event": "data"}
# Invalid event types should raise SentryAppSentryError
with pytest.raises(SentryAppSentryError) as exc_info:
broadcast_webhooks_for_organization(
resource_name="invalid_resource",
event_name="invalid_event",
organization_id=self.organization.id,
payload=payload,
)
assert "Invalid event type: invalid_resource.invalid_event" in str(exc_info.value.message)
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_filters_by_consolidated_events(self, mock_installations, mock_send_webhook):
"""Test that installations are filtered based on consolidated events."""
# Create an app that doesn't subscribe to the event resource
sentry_app_5 = self.create_sentry_app(
name="App5",
organization=self.organization,
events=["metric_alert.open"], # Different resource
)
installation_5 = self.create_sentry_app_installation(
organization=self.organization, slug=sentry_app_5.slug
)
mock_installations.return_value = [self.installation_1, installation_5]
payload = {"event": "data"}
broadcast_webhooks_for_organization(
resource_name="issue",
event_name="created",
organization_id=self.organization.id,
payload=payload,
)
# Only installation_1 should have webhook task queued (subscribes to issue events)
mock_send_webhook.delay.assert_called_once_with(
self.installation_1.id, "issue.created", payload
)
def test_valid_event_types_accepted(self):
    """Every valid SentryAppEventType combination parses without error."""
    valid_combinations = (
        ("error", "created"),
        ("issue", "created"),
        ("event_alert", "triggered"),
        ("external_issue", "created"),
        ("external_issue", "linked"),
        ("select_options", "requested"),
        ("alert_rule_action", "requested"),
        ("metric_alert", "open"),
        ("metric_alert", "resolved"),
        ("metric_alert", "critical"),
        ("metric_alert", "warning"),
    )
    for resource, event in valid_combinations:
        # Constructing the enum raises ValueError for unknown values,
        # so simply instantiating it is the assertion.
        SentryAppEventType(f"{resource}.{event}")
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_different_event_types(self, mock_installations, mock_send_webhook):
    """Other valid event types (e.g. error.created) broadcast correctly too."""
    mock_installations.return_value = [self.installation_2]
    data = {"event": "data"}

    # Broadcast an error.created event rather than the usual issue.created.
    broadcast_webhooks_for_organization(
        resource_name="error",
        event_name="created",
        organization_id=self.organization.id,
        payload=data,
    )

    mock_send_webhook.delay.assert_called_with(self.installation_2.id, "error.created", data)
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_empty_installations_list(self, mock_installations, mock_send_webhook):
    """With no installations in the organization, nothing is queued."""
    mock_installations.return_value = []

    broadcast_webhooks_for_organization(
        resource_name="issue",
        event_name="created",
        organization_id=self.organization.id,
        payload={"event": "data"},
    )

    # No installations means no webhook tasks.
    mock_send_webhook.delay.assert_not_called()
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_consolidate_events_integration(self, mock_installations, mock_send_webhook):
    """Installations are filtered through consolidate_events on the app's events."""
    fake_installation = Mock()
    fake_installation.sentry_app.events = ["issue.created", "issue.assigned"]
    mock_installations.return_value = [fake_installation]
    data = {"event": "data"}

    # Stub consolidate_events so the resource-category filtering is observable.
    with patch("sentry.sentry_apps.logic.consolidate_events") as mock_consolidate:
        mock_consolidate.return_value = ["issue"]
        broadcast_webhooks_for_organization(
            resource_name="issue",
            event_name="created",
            organization_id=self.organization.id,
            payload=data,
        )
        # consolidate_events received exactly the installation's subscribed events.
        mock_consolidate.assert_called_once_with(fake_installation.sentry_app.events)
        # "issue" is in the consolidated resources, so a task was queued.
        mock_send_webhook.delay.assert_called_once_with(
            fake_installation.id, "issue.created", data
        )
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_payload_passed_correctly(self, mock_installations, mock_send_webhook):
    """The payload is forwarded to the webhook task without modification."""
    mock_installations.return_value = [self.installation_1]

    # Deeply nested structures must survive the broadcast untouched.
    deeply_nested = {
        "event_id": "12345",
        "timestamp": "2023-01-01T00:00:00Z",
        "issue_data": {
            "id": 67890,
            "title": "Test Issue",
            "metadata": {"type": "error", "value": "Test Error"},
        },
        "nested_list": [1, 2, 3],
        "nested_dict": {"key": "value"},
    }

    broadcast_webhooks_for_organization(
        resource_name="issue",
        event_name="created",
        organization_id=self.organization.id,
        payload=deeply_nested,
    )

    mock_send_webhook.delay.assert_called_once_with(
        self.installation_1.id, "issue.created", deeply_nested
    )
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_different_organization_ids(self, mock_installations, mock_send_webhook):
    """Installations are looked up for whichever organization_id was passed in."""
    other_org = self.create_organization()
    mock_installations.return_value = []

    broadcast_webhooks_for_organization(
        resource_name="issue",
        event_name="created",
        organization_id=other_org.id,
        payload={"event": "data"},
    )

    # The lookup must target the organization given to the task.
    mock_installations.assert_called_once_with(organization_id=other_org.id)
def test_event_type_construction(self):
    """resource.event strings assemble into valid SentryAppEventType values."""
    for resource, event, expected in (
        ("issue", "created", "issue.created"),
        ("error", "created", "error.created"),
        ("metric_alert", "open", "metric_alert.open"),
        ("event_alert", "triggered", "event_alert.triggered"),
    ):
        built = f"{resource}.{event}"
        assert built == expected
        # The constructed string must also be accepted by the enum.
        SentryAppEventType(built)
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_handles_send_webhook_exception(self, mock_installations, mock_send_webhook):
    """Failures raised while queuing the webhook task propagate to the caller."""
    mock_installations.return_value = [self.installation_1]
    mock_send_webhook.delay.side_effect = Exception("Webhook task failed")

    # The task does not swallow errors from .delay().
    with pytest.raises(Exception, match="Webhook task failed"):
        broadcast_webhooks_for_organization(
            resource_name="issue",
            event_name="created",
            organization_id=self.organization.id,
            payload={"event": "data"},
        )
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_with_special_characters_in_payload(
    self, mock_installations, mock_send_webhook
):
    """Unicode and markup-like payload values survive the broadcast untouched."""
    mock_installations.return_value = [self.installation_1]
    tricky_payload = {
        "message": "Test with special chars: äöü αβγ 🚀",
        "code": "console.log('Hello \"World\"');",
        "html": "<script>alert('xss')</script>",
        "unicode": "Iñtërnâtiônàlizætiøn",
    }

    broadcast_webhooks_for_organization(
        resource_name="issue",
        event_name="created",
        organization_id=self.organization.id,
        payload=tricky_payload,
    )

    mock_send_webhook.delay.assert_called_once_with(
        self.installation_1.id, "issue.created", tricky_payload
    )
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_with_large_payload(self, mock_installations, mock_send_webhook):
    """A multi-kilobyte payload is queued exactly as provided."""
    mock_installations.return_value = [self.installation_1]

    blob = "x" * 10000  # 10KB of data
    big_payload = {
        "large_field": blob,
        "array_field": list(range(1000)),
        "nested": {"deep": {"data": blob}},
    }

    broadcast_webhooks_for_organization(
        resource_name="issue",
        event_name="created",
        organization_id=self.organization.id,
        payload=big_payload,
    )

    mock_send_webhook.delay.assert_called_once_with(
        self.installation_1.id, "issue.created", big_payload
    )
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_case_sensitive_event_validation(self, mock_installations, mock_send_webhook):
    """Event type validation rejects wrongly-cased resource/event names."""
    from sentry.sentry_apps.utils.errors import SentryAppSentryError

    mock_installations.return_value = []

    # Capitalized names are not valid SentryAppEventType values.
    with pytest.raises(SentryAppSentryError):
        broadcast_webhooks_for_organization(
            resource_name="Issue",  # Wrong case
            event_name="Created",  # Wrong case
            organization_id=self.organization.id,
            payload={"event": "data"},
        )
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_with_empty_payload(self, mock_installations, mock_send_webhook):
    """An empty payload dict is still broadcast to subscribed installations."""
    mock_installations.return_value = [self.installation_1]
    no_data: dict[str, Any] = {}

    broadcast_webhooks_for_organization(
        resource_name="issue",
        event_name="created",
        organization_id=self.organization.id,
        payload=no_data,
    )

    mock_send_webhook.delay.assert_called_once_with(
        self.installation_1.id, "issue.created", no_data
    )
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_none_values_in_payload(self, mock_installations, mock_send_webhook):
    """None values anywhere in the payload do not disturb the broadcast."""
    mock_installations.return_value = [self.installation_1]
    sparse_payload = {
        "normal_field": "value",
        "none_field": None,
        "nested": {"also_none": None, "normal": "value"},
    }

    broadcast_webhooks_for_organization(
        resource_name="issue",
        event_name="created",
        organization_id=self.organization.id,
        payload=sparse_payload,
    )

    mock_send_webhook.delay.assert_called_once_with(
        self.installation_1.id, "issue.created", sparse_payload
    )
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_app_service_exception(self, mock_installations):
    """Errors from the installation lookup propagate out of the task."""
    mock_installations.side_effect = Exception("App service unavailable")

    with pytest.raises(Exception, match="App service unavailable"):
        broadcast_webhooks_for_organization(
            resource_name="issue",
            event_name="created",
            organization_id=self.organization.id,
            payload={"event": "data"},
        )
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
@patch("sentry.sentry_apps.logic.consolidate_events")
def test_broadcast_consolidate_events_exception(
    self, mock_consolidate, mock_installations, mock_send_webhook
):
    """Errors raised by consolidate_events propagate out of the task."""
    mock_installations.return_value = [self.installation_1]
    mock_consolidate.side_effect = Exception("Consolidation failed")

    with pytest.raises(Exception, match="Consolidation failed"):
        broadcast_webhooks_for_organization(
            resource_name="issue",
            event_name="created",
            organization_id=self.organization.id,
            payload={"event": "data"},
        )
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_with_negative_organization_id(self, mock_installations, mock_send_webhook):
    """A negative organization ID is passed through to the lookup unchanged."""
    mock_installations.return_value = []

    broadcast_webhooks_for_organization(
        resource_name="issue", event_name="created", organization_id=-1, payload={"event": "data"}
    )

    mock_installations.assert_called_once_with(organization_id=-1)
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_with_zero_organization_id(self, mock_installations, mock_send_webhook):
    """A zero organization ID is passed through to the lookup unchanged."""
    mock_installations.return_value = []

    broadcast_webhooks_for_organization(
        resource_name="issue", event_name="created", organization_id=0, payload={"event": "data"}
    )

    mock_installations.assert_called_once_with(organization_id=0)
@patch("sentry.sentry_apps.tasks.sentry_apps.send_resource_change_webhook")
@patch("sentry.sentry_apps.tasks.sentry_apps.app_service.installations_for_organization")
def test_broadcast_queues_tasks_asynchronously(self, mock_installations, mock_send_webhook):
    """Webhooks are queued via .delay(), never invoked synchronously."""
    mock_installations.return_value = [self.installation_1, self.installation_2]
    data = {"test": "data"}

    broadcast_webhooks_for_organization(
        resource_name="issue",
        event_name="created",
        organization_id=self.organization.id,
        payload=data,
    )

    # One async task per installation was queued...
    assert mock_send_webhook.delay.call_count == 2
    # ...and the task function itself was never called directly.
    mock_send_webhook.assert_not_called()
    # Each installation got the right arguments.
    mock_send_webhook.delay.assert_any_call(self.installation_1.id, "issue.created", data)
    mock_send_webhook.delay.assert_any_call(self.installation_2.id, "issue.created", data)
| BroadcastWebhooksForOrganizationTest |
python | readthedocs__readthedocs.org | readthedocs/notifications/tests/test_querysets.py | {
"start": 401,
"end": 4722
} | class ____:
def test_add(self):
user = fixture.get(
User,
)
# There is any notification attached to this user
assert (
Notification.objects.filter(
attached_to_content_type=ContentType.objects.get_for_model(User),
attached_to_id=user.id,
).count()
== 0
)
Notification.objects.add(
attached_to=user,
message_id=MESSAGE_EMAIL_VALIDATION_PENDING,
)
# There is 1 notification attached to this user
assert (
Notification.objects.filter(
attached_to_content_type=ContentType.objects.get_for_model(User),
attached_to_id=user.id,
).count()
== 1
)
old_notification = Notification.objects.first()
old_notification.state = READ
old_notification.save()
# Add the same notification again
Notification.objects.add(
attached_to=user,
message_id=MESSAGE_EMAIL_VALIDATION_PENDING,
)
# Notification is not duplicated, but timestamp and state is updated
assert (
Notification.objects.filter(
attached_to_content_type=ContentType.objects.get_for_model(User),
attached_to_id=user.id,
).count()
== 1
)
new_notification = Notification.objects.first()
assert old_notification.pk == new_notification.pk
assert old_notification.modified < new_notification.modified
assert old_notification.state == READ
assert new_notification.state == UNREAD
# Add another notification
Notification.objects.add(
attached_to=user,
message_id="user:another:notification",
)
# Notification is added
assert (
Notification.objects.filter(
attached_to_content_type=ContentType.objects.get_for_model(User),
attached_to_id=user.id,
).count()
== 2
)
def test_cancel(self):
user = fixture.get(User)
Notification.objects.add(
attached_to=user,
message_id=MESSAGE_EMAIL_VALIDATION_PENDING,
)
# There is one UNREAD notification attached to this user
assert (
Notification.objects.filter(
attached_to_content_type=ContentType.objects.get_for_model(User),
attached_to_id=user.id,
state=UNREAD,
).count()
== 1
)
Notification.objects.cancel(
attached_to=user,
message_id=MESSAGE_EMAIL_VALIDATION_PENDING,
)
# There is none UNREAD notification attached to this user
assert (
Notification.objects.filter(
attached_to_content_type=ContentType.objects.get_for_model(User),
attached_to_id=user.id,
state=UNREAD,
).count()
== 0
)
# There is one CANCELLED notification attached to this user
assert (
Notification.objects.filter(
attached_to_content_type=ContentType.objects.get_for_model(User),
attached_to_id=user.id,
state=CANCELLED,
).count()
== 1
)
def test_for_user(self):
user = fixture.get(User)
Notification.objects.add(
attached_to=user,
message_id="user:notification:read",
state=READ,
)
Notification.objects.add(
attached_to=user,
message_id="user:notification:unread",
state=UNREAD,
)
Notification.objects.add(
attached_to=user,
message_id="user:notification:dismissed",
state=DISMISSED,
)
Notification.objects.add(
attached_to=user,
message_id="user:notification:cancelled",
state=CANCELLED,
)
assert [
n.message_id for n in Notification.objects.for_user(user, resource="all")
] == [
"user:notification:read",
"user:notification:unread",
]
| TestNotificationQuerySet |
python | coleifer__peewee | playhouse/dataset.py | {
"start": 10523,
"end": 10674
} | class ____(object):
def __init__(self, query):
self.query = query
def export(self, file_obj):
raise NotImplementedError
| Exporter |
python | kamyu104__LeetCode-Solutions | Python/semi-ordered-permutation.py | {
"start": 38,
"end": 278
} | class ____(object):
def semiOrderedPermutation(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
i, j = nums.index(1), nums.index(len(nums))
return i+((len(nums)-1)-j)-int(i > j)
| Solution |
python | python-attrs__attrs | tests/test_functional.py | {
"start": 1038,
"end": 1118
} | class ____(BaseSlots):
y = attr.ib()
@attr.s(frozen=True, slots=True)
| SubSlots |
python | facebook__pyre-check | tools/upgrade/tests/ast_test.py | {
"start": 233,
"end": 1498
} | class ____(unittest.TestCase):
def test_check_stable(self) -> None:
ast.check_stable("def foo(): pass", "def foo():\n pass")
with self.assertRaises(ast.UnstableAST):
ast.check_stable("def foo(): pass", "def bar(): pass")
with self.assertRaises(SyntaxError):
ast.check_stable("def foo():", "def foo():")
def test_check_stable_decorator(self) -> None:
@ast.check_stable_transformation
def _stable_transformation(input: str) -> str:
return input + "\n\n# comment"
_stable_transformation("def foo(): pass")
with self.assertRaises(SyntaxError):
# Clients are responsible for passing valid inputs.
_stable_transformation("def foo(:")
@ast.check_stable_transformation
def _unstable_transformation(input: str) -> str:
return input + "\n\npass"
with self.assertRaises(ast.UnstableAST):
_unstable_transformation("def foo(): pass")
@ast.check_stable_transformation
def _invalid_syntax_transformation(input: str) -> str:
return input + "\n\ndef foo(:"
with self.assertRaises(ast.UnstableAST):
_invalid_syntax_transformation("def foo(): pass")
| ErrorsTest |
python | django__django | tests/forms_tests/models.py | {
"start": 713,
"end": 1469
} | class ____(models.Model):
"""For ModelChoiceField and ModelMultipleChoiceField tests."""
CHOICES = [
("", "No Preference"),
("f", "Foo"),
("b", "Bar"),
]
INTEGER_CHOICES = [
(None, "No Preference"),
(1, "Foo"),
(2, "Bar"),
]
STRING_CHOICES_WITH_NONE = [
(None, "No Preference"),
("f", "Foo"),
("b", "Bar"),
]
name = models.CharField(max_length=10)
choice = models.CharField(max_length=2, blank=True, choices=CHOICES)
choice_string_w_none = models.CharField(
max_length=2, blank=True, null=True, choices=STRING_CHOICES_WITH_NONE
)
choice_integer = models.IntegerField(choices=INTEGER_CHOICES, blank=True, null=True)
| ChoiceModel |
python | keon__algorithms | algorithms/compression/huffman_coding.py | {
"start": 3329,
"end": 5477
} | class ____:
def __init__(self, file):
self.file = file
self.buffer = ""
self.saved_bits = 0
def write_char(self, char):
self.write_int(ord(char))
def write_int(self, num):
bin_int = "{0:08b}".format(num)
self.write_bits(bin_int)
def write_bits(self, bits):
self.saved_bits += len(bits)
self.buffer += bits
while len(self.buffer) >= 8:
i = int(self.buffer[:8], 2)
self.file.write(bytes([i]))
self.buffer = self.buffer[8:]
def save_tree(self, tree):
"""
Generate and save tree code to file
:param tree:
:return:
"""
signs = []
tree_code = ""
def get_code_tree(T):
nonlocal tree_code
if T.sign is not None:
signs.append(T.sign)
if T.left:
tree_code += "0"
get_code_tree(T.left)
if T.right:
tree_code += "1"
get_code_tree(T.right)
get_code_tree(tree)
self.write_bits(tree_code + "1") # "1" indicates that tree ended (it will be needed to load the tree)
for int_sign in signs:
self.write_int(int_sign)
def _save_information_about_additional_bits(self, additional_bits: int):
"""
Overwrite first three bits in the file
:param additional_bits: number of bits that were appended to fill last byte
:return:
"""
self.file.seek(0)
first_byte_raw = self.file.read(1)
self.file.seek(0)
first_byte = "{0:08b}".format(int.from_bytes(first_byte_raw, "big"))
# overwrite first three bits
first_byte = first_byte[3:]
first_byte = "{0:03b}".format(additional_bits) + first_byte
self.write_bits(first_byte)
def close(self):
additional_bits = 8 - len(self.buffer)
if additional_bits != 8: # buffer is empty, no need to append extra "0"
self.write_bits("0" * additional_bits)
self._save_information_about_additional_bits(additional_bits)
| HuffmanWriter |
python | pdm-project__pdm | src/pdm/resolver/providers.py | {
"start": 21820,
"end": 22456
} | class ____(ReusePinProvider):
"""A provider that reuses installed packages if possible."""
@cached_property
def installed(self) -> WorkingSet:
return self.repository.environment.get_working_set()
def iter_reuse_candidates(self, identifier: str, requirement: Requirement | None) -> Iterable[Candidate]:
key = strip_extras(identifier)[0]
if key not in self.installed or requirement is None:
return super().iter_reuse_candidates(identifier, requirement)
else:
dist = self.installed[key]
return [Candidate(requirement, installed=dist)]
| ReuseInstalledProvider |
python | apache__airflow | task-sdk/src/airflow/sdk/api/datamodels/_generated.py | {
"start": 9153,
"end": 9338
} | class ____(BaseModel):
"""
Response for task states with run_id, task and state.
"""
task_states: Annotated[dict[str, Any], Field(title="Task States")]
| TaskStatesResponse |
python | django__django | tests/forms_tests/field_tests/test_multivaluefield.py | {
"start": 1754,
"end": 1841
} | class ____(Form):
field1 = ComplexField(widget=ComplexMultiWidget())
| ComplexFieldForm |
python | huggingface__transformers | tests/models/timm_wrapper/test_modeling_timm_wrapper.py | {
"start": 1446,
"end": 2579
} | class ____:
def __init__(
self,
parent,
batch_size=3,
image_size=32,
num_channels=3,
is_training=True,
):
self.parent = parent
self.architecture = "resnet26"
# We need this to make the model smaller
self.model_args = {"channels": (16, 16, 16, 16)}
self.batch_size = batch_size
self.image_size = image_size
self.num_channels = num_channels
self.is_training = is_training
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
config = self.get_config()
return config, pixel_values
def get_config(self):
return TimmWrapperConfig(architecture=self.architecture, model_args=self.model_args)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
| TimmWrapperModelTester |
python | google__jax | jax/_src/pallas/mosaic_gpu/core.py | {
"start": 19450,
"end": 21632
} | class ____(GPUMemoryRef):
"""A sequence of trees of refs that are allowed to reuse the same memory.
One should not make assumptions as to how each ref will map to the underlying
memory region, since arbitrary padding may be applied in between different
refs.
As such, ref unions are only safe to use when the groups of refs that we
intend to alias have disjoint lifetimes (i.e. one should never attempt to read
data using a different ref than the one that was used to write the data).
"""
refs: Sequence[_GPUMemoryRefTree] = ()
def __init__(self, *refs: _GPUMemoryRefTree):
ref_leaves = jax.tree.leaves(refs)
if all(ref.memory_space == SMEM for ref in ref_leaves):
object.__setattr__(self, "refs", refs)
num_bytes = max(map(_ref_group_size, self.refs))
super().__init__(
inner_aval=jax_core.ShapedArray(
(num_bytes,), jnp.int8
),
memory_space=SMEM,
transforms=(),
)
elif all(ref.memory_space == TMEM for ref in ref_leaves):
object.__setattr__(self, "refs", refs)
max_cols = max(map(_ref_group_tmem_col_size, self.refs))
is_collective = ref_leaves[0].collective
if any(r.collective != is_collective for r in ref_leaves):
raise ValueError(
"Some aliased TMEM references are collective and some are not."
)
super().__init__(
inner_aval=jax_core.ShapedArray(
shape=(128, max_cols,),
dtype=jnp.int32,
),
memory_space=TMEM,
transforms=(),
layout=tcgen05.tmem_default_layout(packing=1),
collective=all(ref.collective for ref in ref_leaves),
)
else:
raise NotImplementedError(
"All aliased Refs must have the same memory space (SMEM or TMEM). "
f"Got {(ref.memory_space for ref in ref_leaves)}.")
def get_ref_aval(self) -> AbstractRefUnion:
inner_aval = jax.core.ShapedArray(self.shape, self.dtype)
refs_aval = jax.tree.map(lambda ref: ref.get_ref_aval(), self.refs)
return AbstractRefUnion(inner_aval, refs_aval,
memory_space=self.memory_space)
| RefUnion |
python | getsentry__sentry | tests/sentry/notifications/api/endpoints/test_notification_defaults.py | {
"start": 156,
"end": 1436
} | class ____(APITestCase):
endpoint = "sentry-api-0-notification-defaults"
def test_basic(self) -> None:
response = self.get_success_response()
assert response.data == {
"providerDefaults": ["email", "slack"],
"typeDefaults": {
"alerts": "always",
"approval": "always",
"deploy": "committed_only",
"quota": "always",
"quotaThresholds": "always",
"quotaAttachments": "always",
"quotaErrors": "always",
"quotaReplays": "always",
"quotaSpendAllocations": "always",
"quotaTransactions": "always",
"quotaWarnings": "always",
"quotaMonitorSeats": "always",
"quotaSpans": "always",
"quotaProfileDuration": "always",
"quotaProfileDurationUI": "always",
"quotaSeerBudget": "always",
"quotaLogBytes": "always",
"reports": "always",
"spikeProtection": "always",
"workflow": "subscribe_only",
"brokenMonitors": "always",
"quotaSeerUsers": "always",
},
}
| NotificationDefaultTest |
python | kamyu104__LeetCode-Solutions | Python/maximum-distance-in-arrays.py | {
"start": 29,
"end": 545
} | class ____(object):
def maxDistance(self, arrays):
"""
:type arrays: List[List[int]]
:rtype: int
"""
result, min_val, max_val = 0, arrays[0][0], arrays[0][-1]
for i in xrange(1, len(arrays)):
result = max(result, \
max(max_val - arrays[i][0], \
arrays[i][-1] - min_val))
min_val = min(min_val, arrays[i][0])
max_val = max(max_val, arrays[i][-1])
return result
| Solution |
python | google__pytype | pytype/imports/typeshed.py | {
"start": 15506,
"end": 16485
} | class ____(base.BuiltinLoader):
"""Load modules from typeshed."""
def __init__(self, options, missing_modules):
self.options = options
self.typeshed = _get_typeshed(missing_modules)
# TODO(mdemello): Inject options.open_function into self.typeshed
def load_module(self, namespace, module_name):
"""Load and parse a *.pyi from typeshed.
Args:
namespace: one of "stdlib" or "third_party"
module_name: the module name (without any file extension or "__init__"
suffix).
Returns:
(None, None) if the module doesn't have a definition.
Else a tuple of the filename and the AST of the module.
"""
try:
filename, src = self.typeshed.get_module_file(
namespace, module_name, self.options.python_version
)
except OSError:
return None, None
ast = parser.parse_string(
src, filename=filename, name=module_name, options=self.options
)
return filename, ast
| TypeshedLoader |
python | huggingface__transformers | src/transformers/models/vits/modeling_vits.py | {
"start": 52194,
"end": 54073
} | class ____(PreTrainedModel):
config: VitsConfig
base_model_prefix = "vits"
main_input_name = "input_ids"
supports_gradient_checkpointing = True
@torch.no_grad()
def _init_weights(self, module: nn.Module):
"""Initialize the weights"""
std = self.config.initializer_range
if isinstance(module, nn.Linear):
init.normal_(module.weight, mean=0.0, std=std)
if module.bias is not None:
init.zeros_(module.bias)
elif isinstance(module, nn.LayerNorm):
init.zeros_(module.bias)
init.ones_(module.weight)
elif isinstance(module, (nn.Conv1d, nn.ConvTranspose1d)):
init.kaiming_normal_(module.weight)
if module.bias is not None:
k = math.sqrt(module.groups / (module.in_channels * module.kernel_size[0]))
init.uniform_(module.bias, a=-k, b=k)
elif isinstance(module, nn.Embedding):
init.normal_(module.weight, mean=0.0, std=std)
# Here we need the check explicitly, as we slice the weight in the `zeros_` call, so it looses the flag
if module.padding_idx is not None and not getattr(module.weight, "_is_hf_initialized", False):
init.zeros_(module.weight[module.padding_idx])
elif isinstance(module, VitsAttention):
if self.config.window_size:
head_dim = self.config.hidden_size // self.config.num_attention_heads
init.normal_(module.emb_rel_k, std=head_dim**-0.5)
init.normal_(module.emb_rel_v, std=head_dim**-0.5)
elif isinstance(module, VitsElementwiseAffine):
init.zeros_(module.translate)
init.zeros_(module.log_scale)
@auto_docstring(
custom_intro="""
The complete VITS model, for text-to-speech synthesis.
"""
)
| VitsPreTrainedModel |
python | keras-team__keras | keras/src/ops/linalg_test.py | {
"start": 11379,
"end": 21097
} | class ____(testing.TestCase):
def test_cholesky(self):
x_non_psd = np.random.rand(4, 3, 3).astype("float32")
with self.assertRaises(ValueError):
linalg.cholesky(x_non_psd)
x = np.random.rand(4, 3, 3).astype("float32")
x_psd = np.matmul(x, x.transpose((0, 2, 1))) + 1e-5 * np.eye(
3, dtype="float32"
)
l_out = linalg.cholesky(x_psd, upper=False)
l_expected = np.linalg.cholesky(x_psd)
self.assertAllClose(l_out, l_expected, atol=1e-4)
u_out = linalg.cholesky(x_psd, upper=True)
u_expected = l_expected.transpose((0, 2, 1))
self.assertAllClose(u_out, u_expected, atol=1e-4)
@parameterized.named_parameters(
{"testcase_name": "lower", "upper": False},
{"testcase_name": "upper", "upper": True},
)
def test_cholesky_inverse(self, upper):
A = np.array(
[
[4.0, 12.0, -16.0],
[12.0, 37.0, -43.0],
[-16.0, -43.0, 98.0],
],
dtype="float32",
)
if upper:
factor = np.linalg.cholesky(A, upper=True)
else:
factor = np.linalg.cholesky(A)
expected_inverse = np.array(
[
[49.36111, -13.555555, 2.111111],
[-13.555555, 3.777778, -0.555556],
[2.111111, -0.555556, 0.111111],
],
dtype="float32",
)
output_inverse = linalg.cholesky_inverse(factor, upper=upper)
self.assertAllClose(output_inverse, expected_inverse, atol=1e-5)
def test_det(self):
x = np.random.rand(4, 3, 3)
out = linalg.det(x)
self.assertAllClose(out, np.linalg.det(x), atol=1e-5)
with self.assertRaises(ValueError):
x = np.random.rand(4, 3, 4)
linalg.det(x)
def test_eig(self):
x = np.random.rand(2, 3, 3)
x = x @ x.transpose((0, 2, 1))
w, v = map(ops.convert_to_numpy, linalg.eig(x))
x_reconstructed = (v * w[..., None, :]) @ v.transpose((0, 2, 1))
self.assertAllClose(x_reconstructed, x, atol=1e-4)
def test_eigh(self):
x = np.random.rand(2, 3, 3)
x = x @ x.transpose((0, 2, 1))
w, v = map(ops.convert_to_numpy, linalg.eigh(x))
x_reconstructed = (v * w[..., None, :]) @ v.transpose((0, 2, 1))
self.assertAllClose(x_reconstructed, x, atol=1e-4)
def test_inv(self):
x = np.random.rand(4, 3, 3)
x_inv = ops.convert_to_numpy(linalg.inv(x))
x_reconstructed = x @ x_inv
# high tolerance due to numerical instability
self.assertAllClose(
x_reconstructed, np.repeat(np.eye(3)[None], 4, 0), atol=1e-3
)
    def test_lu_factor(self):
        """P @ L @ U reconstructs the input after `lu_factor`.

        Covers square, tall-rectangular, and batched inputs. The visible
        branches expect the TensorFlow backend to raise ``ValueError`` for
        the non-square and batched cases, and encode that TensorFlow pivots
        index rows directly while other backends return sequential swaps.
        """
        if testing.jax_uses_gpu():
            self.skipTest("Skipping test with JAX + GPU due to temporary error")
        def _pivot_matrix(pivots, n):
            # Turn a sequence of row swaps (pivots[i] <-> row i) into an
            # explicit n x n permutation matrix by composing one swap at a time.
            p_matrix = np.eye(n)
            for i, p in enumerate(pivots):
                identity = np.eye(n, n)
                q = identity[i, :].copy()
                identity[i, :] = identity[p, :]
                identity[p, :] = q
                p_matrix = np.dot(p_matrix, identity)
            return p_matrix
        def _reconstruct(lu, pivots, m, n):
            # Unpack the packed LU matrix: unit-diagonal lower factor from the
            # strict lower triangle, upper factor from the upper triangle.
            lower = np.tril(lu[:, : min(m, n)], -1) + np.eye(m, min(m, n))
            upper = np.triu(lu[: min(m, n)])
            # pivots are defined differently in tensorflow
            # compared to the other backends: tensorflow returns a direct row
            # permutation, the others return sequential row swaps.
            if backend.backend() == "tensorflow":
                p_matrix = np.eye(m)[pivots]
            else:
                p_matrix = _pivot_matrix(pivots, m)
            out = p_matrix @ lower @ upper
            return out
        # Square case: every backend should round-trip.
        m, n = 4, 4
        x = np.random.rand(m, n)
        lu, pivots = map(ops.convert_to_numpy, linalg.lu_factor(x))
        x_reconstructed = _reconstruct(lu, pivots, m, n)
        self.assertAllClose(x_reconstructed, x, atol=1e-5)
        # Tall-rectangular case: rejected by the TensorFlow backend.
        m, n = 4, 3
        x = np.random.rand(m, n)
        if backend.backend() == "tensorflow":
            with self.assertRaises(ValueError):
                linalg.lu_factor(x)
        else:
            lu, pivots = map(ops.convert_to_numpy, linalg.lu_factor(x))
            x_reconstructed = _reconstruct(lu, pivots, m, n)
            self.assertAllClose(x_reconstructed, x, atol=1e-5)
        # batched case (also rejected by the TensorFlow backend)
        m, n = 3, 4
        x = np.random.rand(2, m, n)
        if backend.backend() == "tensorflow":
            with self.assertRaises(ValueError):
                linalg.lu_factor(x)
        else:
            lu, pivots = map(ops.convert_to_numpy, linalg.lu_factor(x))
            for i in range(2):
                self.assertAllClose(
                    _reconstruct(lu[i], pivots[i], m, n), x[i], atol=1e-5
                )
    @parameterized.named_parameters(
        named_product(
            ndim=[1, 2],
            ord=[None, "fro", "nuc", -np.inf, -2, -1, 0, 1, 2, np.inf, 3],
            axis=[None, 1, -1, (0, 1)],
            keepdims=[False, True],
        )
    )
    def test_norm(self, ndim, ord, axis, keepdims):
        """`norm` matches numpy over the ndim/ord/axis/keepdims grid.

        For invalid combinations, the expected exception type depends on the
        backend: torch raises IndexError or RuntimeError (predicted below),
        while every other backend raises ValueError.
        """
        if ndim == 1:
            x = np.random.random((5,)).astype("float32")
        else:
            x = np.random.random((5, 6)).astype("float32")
        # A vector norm is computed for 1-D input, or when a single axis of a
        # 2-D input is selected.
        vector_norm = (ndim == 1) or isinstance(axis, int)
        axis_out_of_bounds = ndim == 1 and (
            axis == 1 or isinstance(axis, tuple)
        )
        expected_error = None
        # Deciding when an out-of-bounds axis triggers an IndexError (rather
        # than a RuntimeError) on torch is subtle; the predicate below mirrors
        # torch's observed behavior.
        if (
            axis_out_of_bounds
            and (not isinstance(axis, tuple) or ord is None)
            and ord not in ("fro", "nuc")
        ):
            expected_error = IndexError
        elif (
            axis_out_of_bounds
            or (vector_norm and isinstance(axis, tuple))  # invalid axis for vector
            or (vector_norm and ord in ("fro", "nuc"))  # invalid ord for vector
            or (not vector_norm and ord in (0, 3))  # invalid ord for matrix
        ):
            expected_error = RuntimeError
        if expected_error is not None:
            # Non-torch backends always throw a ValueError
            expected_error = (
                expected_error if backend.backend() == "torch" else ValueError
            )
            with self.assertRaises(expected_error):
                linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
            return
        output = linalg.norm(x, ord=ord, axis=axis, keepdims=keepdims)
        expected_result = np.linalg.norm(
            x, ord=ord, axis=axis, keepdims=keepdims
        )
        self.assertAllClose(output, expected_result, atol=1e-5)
def test_qr(self):
x = np.random.random((4, 5))
q, r = linalg.qr(x, mode="reduced")
qref, rref = np.linalg.qr(x, mode="reduced")
self.assertAllClose(qref, q)
self.assertAllClose(rref, r)
q, r = linalg.qr(x, mode="complete")
qref, rref = np.linalg.qr(x, mode="complete")
self.assertAllClose(qref, q)
self.assertAllClose(rref, r)
def test_solve(self):
x1 = np.array([[1, 2], [4, 5]], dtype="float32")
x2 = np.array([[2, 4], [8, 10]], dtype="float32")
output = linalg.solve(x1, x2)
expected_result = np.array([[2, 0], [0, 2]], dtype="float32")
self.assertAllClose(output, expected_result)
def test_solve_triangular(self):
if testing.jax_uses_gpu():
self.skipTest("Skipping test with JAX + GPU due to temporary error")
# 2d-case
x1 = np.array([[1, 2], [0, 5]], dtype="float32")
x2 = np.array([2, 10], dtype="float32")
output = linalg.solve_triangular(x1, x2, lower=True)
expected_result = np.array([2, 2], dtype="float32")
self.assertAllClose(output, expected_result)
output = linalg.solve_triangular(x1, x2, lower=False)
expected_result = np.array([-2, 2], dtype="float32")
self.assertAllClose(output, expected_result)
# batched case
x1 = np.array([[[1, 2], [0, 5]], [[1, 2], [0, 5]]], dtype="float32")
x2 = np.array([[2, 10], [2, 10]], dtype="float32")
output = linalg.solve_triangular(x1, x2, lower=True)
expected_result = np.array([[2, 2], [2, 2]], dtype="float32")
self.assertAllClose(output, expected_result)
def test_svd(self):
x = np.random.rand(4, 30, 20).astype("float32")
u, s, vh = linalg.svd(x)
x_reconstructed = (u[..., :, : s.shape[-1]] * s[..., None, :]) @ vh[
..., : s.shape[-1], :
]
# High tolerance due to numerical instability
self.assertAllClose(x_reconstructed, x, atol=1e-3)
# Test `compute_uv=False`
s_no_uv = linalg.svd(x, compute_uv=False)
self.assertAllClose(s_no_uv, s, atol=1e-5, rtol=1e-5)
@parameterized.named_parameters(
("b_rank_1", 1, None),
("b_rank_2", 2, None),
("rcond", 1, 1e-3),
)
def test_lstsq(self, b_rank, rcond):
a = np.random.random((5, 7)).astype("float32")
a_symb = backend.KerasTensor((5, 7))
if b_rank == 1:
b = np.random.random((5,)).astype("float32")
b_symb = backend.KerasTensor((5,))
else:
b = np.random.random((5, 4)).astype("float32")
b_symb = backend.KerasTensor((5, 4))
out = linalg.lstsq(a, b, rcond=rcond)
ref_out = np.linalg.lstsq(a, b, rcond=rcond)[0]
self.assertAllClose(out, ref_out, atol=1e-5)
out_symb = linalg.lstsq(a_symb, b_symb)
self.assertEqual(out_symb.shape, out.shape)
| LinalgOpsCorrectnessTest |
python | numpy__numpy | numpy/linalg/tests/test_linalg.py | {
"start": 11245,
"end": 11935
} | class ____:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
"""
Run func on each of the cases with all of the tags in require, and none
of the tags in exclude
"""
for case in self.TEST_CASES:
# filter by require and exclude
if case.tags & require != require:
continue
if case.tags & exclude:
continue
try:
case.check(self.do)
except Exception as e:
msg = f'In test case: {case!r}\n\n'
msg += traceback.format_exc()
raise AssertionError(msg) from e
| LinalgTestCase |
python | doocs__leetcode | solution/2200-2299/2206.Divide Array Into Equal Pairs/Solution.py | {
"start": 0,
"end": 149
} | class ____:
def divideArray(self, nums: List[int]) -> bool:
cnt = Counter(nums)
return all(v % 2 == 0 for v in cnt.values())
| Solution |
python | pytorch__pytorch | torch/_inductor/template_heuristics/contiguous_mm.py | {
"start": 1149,
"end": 2232
} | class ____(GemmMaxAutotuneTemplateConfigHeuristics):
def _get_template_configs_impl(
self,
kernel_inputs: KernelInputs,
op_name: str,
) -> Generator[dict[str, Any], None, None]:
"""
Get all the valid k_splits for the given m, n, k.
"""
assert isinstance(kernel_inputs, MMKernelInputs), (
f"{self.__class__.__name__} requires MMKernelInputs"
)
# Check for unbacked symbols - if found, yield nothing
unbacked_symbols = any(
len(get_free_symbols(itr, unbacked_only=True)) > 0
for itr in (
*kernel_inputs.shapes_symbolic(),
*kernel_inputs.strides_symbolic(),
)
)
if unbacked_symbols:
return
mat2 = kernel_inputs.mat1mat2()[1]
if mat2.get_layout().is_contiguous():
# no need for contiguous decomposition
return
m, n, k = kernel_inputs.mnk_symbolic()
if not use_contiguous(m, n, k):
return
yield {}
| ContiguousMMHeuristics |
python | python__mypy | mypyc/irbuild/nonlocalcontrol.py | {
"start": 1988,
"end": 2667
} | class ____(NonlocalControl):
"""Nonlocal control within a loop."""
def __init__(
self, outer: NonlocalControl, continue_block: BasicBlock, break_block: BasicBlock
) -> None:
self.outer = outer
self.continue_block = continue_block
self.break_block = break_block
def gen_break(self, builder: IRBuilder, line: int) -> None:
builder.add(Goto(self.break_block))
def gen_continue(self, builder: IRBuilder, line: int) -> None:
builder.add(Goto(self.continue_block))
def gen_return(self, builder: IRBuilder, value: Value, line: int) -> None:
self.outer.gen_return(builder, value, line)
| LoopNonlocalControl |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pyi/PYI029.py | {
"start": 656,
"end": 822
} | class ____:
@abstractmethod
def __str__(self) -> builtins.str:
...
@abstractmethod
def __repr__(self) -> str:
...
| MatchingArgsButAbstract |
python | google__jax | jax/_src/prng.py | {
"start": 16535,
"end": 47153
} | class ____(dtypes.ExtendedDType):
_impl: PRNGImpl # TODO(mattjj,frostig): protocol really
_rules = KeyTyRules
type = dtypes.prng_key
def __init__(self, impl):
self._impl = impl
@property
def name(self) -> str:
return f'key<{self._impl.tag}>'
@property
def itemsize(self) -> int:
return math.prod(self._impl.key_shape) * np.dtype('uint32').itemsize
def __repr__(self) -> str:
return self.name
def __eq__(self, other):
return type(other) is KeyTy and self._impl == other._impl
def __hash__(self) -> int:
return hash((self.__class__, self._impl))
core.pytype_aval_mappings[PRNGKeyArray] = lambda x: x.aval
dtypes.canonicalize_value_handlers[PRNGKeyArray] = lambda x: x
def key_array_shard_arg_handler(xs: Sequence[PRNGKeyArray], shardings, layouts,
copy_semantics):
arrs = [x._base_array for x in xs]
phys_shardings = [physical_sharding(x.aval, sharding)
for x, sharding in zip(xs, shardings)]
# TODO(yashkatariya): `layouts` should be converted to physical layouts.
return pxla.shard_args(phys_shardings, layouts, copy_semantics, arrs)
pxla.shard_arg_handlers[PRNGKeyArray] = key_array_shard_arg_handler
def key_array_constant_handler(x, aval):
arr = x._base_array
return mlir.get_constant_handler(type(arr))(arr, aval)
mlir.register_constant_handler(PRNGKeyArray, key_array_constant_handler)
# -- primitives
def iterated_vmap_unary(n, f):
for _ in range(n):
f = api.vmap(f)
return f
# TODO(frostig): Revise the following two functions? These basically
# undo the singleton dimensions added by `batching.defbroadcasting`.
# It works, but introduces some possibly-redundant squeezes. Can we
# borrow from other broadcasting primitives instead?
def squeeze_vmap(f, left):
def squeeze_vmap_f(x, y):
if left:
x = jnp.squeeze(x, axis=0)
axes = (None, 0)
else:
y = jnp.squeeze(y, axis=0)
axes = (0, None)
return api.vmap(f, in_axes=axes, out_axes=0)(x, y)
return squeeze_vmap_f
def iterated_vmap_binary_bcast(shape1, shape2, f):
ndim1, ndim2 = len(shape1), len(shape2)
if ndim1 == ndim2 == 0:
return f
if 0 in [ndim1, ndim2]:
if ndim1 == 0:
return lambda x, y: iterated_vmap_unary(ndim2, lambda y: f(x, y))(y)
else:
return lambda x, y: iterated_vmap_unary(ndim1, lambda x: f(x, y))(x)
assert len(shape1) == len(shape2)
for sz1, sz2 in reversed(zip(shape1, shape2)):
if sz1 == sz2:
f = api.vmap(f, out_axes=0)
else:
assert sz1 == 1 or sz2 == 1, (sz1, sz2)
f = squeeze_vmap(f, sz1 == 1)
return f
def random_seed(seeds: int | typing.ArrayLike, impl: PRNGImpl) -> PRNGKeyArray:
# Avoid overflow error in X32 mode by first converting ints to int64.
# This breaks JIT invariance for large ints, but supports the common
# use-case of instantiating with Python hashes in X32 mode.
if isinstance(seeds, int):
seeds_arr = jnp.asarray(np.int64(seeds))
else:
seeds_arr = jnp.asarray(seeds)
if config.random_seed_offset.value:
seeds_arr += config.random_seed_offset.value
return random_seed_p.bind(seeds_arr, impl=impl)
random_seed_p = core.Primitive('random_seed')
ad.defjvp_zero(random_seed_p)
batching.defvectorized(random_seed_p)
@random_seed_p.def_abstract_eval
def random_seed_abstract_eval(seeds_aval, *, impl):
return keys_shaped_array(impl, seeds_aval.shape, seeds_aval.sharding,
seeds_aval.vma)
@random_seed_p.def_impl
def random_seed_impl(seeds, *, impl):
base_arr = random_seed_impl_base(seeds, impl=impl)
return PRNGKeyArray(impl, base_arr)
def random_seed_impl_base(seeds, *, impl):
seed = iterated_vmap_unary(np.ndim(seeds), impl.seed)
return seed(seeds)
def random_seed_lowering(ctx, seeds, *, impl):
aval, = ctx.avals_in
seed = iterated_vmap_unary(aval.ndim, impl.seed)
seed_lowering = mlir.lower_fun(seed, multiple_results=False)
return mlir.delegate_lowering(
ctx, seed_lowering, seeds,
avals_out=map(core.physical_aval, ctx.avals_out))
mlir.register_lowering(random_seed_p, random_seed_lowering)
def random_split(keys, shape: Shape):
return random_split_p.bind(keys, shape=shape)
random_split_p = core.Primitive('random_split')
ad.defjvp_zero(random_split_p)
batching.defvectorized(random_split_p)
@random_split_p.def_abstract_eval
def random_split_abstract_eval(keys_aval, *, shape):
# TODO(yashkatariya): random_split should take sharding as an arg too so we
# don't choose None here?
if keys_aval.sharding.mesh.empty:
out_sharding = core.get_cur_mesh_sharding()
else:
new_spec = (*keys_aval.sharding.spec, *[None] * len(shape))
out_sharding = keys_aval.sharding.update(spec=new_spec)
return keys_shaped_array(keys_aval.dtype._impl, (*keys_aval.shape, *shape),
out_sharding, keys_aval.vma)
@random_split_p.def_impl
def random_split_impl(keys, *, shape):
base_arr = random_split_impl_base(
keys._impl, keys._base_array, keys.ndim, shape=shape)
return PRNGKeyArray(keys._impl, base_arr)
def random_split_impl_base(impl, base_arr, keys_ndim, *, shape):
split = iterated_vmap_unary(keys_ndim, lambda k: impl.split(k, shape))
return split(base_arr)
def random_split_lowering(ctx, keys, *, shape):
aval, = ctx.avals_in
impl = aval.dtype._impl
split = iterated_vmap_unary(aval.ndim, lambda k: impl.split(k, shape))
split_lowering = mlir.lower_fun(split, multiple_results=False)
return mlir.delegate_lowering(
ctx, split_lowering, keys,
avals_in=[core.physical_aval(aval)],
avals_out=map(core.physical_aval, ctx.avals_out))
mlir.register_lowering(random_split_p, random_split_lowering)
def random_fold_in(keys, msgs):
msgs = jnp.asarray(msgs)
keys, msgs = core.standard_insert_pvary(keys, msgs)
return random_fold_in_p.bind(keys, msgs)
random_fold_in_p = core.Primitive('random_fold_in')
ad.defjvp_zero(random_fold_in_p)
batching.defbroadcasting(random_fold_in_p)
@random_fold_in_p.def_abstract_eval
def random_fold_in_abstract_eval(keys_aval, msgs_aval):
shape = lax.broadcasting_shape_rule(
'random_fold_in', keys_aval, msgs_aval)
sharding = lax.broadcasting_sharding_rule(
'random_fold_in', keys_aval, msgs_aval)
vma = core.standard_vma_rule('random_fold_in', keys_aval, msgs_aval)
return core.ShapedArray(shape, keys_aval.dtype, sharding=sharding, vma=vma)
@random_fold_in_p.def_impl
def random_fold_in_impl(keys, msgs):
base_arr = random_fold_in_impl_base(
keys._impl, keys._base_array, msgs, keys.shape)
return PRNGKeyArray(keys._impl, base_arr)
def random_fold_in_impl_base(impl, base_arr, msgs, keys_shape):
fold_in = iterated_vmap_binary_bcast(
keys_shape, np.shape(msgs), impl.fold_in)
return fold_in(base_arr, msgs)
def random_fold_in_lowering(ctx, keys, msgs):
keys_aval, msgs_aval = ctx.avals_in
impl = keys_aval.dtype._impl
fold_in = iterated_vmap_binary_bcast(
keys_aval.shape, msgs_aval.shape, impl.fold_in)
fold_in_lowering = mlir.lower_fun(fold_in, multiple_results=False)
return mlir.delegate_lowering(
ctx, fold_in_lowering, keys, msgs,
avals_in=[core.physical_aval(keys_aval), msgs_aval],
avals_out=map(core.physical_aval, ctx.avals_out))
mlir.register_lowering(random_fold_in_p, random_fold_in_lowering)
def random_bits(keys, bit_width, shape):
return random_bits_p.bind(keys, bit_width=bit_width, shape=shape)
random_bits_p = core.Primitive('random_bits')
ad.defjvp_zero(random_bits_p)
batching.defvectorized(random_bits_p)
@random_bits_p.def_abstract_eval
def random_bits_abstract_eval(keys_aval, *, bit_width, shape):
out_shape = (*keys_aval.shape, *shape)
out_dtype = dtypes.dtype(f'uint{bit_width}')
# TODO(yashkatariya): random_bits should take an out_sharding argument.
if keys_aval.sharding.mesh.empty:
out_sharding = core.get_cur_mesh_sharding()
else:
new_spec = (*keys_aval.sharding.spec, *[None] * len(shape))
out_sharding = keys_aval.sharding.update(spec=new_spec)
return core.ShapedArray(out_shape, out_dtype, sharding=out_sharding,
vma=keys_aval.vma)
@random_bits_p.def_impl
def random_bits_impl(keys, *, bit_width, shape):
return random_bits_impl_base(keys._impl, keys._base_array, keys.ndim,
bit_width=bit_width, shape=shape)
def random_bits_impl_base(impl, base_arr, keys_ndim, *, bit_width, shape):
bits = iterated_vmap_unary(
keys_ndim, lambda k: impl.random_bits(k, bit_width, shape))
return bits(base_arr)
def random_bits_lowering(ctx, keys, *, bit_width, shape):
aval, = ctx.avals_in
impl = aval.dtype._impl
bits = iterated_vmap_unary(
aval.ndim, lambda k: impl.random_bits(k, bit_width, shape))
bits_lowering = mlir.lower_fun(bits, multiple_results=False)
ctx_new = ctx.replace(avals_in=[core.physical_aval(aval)])
out = bits_lowering(ctx_new, keys)
ctx.set_tokens_out(ctx_new.tokens_out)
return out
mlir.register_lowering(random_bits_p, random_bits_lowering)
# The following wrap/unwrap primitives are at least a stopgap for
# backwards compatibility, namely when `config.jax_enable_custom_prng`
# is False. We need to convert key arrays to and from underlying
# uint32 base array, and we may need to do so under a jit. For
# example, we want to support:
#
# keys = jax.jit(random.split)(key)
#
# where `key` and `keys` are both acceptably old-style uint32 arrays
# so long as enable_custom_prng is False. The way we handle this is
# that `random.split` adapts the input/output by converting to/from
# key arrays across its call to `random_split`. So we rely on these
# wrap/unwrap casting primitives to allow that conversion under jit.
#
# We may want to keep both around for testing and debugging escape
# hatches. We can rename them `unsafe` for emphasis, and/or issue a
# warning on entry to the traceable.
#
# TODO(frostig): Consider removal once we always enable_custom_prng.
def random_wrap(base_arr, *, impl):
_check_prng_key_data(impl, base_arr)
return random_wrap_p.bind(base_arr, impl=impl)
random_wrap_p = core.Primitive('random_wrap')
ad.defjvp_zero(random_wrap_p)
@random_wrap_p.def_abstract_eval
def random_wrap_abstract_eval(base_arr_aval, *, impl):
shape = base_arr_shape_to_keys_shape(impl, base_arr_aval.shape)
sharding = logical_sharding(shape, KeyTy(impl), base_arr_aval.sharding)
return keys_shaped_array(impl, shape, sharding, base_arr_aval.vma)
@random_wrap_p.def_impl
def random_wrap_impl(base_arr, *, impl):
return PRNGKeyArray(impl, base_arr)
def random_wrap_lowering(ctx, base_arr, *, impl):
return [base_arr]
def random_wrap_batch_rule(batched_args, batch_dims, *, impl):
x, = batched_args
d, = batch_dims
x = batching.bdim_at_front(x, d, 1)
return random_wrap(x, impl=impl), 0
mlir.register_lowering(random_wrap_p, random_wrap_lowering)
batching.primitive_batchers[random_wrap_p] = random_wrap_batch_rule
def random_unwrap(keys):
if not dtypes.issubdtype(keys.dtype, dtypes.prng_key):
raise TypeError(f'random_unwrap takes key array operand, got {keys.dtype=}')
return random_unwrap_p.bind(keys)
random_unwrap_p = core.Primitive('random_unwrap')
ad.defjvp_zero(random_unwrap_p)
batching.defvectorized(random_unwrap_p)
@random_unwrap_p.def_abstract_eval
def random_unwrap_abstract_eval(keys_aval):
return core.physical_aval(keys_aval)
@random_unwrap_p.def_impl
def random_unwrap_impl(keys):
return keys._base_array
def random_unwrap_lowering(ctx, keys):
return [keys]
mlir.register_lowering(random_unwrap_p, random_unwrap_lowering)
# -- threefry2x32 PRNG implementation
def _is_threefry_prng_key(key: typing.Array) -> bool:
try:
return key.shape == (2,) and key.dtype == np.uint32
except AttributeError:
return False
def threefry_seed(seed: typing.Array) -> typing.Array:
"""Create a single raw threefry PRNG key from an integer seed.
Args:
seed: a 64- or 32-bit integer used as the value of the key.
Returns:
The PRNG key contents, modeled as an array of shape (2,) and dtype
uint32. The key is constructed from a 64-bit seed by effectively
bit-casting to a pair of uint32 values (or from a 32-bit seed by
first padding out with zeros).
"""
return _threefry_seed(seed)
@jit(inline=True)
def _threefry_seed(seed: typing.Array) -> typing.Array:
if seed.shape:
raise TypeError(f"PRNG key seed must be a scalar; got {seed!r}.")
if not np.issubdtype(seed.dtype, np.integer):
raise TypeError(f"PRNG key seed must be an integer; got {seed!r}")
convert = lambda k: lax.expand_dims(lax.convert_element_type(k, np.uint32), [0])
k1 = convert(
lax.shift_right_logical(seed, lax._const(seed, 32)))
with config.numpy_dtype_promotion('standard'):
# TODO(jakevdp): in X64 mode, this can generate 64-bit computations for 32-bit
# inputs. We should avoid this.
k2 = convert(jnp.bitwise_and(seed, np.uint32(0xFFFFFFFF)))
return lax.concatenate([k1, k2], 0)
def _make_rotate_left(dtype):
if not dtypes.issubdtype(dtype, np.integer):
raise TypeError("_rotate_left only accepts integer dtypes.")
nbits = np.array(dtypes.iinfo(dtype).bits, dtype)
def _rotate_left(x, d):
if lax.dtype(d) != dtype:
d = lax.convert_element_type(d, dtype)
if lax.dtype(x) != dtype:
x = lax.convert_element_type(x, dtype)
return lax.shift_left(x, d) | lax.shift_right_logical(x, nbits - d)
return _rotate_left
### hash function and split
def _threefry2x32_abstract_eval(*args):
if any(a.dtype != np.uint32 for a in args):
raise TypeError("Arguments to threefry2x32 must have uint32 type, got {}"
.format(args))
if all(isinstance(arg, core.ShapedArray) for arg in args):
shape = lax.broadcasting_shape_rule(*args)
aval = core.ShapedArray(shape, np.dtype('uint32'))
else:
raise TypeError(f"Arguments to threefry2x32 must all be arrays, got {args}")
return (aval,) * 2
rotate_left = _make_rotate_left(np.uint32)
def apply_round(v, rot):
v = v[:]
v[0] = v[0] + v[1]
v[1] = rotate_left(v[1], rot)
v[1] = v[0] ^ v[1]
return v
def rotate_list(xs):
return xs[1:] + xs[:1]
def rolled_loop_step(i, state):
x, ks, rotations = state
for r in rotations[0]:
x = apply_round(x, r)
new_x = [x[0] + ks[0], x[1] + ks[1] + jnp.asarray(i + 1, dtype=np.uint32)]
return new_x, rotate_list(ks), rotate_list(rotations)
def _threefry2x32_lowering(key1, key2, x1, x2, use_rolled_loops=True):
"""Apply the Threefry 2x32 hash.
Args:
keypair: a pair of 32bit unsigned integers used for the key.
count: an array of dtype uint32 used for the counts.
Returns:
An array of dtype uint32 with the same shape as `count`.
"""
x = [x1, x2]
rotations = [np.array([13, 15, 26, 6], dtype=np.uint32),
np.array([17, 29, 16, 24], dtype=np.uint32)]
ks = [key1, key2, key1 ^ key2 ^ np.uint32(0x1BD11BDA)]
x[0] = x[0] + ks[0]
x[1] = x[1] + ks[1]
if use_rolled_loops:
x, _, _ = lax_control_flow.fori_loop(
0, 5, rolled_loop_step, (x, rotate_list(ks), rotations)
)
else:
for r in rotations[0]:
x = apply_round(x, r)
x[0] = x[0] + ks[1]
x[1] = x[1] + ks[2] + np.uint32(1)
for r in rotations[1]:
x = apply_round(x, r)
x[0] = x[0] + ks[2]
x[1] = x[1] + ks[0] + np.uint32(2)
for r in rotations[0]:
x = apply_round(x, r)
x[0] = x[0] + ks[0]
x[1] = x[1] + ks[1] + np.uint32(3)
for r in rotations[1]:
x = apply_round(x, r)
x[0] = x[0] + ks[1]
x[1] = x[1] + ks[2] + np.uint32(4)
for r in rotations[0]:
x = apply_round(x, r)
x[0] = x[0] + ks[2]
x[1] = x[1] + ks[0] + np.uint32(5)
return tuple(x)
# Since the unrolled lowering is large, emit it as an out-of-line function.
_threefry2x32_lowering_rule = mlir.lower_fun(
partial(_threefry2x32_lowering, use_rolled_loops=False),
multiple_results=True)
_threefry2x32_cpu_lowering_rule = mlir.lower_fun(
partial(_threefry2x32_lowering, use_rolled_loops=True),
multiple_results=True)
def _threefry2x32_gpu_lowering_rule(ctx, k1, k2, x1, x2, *, target_name_prefix):
if not config.threefry_gpu_kernel_lowering.value: # back to default lowering
return _threefry2x32_lowering_rule(ctx, k1, k2, x1, x2)
aval_out, aval_out_2 = ctx.avals_out
assert aval_out == aval_out_2
k1_aval, k2_aval, x1_aval, x2_aval = ctx.avals_in
rank = len(aval_out.shape)
if 0 in aval_out.shape:
zeros = mlir.full_like_aval(ctx, 0, aval_out)
return [zeros, zeros]
def _broadcast(x, aval):
return mlir.broadcast_in_dim(ctx, x, aval_out,
broadcast_dimensions=range(rank - len(aval.shape), rank))
sub_ctx = ctx.replace(avals_in=(aval_out,) * 4)
rule = ffi.ffi_lowering(
f"{target_name_prefix}_threefry2x32_ffi")
return rule(sub_ctx, _broadcast(k1, k1_aval), _broadcast(k2, k2_aval),
_broadcast(x1, x1_aval), _broadcast(x2, x2_aval))
threefry2x32_p = core.Primitive("threefry2x32")
threefry2x32_p.multiple_results = True
threefry2x32_p.def_impl(partial(dispatch.apply_primitive, threefry2x32_p))
threefry2x32_p.def_abstract_eval(_threefry2x32_abstract_eval)
batching.defbroadcasting(threefry2x32_p)
mlir.register_lowering(
threefry2x32_p, _threefry2x32_lowering_rule, inline=False)
mlir.register_lowering(
threefry2x32_p, _threefry2x32_cpu_lowering_rule, platform='cpu')
mlir.register_lowering(
threefry2x32_p,
partial(_threefry2x32_gpu_lowering_rule, target_name_prefix='cu'),
platform='cuda',
inline=False)
mlir.register_lowering(
threefry2x32_p,
partial(_threefry2x32_gpu_lowering_rule, target_name_prefix='hip'),
platform='rocm',
inline=False)
def iota_2x32_shape(shape):
"""Reshaped ``uint64`` iota, as two parallel ``uint32`` arrays.
Setting aside representation, this function essentially computes the
equivalent of::
jax.lax.iota(dtype=np.uint64, size=math.prod(shape)).reshape(shape)
However:
* It returns two parallel ``uint32`` arrays instead of one
``uint64`` array. This renders it invariant under either setting of
the system-wide ``jax_enable_x64`` configuration flag.
* It lowers in a way such that the compiler's automatic SPMD
partitioner recognizes its partitionability.
For example::
>>> import numpy as np
>>> from jax import lax
>>> from jax._src import prng
>>> prng.iota_2x32_shape((3, 4))
[Array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=uint32),
Array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]], dtype=uint32)]
>>> def reshaped_iota(shape):
... return lax.iota(size=math.prod(shape), dtype=np.uint32).reshape(shape)
...
>>> reshaped_iota((3, 4))
Array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]], dtype=uint32)
Args:
shape: the output shape
Returns:
A pair of ``uint32`` arrays ``(counts_hi, counts_lo)``, both of
shape ``shape``, representing the higher-order and lower-order 32
bits of the 64 bit unsigned iota.
"""
if len(shape) == 0:
return (jnp.zeros((), np.dtype('uint32')),) * 2
return iota_2x32_shape_p.bind(shape=shape)
iota_2x32_shape_p = core.Primitive('iota_2x32_shape')
iota_2x32_shape_p.multiple_results = True
iota_2x32_shape_p.def_impl(partial(dispatch.apply_primitive, iota_2x32_shape_p))
@iota_2x32_shape_p.def_abstract_eval
def iota_2x32_shape_abstract_eval(*, shape):
return (core.ShapedArray(shape, np.dtype('uint32')),) * 2
def bcast_iotas_to_reshaped_iota(
add: Callable[[ir.Value, ir.Value], ir.Value],
mul: Callable[[core.DimSize, ir.Value], ir.Value],
shape: core.Shape,
iotas: Sequence[ir.Value]) -> ir.Value:
strides: core.Shape = (*(np.cumprod(shape[1:][::-1])[::-1]), 1)
return reduce(add, [mul(s, i) for i, s in zip(iotas, strides)])
def iota_2x32_shape_lowering(ctx, *, shape):
aval_out, _ = ctx.avals_out
aval_u64 = core.ShapedArray(shape, np.dtype('uint64'))
def _add(x: ir.Value, y: ir.Value) -> ir.Value:
return mlir.hlo.add(x, y)
def _mul(x: core.DimSize, y: ir.Value) -> ir.Value:
if core.is_constant_dim(x):
x_const = mlir.ir_constant(np.array(x, np.dtype('uint64')))
else:
x_shape, = mlir.eval_dynamic_shape(ctx, (x,))
x_const = hlo.convert(
ir.RankedTensorType.get(
[],
mlir.dtype_to_ir_type(np.dtype('uint64'))), x_shape)
x_bcast = mlir.broadcast_in_dim(ctx, x_const, aval_u64,
broadcast_dimensions=[])
return mlir.hlo.multiply(x_bcast, y)
assert len(shape) > 0
iotas = [mlir.iota(ctx, aval_u64, dimension=dimension)
for dimension in range(len(shape))]
counts = bcast_iotas_to_reshaped_iota(_add, _mul, shape, iotas)
shift = mlir.ir_constant(np.array(32, np.dtype('uint64')))
shift = mlir.broadcast_in_dim(ctx, shift, aval_u64,
broadcast_dimensions=[])
counts_shifted = mlir.hlo.shift_right_logical(counts, shift)
counts_lo = mlir.hlo.convert(mlir.aval_to_ir_type(aval_out), counts)
counts_hi = mlir.hlo.convert(mlir.aval_to_ir_type(aval_out), counts_shifted)
return counts_hi, counts_lo
mlir.register_lowering(iota_2x32_shape_p, iota_2x32_shape_lowering)
@jit(inline=True)
def threefry_2x32(keypair, count):
"""Apply the Threefry 2x32 hash.
Args:
keypair: a pair of 32bit unsigned integers used for the key.
count: an array of dtype uint32 used for the counts.
Returns:
An array of dtype uint32 with the same shape as `count`.
"""
key1, key2 = keypair
if not lax.dtype(key1) == lax.dtype(key2) == lax.dtype(count) == np.uint32:
msg = "threefry_2x32 requires uint32 arguments, got {}"
raise TypeError(msg.format([lax.dtype(x) for x in [key1, key2, count]]))
flat_count = count.ravel()
odd_size = flat_count.shape[0] % 2
if core.is_constant_dim(odd_size):
if odd_size:
x = list(jnp.split(jnp.concatenate([flat_count, np.uint32([0])]), 2))
else:
x = list(jnp.split(flat_count, 2))
else:
# With symbolic shapes we cannot always tell statically if odd_size is true
# or false, so we rewrite this without a conditional.
flat_count_padded = jnp.concatenate([flat_count, np.uint32([0])])
flat_count_padded_half_size = flat_count_padded.shape[0] // 2
x = [
lax_slicing.dynamic_slice(flat_count_padded, (0,),
(flat_count_padded_half_size,)),
lax_slicing.dynamic_slice(flat_count_padded,
(flat_count_padded_half_size,),
(flat_count_padded_half_size,))
]
assert x[0].shape == x[1].shape, (x[0].shape, x[1].shape)
x = threefry2x32_p.bind(key1, key2, x[0], x[1])
out = jnp.concatenate(x)
assert out.dtype == np.uint32
if core.is_constant_dim(odd_size):
return lax.reshape(out[:-1] if odd_size else out, count.shape)
else:
out_no_padding = lax_slicing.dynamic_slice(out, (0,), (flat_count.shape[0],))
return lax.reshape(out_no_padding, count.shape)
def threefry_split(key: typing.Array, shape: Shape) -> typing.Array:
shape = tuple(unsafe_map(core.concrete_dim_or_error, shape))
return _threefry_split(key, shape)
@jit(static_argnums=(1,))
def _threefry_split(key, shape) -> typing.Array:
if config.threefry_partitionable.value:
return _threefry_split_foldlike(key, shape)
else:
return _threefry_split_original(key, shape)
@jit(static_argnums=(1,), inline=True)
def _threefry_split_original(key, shape) -> typing.Array:
num = math.prod(shape)
counts = lax.iota(np.uint32, num * 2)
return lax.reshape(threefry_2x32(key, counts), (*shape, 2))
@jit(static_argnums=(1,), inline=True)
def _threefry_split_foldlike(key, shape) -> typing.Array:
k1, k2 = key
counts1, counts2 = iota_2x32_shape(shape)
bits1, bits2 = threefry2x32_p.bind(k1, k2, counts1, counts2)
return jnp.stack([bits1, bits2], axis=bits1.ndim)
def threefry_fold_in(key: typing.Array, data: typing.Array) -> typing.Array:
assert not data.shape
return _threefry_fold_in(key, jnp.asarray(data, dtype='uint32'))
@jit
def _threefry_fold_in(key, data):
return threefry_2x32(key, threefry_seed(data))
def threefry_random_bits(key: typing.Array, bit_width, shape):
"""Sample uniform random bits of given width and shape using PRNG key."""
if not _is_threefry_prng_key(key):
raise TypeError("threefry_random_bits got invalid prng key.")
if bit_width not in (8, 16, 32, 64):
raise TypeError("requires 8-, 16-, 32- or 64-bit field width.")
if config.threefry_partitionable.value:
return _threefry_random_bits_partitionable(key, bit_width, shape)
else:
return _threefry_random_bits_original(key, bit_width, shape)
def _threefry_random_bits_partitionable(key: typing.Array, bit_width, shape):
if all(core.is_constant_dim(d) for d in shape) and math.prod(shape) > 2 ** 64:
raise NotImplementedError('random bits array of size exceeding 2 ** 64')
k1, k2 = key
counts1, counts2 = iota_2x32_shape(shape)
bits1, bits2 = threefry2x32_p.bind(k1, k2, counts1, counts2)
dtype = UINT_DTYPES[bit_width]
if bit_width == 64:
bits_hi = lax.convert_element_type(bits1, dtype)
bits_lo = lax.convert_element_type(bits2, dtype)
return lax.shift_left(bits_hi, jnp.asarray(32, dtype=dtype)) | bits_lo
elif bit_width == 32:
return bits1 ^ bits2
else:
return lax.convert_element_type(bits1 ^ bits2, dtype)
@jit(static_argnums=(1, 2), inline=True)
def _threefry_random_bits_original(key: typing.Array, bit_width, shape):
size = math.prod(shape)
# Compute ceil(bit_width * size / 32) in a way that is friendly to shape
# polymorphism
max_count, r = divmod(bit_width * size, 32)
if r > 0:
max_count += 1
if core.is_constant_dim(max_count):
nblocks, rem = divmod(max_count, dtypes.iinfo(np.uint32).max)
else:
nblocks, rem = 0, max_count
if not nblocks:
bits = threefry_2x32(key, lax.iota(np.uint32, rem))
else:
keys = threefry_split(key, (nblocks + 1,))
subkeys, last_key = keys[:-1], keys[-1]
blocks = vmap(threefry_2x32, in_axes=(0, None))(subkeys, lax.iota(np.uint32, dtypes.iinfo(np.uint32).max))
last = threefry_2x32(last_key, lax.iota(np.uint32, rem))
bits = lax.concatenate([blocks.ravel(), last], 0)
dtype = UINT_DTYPES[bit_width]
if bit_width == 64:
bits = [lax.convert_element_type(x, dtype) for x in jnp.split(bits, 2)]
bits = lax.shift_left(bits[0], jnp.asarray(32, dtype=dtype)) | bits[1]
elif bit_width in [8, 16]:
# this is essentially bits.view(dtype)[:size]
bits = lax.bitwise_and(
jnp.asarray(np.iinfo(dtype).max, dtype='uint32'),
lax.shift_right_logical(
lax.broadcast(bits, (1,)),
lax.mul(
np.uint32(bit_width),
lax.broadcasted_iota(np.uint32, (32 // bit_width, 1), 0)
)
)
)
bits = lax.reshape(bits, ((max_count * 32 // bit_width),), (1, 0))
bits = lax.convert_element_type(bits, dtype)[:size]
return lax.reshape(bits, shape)
threefry_prng_impl = PRNGImpl(
key_shape=(2,),
seed=threefry_seed,
split=threefry_split,
random_bits=threefry_random_bits,
fold_in=threefry_fold_in,
name='threefry2x32',
tag='fry')
register_prng(threefry_prng_impl)
# -- RngBitGenerator PRNG implementation
# This code is experimental!
# https://www.openxla.org/xla/operation_semantics#rngbitgenerator
# Notice that the RngBitGenerator operations are not guaranteed to be
# stable/deterministic across backends or compiler versions. Correspondingly, we
# reserve the right to change any of these implementations at any time!
def _rbg_seed(seed: typing.Array) -> typing.Array:
assert not seed.shape
halfkey = threefry_seed(seed)
return jnp.concatenate([halfkey, halfkey])
def _rbg_split(key: typing.Array, shape: Shape) -> typing.Array:
if config.threefry_partitionable.value:
_threefry_split = _threefry_split_foldlike
else:
_threefry_split = _threefry_split_original
halfkeys = key.reshape(2, 2)
return vmap(
_threefry_split, (0, None), len(shape))(halfkeys, shape).reshape(
*shape, 4)
def _rbg_fold_in(key: typing.Array, data: typing.Array) -> typing.Array:
assert not data.shape
return vmap(_threefry_fold_in, (0, None), 0)(key.reshape(2, 2), data).reshape(4)
def _rbg_random_bits(key: typing.Array, bit_width: int, shape: Sequence[int]
) -> typing.Array:
if not key.shape == (4,) and key.dtype == np.dtype('uint32'):
raise TypeError("_rbg_random_bits got invalid prng key.")
if bit_width not in (8, 16, 32, 64):
raise TypeError("requires 8-, 16-, 32- or 64-bit field width.")
_, bits = lax.rng_bit_generator(key, shape, dtype=UINT_DTYPES[bit_width])
return bits
rbg_prng_impl = PRNGImpl(
key_shape=(4,),
seed=_rbg_seed,
split=_rbg_split,
random_bits=_rbg_random_bits,
fold_in=_rbg_fold_in,
name='rbg',
tag='rbg')
register_prng(rbg_prng_impl)
def _unsafe_rbg_split(key: typing.Array, shape: Shape) -> typing.Array:
# treat 10 iterations of random bits as a 'hash function'
num = math.prod(shape)
_, keys = lax.rng_bit_generator(key, (10 * num, 4), dtype='uint32')
return lax_slicing.slice_in_dim(
keys, start_index=None, limit_index=None, stride=10).reshape(*shape, 4)
def _unsafe_rbg_fold_in(key: typing.Array, data: typing.Array) -> typing.Array:
assert not data.shape
_, random_bits = lax.rng_bit_generator(_rbg_seed(data), (10, 4), dtype='uint32')
return key ^ random_bits[-1]
unsafe_rbg_prng_impl = PRNGImpl(
key_shape=(4,),
seed=_rbg_seed,
split=_unsafe_rbg_split,
random_bits=_rbg_random_bits,
fold_in=_unsafe_rbg_fold_in,
name='unsafe_rbg',
tag='urbg')
register_prng(unsafe_rbg_prng_impl)
# Register export serialization for PRNG key types.
try:
from jax._src.export import serialization # pytype: disable=import-error
from jax._src.export import serialization_generated as ser_flatbuf # pytype: disable=import-error
except ImportError:
# This can happen if flatbuffers is not installed, in which case export
# serialization is not supported and it is safe to skip the registration.
pass
else:
serialization.register_dtype_kind(
KeyTy(prngs["threefry2x32"]), ser_flatbuf.DType.key_fry)
serialization.register_dtype_kind(
KeyTy(prngs["rbg"]), ser_flatbuf.DType.key_rbg)
serialization.register_dtype_kind(
KeyTy(prngs["unsafe_rbg"]), ser_flatbuf.DType.key_unsafe_rbg)
| KeyTy |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/auth/managers/models/resource_details.py | {
"start": 1687,
"end": 1825
} | class ____:
"""Represents the details of a pool."""
name: str | None = None
team_name: str | None = None
@dataclass
| PoolDetails |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/assignment10.py | {
"start": 425,
"end": 2038
} | class ____(Generic[T]): ...
def func1(v1: list[Any | None], v2: list[int | str]):
x1: list[int | None] = v1
reveal_type(x1, expected_text="list[int | None]")
x2: list[Any] = v2
reveal_type(x2, expected_text="list[Any]")
x3: list[Any | str] = v2
reveal_type(x3, expected_text="list[Any | str]")
def func2(v1: dict[int, Any | None], v2: dict[int, int | str]):
x1: dict[int, int | None] = v1
reveal_type(x1, expected_text="dict[int, int | None]")
x2: dict[Any, Any] = v2
reveal_type(x2, expected_text="dict[Any, Any]")
x3: dict[Any, Any | str] = v2
reveal_type(x3, expected_text="dict[Any, Any | str]")
def func3(y: list[int]):
x1: Iterable[int | B[Any]] = y
reveal_type(x1, expected_text="list[int]")
x2: Iterable[Any | B[Any]] = y
reveal_type(x2, expected_text="list[int]")
x3: Iterable[Any] = y
reveal_type(x3, expected_text="list[int]")
def func4(y: list[Any]):
x1: Iterable[int | B[Any]] = y
reveal_type(x1, expected_text="list[Any]")
x2: Iterable[Any | B[Any]] = y
reveal_type(x2, expected_text="list[Any]")
x3: Iterable[Any] = y
reveal_type(x3, expected_text="list[Any]")
def func5(v1: list[Any | None]):
x1: list[int | None] = v1
reveal_type(x1, expected_text="list[int | None]")
def func6(v1: tuple[Any], v2: tuple[int, Any], v3: tuple[Any, ...]):
x1: tuple[int] = v1
reveal_type(x1, expected_text="tuple[int]")
x2: tuple[int, str] = v2
reveal_type(x2, expected_text="tuple[int, str]")
x3: tuple[str, ...] = v3
reveal_type(x3, expected_text="tuple[str, ...]")
| B |
python | neetcode-gh__leetcode | python/0055-jump-game.py | {
"start": 0,
"end": 227
} | class ____:
def canJump(self, nums: List[int]) -> bool:
goal = len(nums) - 1
for i in range(len(nums) - 2, -1, -1):
if i + nums[i] >= goal:
goal = i
return goal == 0
| Solution |
python | kubernetes-client__python | kubernetes/client/models/v1_endpoint_address.py | {
"start": 383,
"end": 6257
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'hostname': 'str',
'ip': 'str',
'node_name': 'str',
'target_ref': 'V1ObjectReference'
}
attribute_map = {
'hostname': 'hostname',
'ip': 'ip',
'node_name': 'nodeName',
'target_ref': 'targetRef'
}
def __init__(self, hostname=None, ip=None, node_name=None, target_ref=None, local_vars_configuration=None): # noqa: E501
"""V1EndpointAddress - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._hostname = None
self._ip = None
self._node_name = None
self._target_ref = None
self.discriminator = None
if hostname is not None:
self.hostname = hostname
self.ip = ip
if node_name is not None:
self.node_name = node_name
if target_ref is not None:
self.target_ref = target_ref
@property
def hostname(self):
"""Gets the hostname of this V1EndpointAddress. # noqa: E501
The Hostname of this endpoint # noqa: E501
:return: The hostname of this V1EndpointAddress. # noqa: E501
:rtype: str
"""
return self._hostname
@hostname.setter
def hostname(self, hostname):
"""Sets the hostname of this V1EndpointAddress.
The Hostname of this endpoint # noqa: E501
:param hostname: The hostname of this V1EndpointAddress. # noqa: E501
:type: str
"""
self._hostname = hostname
@property
def ip(self):
"""Gets the ip of this V1EndpointAddress. # noqa: E501
The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16). # noqa: E501
:return: The ip of this V1EndpointAddress. # noqa: E501
:rtype: str
"""
return self._ip
@ip.setter
def ip(self, ip):
"""Sets the ip of this V1EndpointAddress.
The IP of this endpoint. May not be loopback (127.0.0.0/8 or ::1), link-local (169.254.0.0/16 or fe80::/10), or link-local multicast (224.0.0.0/24 or ff02::/16). # noqa: E501
:param ip: The ip of this V1EndpointAddress. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and ip is None: # noqa: E501
raise ValueError("Invalid value for `ip`, must not be `None`") # noqa: E501
self._ip = ip
@property
def node_name(self):
"""Gets the node_name of this V1EndpointAddress. # noqa: E501
Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. # noqa: E501
:return: The node_name of this V1EndpointAddress. # noqa: E501
:rtype: str
"""
return self._node_name
@node_name.setter
def node_name(self, node_name):
"""Sets the node_name of this V1EndpointAddress.
Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. # noqa: E501
:param node_name: The node_name of this V1EndpointAddress. # noqa: E501
:type: str
"""
self._node_name = node_name
@property
def target_ref(self):
"""Gets the target_ref of this V1EndpointAddress. # noqa: E501
:return: The target_ref of this V1EndpointAddress. # noqa: E501
:rtype: V1ObjectReference
"""
return self._target_ref
@target_ref.setter
def target_ref(self, target_ref):
"""Sets the target_ref of this V1EndpointAddress.
:param target_ref: The target_ref of this V1EndpointAddress. # noqa: E501
:type: V1ObjectReference
"""
self._target_ref = target_ref
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1EndpointAddress):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1EndpointAddress):
return True
return self.to_dict() != other.to_dict()
| V1EndpointAddress |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-amazon-ads/unit_tests/integrations/ad_requests/sponsored_brands_report_v3_request_builder.py | {
"start": 280,
"end": 3617
} | class ____(AmazonAdsBaseRequestBuilder):
@classmethod
def _init_report_endpoint(
cls,
client_id: str,
client_access_token: str,
profile_id: str,
report_type: str,
metrics: List[str],
report_date: Optional[str] = None,
) -> "SponsoredBrandsV3ReportRequestBuilder":
return (
cls(f"reporting/reports")
.with_client_id(client_id)
.with_client_access_token(client_access_token)
.with_profile_id(profile_id)
.with_metrics(metrics)
.with_report_date(report_date)
.with_report_type(report_type)
)
@classmethod
def init_purchased_asin_report_endpoint(
cls, client_id: str, client_access_token: str, profile_id: str, metrics: List[str], report_date: Optional[str]
) -> "SponsoredBrandsV3ReportRequestBuilder":
return cls._init_report_endpoint(client_id, client_access_token, profile_id, "purchasedAsin", report_date, metrics)
def __init__(self, resource: str) -> None:
super().__init__(resource)
self._metrics: List[str] = None
self._report_date: str = None
self._report_type: str = None
@property
def _report_config_group_by(self) -> List[str]:
return {
"purchasedAsin": ["purchasedAsin"],
}[self._report_type]
@property
def _report_config_report_type_id(self) -> str:
return {
"purchasedAsin": "sbPurchasedProduct",
}[self._report_type]
@property
def _report_config_filters(self) -> List[str]:
return {
"purchasedAsin": [],
}[self._report_type]
@property
def query_params(self) -> Dict[str, Any]:
return None
@property
def request_body(self) -> Optional[str]:
body: dict = OrderedDict()
if self._report_type and self._report_date:
body["name"] = f"{self._report_type} report {self._report_date}"
if self._report_date:
body["startDate"] = self._report_date
body["endDate"] = self._report_date
if self._report_type:
body["configuration"] = {"adProduct": "SPONSORED_BRANDS", "groupBy": self._report_config_group_by}
if self._metrics:
body["configuration"]["columns"] = self._metrics
if self._report_type:
body["configuration"]["reportTypeId"] = self._report_config_report_type_id
body["configuration"]["filters"] = self._report_config_filters
body["configuration"]["timeUnit"] = "SUMMARY"
body["configuration"]["format"] = "GZIP_JSON"
return json.dumps(body)
def with_report_date(self, report_date: AirbyteDateTime) -> "SponsoredBrandsV3ReportRequestBuilder":
self._report_date = report_date.strftime("%Y-%m-%d")
return self
def with_report_type(self, report_type: str) -> "SponsoredBrandsV3ReportRequestBuilder":
self._report_type = report_type
return self
def with_tactics(self, tactics: str) -> "SponsoredBrandsV3ReportRequestBuilder":
self._tactics = tactics
return self
def with_metrics(self, metrics: List[str]) -> "SponsoredBrandsV3ReportRequestBuilder":
self._metrics = metrics
return self
| SponsoredBrandsV3ReportRequestBuilder |
python | tensorflow__tensorflow | tensorflow/python/distribute/coordinator/watchdog_test.py | {
"start": 883,
"end": 2028
} | class ____(test.TestCase, parameterized.TestCase):
@parameterized.parameters(True, False)
def testWatchDogTimeout(self, use_env_var):
tmp_file = self.create_tempfile()
f = open(tmp_file, "w+")
triggerred_count = [0]
def on_triggered_fn():
triggerred_count[0] += 1
timeout = 3
if use_env_var:
os.environ["TF_CLUSTER_COORDINATOR_WATCH_DOG_TIMEOUT"] = str(timeout)
wd = watchdog.WatchDog(traceback_file=f, on_triggered=on_triggered_fn)
else:
wd = watchdog.WatchDog(
timeout=timeout, traceback_file=f, on_triggered=on_triggered_fn)
time.sleep(6)
self.assertGreaterEqual(triggerred_count[0], 1)
wd.report_closure_done()
time.sleep(1)
self.assertGreaterEqual(triggerred_count[0], 1)
time.sleep(5)
self.assertGreaterEqual(triggerred_count[0], 2)
wd.stop()
time.sleep(5)
last_triggered_count = triggerred_count[0]
time.sleep(10)
self.assertEqual(last_triggered_count, triggerred_count[0])
f.close()
with open(tmp_file) as f:
self.assertIn("Current thread", f.read())
if __name__ == "__main__":
test.main()
| WatchDogTest |
python | ray-project__ray | rllib/connectors/connector.py | {
"start": 567,
"end": 2618
} | class ____:
"""Data bits that may be needed for running connectors.
Note(jungong) : we need to be really careful with the data fields here.
E.g., everything needs to be serializable, in case we need to fetch them
in a remote setting.
"""
# TODO(jungong) : figure out how to fetch these in a remote setting.
# Probably from a policy server when initializing a policy client.
def __init__(
self,
config: AlgorithmConfigDict = None,
initial_states: List[TensorType] = None,
observation_space: gym.Space = None,
action_space: gym.Space = None,
view_requirements: Dict[str, ViewRequirement] = None,
is_policy_recurrent: bool = False,
):
"""Construct a ConnectorContext instance.
Args:
initial_states: States that are used for constructing
the initial input dict for RNN models. [] if a model is not recurrent.
action_space_struct: a policy's action space, in python
data format. E.g., python dict instead of DictSpace, python tuple
instead of TupleSpace.
"""
self.config = config or {}
self.initial_states = initial_states or []
self.observation_space = observation_space
self.action_space = action_space
self.view_requirements = view_requirements
self.is_policy_recurrent = is_policy_recurrent
@staticmethod
def from_policy(policy: "Policy") -> "ConnectorContext":
"""Build ConnectorContext from a given policy.
Args:
policy: Policy
Returns:
A ConnectorContext instance.
"""
return ConnectorContext(
config=policy.config,
initial_states=policy.get_initial_state(),
observation_space=policy.observation_space,
action_space=policy.action_space,
view_requirements=policy.view_requirements,
is_policy_recurrent=policy.is_recurrent(),
)
@OldAPIStack
| ConnectorContext |
python | ray-project__ray | python/ray/dashboard/head.py | {
"start": 1460,
"end": 18787
} | class ____:
def __init__(
self,
http_host: str,
http_port: int,
http_port_retries: int,
gcs_address: str,
cluster_id_hex: str,
node_ip_address: str,
log_dir: str,
logging_level: int,
logging_format: str,
logging_filename: str,
logging_rotate_bytes: int,
logging_rotate_backup_count: int,
temp_dir: str,
session_dir: str,
minimal: bool,
serve_frontend: bool,
modules_to_load: Optional[Set[str]] = None,
):
"""
Args:
http_host: The host address for the Http server.
http_port: The port for the Http server.
http_port_retries: The maximum retry to bind ports for the Http server.
gcs_address: The GCS address in the {address}:{port} format.
log_dir: The log directory. E.g., /tmp/session_latest/logs.
logging_level: The logging level (e.g. logging.INFO, logging.DEBUG)
logging_format: The format string for log messages
logging_filename: The name of the log file
logging_rotate_bytes: Max size in bytes before rotating log file
logging_rotate_backup_count: Number of backup files to keep when rotating
temp_dir: The temp directory. E.g., /tmp.
session_dir: The session directory. E.g., tmp/session_latest.
minimal: Whether or not it will load the minimal modules.
serve_frontend: If configured, frontend HTML is
served from the dashboard.
modules_to_load: A set of module name in string to load.
By default (None), it loads all available modules.
Note that available modules could be changed depending on
minimal flags.
"""
self.minimal = minimal
self.serve_frontend = serve_frontend
# If it is the minimal mode, we shouldn't serve frontend.
if self.minimal:
self.serve_frontend = False
# Public attributes are accessible for all head modules.
# Walkaround for issue: https://github.com/ray-project/ray/issues/7084
self.http_host = "127.0.0.1" if http_host == "localhost" else http_host
self.http_port = http_port
self.http_port_retries = http_port_retries
self._modules_to_load = modules_to_load
self._modules_loaded = False
self.metrics = None
self._executor = ThreadPoolExecutor(
max_workers=RAY_DASHBOARD_DASHBOARD_HEAD_TPE_MAX_WORKERS,
thread_name_prefix="dashboard_head_executor",
)
assert gcs_address is not None
self.gcs_address = gcs_address
self.cluster_id_hex = cluster_id_hex
self.log_dir = log_dir
self.logging_level = logging_level
self.logging_format = logging_format
self.logging_filename = logging_filename
self.logging_rotate_bytes = logging_rotate_bytes
self.logging_rotate_backup_count = logging_rotate_backup_count
self.temp_dir = temp_dir
self.session_dir = session_dir
self.session_name = Path(session_dir).name
self.gcs_error_subscriber = None
self.gcs_log_subscriber = None
self.ip = node_ip_address
self.pid = os.getpid()
self.dashboard_proc = psutil.Process()
# If the dashboard is started as non-minimal version, http server should
# be configured to expose APIs.
self.http_server = None
async def _configure_http_server(
self,
dashboard_head_modules: List[DashboardHeadModule],
subprocess_module_handles: List["SubprocessModuleHandle"],
):
from ray.dashboard.http_server_head import HttpServerDashboardHead
self.http_server = HttpServerDashboardHead(
self.ip,
self.http_host,
self.http_port,
self.http_port_retries,
self.gcs_address,
self.session_name,
self.metrics,
)
await self.http_server.run(dashboard_head_modules, subprocess_module_handles)
@property
def http_session(self):
if not self._modules_loaded and not self.http_server:
# When the dashboard is still starting up, this property gets
# called as part of the method_route_table_factory magic. In
# this case, the property is not actually used but the magic
# method calls every property to look for a route to add to
# the global route table. It should be okay for http_server
# to still be None at this point.
return None
assert self.http_server, "Accessing unsupported API in a minimal ray."
return self.http_server.http_session
@async_loop_forever(dashboard_consts.GCS_CHECK_ALIVE_INTERVAL_SECONDS)
async def _gcs_check_alive(self):
try:
# If gcs is permanently dead, gcs client will exit the process
# (see gcs_rpc_client.h)
await self.gcs_client.async_check_alive(node_ids=[], timeout=None)
except Exception:
logger.warning("Failed to check gcs aliveness, will retry", exc_info=True)
def _load_modules(
self, modules_to_load: Optional[Set[str]] = None
) -> Tuple[List[DashboardHeadModule], List["SubprocessModuleHandle"]]:
"""
If minimal, only load DashboardHeadModule.
If non-minimal, load both kinds of modules: DashboardHeadModule, SubprocessModule.
If modules_to_load is not None, only load the modules in the set.
"""
dashboard_head_modules = self._load_dashboard_head_modules(modules_to_load)
subprocess_module_handles = self._load_subprocess_module_handles(
modules_to_load
)
all_names = {type(m).__name__ for m in dashboard_head_modules} | {
h.module_cls.__name__ for h in subprocess_module_handles
}
assert len(all_names) == len(dashboard_head_modules) + len(
subprocess_module_handles
), "Duplicate module names. A module name can't be a DashboardHeadModule and a SubprocessModule at the same time."
# Verify modules are loaded as expected.
if modules_to_load is not None and all_names != modules_to_load:
assert False, (
f"Actual loaded modules {all_names}, doesn't match the requested modules "
f"to load, {modules_to_load}."
)
self._modules_loaded = True
return dashboard_head_modules, subprocess_module_handles
def _load_dashboard_head_modules(
self, modules_to_load: Optional[Set[str]] = None
) -> List[DashboardHeadModule]:
"""Load `DashboardHeadModule`s.
Args:
modules: A list of module names to load. By default (None),
it loads all modules.
"""
modules = []
head_cls_list = dashboard_utils.get_all_modules(DashboardHeadModule)
config = DashboardHeadModuleConfig(
minimal=self.minimal,
cluster_id_hex=self.cluster_id_hex,
session_name=self.session_name,
gcs_address=self.gcs_address,
log_dir=self.log_dir,
temp_dir=self.temp_dir,
session_dir=self.session_dir,
ip=self.ip,
http_host=self.http_host,
http_port=self.http_port,
)
# Select modules to load.
if modules_to_load is not None:
head_cls_list = [
cls for cls in head_cls_list if cls.__name__ in modules_to_load
]
logger.info(f"DashboardHeadModules to load: {modules_to_load}.")
for cls in head_cls_list:
logger.info(f"Loading {DashboardHeadModule.__name__}: {cls}.")
c = cls(config)
modules.append(c)
logger.info(f"Loaded {len(modules)} dashboard head modules: {modules}.")
return modules
def _load_subprocess_module_handles(
self, modules_to_load: Optional[Set[str]] = None
) -> List["SubprocessModuleHandle"]:
"""
If minimal, return an empty list.
If non-minimal, load `SubprocessModule`s by creating Handles to them.
Args:
modules: A list of module names to load. By default (None),
it loads all modules.
"""
if self.minimal:
logger.info("Subprocess modules not loaded in minimal mode.")
return []
from ray.dashboard.subprocesses.handle import SubprocessModuleHandle
from ray.dashboard.subprocesses.module import (
SubprocessModule,
SubprocessModuleConfig,
)
handles = []
subprocess_cls_list = dashboard_utils.get_all_modules(SubprocessModule)
loop = ray._common.utils.get_or_create_event_loop()
config = SubprocessModuleConfig(
cluster_id_hex=self.cluster_id_hex,
gcs_address=self.gcs_address,
session_name=self.session_name,
temp_dir=self.temp_dir,
session_dir=self.session_dir,
logging_level=self.logging_level,
logging_format=self.logging_format,
log_dir=self.log_dir,
logging_filename=self.logging_filename,
logging_rotate_bytes=self.logging_rotate_bytes,
logging_rotate_backup_count=self.logging_rotate_backup_count,
socket_dir=str(Path(self.session_dir) / "sockets"),
)
# Select modules to load.
if modules_to_load is not None:
subprocess_cls_list = [
cls for cls in subprocess_cls_list if cls.__name__ in modules_to_load
]
for cls in subprocess_cls_list:
logger.info(f"Loading {SubprocessModule.__name__}: {cls}.")
handle = SubprocessModuleHandle(loop, cls, config)
handles.append(handle)
logger.info(f"Loaded {len(handles)} subprocess modules: {handles}.")
return handles
async def _setup_metrics(self, gcs_client):
metrics = DashboardPrometheusMetrics()
# Setup prometheus metrics export server
assert internal_kv._internal_kv_initialized()
assert gcs_client is not None
address = build_address(self.ip, DASHBOARD_METRIC_PORT)
await gcs_client.async_internal_kv_put(
"DashboardMetricsAddress".encode(), address.encode(), True, namespace=None
)
if prometheus_client:
try:
logger.info(
"Starting dashboard metrics server on port {}".format(
DASHBOARD_METRIC_PORT
)
)
kwargs = {"addr": "127.0.0.1"} if self.ip == "127.0.0.1" else {}
prometheus_client.start_http_server(
port=DASHBOARD_METRIC_PORT,
registry=metrics.registry,
**kwargs,
)
except Exception:
logger.exception(
"An exception occurred while starting the metrics server."
)
elif not prometheus_client:
logger.warning(
"`prometheus_client` not found, so metrics will not be exported."
)
return metrics
@dashboard_utils.async_loop_forever(dashboard_consts.METRICS_RECORD_INTERVAL_S)
async def _record_dashboard_metrics(
self, subprocess_module_handles: List["SubprocessModuleHandle"]
):
labels = {
"ip": self.ip,
"pid": self.pid,
"Version": ray.__version__,
"Component": "dashboard",
"SessionName": self.session_name,
}
assert "dashboard" in AVAILABLE_COMPONENT_NAMES_FOR_METRICS
self._record_cpu_mem_metrics_for_proc(self.dashboard_proc)
for subprocess_module_handle in subprocess_module_handles:
assert subprocess_module_handle.process is not None
proc = psutil.Process(subprocess_module_handle.process.pid)
self._record_cpu_mem_metrics_for_proc(
proc, subprocess_module_handle.module_cls.__name__
)
loop = ray._common.utils.get_or_create_event_loop()
self.metrics.metrics_event_loop_tasks.labels(**labels).set(
len(asyncio.all_tasks(loop))
)
# Report the max lag since the last export, if any.
if self._event_loop_lag_s_max is not None:
self.metrics.metrics_event_loop_lag.labels(**labels).set(
float(self._event_loop_lag_s_max)
)
self._event_loop_lag_s_max = None
def _record_cpu_mem_metrics_for_proc(
self, proc: psutil.Process, module_name: str = ""
):
labels = {
"ip": self.ip,
"pid": proc.pid,
"Version": ray.__version__,
"Component": "dashboard" if not module_name else "dashboard_" + module_name,
"SessionName": self.session_name,
}
proc_attrs = proc.as_dict(attrs=["cpu_percent", "memory_full_info"])
self.metrics.metrics_dashboard_cpu.labels(**labels).set(
float(proc_attrs.get("cpu_percent", 0.0))
)
# memory_full_info is None on Mac due to the permission issue
# (https://github.com/giampaolo/psutil/issues/883)
if proc_attrs.get("memory_full_info") is not None:
self.metrics.metrics_dashboard_mem_uss.labels(**labels).set(
float(proc_attrs.get("memory_full_info").uss) / 1.0e6
)
self.metrics.metrics_dashboard_mem_rss.labels(**labels).set(
float(proc_attrs.get("memory_full_info").rss) / 1.0e6
)
async def run(self):
gcs_address = self.gcs_address
# Dashboard will handle connection failure automatically
self.gcs_client = GcsClient(address=gcs_address, cluster_id=self.cluster_id_hex)
internal_kv._initialize_internal_kv(self.gcs_client)
dashboard_head_modules, subprocess_module_handles = self._load_modules(
self._modules_to_load
)
# Parallel start all subprocess modules.
for handle in subprocess_module_handles:
handle.start_module()
# Wait for all subprocess modules to be ready.
for handle in subprocess_module_handles:
handle.wait_for_module_ready()
if not self.minimal:
self.metrics = await self._setup_metrics(self.gcs_client)
self._event_loop_lag_s_max: Optional[float] = None
def on_new_lag(lag_s):
# Record the lag. It's exported in `record_dashboard_metrics`
self._event_loop_lag_s_max = max(self._event_loop_lag_s_max or 0, lag_s)
enable_monitor_loop_lag(on_new_lag)
self.record_dashboard_metrics_task = asyncio.create_task(
self._record_dashboard_metrics(subprocess_module_handles)
)
try:
assert internal_kv._internal_kv_initialized()
# Note: We always record the usage, but it is not reported
# if the usage stats is disabled.
record_extra_usage_tag(TagKey.DASHBOARD_USED, "False")
except Exception as e:
logger.warning(
"Failed to record the dashboard usage. "
"This error message is harmless and can be ignored. "
f"Error: {e}"
)
http_host, http_port = self.http_host, self.http_port
if self.serve_frontend:
logger.info("Initialize the http server.")
await self._configure_http_server(
dashboard_head_modules, subprocess_module_handles
)
http_host, http_port = self.http_server.get_address()
logger.info(
f"http server initialized at {build_address(http_host, http_port)}"
)
else:
logger.info("http server disabled.")
# We need to expose dashboard's node's ip for other worker nodes
# if it's listening to all interfaces.
dashboard_http_host = (
self.ip
if self.http_host != ray_constants.DEFAULT_DASHBOARD_IP
else http_host
)
# This synchronous code inside an async context is not great.
# It is however acceptable, because this only gets run once
# during initialization and therefore cannot block the event loop.
# This could be done better in the future, including
# removing the polling on the Ray side, by communicating the
# server address to Ray via stdin / stdout or a pipe.
self.gcs_client.internal_kv_put(
ray_constants.DASHBOARD_ADDRESS.encode(),
build_address(dashboard_http_host, http_port).encode(),
True,
namespace=ray_constants.KV_NAMESPACE_DASHBOARD,
)
concurrent_tasks = [
self._gcs_check_alive(),
]
for m in dashboard_head_modules:
concurrent_tasks.append(m.run())
await asyncio.gather(*concurrent_tasks)
if self.http_server:
await self.http_server.cleanup()
| DashboardHead |
python | walkccc__LeetCode | solutions/2294. Partition Array Such That Maximum Difference Is K/2294.py | {
"start": 0,
"end": 238
} | class ____:
def partitionArray(self, nums: list[int], k: int) -> int:
nums.sort()
ans = 1
mn = nums[0]
for i in range(1, len(nums)):
if mn + k < nums[i]:
ans += 1
mn = nums[i]
return ans
| Solution |
python | google__pytype | pytype/block_environment_test.py | {
"start": 279,
"end": 669
} | class ____:
def __init__(self):
self.blocks = {}
def add_edge(self, v1: int, v2: int):
b1 = self.blocks.setdefault(v1, FakeBlock(v1))
b2 = self.blocks.setdefault(v2, FakeBlock(v2))
b2.incoming.add(b1)
@classmethod
def make(cls, edges):
ret = cls()
for v1, v2 in edges:
ret.add_edge(v1, v2)
return ret
@dataclasses.dataclass(frozen=True)
| BlockGraph |
python | encode__django-rest-framework | tests/test_serializer.py | {
"start": 15615,
"end": 16711
} | class ____:
def test_not_required_output_for_dict(self):
"""
'required=False' should allow a dictionary key to be missing in output.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(required=False)
included = serializers.CharField()
serializer = ExampleSerializer(data={'included': 'abc'})
serializer.is_valid()
assert serializer.data == {'included': 'abc'}
def test_not_required_output_for_object(self):
"""
'required=False' should allow an object attribute to be missing in output.
"""
class ExampleSerializer(serializers.Serializer):
omitted = serializers.CharField(required=False)
included = serializers.CharField()
def create(self, validated_data):
return MockObject(**validated_data)
serializer = ExampleSerializer(data={'included': 'abc'})
serializer.is_valid()
serializer.save()
assert serializer.data == {'included': 'abc'}
| TestNotRequiredOutput |
python | spack__spack | var/spack/test_repos/spack_repo/builtin_mock/packages/patch_inheritance/package.py | {
"start": 199,
"end": 309
} | class ____(Patch):
def install(self, spec, prefix):
Patch.install(self, spec, prefix)
| PatchInheritance |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_dict.py | {
"start": 58795,
"end": 58884
} | class ____(mapping_tests.BasicTestMappingProtocol):
type2test = dict
| GeneralMappingTests |
python | getsentry__sentry | src/sentry/sentry_apps/services/app/model.py | {
"start": 4753,
"end": 5130
} | class ____(Protocol):
"""
Protocol making RpcSentryAppEvents capable of consuming from various sources, keeping only
the minimum required properties.
"""
@property
def id(self) -> str: ...
@property
def label(self) -> str: ...
@property
def actionType(self) -> str: ...
def is_enabled(self) -> bool: ...
| SentryAppEventDataInterface |
python | walkccc__LeetCode | solutions/236. Lowest Common Ancestor of a Binary Tree/236.py | {
"start": 0,
"end": 378
} | class ____:
def lowestCommonAncestor(
self,
root: 'TreeNode',
p: 'TreeNode',
q: 'TreeNode',
) -> 'TreeNode':
if not root or root == p or root == q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left and right:
return root
return left or right
| Solution |
python | PyCQA__pylint | tests/checkers/unittest_spelling.py | {
"start": 749,
"end": 21531
} | class ____(CheckerTestCase): # pylint:disable=too-many-public-methods
# This is a test case class, not sure why it would be relevant to have
# this pylint rule enforced for test case classes.
CHECKER_CLASS = spelling.SpellingChecker
skip_on_missing_package_or_dict = pytest.mark.skipif(
spell_dict is None,
reason="missing python-enchant package or missing spelling dictionaries",
)
def _get_msg_suggestions(self, word: str, count: int = 4) -> str:
suggestions = "' or '".join(self.checker.spelling_dict.suggest(word)[:count])
return f"'{suggestions}'"
def test_spelling_dict_help_no_enchant(self) -> None:
assert "both the python package and the system dep" in _get_enchant_dict_help(
[], pyenchant_available=False
)
assert "need to install the system dep" in _get_enchant_dict_help(
[], pyenchant_available=True
)
@skip_on_missing_package_or_dict
def test_spelling_dict_help_enchant(self) -> None:
assert "Available dictionaries: " in _get_enchant_dict_help(
enchant.Broker().list_dicts(), pyenchant_available=True
)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_check_bad_coment(self) -> None:
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-comment",
line=1,
args=(
"coment",
"# bad coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
)
):
self.checker.process_tokens(_tokenize_str("# bad coment"))
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
@set_config(max_spelling_suggestions=2)
def test_check_bad_comment_custom_suggestion_count(self) -> None:
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-comment",
line=1,
args=(
"coment",
"# bad coment",
" ^^^^^^",
self._get_msg_suggestions("coment", count=2),
),
)
):
self.checker.process_tokens(_tokenize_str("# bad coment"))
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_check_bad_docstring(self) -> None:
stmt = astroid.extract_node('def fff():\n """bad coment"""\n pass')
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"coment",
"bad coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
)
):
self.checker.visit_functiondef(stmt)
stmt = astroid.extract_node('class Abc(object):\n """bad coment"""\n pass')
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"coment",
"bad coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
)
):
self.checker.visit_classdef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_shebangs(self) -> None:
self.checker.process_tokens(_tokenize_str("#!/usr/bin/env python"))
assert not self.linter.release_messages()
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_python_coding_comments(self) -> None:
self.checker.process_tokens(_tokenize_str("# -*- coding: utf-8 -*-"))
assert not self.linter.release_messages()
self.checker.process_tokens(_tokenize_str("# coding=utf-8"))
assert not self.linter.release_messages()
self.checker.process_tokens(_tokenize_str("# vim: set fileencoding=utf-8 :"))
assert not self.linter.release_messages()
# Now with a shebang first
self.checker.process_tokens(
_tokenize_str("#!/usr/bin/env python\n# -*- coding: utf-8 -*-")
)
assert not self.linter.release_messages()
self.checker.process_tokens(
_tokenize_str("#!/usr/bin/env python\n# coding=utf-8")
)
assert not self.linter.release_messages()
self.checker.process_tokens(
_tokenize_str("#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :")
)
assert not self.linter.release_messages()
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_top_level_pylint_enable_disable_comments(self) -> None:
self.checker.process_tokens(
_tokenize_str("# Line 1\n Line 2\n# pylint: disable=ungrouped-imports")
)
assert not self.linter.release_messages()
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_words_with_numbers(self) -> None:
self.checker.process_tokens(_tokenize_str("\n# 0ne\n# Thr33\n# Sh3ll"))
assert not self.linter.release_messages()
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_wiki_words(self) -> None:
stmt = astroid.extract_node(
'class ComentAbc(object):\n """ComentAbc with a bad coment"""\n pass'
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"coment",
"ComentAbc with a bad coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
)
):
self.checker.visit_classdef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_camel_cased_words(self) -> None:
stmt = astroid.extract_node(
'class ComentAbc(object):\n """comentAbc with a bad coment"""\n pass'
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"coment",
"comentAbc with a bad coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
)
):
self.checker.visit_classdef(stmt)
# With just a single upper case letter in the end
stmt = astroid.extract_node(
'class ComentAbc(object):\n """argumentN with a bad coment"""\n pass'
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"coment",
"argumentN with a bad coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
)
):
self.checker.visit_classdef(stmt)
for ccn in (
"xmlHttpRequest",
"newCustomer",
"newCustomerId",
"innerStopwatch",
"supportsIpv6OnIos",
"affine3D",
):
stmt = astroid.extract_node(
f'class TestClass(object):\n """{ccn} comment"""\n pass'
)
self.checker.visit_classdef(stmt)
assert not self.linter.release_messages()
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_words_with_underscores(self) -> None:
stmt = astroid.extract_node(
'def fff(param_name):\n """test param_name"""\n pass'
)
self.checker.visit_functiondef(stmt)
assert not self.linter.release_messages()
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_email_address(self) -> None:
self.checker.process_tokens(_tokenize_str("# uname@domain.tld"))
assert not self.linter.release_messages()
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_urls(self) -> None:
self.checker.process_tokens(_tokenize_str("# https://github.com/rfk/pyenchant"))
assert not self.linter.release_messages()
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
@pytest.mark.parametrize(
"type_comment",
[
"# type: (NotAWord) -> NotAWord",
"# type: List[NotAWord] -> List[NotAWord]",
"# type: Dict[NotAWord] -> Dict[NotAWord]",
"# type: NotAWord",
"# type: List[NotAWord]",
"# type: Dict[NotAWord]",
"# type: ImmutableList[Manager]",
# will result in error: Invalid "type: ignore" comment [syntax]
# when analyzed with mypy 1.02
"# type: ignore[attr-defined] NotAWord",
],
)
def test_skip_type_comments(self, type_comment: str) -> None:
self.checker.process_tokens(_tokenize_str(type_comment))
assert not self.linter.release_messages()
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_sphinx_directives(self) -> None:
stmt = astroid.extract_node(
'class ComentAbc(object):\n """This is :class:`ComentAbc` with a bad coment"""\n pass'
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"coment",
"This is :class:`ComentAbc` with a bad coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
)
):
self.checker.visit_classdef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_sphinx_directives_2(self) -> None:
stmt = astroid.extract_node(
'class ComentAbc(object):\n """This is :py:attr:`ComentAbc` with a bad coment"""\n pass'
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"coment",
"This is :py:attr:`ComentAbc` with a bad coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
)
):
self.checker.visit_classdef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
@pytest.mark.parametrize(
"prefix,suffix",
(
pytest.param("fmt", ": on", id="black directive to turn on formatting"),
pytest.param("fmt", ": off", id="black directive to turn off formatting"),
pytest.param("noqa", "", id="pycharm directive"),
pytest.param("noqa", ":", id="flake8 / zimports directive"),
pytest.param("nosec", "", id="bandit directive"),
pytest.param("isort", ":skip", id="isort directive"),
pytest.param("mypy", ":", id="mypy top of file directive"),
),
)
def test_tool_directives_handling(self, prefix: str, suffix: str) -> None:
"""We're not raising when the directive is at the beginning of comments,
but we raise if a directive appears later in comment.
"""
full_comment = f"# {prefix}{suffix} {prefix}"
args = (
prefix,
full_comment,
f" {'^' * len(prefix)}",
self._get_msg_suggestions(prefix),
)
with self.assertAddsMessages(
MessageTest("wrong-spelling-in-comment", line=1, args=args)
):
self.checker.process_tokens(_tokenize_str(full_comment))
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_code_flanked_in_double_backticks(self) -> None:
full_comment = "# The function ``.qsize()`` .qsize()"
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-comment",
line=1,
args=(
"qsize",
full_comment,
" ^^^^^",
self._get_msg_suggestions("qsize"),
),
)
):
self.checker.process_tokens(_tokenize_str(full_comment))
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_skip_code_flanked_in_single_backticks(self) -> None:
full_comment = "# The function `.qsize()` .qsize()"
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-comment",
line=1,
args=(
"qsize",
full_comment,
" ^^^^^",
self._get_msg_suggestions("qsize"),
),
)
):
self.checker.process_tokens(_tokenize_str(full_comment))
@skip_on_missing_package_or_dict
@set_config(
spelling_dict=spell_dict,
spelling_ignore_comment_directives="newdirective:,noqa",
)
def test_skip_directives_specified_in_pylintrc(self) -> None:
full_comment = "# newdirective: do this newdirective"
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-comment",
line=1,
args=(
"newdirective",
full_comment,
" ^^^^^^^^^^^^",
self._get_msg_suggestions("newdirective"),
),
)
):
self.checker.process_tokens(_tokenize_str(full_comment))
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_handle_words_joined_by_forward_slash(self) -> None:
stmt = astroid.extract_node(
'''
class ComentAbc(object):
"""This is Comment/Abcz with a bad comment"""
pass
'''
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=3,
args=(
"Abcz",
"This is Comment/Abcz with a bad comment",
" ^^^^",
self._get_msg_suggestions("Abcz"),
),
)
):
self.checker.visit_classdef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_more_than_one_error_in_same_line_for_same_word_on_docstring(self) -> None:
stmt = astroid.extract_node(
'class ComentAbc(object):\n """Check teh dummy comment teh"""\n pass'
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"teh",
"Check teh dummy comment teh",
" ^^^",
self._get_msg_suggestions("teh"),
),
),
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"teh",
"Check teh dummy comment teh",
" ^^^",
self._get_msg_suggestions("teh"),
),
),
):
self.checker.visit_classdef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_more_than_one_error_in_same_line_for_same_word_on_comment(self) -> None:
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-comment",
line=1,
args=(
"coment",
"# bad coment coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
),
MessageTest(
"wrong-spelling-in-comment",
line=1,
args=(
"coment",
"# bad coment coment",
" ^^^^^^",
self._get_msg_suggestions("coment"),
),
),
):
self.checker.process_tokens(_tokenize_str("# bad coment coment"))
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_docstring_lines_that_look_like_comments_1(self) -> None:
stmt = astroid.extract_node(
'''def f():
"""
# msitake
"""'''
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=3,
args=(
"msitake",
" # msitake",
" ^^^^^^^",
self._get_msg_suggestions("msitake"),
),
)
):
self.checker.visit_functiondef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_docstring_lines_that_look_like_comments_2(self) -> None:
stmt = astroid.extract_node(
'''def f():
"""# msitake"""'''
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=2,
args=(
"msitake",
"# msitake",
" ^^^^^^^",
self._get_msg_suggestions("msitake"),
),
)
):
self.checker.visit_functiondef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_docstring_lines_that_look_like_comments_3(self) -> None:
stmt = astroid.extract_node(
'''def f():
"""
# msitake
"""'''
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=3,
args=(
"msitake",
"# msitake",
" ^^^^^^^",
self._get_msg_suggestions("msitake"),
),
)
):
self.checker.visit_functiondef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_docstring_lines_that_look_like_comments_4(self) -> None:
stmt = astroid.extract_node(
'''def f():
"""
# cat
"""'''
)
with self.assertAddsMessages():
self.checker.visit_functiondef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_docstring_lines_that_look_like_comments_5(self) -> None:
stmt = astroid.extract_node(
'''def f():
"""
msitake # cat
"""'''
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=3,
args=(
"msitake",
" msitake # cat",
" ^^^^^^^",
self._get_msg_suggestions("msitake"),
),
)
):
self.checker.visit_functiondef(stmt)
@skip_on_missing_package_or_dict
@set_config(spelling_dict=spell_dict)
def test_docstring_lines_that_look_like_comments_6(self) -> None:
stmt = astroid.extract_node(
'''def f():
"""
cat # msitake
"""'''
)
with self.assertAddsMessages(
MessageTest(
"wrong-spelling-in-docstring",
line=3,
args=(
"msitake",
" cat # msitake",
" ^^^^^^^",
self._get_msg_suggestions("msitake"),
),
)
):
self.checker.visit_functiondef(stmt)
| TestSpellingChecker |
python | tensorflow__tensorflow | tensorflow/python/keras/layers/convolutional.py | {
"start": 131584,
"end": 140598
} | class ____(Layer):
"""Cropping layer for 3D data (e.g. spatial or spatio-temporal).
Examples:
>>> input_shape = (2, 28, 28, 10, 3)
>>> x = np.arange(np.prod(input_shape)).reshape(input_shape)
>>> y = tf.keras.layers.Cropping3D(cropping=(2, 4, 2))(x)
>>> print(y.shape)
(2, 24, 20, 6, 3)
Args:
cropping: Int, or tuple of 3 ints, or tuple of 3 tuples of 2 ints.
- If int: the same symmetric cropping
is applied to depth, height, and width.
- If tuple of 3 ints: interpreted as two different
symmetric cropping values for depth, height, and width:
`(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop)`.
- If tuple of 3 tuples of 2 ints: interpreted as
`((left_dim1_crop, right_dim1_crop), (left_dim2_crop,
right_dim2_crop), (left_dim3_crop, right_dim3_crop))`
data_format: A string,
one of `channels_last` (default) or `channels_first`.
The ordering of the dimensions in the inputs.
`channels_last` corresponds to inputs with shape
`(batch_size, spatial_dim1, spatial_dim2, spatial_dim3, channels)`
while `channels_first` corresponds to inputs with shape
`(batch_size, channels, spatial_dim1, spatial_dim2, spatial_dim3)`.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
Input shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_axis_to_crop, second_axis_to_crop, third_axis_to_crop,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_axis_to_crop, second_axis_to_crop,
third_axis_to_crop)`
Output shape:
5D tensor with shape:
- If `data_format` is `"channels_last"`:
`(batch_size, first_cropped_axis, second_cropped_axis, third_cropped_axis,
depth)`
- If `data_format` is `"channels_first"`:
`(batch_size, depth, first_cropped_axis, second_cropped_axis,
third_cropped_axis)`
"""
def __init__(self,
cropping=((1, 1), (1, 1), (1, 1)),
data_format=None,
**kwargs):
super(Cropping3D, self).__init__(**kwargs)
self.data_format = conv_utils.normalize_data_format(data_format)
if isinstance(cropping, int):
self.cropping = ((cropping, cropping), (cropping, cropping), (cropping,
cropping))
elif hasattr(cropping, '__len__'):
if len(cropping) != 3:
raise ValueError('`cropping` should have 3 elements. '
'Found: ' + str(cropping))
dim1_cropping = conv_utils.normalize_tuple(cropping[0], 2,
'1st entry of cropping')
dim2_cropping = conv_utils.normalize_tuple(cropping[1], 2,
'2nd entry of cropping')
dim3_cropping = conv_utils.normalize_tuple(cropping[2], 2,
'3rd entry of cropping')
self.cropping = (dim1_cropping, dim2_cropping, dim3_cropping)
else:
raise ValueError(
'`cropping` should be either an int, '
'a tuple of 3 ints '
'(symmetric_dim1_crop, symmetric_dim2_crop, symmetric_dim3_crop), '
'or a tuple of 3 tuples of 2 ints '
'((left_dim1_crop, right_dim1_crop),'
' (left_dim2_crop, right_dim2_crop),'
' (left_dim3_crop, right_dim2_crop)). '
'Found: ' + str(cropping))
self.input_spec = InputSpec(ndim=5)
def compute_output_shape(self, input_shape):
input_shape = tensor_shape.TensorShape(input_shape).as_list()
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if input_shape[2] is not None:
dim1 = input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[3] is not None:
dim2 = input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[4] is not None:
dim3 = input_shape[4] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], input_shape[1], dim1, dim2, dim3])
elif self.data_format == 'channels_last':
if input_shape[1] is not None:
dim1 = input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
else:
dim1 = None
if input_shape[2] is not None:
dim2 = input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
else:
dim2 = None
if input_shape[3] is not None:
dim3 = input_shape[3] - self.cropping[2][0] - self.cropping[2][1]
else:
dim3 = None
return tensor_shape.TensorShape(
[input_shape[0], dim1, dim2, dim3, input_shape[4]])
# pylint: enable=invalid-unary-operand-type
def call(self, inputs):
# pylint: disable=invalid-unary-operand-type
if self.data_format == 'channels_first':
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:]
elif self.cropping[0][1] == 0:
return inputs[:, :, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[1][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:, self.cropping[2][0]:-self.cropping[2][1]]
elif self.cropping[2][1] == 0:
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1], self.
cropping[1][0]:-self.cropping[1][1], self.cropping[2][0]:]
return inputs[:, :, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[2][
0]:-self.cropping[2][1]]
else:
if self.cropping[0][1] == self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:,
self.cropping[2][0]:-self.cropping[2][1], :]
elif self.cropping[1][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:, self.cropping[2][0]:, :]
elif self.cropping[0][1] == self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][0]:
-self.cropping[1][1], self.cropping[2][0]:, :]
elif self.cropping[0][1] == 0:
return inputs[:, self.cropping[0][0]:, self.cropping[1][
0]:-self.cropping[1][1], self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[1][1] == 0:
return inputs[:, self.cropping[0][
0]:-self.cropping[0][1], self.cropping[1][0]:, self.cropping[2][0]:
-self.cropping[2][1], :]
elif self.cropping[2][1] == 0:
return inputs[:, self.cropping[0][0]:-self.cropping[0][1],
self.cropping[1][0]:-self.cropping[1][1], self.cropping[
2][0]:, :]
return inputs[:, self.cropping[0][0]:-self.cropping[0][1], self.cropping[
1][0]:-self.cropping[1][1], self.cropping[2][0]: # pylint: disable=invalid-unary-operand-type
-self.cropping[2][1], :] # pylint: disable=invalid-unary-operand-type
# pylint: enable=invalid-unary-operand-type
def get_config(self):
config = {'cropping': self.cropping, 'data_format': self.data_format}
base_config = super(Cropping3D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Aliases
Convolution1D = Conv1D
Convolution2D = Conv2D
Convolution3D = Conv3D
SeparableConvolution1D = SeparableConv1D
SeparableConvolution2D = SeparableConv2D
Convolution2DTranspose = Conv2DTranspose
Convolution3DTranspose = Conv3DTranspose
Deconvolution2D = Deconv2D = Conv2DTranspose
Deconvolution3D = Deconv3D = Conv3DTranspose
| Cropping3D |
python | django__django | tests/admin_inlines/admin.py | {
"start": 3404,
"end": 3548
} | class ____(admin.StackedInline):
model = Inner
can_delete = False
readonly_fields = ("readonly",) # For bug #13174 tests.
| InnerInline |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 299002,
"end": 303644
} | class ____(IRNode):
"""
TensorBox / StorageBox allow in-place mutation of Tensors
"""
data: IRNode
def has_exceeded_max_reads(self) -> bool:
return self.data.has_exceeded_max_reads()
def get_device(self) -> Optional[torch.device]:
return self.data.get_device()
def make_loader(self) -> Callable[[Sequence[Expr]], OpsValue]:
return self.data.make_loader()
def make_indexer(self) -> Callable[[Sequence[Expr]], Expr]:
return self.data.make_indexer()
def get_stride(self) -> Sequence[_IntLike]:
return self.data.get_stride()
def get_name(self) -> str:
return self.data.get_name()
def has_large_inner_fn(self, threshold: Optional[int] = None) -> bool:
return self.data.has_large_inner_fn(threshold)
def mark_reuse(self, users: int) -> None:
return self.data.mark_reuse(users)
def realize_hint(self) -> None:
return self.data.realize_hint()
def unwrap_view(self) -> IRNode:
return self.data.unwrap_view()
def is_input_buffer(self) -> bool:
return self.data.is_input_buffer()
def freeze_layout(self) -> None:
return self.data.freeze_layout()
def freeze_layout_with_stride_order(
self, order: Sequence[int], allow_padding: bool = False
) -> None:
return self.data.freeze_layout_with_stride_order(order, allow_padding)
def freeze_layout_with_fill_order(self, order: Sequence[int]) -> None:
return self.data.freeze_layout_with_fill_order(order)
def freeze_layout_with_same_order(self, stride: Sequence[_IntLike]) -> None:
return self.data.freeze_layout_with_same_order(stride)
def freeze_layout_with_exact_strides(
self, exact_strides: Sequence[_IntLike], allow_padding: bool = False
) -> None:
return self.data.freeze_layout_with_exact_strides(exact_strides, allow_padding)
def get_read_writes(self) -> dependencies.ReadWrites:
return self.data.get_read_writes()
def get_reads(self) -> OrderedSet[Dep]:
return self.data.get_reads()
def num_reads(self) -> int:
return self.data.num_reads()
def get_storage_numel(self) -> _IntLike:
return self.data.get_storage_numel()
def get_reduction_type(self) -> Optional[str]:
return self.data.get_reduction_type()
def get_reduction_size(self) -> Sequence[Expr]:
return self.data.get_reduction_size()
def is_extern(self) -> bool:
return self.data.is_extern()
def is_no_op(self) -> bool:
return self.data.is_no_op()
def constant_to_device(self, device: torch.device) -> IRNode:
return self.data.constant_to_device(device)
def get_mutation_names(self) -> Sequence[str]:
return self.data.get_mutation_names()
def get_operation_name(self) -> str:
return self.data.get_operation_name()
def get_inputs_that_alias_output(self) -> Sequence[str]:
return self.data.get_inputs_that_alias_output()
def realize(self) -> Optional[str]:
return self.data.realize()
@cache_on_self_and_args("MutableBox")
def get_free_symbol_uses(
self, unbacked_only: bool = False
) -> OrderedSet[sympy.Symbol]:
return self.data.get_free_symbol_uses(unbacked_only)
def get_read_names(self) -> OrderedSet[str]:
return self.data.get_read_names()
def get_defining_op(self) -> Optional[Operation]:
return self.data.get_defining_op()
def codegen_reference(self, writer: Optional[IndentedBuffer] = None) -> str:
return self.data.codegen_reference(writer)
@property
def layout(self) -> OutputSpec:
# we intentionally call get_output_spec (rather than get_layout) since Buffer.layout is an OutputSpec
return self.data.get_output_spec()
def get_layout(self) -> Layout:
return self.data.get_layout()
def get_output_spec(self) -> OutputSpec:
return self.data.get_output_spec()
def get_size(self) -> Sequence[Expr]:
return self.data.get_size()
@property
def dtype(self) -> torch.dtype:
return self.data.dtype
def __str__(self) -> str:
if isinstance(self.data, MutableBox):
line0 = f"{type(self).__name__}({type(self.data).__name__}("
endl = "))"
inner = self.data.data
else:
line0 = f"{type(self).__name__}("
inner = self.data
endl = ")"
lines = [
line0,
indent(str(inner)),
endl,
]
return "\n".join(lines)
__repr__ = __str__
| MutableBox |
python | apache__airflow | providers/standard/src/airflow/providers/standard/sensors/date_time.py | {
"start": 4219,
"end": 6355
} | class ____(DateTimeSensor):
"""
Wait until the specified datetime occurs.
Deferring itself to avoid taking up a worker slot while it is waiting.
It is a drop-in replacement for DateTimeSensor.
:param target_time: datetime after which the job succeeds. (templated)
:param start_from_trigger: Start the task directly from the triggerer without going into the worker.
:param trigger_kwargs: The keyword arguments passed to the trigger when start_from_trigger is set to True
during dynamic task mapping. This argument is not used in standard usage.
:param end_from_trigger: End the task directly from the triggerer without going into the worker.
"""
start_trigger_args = StartTriggerArgs(
trigger_cls="airflow.providers.standard.triggers.temporal.DateTimeTrigger",
trigger_kwargs={"moment": "", "end_from_trigger": False},
next_method="execute_complete",
next_kwargs=None,
timeout=None,
)
start_from_trigger = False
def __init__(
self,
*,
start_from_trigger: bool = False,
end_from_trigger: bool = False,
trigger_kwargs: dict[str, Any] | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.end_from_trigger = end_from_trigger
self.start_from_trigger = start_from_trigger
if self.start_from_trigger:
self.start_trigger_args.trigger_kwargs = dict(
moment=timezone.parse(self.target_time),
end_from_trigger=self.end_from_trigger,
)
def execute(self, context: Context) -> NoReturn:
self.defer(
method_name="execute_complete",
trigger=DateTimeTrigger(
moment=self._moment,
end_from_trigger=self.end_from_trigger,
)
if AIRFLOW_V_3_0_PLUS
else DateTimeTrigger(moment=self._moment),
)
def execute_complete(self, context: Context, event: Any = None) -> None:
"""Handle the event when the trigger fires and return immediately."""
return None
| DateTimeSensorAsync |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/internal/conjecture/shrinking/choicetree.py | {
"start": 1543,
"end": 3850
} | class ____:
"""A source of nondeterminism for use in shrink passes."""
def __init__(
self,
tree: "ChoiceTree",
selection_order: Callable[[int, int], Iterable[int]],
):
self.__selection_order = selection_order
self.__node_trail = [tree.root]
self.__choices: list[int] = []
self.__finished = False
def choose(
self,
values: Sequence[int],
condition: Callable[[int], bool] = lambda x: True,
) -> int:
"""Return some element of values satisfying the condition
that will not lead to an exhausted branch, or raise DeadBranch
if no such element exist".
"""
assert not self.__finished
node = self.__node_trail[-1]
if node.live_child_count is None:
node.live_child_count = len(values)
node.n = len(values)
assert node.live_child_count > 0 or len(values) == 0
for i in self.__selection_order(len(self.__choices), len(values)):
if node.live_child_count == 0:
break
if not node.children[i].exhausted:
v = values[i]
if condition(v):
self.__choices.append(i)
self.__node_trail.append(node.children[i])
return v
else:
node.children[i] = DeadNode
node.live_child_count -= 1
assert node.live_child_count == 0
raise DeadBranch
def finish(self) -> Sequence[int]:
"""Record the decisions made in the underlying tree and return
a prefix that can be used for the next Chooser to be used."""
self.__finished = True
assert len(self.__node_trail) == len(self.__choices) + 1
result = tuple(self.__choices)
self.__node_trail[-1].live_child_count = 0
while len(self.__node_trail) > 1 and self.__node_trail[-1].exhausted:
self.__node_trail.pop()
assert len(self.__node_trail) == len(self.__choices)
i = self.__choices.pop()
target = self.__node_trail[-1]
target.children[i] = DeadNode
assert target.live_child_count is not None
target.live_child_count -= 1
return result
| Chooser |
python | getsentry__sentry | src/sentry/api/exceptions.py | {
"start": 2860,
"end": 3042
} | class ____(SentryAPIException):
status_code = status.HTTP_403_FORBIDDEN
code = "superuser-required"
message = "You need to re-authenticate for superuser."
| SuperuserRequired |
python | django__django | tests/generic_inline_admin/tests.py | {
"start": 899,
"end": 3640
} | class ____(TestDataMixin, TestCase):
def setUp(self):
self.client.force_login(self.superuser)
e = Episode.objects.create(name="This Week in Django")
self.episode_pk = e.pk
m = Media(content_object=e, url="http://example.com/podcast.mp3")
m.save()
self.mp3_media_pk = m.pk
m = Media(content_object=e, url="http://example.com/logo.png")
m.save()
self.png_media_pk = m.pk
def test_basic_add_GET(self):
"""
A smoke test to ensure GET on the add_view works.
"""
response = self.client.get(reverse("admin:generic_inline_admin_episode_add"))
self.assertEqual(response.status_code, 200)
def test_basic_edit_GET(self):
"""
A smoke test to ensure GET on the change_view works.
"""
response = self.client.get(
reverse(
"admin:generic_inline_admin_episode_change", args=(self.episode_pk,)
)
)
self.assertEqual(response.status_code, 200)
def test_basic_add_POST(self):
"""
A smoke test to ensure POST on add_view works.
"""
post_data = {
"name": "This Week in Django",
# inline data
"generic_inline_admin-media-content_type-object_id-TOTAL_FORMS": "1",
"generic_inline_admin-media-content_type-object_id-INITIAL_FORMS": "0",
"generic_inline_admin-media-content_type-object_id-MAX_NUM_FORMS": "0",
}
response = self.client.post(
reverse("admin:generic_inline_admin_episode_add"), post_data
)
self.assertEqual(response.status_code, 302) # redirect somewhere
def test_basic_edit_POST(self):
"""
A smoke test to ensure POST on edit_view works.
"""
prefix = "generic_inline_admin-media-content_type-object_id"
post_data = {
"name": "This Week in Django",
# inline data
f"{prefix}-TOTAL_FORMS": "3",
f"{prefix}-INITIAL_FORMS": "2",
f"{prefix}-MAX_NUM_FORMS": "0",
f"{prefix}-0-id": str(self.mp3_media_pk),
f"{prefix}-0-url": "http://example.com/podcast.mp3",
f"{prefix}-1-id": str(self.png_media_pk),
f"{prefix}-1-url": "http://example.com/logo.png",
f"{prefix}-2-id": "",
f"{prefix}-2-url": "",
}
url = reverse(
"admin:generic_inline_admin_episode_change", args=(self.episode_pk,)
)
response = self.client.post(url, post_data)
self.assertEqual(response.status_code, 302) # redirect somewhere
@override_settings(ROOT_URLCONF="generic_inline_admin.urls")
| GenericAdminViewTest |
python | openai__openai-python | src/openai/types/realtime/realtime_audio_formats_param.py | {
"start": 492,
"end": 616
} | class ____(TypedDict, total=False):
type: Literal["audio/pcmu"]
"""The audio format. Always `audio/pcmu`."""
| AudioPCMU |
python | catalyst-team__catalyst | catalyst/loggers/mlflow.py | {
"start": 2172,
"end": 7061
} | class ____(ILogger):
"""Mlflow logger for parameters, metrics, images and other artifacts.
Mlflow documentation: https://mlflow.org/docs/latest/index.html.
Args:
experiment: Name of the experiment in MLflow to log to.
run: Name of the run in Mlflow to log to.
tracking_uri: URI of tracking server against which
to log run information related.
registry_uri: Address of local or remote model registry server.
exclude: Name of to exclude from logging.
log_batch_metrics: boolean flag to log batch metrics
(default: SETTINGS.log_batch_metrics or False).
log_epoch_metrics: boolean flag to log epoch metrics
(default: SETTINGS.log_epoch_metrics or True).
Python API examples:
.. code-block:: python
from catalyst import dl
runner = dl.SupervisedRunner()
runner.train(
...,
loggers={"mlflow": dl.MLflowLogger(experiment="test_exp", run="test_run")}
)
.. code-block:: python
from catalyst import dl
class CustomRunner(dl.IRunner):
# ...
def get_loggers(self):
return {
"console": dl.ConsoleLogger(),
"mlflow": dl.MLflowLogger(experiment="test_exp", run="test_run")
}
# ...
runner = CustomRunner().run()
"""
def __init__(
self,
experiment: str,
run: Optional[str] = None,
tracking_uri: Optional[str] = None,
registry_uri: Optional[str] = None,
exclude: Optional[List[str]] = None,
log_batch_metrics: bool = SETTINGS.log_batch_metrics,
log_epoch_metrics: bool = SETTINGS.log_epoch_metrics,
) -> None:
super().__init__(
log_batch_metrics=log_batch_metrics, log_epoch_metrics=log_epoch_metrics
)
self.experiment = experiment
self.run = run
self.tracking_uri = tracking_uri
self.registry_uri = registry_uri
self.exclude = exclude
mlflow.set_tracking_uri(self.tracking_uri)
mlflow.set_registry_uri(self.registry_uri)
mlflow.set_experiment(self.experiment)
_get_or_start_run(run_name=self.run)
@property
def logger(self):
"""Internal logger/experiment/etc. from the monitoring system."""
return mlflow
@staticmethod
def _log_metrics(metrics: Dict[str, float], step: int, loader_key: str, suffix=""):
for key, value in metrics.items():
mlflow.log_metric(f"{key}/{loader_key}{suffix}", value, step=step)
def log_artifact(
self,
tag: str,
runner: "IRunner",
artifact: object = None,
path_to_artifact: str = None,
scope: str = None,
) -> None:
"""Logs a local file or directory as an artifact to the logger."""
mlflow.log_artifact(path_to_artifact)
def log_image(
self,
tag: str,
image: np.ndarray,
runner: "IRunner",
scope: str = None,
) -> None:
"""Logs image to MLflow for current scope on current step."""
if scope == "batch" or scope == "loader":
log_path = "_".join(
[tag, f"epoch-{runner.epoch_step:04d}", f"loader-{runner.loader_key}"]
)
elif scope == "epoch":
log_path = "_".join([tag, f"epoch-{runner.epoch_step:04d}"])
elif scope == "experiment" or scope is None:
log_path = tag
mlflow.log_image(image, f"{log_path}.png")
def log_hparams(self, hparams: Dict, runner: "IRunner" = None) -> None:
"""Logs parameters for current scope.
Args:
hparams: Parameters to log.
runner: experiment runner
"""
_mlflow_log_params_dict(hparams, log_type="param", exclude=self.exclude)
def log_metrics(
self,
metrics: Dict[str, float],
scope: str,
runner: "IRunner",
) -> None:
"""Logs batch and epoch metrics to MLflow."""
if scope == "batch" and self.log_batch_metrics:
metrics = {k: float(v) for k, v in metrics.items()}
self._log_metrics(
metrics=metrics,
step=runner.batch_step,
loader_key=runner.loader_key,
suffix="/batch",
)
elif scope == "epoch" and self.log_epoch_metrics:
for loader_key, per_loader_metrics in metrics.items():
self._log_metrics(
metrics=per_loader_metrics,
step=runner.epoch_step,
loader_key=loader_key,
suffix="/epoch",
)
def close_log(self) -> None:
"""End an active MLflow run."""
mlflow.end_run()
__all__ = ["MLflowLogger"]
| MLflowLogger |
python | huggingface__transformers | examples/pytorch/text-classification/run_glue.py | {
"start": 2417,
"end": 6129
} | class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
Using `HfArgumentParser` we can turn this class
into argparse arguments to be able to specify them on
the command line.
"""
task_name: Optional[str] = field(
default=None,
metadata={"help": "The name of the task to train on: " + ", ".join(task_to_keys.keys())},
)
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
max_seq_length: int = field(
default=128,
metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
)
},
)
train_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the training data."}
)
validation_file: Optional[str] = field(
default=None, metadata={"help": "A csv or a json file containing the validation data."}
)
test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
def __post_init__(self):
if self.task_name is not None:
self.task_name = self.task_name.lower()
if self.task_name not in task_to_keys:
raise ValueError("Unknown task, you should pick one in " + ",".join(task_to_keys.keys()))
elif self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
else:
train_extension = self.train_file.split(".")[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
validation_extension = self.validation_file.split(".")[-1]
assert validation_extension == train_extension, (
"`validation_file` should have the same extension (csv or json) as `train_file`."
)
@dataclass
| DataTrainingArguments |
python | getsentry__sentry | src/sentry/issues/grouptype.py | {
"start": 14098,
"end": 14509
} | class ____(GroupType):
type_id = 1910
slug = "performance_n_plus_one_api_calls_experimental"
description = "N+1 API Call (Experimental)"
category = GroupCategory.PERFORMANCE.value
category_v2 = GroupCategory.HTTP_CLIENT.value
noise_config = NoiseConfig()
default_priority = PriorityLevel.LOW
released = False
@dataclass(frozen=True)
| PerformanceNPlusOneAPICallsExperimentalGroupType |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.