language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | python-openxml__python-docx | src/docx/oxml/settings.py | {
"start": 261,
"end": 4264
class ____(BaseOxmlElement):
    """`w:settings` element, root element for the settings part."""

    # Typed declarations for the accessor methods that the `ZeroOrOne` element
    # property generates at class-construction time (static type checkers only).
    get_or_add_evenAndOddHeaders: Callable[[], CT_OnOff]
    _remove_evenAndOddHeaders: Callable[[], None]

    # Schema-defined ordering of all possible `w:settings` child elements; used
    # below to compute the valid successor elements when inserting a child.
    _tag_seq = (
        "w:writeProtection",
        "w:view",
        "w:zoom",
        "w:removePersonalInformation",
        "w:removeDateAndTime",
        "w:doNotDisplayPageBoundaries",
        "w:displayBackgroundShape",
        "w:printPostScriptOverText",
        "w:printFractionalCharacterWidth",
        "w:printFormsData",
        "w:embedTrueTypeFonts",
        "w:embedSystemFonts",
        "w:saveSubsetFonts",
        "w:saveFormsData",
        "w:mirrorMargins",
        "w:alignBordersAndEdges",
        "w:bordersDoNotSurroundHeader",
        "w:bordersDoNotSurroundFooter",
        "w:gutterAtTop",
        "w:hideSpellingErrors",
        "w:hideGrammaticalErrors",
        "w:activeWritingStyle",
        "w:proofState",
        "w:formsDesign",
        "w:attachedTemplate",
        "w:linkStyles",
        "w:stylePaneFormatFilter",
        "w:stylePaneSortMethod",
        "w:documentType",
        "w:mailMerge",
        "w:revisionView",
        "w:trackRevisions",
        "w:doNotTrackMoves",
        "w:doNotTrackFormatting",
        "w:documentProtection",
        "w:autoFormatOverride",
        "w:styleLockTheme",
        "w:styleLockQFSet",
        "w:defaultTabStop",
        "w:autoHyphenation",
        "w:consecutiveHyphenLimit",
        "w:hyphenationZone",
        "w:doNotHyphenateCaps",
        "w:showEnvelope",
        "w:summaryLength",
        "w:clickAndTypeStyle",
        "w:defaultTableStyle",
        "w:evenAndOddHeaders",
        "w:bookFoldRevPrinting",
        "w:bookFoldPrinting",
        "w:bookFoldPrintingSheets",
        "w:drawingGridHorizontalSpacing",
        "w:drawingGridVerticalSpacing",
        "w:displayHorizontalDrawingGridEvery",
        "w:displayVerticalDrawingGridEvery",
        "w:doNotUseMarginsForDrawingGridOrigin",
        "w:drawingGridHorizontalOrigin",
        "w:drawingGridVerticalOrigin",
        "w:doNotShadeFormData",
        "w:noPunctuationKerning",
        "w:characterSpacingControl",
        "w:printTwoOnOne",
        "w:strictFirstAndLastChars",
        "w:noLineBreaksAfter",
        "w:noLineBreaksBefore",
        "w:savePreviewPicture",
        "w:doNotValidateAgainstSchema",
        "w:saveInvalidXml",
        "w:ignoreMixedContent",
        "w:alwaysShowPlaceholderText",
        "w:doNotDemarcateInvalidXml",
        "w:saveXmlDataOnly",
        "w:useXSLTWhenSaving",
        "w:saveThroughXslt",
        "w:showXMLTags",
        "w:alwaysMergeEmptyNamespace",
        "w:updateFields",
        "w:hdrShapeDefaults",
        "w:footnotePr",
        "w:endnotePr",
        "w:compat",
        "w:docVars",
        "w:rsids",
        "m:mathPr",
        "w:attachedSchema",
        "w:themeFontLang",
        "w:clrSchemeMapping",
        "w:doNotIncludeSubdocsInStats",
        "w:doNotAutoCompressPictures",
        "w:forceUpgrade",
        "w:captions",
        "w:readModeInkLockDown",
        "w:smartTagType",
        "sl:schemaLibrary",
        "w:shapeDefaults",
        "w:doNotEmbedSmartTags",
        "w:decimalSymbol",
        "w:listSeparator",
    )
    # `w:evenAndOddHeaders` is item 47 in `_tag_seq`, so its successors begin at 48.
    evenAndOddHeaders: CT_OnOff | None = ZeroOrOne(  # pyright: ignore[reportAssignmentType]
        "w:evenAndOddHeaders", successors=_tag_seq[48:]
    )
    del _tag_seq

    @property
    def evenAndOddHeaders_val(self) -> bool:
        """Value of `w:evenAndOddHeaders/@w:val`, or False when the element is absent."""
        evenAndOddHeaders = self.evenAndOddHeaders
        if evenAndOddHeaders is None:
            return False
        return evenAndOddHeaders.val

    @evenAndOddHeaders_val.setter
    def evenAndOddHeaders_val(self, value: bool | None):
        # None and False are both represented by removing the element entirely;
        # only a truthy value materializes `w:evenAndOddHeaders`.
        if value is None or value is False:
            self._remove_evenAndOddHeaders()
            return
        self.get_or_add_evenAndOddHeaders().val = value
| CT_Settings |
python | mlflow__mlflow | tests/store/tracking/test_sqlalchemy_store.py | {
"start": 301612,
"end": 422170
class ____:
    # Minimal stand-in for a dataset: just the two fields the metric API reads.
    # NOTE(review): instances are created positionally (e.g. ____("dataset1", "digest1")),
    # so this was presumably decorated with @dataclass in the original source — confirm.
    name: str  # used as Metric.dataset_name
    digest: str  # used as Metric.dataset_digest
def test_search_logged_models_order_by_dataset(store: SqlAlchemyStore):
    """Ordering by a metric can be scoped to a (dataset_name, dataset_digest) pair.

    Two models get an `accuracy` metric on two datasets with opposite rankings, so
    the result order flips depending on which dataset the order_by clause targets.
    """
    exp_id = store.create_experiment(f"exp-{uuid.uuid4()}")
    model_1 = store.create_logged_model(experiment_id=exp_id)
    time.sleep(0.001)  # Ensure the next model has a different timestamp
    model_2 = store.create_logged_model(experiment_id=exp_id)
    time.sleep(0.001)  # Ensure the next model has a different timestamp
    run = store.create_run(
        experiment_id=exp_id, user_id="user", start_time=0, run_name="test", tags=[]
    )
    dataset_1 = DummyDataset("dataset1", "digest1")
    dataset_2 = DummyDataset("dataset2", "digest2")
    # For dataset_1, model_1 has a higher accuracy
    # For dataset_2, model_2 has a higher accuracy
    store.log_batch(
        run.info.run_id,
        metrics=[
            Metric(
                key="accuracy",
                value=0.9,
                timestamp=1,
                step=0,
                model_id=model_1.model_id,
                dataset_name=dataset_1.name,
                dataset_digest=dataset_1.digest,
                run_id=run.info.run_id,
            ),
            Metric(
                key="accuracy",
                value=0.8,
                timestamp=2,
                step=0,
                model_id=model_1.model_id,
                dataset_name=dataset_2.name,
                dataset_digest=dataset_2.digest,
                run_id=run.info.run_id,
            ),
        ],
        params=[],
        tags=[],
    )
    store.log_batch(
        run.info.run_id,
        metrics=[
            Metric(
                key="accuracy",
                value=0.8,
                timestamp=3,
                step=0,
                model_id=model_2.model_id,
                dataset_name=dataset_1.name,
                dataset_digest=dataset_1.digest,
                run_id=run.info.run_id,
            ),
            Metric(
                key="accuracy",
                value=0.9,
                timestamp=4,
                step=0,
                model_id=model_2.model_id,
                dataset_name=dataset_2.name,
                dataset_digest=dataset_2.digest,
                run_id=run.info.run_id,
            ),
        ],
        params=[],
        tags=[],
    )
    # Sorted by accuracy for dataset_1
    # (asserted order implies the default sort direction is ascending)
    models = store.search_logged_models(
        experiment_ids=[exp_id],
        order_by=[
            {
                "field_name": "metrics.accuracy",
                "dataset_name": dataset_1.name,
                "dataset_digest": dataset_1.digest,
            }
        ],
    )
    assert [m.name for m in models] == [model_2.name, model_1.name]
    # Sorted by accuracy for dataset_2
    models = store.search_logged_models(
        experiment_ids=[exp_id],
        order_by=[
            {
                "field_name": "metrics.accuracy",
                "dataset_name": dataset_2.name,
                "dataset_digest": dataset_2.digest,
            }
        ],
    )
    assert [m.name for m in models] == [model_1.name, model_2.name]
    # Sort by accuracy with only name
    models = store.search_logged_models(
        experiment_ids=[exp_id],
        order_by=[
            {
                "field_name": "metrics.accuracy",
                "dataset_name": dataset_1.name,
            }
        ],
    )
    assert [m.name for m in models] == [model_2.name, model_1.name]
    # Sort by accuracy with only digest
    models = store.search_logged_models(
        experiment_ids=[exp_id],
        order_by=[
            {
                "field_name": "metrics.accuracy",
                "dataset_digest": dataset_1.digest,
            }
        ],
    )
    assert [m.name for m in models] == [model_2.name, model_1.name]
def test_search_logged_models_pagination(store: SqlAlchemyStore):
    """Paging through logged-model search results, plus page-token validation."""
    exp_id_1 = store.create_experiment(f"exp-{uuid.uuid4()}")
    model_1 = store.create_logged_model(experiment_id=exp_id_1)
    time.sleep(0.001)  # Ensure the next model has a different timestamp
    model_2 = store.create_logged_model(experiment_id=exp_id_1)
    # A page large enough to hold everything carries no continuation token.
    page = store.search_logged_models(experiment_ids=[exp_id_1], max_results=3)
    assert [m.name for m in page] == [model_2.name, model_1.name]
    assert page.token is None
    page_1 = store.search_logged_models(experiment_ids=[exp_id_1], max_results=1)
    assert [m.name for m in page_1] == [model_2.name]
    assert page_1.token is not None
    page_2 = store.search_logged_models(
        experiment_ids=[exp_id_1], max_results=1, page_token=page_1.token
    )
    assert [m.name for m in page_2] == [model_1.name]
    assert page_2.token is None
    # A larger max_results with the same token still resumes from the same spot.
    page_2 = store.search_logged_models(
        experiment_ids=[exp_id_1], max_results=100, page_token=page_1.token
    )
    assert [m.name for m in page_2] == [model_1.name]
    assert page_2.token is None
    # Search params must match the page token
    exp_id_2 = store.create_experiment(f"exp-{uuid.uuid4()}")
    with pytest.raises(MlflowException, match="Experiment IDs in the page token do not match"):
        store.search_logged_models(experiment_ids=[exp_id_2], page_token=page_1.token)
    with pytest.raises(MlflowException, match="Order by in the page token does not match"):
        store.search_logged_models(
            experiment_ids=[exp_id_1],
            order_by=[{"field_name": "creation_time"}],
            page_token=page_1.token,
        )
    with pytest.raises(MlflowException, match="Filter string in the page token does not match"):
        store.search_logged_models(
            experiment_ids=[exp_id_1],
            filter_string=f"name = '{model_1.name}'",
            page_token=page_1.token,
        )
def test_search_logged_models_datasets_filter(store):
    """The `datasets` argument restricts matches to models with metrics on those datasets."""
    exp_id = store.create_experiment(f"exp-{uuid.uuid4()}")
    run_id = store.create_run(exp_id, "user", 0, [], "test_run").info.run_id
    model1 = store.create_logged_model(exp_id, source_run_id=run_id)
    model2 = store.create_logged_model(exp_id, source_run_id=run_id)
    model3 = store.create_logged_model(exp_id, source_run_id=run_id)
    store.log_batch(
        run_id,
        metrics=[
            Metric(
                key="metric1",
                value=0.1,
                timestamp=0,
                step=0,
                model_id=model1.model_id,
                dataset_name="dataset1",
                dataset_digest="digest1",
            ),
            Metric(
                key="metric1",
                value=0.2,
                timestamp=0,
                step=0,
                model_id=model2.model_id,
                dataset_name="dataset1",
                dataset_digest="digest2",
            ),
            # model3's metric has no dataset attached, so dataset filters exclude it.
            Metric(key="metric2", value=0.1, timestamp=0, step=0, model_id=model3.model_id),
        ],
        params=[],
        tags=[],
    )
    # Restrict results to models with metrics on dataset1
    models = store.search_logged_models(
        experiment_ids=[exp_id],
        filter_string="metrics.metric1 >= 0.1",
        datasets=[{"dataset_name": "dataset1"}],
    )
    assert {m.name for m in models} == {model1.name, model2.name}
    # Restrict results to models with metrics on dataset1 and digest1
    models = store.search_logged_models(
        experiment_ids=[exp_id],
        filter_string="metrics.metric1 >= 0.1",
        datasets=[{"dataset_name": "dataset1", "dataset_digest": "digest1"}],
    )
    assert {m.name for m in models} == {model1.name}
    # No filter string, match models with any metrics on the dataset
    models = store.search_logged_models(
        experiment_ids=[exp_id], datasets=[{"dataset_name": "dataset1"}]
    )
    assert {m.name for m in models} == {model1.name, model2.name}
def test_log_batch_logged_model(store: SqlAlchemyStore):
    """Metrics logged via log_batch are attached to the referenced logged model."""
    exp_id = store.create_experiment(f"exp-{uuid.uuid4()}")
    run = store.create_run(
        experiment_id=exp_id, user_id="user", start_time=0, run_name="test", tags=[]
    )
    model = store.create_logged_model(experiment_id=exp_id)
    metric = Metric(
        key="metric1",
        value=1,
        timestamp=int(time.time() * 1000),
        step=3,
        model_id=model.model_id,
        dataset_name="dataset_name",
        dataset_digest="dataset_digest",
        run_id=run.info.run_id,
    )
    store.log_batch(run.info.run_id, metrics=[metric], params=[], tags=[])
    model = store.get_logged_model(model.model_id)
    assert model.metrics == [metric]
    # Log the same metric, should not throw
    # NOTE(review): `model` is not re-fetched after the next two calls, so those
    # asserts check the previously fetched entity — they chiefly verify no exception.
    store.log_batch(run.info.run_id, metrics=[metric], params=[], tags=[])
    assert model.metrics == [metric]
    # Log an empty batch, should not throw
    store.log_batch(run.info.run_id, metrics=[], params=[], tags=[])
    assert model.metrics == [metric]
    another_metric = Metric(
        key="metric2",
        value=2,
        timestamp=int(time.time() * 1000),
        step=4,
        model_id=model.model_id,
        dataset_name="dataset_name",
        dataset_digest="dataset_digest",
        run_id=run.info.run_id,
    )
    store.log_batch(run.info.run_id, metrics=[another_metric], params=[], tags=[])
    model = store.get_logged_model(model.model_id)
    actual_metrics = sorted(model.metrics, key=lambda m: m.key)
    expected_metrics = sorted([metric, another_metric], key=lambda m: m.key)
    assert actual_metrics == expected_metrics
    # Log multiple metrics
    metrics = [
        Metric(
            key=f"metric{i + 3}",
            value=3,
            timestamp=int(time.time() * 1000),
            step=5,
            model_id=model.model_id,
            dataset_name="dataset_name",
            dataset_digest="dataset_digest",
            run_id=run.info.run_id,
        )
        for i in range(3)
    ]
    store.log_batch(run.info.run_id, metrics=metrics, params=[], tags=[])
    model = store.get_logged_model(model.model_id)
    actual_metrics = sorted(model.metrics, key=lambda m: m.key)
    expected_metrics = sorted([metric, another_metric, *metrics], key=lambda m: m.key)
    assert actual_metrics == expected_metrics
def test_create_and_get_assessment(store_and_trace_info):
    """Feedback and Expectation assessments round-trip intact through create/get."""
    store, trace_info = store_and_trace_info
    feedback = Feedback(
        trace_id=trace_info.request_id,
        name="correctness",
        value=True,
        rationale="The response is correct and well-formatted",
        source=AssessmentSource(
            source_type=AssessmentSourceType.HUMAN, source_id="evaluator@company.com"
        ),
        metadata={"project": "test-project", "version": "1.0"},
        span_id="span-123",
    )
    created_feedback = store.create_assessment(feedback)
    # The store assigns an "a-"-prefixed id and a creation timestamp.
    assert created_feedback.assessment_id is not None
    assert created_feedback.assessment_id.startswith("a-")
    assert created_feedback.trace_id == trace_info.request_id
    assert created_feedback.create_time_ms is not None
    assert created_feedback.name == "correctness"
    assert created_feedback.value is True
    assert created_feedback.rationale == "The response is correct and well-formatted"
    assert created_feedback.metadata == {"project": "test-project", "version": "1.0"}
    assert created_feedback.span_id == "span-123"
    assert created_feedback.valid
    expectation = Expectation(
        trace_id=trace_info.request_id,
        name="expected_response",
        value="The capital of France is Paris.",
        source=AssessmentSource(
            source_type=AssessmentSourceType.HUMAN, source_id="annotator@company.com"
        ),
        metadata={"context": "geography-qa", "difficulty": "easy"},
        span_id="span-456",
    )
    created_expectation = store.create_assessment(expectation)
    assert created_expectation.assessment_id != created_feedback.assessment_id
    assert created_expectation.trace_id == trace_info.request_id
    assert created_expectation.value == "The capital of France is Paris."
    assert created_expectation.metadata == {"context": "geography-qa", "difficulty": "easy"}
    assert created_expectation.span_id == "span-456"
    assert created_expectation.valid
    retrieved_feedback = store.get_assessment(trace_info.request_id, created_feedback.assessment_id)
    assert retrieved_feedback.name == "correctness"
    assert retrieved_feedback.value is True
    assert retrieved_feedback.rationale == "The response is correct and well-formatted"
    assert retrieved_feedback.metadata == {"project": "test-project", "version": "1.0"}
    assert retrieved_feedback.span_id == "span-123"
    assert retrieved_feedback.trace_id == trace_info.request_id
    assert retrieved_feedback.valid
    retrieved_expectation = store.get_assessment(
        trace_info.request_id, created_expectation.assessment_id
    )
    assert retrieved_expectation.value == "The capital of France is Paris."
    assert retrieved_expectation.metadata == {"context": "geography-qa", "difficulty": "easy"}
    assert retrieved_expectation.span_id == "span-456"
    assert retrieved_expectation.trace_id == trace_info.request_id
    assert retrieved_expectation.valid
def test_get_assessment_errors(store_and_trace_info):
    """Fetching assessments for unknown traces or unknown ids raises MlflowException."""
    tracking_store, trace = store_and_trace_info

    # An unknown trace id is rejected before any assessment lookup happens.
    missing_trace_msg = r"Trace with request_id 'fake_trace' not found"
    with pytest.raises(MlflowException, match=missing_trace_msg):
        tracking_store.get_assessment("fake_trace", "fake_assessment")

    # A valid trace with an unknown assessment id raises a not-found error.
    missing_assessment_msg = r"Assessment with ID 'fake_assessment' not found for trace"
    with pytest.raises(MlflowException, match=missing_assessment_msg):
        tracking_store.get_assessment(trace.request_id, "fake_assessment")
def test_update_assessment_feedback(store_and_trace_info):
    """All mutable fields of a Feedback can be updated; id, span and source persist."""
    store, trace_info = store_and_trace_info
    original_feedback = Feedback(
        trace_id=trace_info.request_id,
        name="correctness",
        value=True,
        rationale="Original rationale",
        source=AssessmentSource(
            source_type=AssessmentSourceType.HUMAN, source_id="evaluator@company.com"
        ),
        metadata={"project": "test-project", "version": "1.0"},
        span_id="span-123",
    )
    created_feedback = store.create_assessment(original_feedback)
    original_id = created_feedback.assessment_id
    updated_feedback = store.update_assessment(
        trace_id=trace_info.request_id,
        assessment_id=original_id,
        name="correctness_updated",
        feedback=FeedbackValue(value=False),
        rationale="Updated rationale",
        metadata={"project": "test-project", "version": "2.0", "new_field": "added"},
    )
    assert updated_feedback.assessment_id == original_id
    assert updated_feedback.name == "correctness_updated"
    assert updated_feedback.value is False
    assert updated_feedback.rationale == "Updated rationale"
    assert updated_feedback.metadata == {
        "project": "test-project",
        "version": "2.0",
        "new_field": "added",
    }
    assert updated_feedback.span_id == "span-123"
    assert updated_feedback.source.source_id == "evaluator@company.com"
    assert updated_feedback.valid is True
    # The update is persisted, not just reflected in the returned entity.
    retrieved = store.get_assessment(trace_info.request_id, original_id)
    assert retrieved.value is False
    assert retrieved.name == "correctness_updated"
    assert retrieved.rationale == "Updated rationale"
def test_update_assessment_expectation(store_and_trace_info):
    """Expectation value and metadata can be updated; name, span and source persist."""
    store, trace_info = store_and_trace_info
    original_expectation = Expectation(
        trace_id=trace_info.request_id,
        name="expected_response",
        value="The capital of France is Paris.",
        source=AssessmentSource(
            source_type=AssessmentSourceType.HUMAN, source_id="annotator@company.com"
        ),
        metadata={"context": "geography-qa"},
        span_id="span-456",
    )
    created_expectation = store.create_assessment(original_expectation)
    original_id = created_expectation.assessment_id
    updated_expectation = store.update_assessment(
        trace_id=trace_info.request_id,
        assessment_id=original_id,
        expectation=ExpectationValue(value="The capital and largest city of France is Paris."),
        metadata={"context": "geography-qa", "updated": "true"},
    )
    assert updated_expectation.assessment_id == original_id
    assert updated_expectation.name == "expected_response"
    assert updated_expectation.value == "The capital and largest city of France is Paris."
    assert updated_expectation.metadata == {"context": "geography-qa", "updated": "true"}
    assert updated_expectation.span_id == "span-456"
    assert updated_expectation.source.source_id == "annotator@company.com"
def test_update_assessment_partial_fields(store_and_trace_info):
    """Fields omitted from update_assessment keep their previously stored values."""
    store, trace_info = store_and_trace_info
    original_feedback = Feedback(
        trace_id=trace_info.request_id,
        name="quality",
        value=5,
        rationale="Original rationale",
        source=AssessmentSource(source_type=AssessmentSourceType.CODE),
        metadata={"scorer": "automated"},
    )
    created_feedback = store.create_assessment(original_feedback)
    original_id = created_feedback.assessment_id
    # Only the rationale is supplied; everything else must survive untouched.
    updated_feedback = store.update_assessment(
        trace_id=trace_info.request_id,
        assessment_id=original_id,
        rationale="Updated rationale only",
    )
    assert updated_feedback.assessment_id == original_id
    assert updated_feedback.name == "quality"
    assert updated_feedback.value == 5
    assert updated_feedback.rationale == "Updated rationale only"
    assert updated_feedback.metadata == {"scorer": "automated"}
def test_update_assessment_type_validation(store_and_trace_info):
    """A Feedback cannot be updated with an expectation value, nor vice versa."""
    store, trace_info = store_and_trace_info
    feedback = Feedback(
        trace_id=trace_info.request_id,
        name="test_feedback",
        value="original",
        source=AssessmentSource(source_type=AssessmentSourceType.CODE),
    )
    created_feedback = store.create_assessment(feedback)
    with pytest.raises(
        MlflowException, match=r"Cannot update expectation value on a Feedback assessment"
    ):
        store.update_assessment(
            trace_id=trace_info.request_id,
            assessment_id=created_feedback.assessment_id,
            expectation=ExpectationValue(value="This should fail"),
        )
    expectation = Expectation(
        trace_id=trace_info.request_id,
        name="test_expectation",
        value="original_expected",
        source=AssessmentSource(source_type=AssessmentSourceType.HUMAN),
    )
    created_expectation = store.create_assessment(expectation)
    with pytest.raises(
        MlflowException, match=r"Cannot update feedback value on an Expectation assessment"
    ):
        store.update_assessment(
            trace_id=trace_info.request_id,
            assessment_id=created_expectation.assessment_id,
            feedback=FeedbackValue(value="This should fail"),
        )
def test_update_assessment_errors(store_and_trace_info):
    """update_assessment surfaces not-found errors for bad trace/assessment ids."""
    tracking_store, trace = store_and_trace_info

    # Updating against a trace that does not exist fails up front.
    with pytest.raises(MlflowException, match=r"Trace with request_id 'fake_trace' not found"):
        tracking_store.update_assessment(
            trace_id="fake_trace", assessment_id="fake_assessment", rationale="This should fail"
        )

    # Updating an unknown assessment on a real trace fails as well.
    missing_assessment_msg = r"Assessment with ID 'fake_assessment' not found for trace"
    with pytest.raises(MlflowException, match=missing_assessment_msg):
        tracking_store.update_assessment(
            trace_id=trace.request_id,
            assessment_id="fake_assessment",
            rationale="This should fail",
        )
def test_update_assessment_metadata_merging(store_and_trace_info):
    """Metadata passed to update_assessment is merged into the stored metadata."""
    tracking_store, trace = store_and_trace_info

    seeded = tracking_store.create_assessment(
        Feedback(
            trace_id=trace.request_id,
            name="test",
            value="original",
            source=AssessmentSource(source_type=AssessmentSourceType.CODE),
            metadata={"keep": "this", "override": "old_value", "remove_me": "will_stay"},
        )
    )
    merged = tracking_store.update_assessment(
        trace_id=trace.request_id,
        assessment_id=seeded.assessment_id,
        metadata={"override": "new_value", "new_key": "new_value"},
    )
    # Untouched keys survive, overlapping keys take the new value, new keys appear.
    assert merged.metadata == {
        "keep": "this",
        "override": "new_value",
        "remove_me": "will_stay",
        "new_key": "new_value",
    }
def test_update_assessment_timestamps(store_and_trace_info):
    """Updates preserve create_time_ms and strictly advance last_update_time_ms."""
    store, trace_info = store_and_trace_info
    original = Feedback(
        trace_id=trace_info.request_id,
        name="test",
        value="original",
        source=AssessmentSource(source_type=AssessmentSourceType.CODE),
    )
    created = store.create_assessment(original)
    original_create_time = created.create_time_ms
    original_update_time = created.last_update_time_ms
    # Ensure the update lands on a later timestamp than creation.
    time.sleep(0.001)
    updated = store.update_assessment(
        trace_id=trace_info.request_id,
        assessment_id=created.assessment_id,
        name="updated_name",
    )
    assert updated.create_time_ms == original_create_time
    assert updated.last_update_time_ms > original_update_time
def test_create_assessment_with_overrides(store_and_trace_info):
    """Creating an assessment with `overrides` invalidates the overridden one."""
    store, trace_info = store_and_trace_info
    original_feedback = Feedback(
        trace_id=trace_info.request_id,
        name="quality",
        value="poor",
        source=AssessmentSource(source_type=AssessmentSourceType.LLM_JUDGE),
    )
    created_original = store.create_assessment(original_feedback)
    override_feedback = Feedback(
        trace_id=trace_info.request_id,
        name="quality",
        value="excellent",
        source=AssessmentSource(source_type=AssessmentSourceType.HUMAN),
        overrides=created_original.assessment_id,
    )
    created_override = store.create_assessment(override_feedback)
    assert created_override.overrides == created_original.assessment_id
    assert created_override.value == "excellent"
    assert created_override.valid is True
    # The overridden assessment is kept (value intact) but marked invalid.
    retrieved_original = store.get_assessment(trace_info.request_id, created_original.assessment_id)
    assert retrieved_original.valid is False
    assert retrieved_original.value == "poor"
def test_create_assessment_override_nonexistent(store_and_trace_info):
    """Overriding an assessment id that does not exist is rejected."""
    tracking_store, trace = store_and_trace_info

    bad_override = Feedback(
        trace_id=trace.request_id,
        name="quality",
        value="excellent",
        source=AssessmentSource(source_type=AssessmentSourceType.HUMAN),
        overrides="nonexistent-assessment-id",
    )
    expected_msg = r"Assessment with ID 'nonexistent-assessment-id' not found"
    with pytest.raises(MlflowException, match=expected_msg):
        tracking_store.create_assessment(bad_override)
def test_delete_assessment_idempotent(store_and_trace_info):
    """delete_assessment is a no-op for already-deleted or unknown assessment ids."""
    store, trace_info = store_and_trace_info
    feedback = Feedback(
        trace_id=trace_info.request_id,
        name="test",
        value="test_value",
        source=AssessmentSource(source_type=AssessmentSourceType.CODE),
    )
    created_feedback = store.create_assessment(feedback)
    retrieved = store.get_assessment(trace_info.request_id, created_feedback.assessment_id)
    assert retrieved.assessment_id == created_feedback.assessment_id
    store.delete_assessment(trace_info.request_id, created_feedback.assessment_id)
    with pytest.raises(
        MlflowException,
        match=rf"Assessment with ID '{created_feedback.assessment_id}' not found for trace",
    ):
        store.get_assessment(trace_info.request_id, created_feedback.assessment_id)
    # Repeated deletes, and deletes of ids that never existed, must not raise.
    store.delete_assessment(trace_info.request_id, created_feedback.assessment_id)
    store.delete_assessment(trace_info.request_id, "fake_assessment_id")
def test_delete_assessment_override_behavior(store_and_trace_info):
    """Deleting an override restores the validity of the overridden assessment."""
    store, trace_info = store_and_trace_info
    original = store.create_assessment(
        Feedback(
            trace_id=trace_info.request_id,
            name="original",
            value="original_value",
            source=AssessmentSource(source_type=AssessmentSourceType.CODE),
        ),
    )
    override = store.create_assessment(
        Feedback(
            trace_id=trace_info.request_id,
            name="override",
            value="override_value",
            source=AssessmentSource(source_type=AssessmentSourceType.HUMAN),
            overrides=original.assessment_id,
        ),
    )
    # While the override exists, the original is invalid and the override valid.
    assert store.get_assessment(trace_info.request_id, original.assessment_id).valid is False
    assert store.get_assessment(trace_info.request_id, override.assessment_id).valid is True
    store.delete_assessment(trace_info.request_id, override.assessment_id)
    with pytest.raises(MlflowException, match="not found"):
        store.get_assessment(trace_info.request_id, override.assessment_id)
    # Removing the override re-validates the original assessment.
    assert store.get_assessment(trace_info.request_id, original.assessment_id).valid is True
def test_assessment_with_run_id(store_and_trace_info):
    """An assessment's run_id association survives the create/get round-trip."""
    store, trace_info = store_and_trace_info
    run = store.create_run(
        experiment_id=trace_info.experiment_id,
        user_id="test_user",
        start_time=get_current_time_millis(),
        tags=[],
        run_name="test_run",
    )
    feedback = Feedback(
        trace_id=trace_info.request_id,
        name="run_feedback",
        value="excellent",
        source=AssessmentSource(source_type=AssessmentSourceType.CODE),
    )
    # run_id is attached after construction rather than passed to the constructor.
    feedback.run_id = run.info.run_id
    created_feedback = store.create_assessment(feedback)
    assert created_feedback.run_id == run.info.run_id
    retrieved_feedback = store.get_assessment(trace_info.request_id, created_feedback.assessment_id)
    assert retrieved_feedback.run_id == run.info.run_id
def test_assessment_with_error(store_and_trace_info):
    """Exception details (message, code, stack trace) are persisted with feedback."""
    store, trace_info = store_and_trace_info
    # Raise for real so the attached error carries a genuine stack trace.
    try:
        raise ValueError("Test error message")
    except ValueError as test_error:
        feedback = Feedback(
            trace_id=trace_info.request_id,
            name="error_feedback",
            value=None,
            error=test_error,
            source=AssessmentSource(source_type=AssessmentSourceType.CODE),
        )
    created_feedback = store.create_assessment(feedback)
    assert created_feedback.error.error_message == "Test error message"
    assert created_feedback.error.error_code == "ValueError"
    assert created_feedback.error.stack_trace is not None
    assert "ValueError: Test error message" in created_feedback.error.stack_trace
    assert "test_assessment_with_error" in created_feedback.error.stack_trace
    retrieved_feedback = store.get_assessment(trace_info.request_id, created_feedback.assessment_id)
    assert retrieved_feedback.error.error_message == "Test error message"
    assert retrieved_feedback.error.error_code == "ValueError"
    assert retrieved_feedback.error.stack_trace is not None
    assert "ValueError: Test error message" in retrieved_feedback.error.stack_trace
    assert created_feedback.error.stack_trace == retrieved_feedback.error.stack_trace
def test_dataset_crud_operations(store):
    """Create / get / delete round-trip for evaluation datasets."""
    with mock.patch("mlflow.tracking._tracking_service.utils._get_store", return_value=store):
        experiment_ids = _create_experiments(store, ["test_exp_1", "test_exp_2"])
        created_dataset = store.create_dataset(
            name="test_eval_dataset",
            tags={
                "purpose": "testing",
                "environment": "test",
                mlflow_tags.MLFLOW_USER: "test_user",
            },
            experiment_ids=experiment_ids,
        )
        assert created_dataset.dataset_id is not None
        assert created_dataset.dataset_id.startswith("d-")
        assert created_dataset.name == "test_eval_dataset"
        assert created_dataset.tags == {
            "purpose": "testing",
            "environment": "test",
            mlflow_tags.MLFLOW_USER: "test_user",
        }
        assert created_dataset.created_time > 0
        assert created_dataset.last_update_time > 0
        assert created_dataset.created_time == created_dataset.last_update_time
        assert created_dataset.schema is None  # Schema is computed when data is added
        assert created_dataset.profile is None  # Profile is computed when data is added
        assert created_dataset.created_by == "test_user"  # Extracted from mlflow.user tag
        retrieved_dataset = store.get_dataset(dataset_id=created_dataset.dataset_id)
        assert retrieved_dataset.dataset_id == created_dataset.dataset_id
        assert retrieved_dataset.name == created_dataset.name
        assert retrieved_dataset.tags == created_dataset.tags
        # NOTE(review): `_experiment_ids` is None until the property is accessed —
        # presumably resolved lazily via the patched `_get_store`; confirm.
        assert retrieved_dataset._experiment_ids is None
        assert retrieved_dataset.experiment_ids == experiment_ids
        assert not retrieved_dataset.has_records()
        with pytest.raises(
            MlflowException, match="Evaluation dataset with id 'd-nonexistent' not found"
        ):
            store.get_dataset(dataset_id="d-nonexistent")
        store.delete_dataset(created_dataset.dataset_id)
        with pytest.raises(MlflowException, match="not found"):
            store.get_dataset(dataset_id=created_dataset.dataset_id)
        # Verify idempotency: deleting an unknown id is a no-op.
        store.delete_dataset("d-nonexistent")
def test_dataset_records_pagination(store):
    """_load_dataset_records pages without overlap and without dropping records."""
    exp_id = _create_experiments(store, ["pagination_test_exp"])[0]
    dataset = store.create_dataset(
        name="pagination_test_dataset", experiment_ids=[exp_id], tags={"test": "pagination"}
    )
    # 25 records paged 10 at a time -> pages of 10, 10 and 5.
    records = [
        {
            "inputs": {"id": i, "question": f"Question {i}"},
            "expectations": {"answer": f"Answer {i}"},
            "tags": {"index": str(i)},
        }
        for i in range(25)
    ]
    store.upsert_dataset_records(dataset.dataset_id, records)
    page1, next_token1 = store._load_dataset_records(dataset.dataset_id, max_results=10)
    assert len(page1) == 10
    assert next_token1 is not None  # Token should exist for more pages
    # Collect all IDs from page1
    page1_ids = {r.inputs["id"] for r in page1}
    assert len(page1_ids) == 10  # All IDs should be unique
    page2, next_token2 = store._load_dataset_records(
        dataset.dataset_id, max_results=10, page_token=next_token1
    )
    assert len(page2) == 10
    assert next_token2 is not None  # Token should exist for more pages
    # Collect all IDs from page2
    page2_ids = {r.inputs["id"] for r in page2}
    assert len(page2_ids) == 10  # All IDs should be unique
    assert page1_ids.isdisjoint(page2_ids)  # No overlap between pages
    page3, next_token3 = store._load_dataset_records(
        dataset.dataset_id, max_results=10, page_token=next_token2
    )
    assert len(page3) == 5
    assert next_token3 is None  # No more pages
    # Collect all IDs from page3
    page3_ids = {r.inputs["id"] for r in page3}
    assert len(page3_ids) == 5  # All IDs should be unique
    assert page1_ids.isdisjoint(page3_ids)  # No overlap
    assert page2_ids.isdisjoint(page3_ids)  # No overlap
    # Verify we got all 25 records across all pages
    all_ids = page1_ids | page2_ids | page3_ids
    assert all_ids == set(range(25))
    all_records, no_token = store._load_dataset_records(dataset.dataset_id, max_results=None)
    assert len(all_records) == 25
    assert no_token is None
    # Verify we have all expected records (order doesn't matter)
    all_record_ids = {r.inputs["id"] for r in all_records}
    assert all_record_ids == set(range(25))
def test_dataset_search_comprehensive(store):
    """search_datasets: experiment scoping, ordering, pagination, and filter strings."""
    test_prefix = "test_search_"
    exp_ids = _create_experiments(store, [f"{test_prefix}exp_{i}" for i in range(1, 4)])
    datasets = []
    # Spread 10 datasets over the experiments:
    # i 0-2 -> exp_ids[0]; 3-5 -> exp_ids[1]+exp_ids[2]; 6-7 -> exp_ids[2]; 8-9 -> none.
    for i in range(10):
        name = f"{test_prefix}dataset_{i:02d}"
        tags = {"priority": "high" if i % 2 == 0 else "low", "mlflow.user": f"user_{i % 3}"}
        if i < 3:
            created = store.create_dataset(
                name=name,
                experiment_ids=[exp_ids[0]],
                tags=tags,
            )
        elif i < 6:
            created = store.create_dataset(
                name=name,
                experiment_ids=[exp_ids[1], exp_ids[2]],
                tags=tags,
            )
        elif i < 8:
            created = store.create_dataset(
                name=name,
                experiment_ids=[exp_ids[2]],
                tags=tags,
            )
        else:
            created = store.create_dataset(
                name=name,
                experiment_ids=[],
                tags=tags,
            )
        datasets.append(created)
        # Distinct creation timestamps, required for the created_time filter below.
        time.sleep(0.001)
    results = store.search_datasets(experiment_ids=[exp_ids[0]])
    assert len([d for d in results if d.name.startswith(test_prefix)]) == 3
    results = store.search_datasets(experiment_ids=[exp_ids[1], exp_ids[2]])
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    assert len(test_results) == 5
    results = store.search_datasets(order_by=["name"])
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    names = [d.name for d in test_results]
    assert names == sorted(names)
    results = store.search_datasets(order_by=["name DESC"])
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    names = [d.name for d in test_results]
    assert names == sorted(names, reverse=True)
    page1 = store.search_datasets(max_results=3)
    assert len(page1) == 3
    assert page1.token is not None
    page2 = store.search_datasets(max_results=3, page_token=page1.token)
    assert len(page2) == 3
    assert all(d1.dataset_id != d2.dataset_id for d1 in page1 for d2 in page2)
    results = store.search_datasets(experiment_ids=None)
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    assert len(test_results) == 10
    # All 10 names are dataset_00..dataset_09, so "dataset_0" matches every one.
    results = store.search_datasets(filter_string=f"name LIKE '%{test_prefix}dataset_0%'")
    assert len(results) == 10
    assert all("dataset_0" in d.name for d in results)
    results = store.search_datasets(filter_string=f"name = '{test_prefix}dataset_05'")
    assert len(results) == 1
    assert results[0].name == f"{test_prefix}dataset_05"
    results = store.search_datasets(filter_string="tags.priority = 'high'")
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    assert len(test_results) == 5
    assert all(d.tags.get("priority") == "high" for d in test_results)
    results = store.search_datasets(filter_string="tags.priority != 'high'")
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    assert len(test_results) == 5
    assert all(d.tags.get("priority") == "low" for d in test_results)
    results = store.search_datasets(
        filter_string=f"name LIKE '%{test_prefix}%' AND tags.priority = 'low'"
    )
    assert len(results) == 5
    assert all(d.tags.get("priority") == "low" and test_prefix in d.name for d in results)
    mid_dataset = datasets[5]
    results = store.search_datasets(filter_string=f"created_time > {mid_dataset.created_time}")
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    assert len(test_results) == 4
    assert all(d.created_time > mid_dataset.created_time for d in test_results)
    results = store.search_datasets(
        experiment_ids=[exp_ids[0]], filter_string="tags.priority = 'high'"
    )
    assert len(results) == 2
    assert all(d.tags.get("priority") == "high" for d in results)
    results = store.search_datasets(filter_string="tags.priority = 'low'", order_by=["name ASC"])
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    names = [d.name for d in test_results]
    assert names == sorted(names)
    created_user = store.create_dataset(
        name=f"{test_prefix}_user_dataset",
        tags={"test": "user", mlflow_tags.MLFLOW_USER: "test_user_1"},
        experiment_ids=[exp_ids[0]],
    )
    results = store.search_datasets(filter_string="created_by = 'test_user_1'")
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    assert len(test_results) == 1
    assert test_results[0].created_by == "test_user_1"
    records_with_user = [
        {
            "inputs": {"test": "data"},
            "expectations": {"result": "expected"},
            "tags": {mlflow_tags.MLFLOW_USER: "test_user_2"},
        }
    ]
    store.upsert_dataset_records(created_user.dataset_id, records_with_user)
    results = store.search_datasets(filter_string="last_updated_by = 'test_user_2'")
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    assert len(test_results) == 1
    assert test_results[0].last_updated_by == "test_user_2"
    with pytest.raises(MlflowException, match="Invalid attribute key"):
        store.search_datasets(filter_string="invalid_field = 'value'")
def test_dataset_schema_and_profile_computation(store):
    """Test that schema and profile are computed when records are added."""
    test_prefix = "test_schema_profile_"
    exp_ids = _create_experiments(store, [f"{test_prefix}exp"])
    dataset = store.create_dataset(name=f"{test_prefix}dataset", experiment_ids=exp_ids)
    # A freshly created dataset has no records yet, so neither schema nor
    # profile has been computed.
    assert dataset.schema is None
    assert dataset.profile is None
    # Records deliberately cover every inferred type: string, float, integer,
    # boolean, array, and nested object; some fields appear in only a subset
    # of records so the schema must be the union across records.
    records = [
        {
            "inputs": {
                "question": "What is MLflow?",
                "temperature": 0.7,
                "max_tokens": 100,
                "use_cache": True,
                "tags": ["ml", "tools"],
            },
            "expectations": {
                "accuracy": 0.95,
                "contains_key_info": True,
                "response": "MLflow is an open source platform",
            },
            "source": {"source_type": "TRACE", "source_data": {"trace_id": "trace1"}},
        },
        {
            "inputs": {
                "question": "What is Python?",
                "temperature": 0.5,
                "max_tokens": 150,
                "metadata": {"user": "test", "session": 123},
            },
            "expectations": {"accuracy": 0.9},
            "source": {"source_type": "TRACE", "source_data": {"trace_id": "trace2"}},
        },
        {
            # Record with no expectations at all — must not break schema inference.
            "inputs": {"question": "What is Docker?", "temperature": 0.8},
            "source": {"source_type": "HUMAN", "source_data": {"user": "human"}},
        },
    ]
    store.upsert_dataset_records(dataset.dataset_id, records)
    updated_dataset = store.get_dataset(dataset.dataset_id)
    # Schema is stored as a JSON string mapping field name -> inferred type.
    assert updated_dataset.schema is not None
    schema = json.loads(updated_dataset.schema)
    assert "inputs" in schema
    assert "expectations" in schema
    assert schema["inputs"]["question"] == "string"
    assert schema["inputs"]["temperature"] == "float"
    assert schema["inputs"]["max_tokens"] == "integer"
    assert schema["inputs"]["use_cache"] == "boolean"
    assert schema["inputs"]["tags"] == "array"
    assert schema["inputs"]["metadata"] == "object"
    assert schema["expectations"]["accuracy"] == "float"
    assert schema["expectations"]["contains_key_info"] == "boolean"
    assert schema["expectations"]["response"] == "string"
    # Profile is also a JSON string and reflects the record count.
    assert updated_dataset.profile is not None
    profile = json.loads(updated_dataset.profile)
    assert profile["num_records"] == 3
def test_dataset_schema_and_profile_incremental_updates(store):
    """Schema and profile are recomputed incrementally as new record batches arrive."""
    test_prefix = "test_incremental_"
    exp_ids = _create_experiments(store, [f"{test_prefix}exp"])
    dataset = store.create_dataset(name=f"{test_prefix}dataset", experiment_ids=exp_ids)
    initial_records = [
        {
            "inputs": {"question": "What is MLflow?", "temperature": 0.7},
            "expectations": {"accuracy": 0.95},
            "source": {"source_type": "TRACE", "source_data": {"trace_id": "trace1"}},
        }
    ]
    store.upsert_dataset_records(dataset.dataset_id, initial_records)
    dataset1 = store.get_dataset(dataset.dataset_id)
    schema1 = json.loads(dataset1.schema)
    profile1 = json.loads(dataset1.profile)
    # After the first batch the schema contains exactly the fields seen so far.
    assert schema1["inputs"] == {"question": "string", "temperature": "float"}
    assert schema1["expectations"] == {"accuracy": "float"}
    assert profile1["num_records"] == 1
    # Second batch introduces new input fields (max_tokens, use_cache) and a
    # new expectation (relevance).
    additional_records = [
        {
            "inputs": {
                "question": "What is Python?",
                "temperature": 0.5,
                "max_tokens": 100,
                "use_cache": True,
            },
            "expectations": {"accuracy": 0.9, "relevance": 0.85},
            "source": {"source_type": "HUMAN", "source_data": {"user": "test_user"}},
        }
    ]
    store.upsert_dataset_records(dataset.dataset_id, additional_records)
    dataset2 = store.get_dataset(dataset.dataset_id)
    schema2 = json.loads(dataset2.schema)
    profile2 = json.loads(dataset2.profile)
    # Previously seen fields are retained and the new ones are merged in.
    assert schema2["inputs"]["question"] == "string"
    assert schema2["inputs"]["temperature"] == "float"
    assert schema2["inputs"]["max_tokens"] == "integer"
    assert schema2["inputs"]["use_cache"] == "boolean"
    assert schema2["expectations"]["accuracy"] == "float"
    assert schema2["expectations"]["relevance"] == "float"
    assert profile2["num_records"] == 2
def test_dataset_user_detection(store):
    """created_by is derived from the mlflow.user tag when it is present."""
    test_prefix = "test_user_detection_"
    exp_ids = _create_experiments(store, [f"{test_prefix}exp"])
    # Dataset created with an mlflow.user tag: the user is recorded as creator
    # and the tag itself is preserved.
    with_user = store.create_dataset(
        name=f"{test_prefix}dataset1",
        tags={mlflow_tags.MLFLOW_USER: "john_doe", "other": "tag"},
        experiment_ids=exp_ids,
    )
    assert with_user.created_by == "john_doe"
    assert with_user.tags[mlflow_tags.MLFLOW_USER] == "john_doe"
    # Without an mlflow.user tag, no creator is recorded and no tag is synthesized.
    anonymous = store.create_dataset(
        name=f"{test_prefix}dataset2", tags={"other": "tag"}, experiment_ids=exp_ids
    )
    assert anonymous.created_by is None
    assert mlflow_tags.MLFLOW_USER not in anonymous.tags
    # The derived created_by attribute is searchable via filter strings.
    matches = [
        d
        for d in store.search_datasets(filter_string="created_by = 'john_doe'")
        if d.name.startswith(test_prefix)
    ]
    assert len(matches) == 1
    assert matches[0].dataset_id == with_user.dataset_id
def test_dataset_filtering_ordering_pagination(store):
    """Exercise search_datasets with combined filters, ordering, and max_results."""
    test_prefix = "test_filter_order_page_"
    exp_ids = _create_experiments(store, [f"{test_prefix}exp_{i}" for i in range(3)])
    datasets = []
    for i in range(10):
        time.sleep(0.01)  # ensure strictly increasing created_time for time filters
        # priority: 3x high, 4x medium, 3x low; model cycles 0..2;
        # environment alternates production/staging.
        tags = {
            "priority": "high" if i < 3 else ("medium" if i < 7 else "low"),
            "model": f"model_{i % 3}",
            "environment": "production" if i % 2 == 0 else "staging",
        }
        created = store.create_dataset(
            name=f"{test_prefix}_dataset_{i:02d}",
            tags=tags,
            experiment_ids=[exp_ids[i % len(exp_ids)]],
        )
        datasets.append(created)
    # Tag filter + name ordering + pagination: only 2 of the 3 "high" datasets
    # are returned when max_results=2.
    results = store.search_datasets(
        filter_string="tags.priority = 'high'", order_by=["name ASC"], max_results=2
    )
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    assert len(test_results) == 2
    assert all(d.tags.get("priority") == "high" for d in test_results)
    assert test_results[0].name < test_results[1].name
    results_all = store.search_datasets(
        filter_string="tags.priority = 'high'", order_by=["name ASC"]
    )
    test_results_all = [d for d in results_all if d.name.startswith(test_prefix)]
    assert len(test_results_all) == 3
    # Combined tag + created_time filter with descending time ordering.
    mid_time = datasets[5].created_time
    results = store.search_datasets(
        filter_string=f"tags.environment = 'production' AND created_time > {mid_time}",
        order_by=["created_time DESC"],
        max_results=3,
    )
    test_results = [d for d in results if d.name.startswith(test_prefix)]
    assert all(d.tags.get("environment") == "production" for d in test_results)
    assert all(d.created_time > mid_time for d in test_results)
    for i in range(1, len(test_results)):
        assert test_results[i - 1].created_time >= test_results[i].created_time
    # Experiment scoping combined with equality and inequality tag filters.
    results = store.search_datasets(
        experiment_ids=[exp_ids[0]],
        filter_string="tags.model = 'model_0' AND tags.priority != 'low'",
        order_by=["last_update_time DESC"],
        max_results=5,
    )
    for d in results:
        assert d.tags.get("model") == "model_0"
        assert d.tags.get("priority") != "low"
    # max_results returns a stable prefix of the fully ordered result set.
    all_production = store.search_datasets(
        filter_string="tags.environment = 'production'", order_by=["name ASC"]
    )
    test_all_production = [d for d in all_production if d.name.startswith(test_prefix)]
    limited_results = store.search_datasets(
        filter_string="tags.environment = 'production'", order_by=["name ASC"], max_results=3
    )
    test_limited = [d for d in limited_results if d.name.startswith(test_prefix)]
    assert len(test_limited) == 3
    assert len(test_all_production) == 5
    for i in range(3):
        assert test_limited[i].dataset_id == test_all_production[i].dataset_id
def test_dataset_upsert_comprehensive(store):
    """End-to-end upsert semantics: insert vs. update counts, deep-merge of
    expectations/tags, source preservation, and empty-payload edge cases."""
    created_dataset = store.create_dataset(name="upsert_comprehensive")
    # Batch 1 contains two records with identical inputs ("What is MLflow?"),
    # so the third entry updates the first within the same batch.
    records_batch1 = [
        {
            "inputs": {"question": "What is MLflow?"},
            "expectations": {"answer": "MLflow is a platform", "score": 0.8},
            "tags": {"version": "v1", "quality": "high"},
            "source": {
                "source_type": "TRACE",
                "source_data": {"trace_id": "trace-001", "span_id": "span-001"},
            },
        },
        {
            "inputs": {"question": "What is Python?"},
            "expectations": {"answer": "Python is a language"},
            "tags": {"category": "programming"},
        },
        {
            "inputs": {"question": "What is MLflow?"},
            "expectations": {"answer": "MLflow is an ML platform", "confidence": 0.9},
            "tags": {"version": "v2", "reviewed": "true"},
            "source": {
                "source_type": "TRACE",
                "source_data": {"trace_id": "trace-002", "span_id": "span-002"},
            },
        },
    ]
    result = store.upsert_dataset_records(created_dataset.dataset_id, records_batch1)
    assert result["inserted"] == 2
    assert result["updated"] == 1
    loaded_records, next_token = store._load_dataset_records(created_dataset.dataset_id)
    assert len(loaded_records) == 2
    assert next_token is None
    mlflow_record = next(r for r in loaded_records if r.inputs["question"] == "What is MLflow?")
    # Expectations deep-merge: overlapping keys take the newer value, others persist.
    assert mlflow_record.expectations == {
        "answer": "MLflow is an ML platform",
        "score": 0.8,
        "confidence": 0.9,
    }
    assert mlflow_record.tags == {"version": "v2", "quality": "high", "reviewed": "true"}
    # The source from the FIRST occurrence is preserved (trace-001, not trace-002).
    assert mlflow_record.source.source_type == "TRACE"
    assert mlflow_record.source.source_data["trace_id"] == "trace-001"
    assert mlflow_record.source_id == "trace-001"
    initial_update_time = mlflow_record.last_update_time
    time.sleep(0.01)  # ensure the next upsert gets a later timestamp
    # Batch 2 updates the existing MLflow record again and inserts a new one.
    records_batch2 = [
        {
            "inputs": {"question": "What is MLflow?"},
            "expectations": {"answer": "MLflow is the best ML platform", "rating": 5},
            "tags": {"version": "v3"},
        },
        {
            "inputs": {"question": "What is Spark?"},
            "expectations": {"answer": "Spark is a data processing engine"},
        },
    ]
    result = store.upsert_dataset_records(created_dataset.dataset_id, records_batch2)
    assert result["inserted"] == 1
    assert result["updated"] == 1
    loaded_records, next_token = store._load_dataset_records(created_dataset.dataset_id)
    assert len(loaded_records) == 3
    assert next_token is None
    updated_mlflow_record = next(
        r for r in loaded_records if r.inputs["question"] == "What is MLflow?"
    )
    # Merge accumulates across all three upserts of this record.
    assert updated_mlflow_record.expectations == {
        "answer": "MLflow is the best ML platform",
        "score": 0.8,
        "confidence": 0.9,
        "rating": 5,
    }
    assert updated_mlflow_record.tags == {
        "version": "v3",
        "quality": "high",
        "reviewed": "true",
    }
    assert updated_mlflow_record.last_update_time > initial_update_time
    assert updated_mlflow_record.source.source_data["trace_id"] == "trace-001"
    # Minimal/empty payload variations are all valid inserts.
    records_batch3 = [
        {"inputs": {"minimal": "input"}, "expectations": {"result": "minimal test"}},
        {"inputs": {"question": "Empty expectations"}, "expectations": {}},
        {"inputs": {"question": "No tags"}, "expectations": {"answer": "No tags"}, "tags": {}},
    ]
    result = store.upsert_dataset_records(created_dataset.dataset_id, records_batch3)
    assert result["inserted"] == 3
    assert result["updated"] == 0
    # Even an empty inputs dict is accepted.
    result = store.upsert_dataset_records(
        created_dataset.dataset_id,
        [{"inputs": {}, "expectations": {"result": "empty inputs allowed"}}],
    )
    assert result["inserted"] == 1
    assert result["updated"] == 0
    # An empty batch is a no-op.
    empty_result = store.upsert_dataset_records(created_dataset.dataset_id, [])
    assert empty_result["inserted"] == 0
    assert empty_result["updated"] == 0
def test_dataset_associations_and_lazy_loading(store):
    """Experiment associations and records are resolved lazily, not eagerly fetched."""
    experiment_ids = _create_experiments(store, ["test_exp_1", "test_exp_2", "test_exp_3"])
    created_dataset = store.create_dataset(
        name="multi_exp_dataset",
        experiment_ids=experiment_ids,
    )
    retrieved = store.get_dataset(dataset_id=created_dataset.dataset_id)
    # Experiment IDs are not populated by get_dataset...
    assert retrieved._experiment_ids is None
    # ...but accessing the property resolves them through the tracking store.
    with mock.patch("mlflow.tracking._tracking_service.utils._get_store", return_value=store):
        assert set(retrieved.experiment_ids) == set(experiment_ids)
    # The dataset is discoverable via any single associated experiment.
    results = store.search_datasets(experiment_ids=[experiment_ids[1]])
    assert any(d.dataset_id == created_dataset.dataset_id for d in results)
    # Searching two of its experiments yields the dataset exactly once (de-duplicated).
    results = store.search_datasets(experiment_ids=[experiment_ids[0], experiment_ids[2]])
    matching = [d for d in results if d.dataset_id == created_dataset.dataset_id]
    assert len(matching) == 1
    assert matching[0]._experiment_ids is None
    with mock.patch("mlflow.tracking._tracking_service.utils._get_store", return_value=store):
        assert set(matching[0].experiment_ids) == set(experiment_ids)
    records = [{"inputs": {"q": f"Q{i}"}, "expectations": {"a": f"A{i}"}} for i in range(5)]
    store.upsert_dataset_records(created_dataset.dataset_id, records)
    with mock.patch("mlflow.tracking._tracking_service.utils._get_store", return_value=store):
        retrieved = store.get_dataset(dataset_id=created_dataset.dataset_id)
        # Records are fetched on first materialization (to_df), not on get_dataset.
        assert not retrieved.has_records()
        df = retrieved.to_df()
        assert len(df) == 5
        assert retrieved.has_records()
        assert list(df.columns) == [
            "inputs",
            "outputs",
            "expectations",
            "tags",
            "source_type",
            "source_id",
            "source",
            "created_time",
            "dataset_record_id",
        ]
def test_dataset_get_experiment_ids(store):
    """get_dataset_experiment_ids returns the associations, or [] when there are none."""
    experiment_ids = _create_experiments(store, ["exp_1", "exp_2", "exp_3"])
    with_experiments = store.create_dataset(
        name="test_get_experiment_ids",
        experiment_ids=experiment_ids,
    )
    fetched = store.get_dataset_experiment_ids(with_experiments.dataset_id)
    assert set(fetched) == set(experiment_ids)
    # A dataset created with no experiment associations yields an empty list.
    without_experiments = store.create_dataset(
        name="test_no_experiments",
        experiment_ids=[],
    )
    assert store.get_dataset_experiment_ids(without_experiments.dataset_id) == []
    # Unknown and empty dataset IDs also yield [] rather than raising.
    assert store.get_dataset_experiment_ids("d-nonexistent") == []
    assert store.get_dataset_experiment_ids("") == []
def test_dataset_tags_with_sql_backend(store):
    """Dataset tags round-trip through the SQL backend; absent tags read back as {}."""
    tags = {"environment": "production", "version": "2.0", "team": "ml-ops"}
    tagged = store.create_dataset(
        name="tagged_dataset",
        tags=tags,
    )
    assert tagged.tags == tags
    fetched = store.get_dataset(tagged.dataset_id)
    assert fetched.tags == tags
    # Spot-check that each individual tag value survives the round trip.
    for key, value in tags.items():
        assert fetched.tags[key] == value
    # tags=None normalizes to an empty dict when the dataset is read back.
    untagged = store.create_dataset(
        name="no_tags_dataset",
        tags=None,
    )
    assert store.get_dataset(untagged.dataset_id).tags == {}
    # An explicitly empty tag dict behaves the same way.
    empty_tagged = store.create_dataset(
        name="empty_tags_dataset",
        tags={},
        experiment_ids=None,
    )
    assert store.get_dataset(empty_tagged.dataset_id).tags == {}
def test_dataset_update_tags(store):
    """set_dataset_tags merges into existing tags; None values are ignored and
    the dataset's update time/updater are left untouched."""
    initial_tags = {"environment": "development", "version": "1.0", "deprecated": "true"}
    created = store.create_dataset(
        name="test_update_tags",
        tags=initial_tags,
        experiment_ids=None,
    )
    retrieved = store.get_dataset(created.dataset_id)
    assert retrieved.tags == initial_tags
    update_tags = {
        "environment": "production",
        "team": "ml-ops",
        "deprecated": None,  # This will be ignored, not delete the tag
    }
    store.set_dataset_tags(created.dataset_id, update_tags)
    updated = store.get_dataset(created.dataset_id)
    expected_tags = {
        "environment": "production",  # Updated
        "version": "1.0",  # Preserved
        "deprecated": "true",  # Preserved (None didn't delete it)
        "team": "ml-ops",  # Added
    }
    assert updated.tags == expected_tags
    # Tag-only changes do not bump the dataset's update metadata.
    assert updated.last_update_time == created.last_update_time
    assert updated.last_updated_by == created.last_updated_by
    # Setting tags on a dataset created without any works the same way.
    created_no_tags = store.create_dataset(
        name="test_no_initial_tags",
        tags=None,
        experiment_ids=None,
    )
    store.set_dataset_tags(
        created_no_tags.dataset_id, {"new_tag": "value", "mlflow.user": "test_user2"}
    )
    updated_no_tags = store.get_dataset(created_no_tags.dataset_id)
    assert updated_no_tags.tags == {"new_tag": "value", "mlflow.user": "test_user2"}
    assert updated_no_tags.last_update_time == created_no_tags.last_update_time
    assert updated_no_tags.last_updated_by == created_no_tags.last_updated_by
def test_dataset_digest_updates_with_changes(store):
    """Digest changes whenever records change, but not when only tags change."""
    experiment_id = store.create_experiment("test_exp")
    dataset = store.create_dataset(
        name="test_dataset",
        tags={"env": "test"},
        experiment_ids=[experiment_id],
    )
    initial_digest = dataset.digest
    assert initial_digest is not None
    time.sleep(0.01)  # Ensure time difference
    records = [
        {
            "inputs": {"question": "What is MLflow?"},
            "expectations": {"accuracy": 0.95},
        }
    ]
    store.upsert_dataset_records(dataset.dataset_id, records)
    updated_dataset = store.get_dataset(dataset.dataset_id)
    # Adding records must produce a new digest.
    assert updated_dataset.digest != initial_digest
    prev_digest = updated_dataset.digest
    time.sleep(0.01)  # Ensure time difference
    more_records = [
        {
            "inputs": {"question": "How to track experiments?"},
            "expectations": {"accuracy": 0.9},
        }
    ]
    store.upsert_dataset_records(dataset.dataset_id, more_records)
    final_dataset = store.get_dataset(dataset.dataset_id)
    # Each record change yields a digest distinct from all prior states.
    assert final_dataset.digest != prev_digest
    assert final_dataset.digest != initial_digest
    # Tag-only changes do not affect the digest.
    store.set_dataset_tags(dataset.dataset_id, {"new_tag": "value"})
    dataset_after_tags = store.get_dataset(dataset.dataset_id)
    assert dataset_after_tags.digest == final_dataset.digest
def test_sql_dataset_record_merge():
    """SqlEvaluationDatasetRecord.merge deep-merges expectations/tags, preserves
    creation metadata, and bumps last_update_time to "now"."""
    with mock.patch("mlflow.store.tracking.dbmodels.models.get_current_time_millis") as mock_time:
        mock_time.return_value = 2000  # frozen "now" so last_update_time is deterministic
        # Case 1: merge into a fully populated record.
        record = SqlEvaluationDatasetRecord()
        record.expectations = {"accuracy": 0.8, "relevance": 0.7}
        record.tags = {"env": "test"}
        record.created_time = 1000
        record.last_update_time = 1000
        record.created_by = "user1"
        record.last_updated_by = "user1"
        new_data = {
            "expectations": {"accuracy": 0.9, "completeness": 0.95},
            "tags": {"version": "2.0"},
        }
        record.merge(new_data)
        assert record.expectations == {
            "accuracy": 0.9,  # Updated
            "relevance": 0.7,  # Preserved
            "completeness": 0.95,  # Added
        }
        assert record.tags == {
            "env": "test",  # Preserved
            "version": "2.0",  # Added
        }
        assert record.created_time == 1000  # Preserved
        assert record.last_update_time == 2000  # Updated
        assert record.created_by == "user1"  # Preserved
        assert record.last_updated_by == "user1"  # No mlflow.user in tags
        # Case 2: merge into a record whose expectations/tags start as None.
        record2 = SqlEvaluationDatasetRecord()
        record2.expectations = None
        record2.tags = None
        new_data2 = {"expectations": {"accuracy": 0.9}, "tags": {"env": "prod"}}
        record2.merge(new_data2)
        assert record2.expectations == {"accuracy": 0.9}
        assert record2.tags == {"env": "prod"}
        assert record2.last_update_time == 2000
        # Case 3: an mlflow.user tag in the incoming data updates last_updated_by.
        record3 = SqlEvaluationDatasetRecord()
        record3.created_by = "user1"
        record3.last_updated_by = "user1"
        new_data3 = {"tags": {"mlflow.user": "user2", "env": "prod"}}
        record3.merge(new_data3)
        assert record3.created_by == "user1"  # Preserved
        assert record3.last_updated_by == "user2"  # Updated from mlflow.user tag
        # Case 4: merging an empty payload still bumps last_update_time.
        record4 = SqlEvaluationDatasetRecord()
        record4.expectations = {"accuracy": 0.8}
        record4.tags = {"env": "test"}
        record4.last_update_time = 1000
        record4.merge({})
        assert record4.expectations == {"accuracy": 0.8}
        assert record4.tags == {"env": "test"}
        assert record4.last_update_time == 2000
        # Case 5: expectations-only merge leaves tags untouched.
        record5 = SqlEvaluationDatasetRecord()
        record5.expectations = {"accuracy": 0.8}
        record5.tags = {"env": "test"}
        record5.merge({"expectations": {"relevance": 0.9}})
        assert record5.expectations == {"accuracy": 0.8, "relevance": 0.9}
        assert record5.tags == {"env": "test"}  # Unchanged
        # Case 6: tags-only merge leaves expectations untouched.
        record6 = SqlEvaluationDatasetRecord()
        record6.expectations = {"accuracy": 0.8}
        record6.tags = {"env": "test"}
        record6.merge({"tags": {"version": "1.0"}})
        assert record6.expectations == {"accuracy": 0.8}  # Unchanged
        assert record6.tags == {"env": "test", "version": "1.0"}
def test_sql_dataset_record_wrapping_unwrapping():
    """Outputs of any type are wrapped under a reserved key for SQL storage and
    transparently unwrapped when converted back to the entity."""
    from mlflow.entities.dataset_record import DATASET_RECORD_WRAPPED_OUTPUT_KEY

    # String, list, int, and dict outputs all wrap/unwrap the same way.
    cases = [
        ("rec1", "input_hash_123", "string output"),
        ("rec2", "input_hash_456", [1, 2, 3]),
        ("rec3", "input_hash_789", 42),
        ("rec4", "input_hash_abc", {"result": "answer"}),
    ]
    for record_id, input_hash, outputs in cases:
        entity = DatasetRecord(
            dataset_record_id=record_id,
            dataset_id="ds1",
            inputs={"q": "test"},
            outputs=outputs,
            created_time=1000,
            last_update_time=1000,
        )
        sql_record = SqlEvaluationDatasetRecord.from_mlflow_entity(entity, input_hash)
        assert sql_record.outputs == {DATASET_RECORD_WRAPPED_OUTPUT_KEY: outputs}
        assert sql_record.to_mlflow_entity().outputs == outputs

    # None outputs are stored as-is (no wrapping) and round-trip to None.
    none_entity = DatasetRecord(
        dataset_record_id="rec5",
        dataset_id="ds1",
        inputs={"q": "test"},
        outputs=None,
        created_time=1000,
        last_update_time=1000,
    )
    none_sql = SqlEvaluationDatasetRecord.from_mlflow_entity(none_entity, "input_hash_def")
    assert none_sql.outputs is None
    assert none_sql.to_mlflow_entity().outputs is None

    # merge() applies the same wrapping, whether replacing existing outputs...
    replaced = SqlEvaluationDatasetRecord()
    replaced.outputs = {"old": "data"}
    replaced.merge({"outputs": "new string output"})
    assert replaced.outputs == {DATASET_RECORD_WRAPPED_OUTPUT_KEY: "new string output"}
    # ...or populating a previously empty record.
    fresh = SqlEvaluationDatasetRecord()
    fresh.outputs = None
    fresh.merge({"outputs": {"new": "dict"}})
    assert fresh.outputs == {DATASET_RECORD_WRAPPED_OUTPUT_KEY: {"new": "dict"}}
@pytest.mark.asyncio
@pytest.mark.parametrize("is_async", [False, True])
async def test_log_spans_default_trace_status_in_progress(store: SqlAlchemyStore, is_async: bool):
    """Test that trace status defaults to IN_PROGRESS when no root span is present."""
    experiment_id = store.create_experiment("test_default_in_progress")
    # Generate a proper MLflow trace ID in the format "tr-<32-char-hex>"
    trace_id = f"tr-{uuid.uuid4().hex}"
    # Create a child span (has parent, not a root span). Use the shared
    # create_mock_span_context helper rather than hand-building mock.Mock()
    # contexts, for consistency with the other log_spans tests in this file.
    child_context = create_mock_span_context(56789, 777)
    parent_context = create_mock_span_context(56789, 888)  # Parent span not included in log
    child_otel_span = OTelReadableSpan(
        name="child_span_only",
        context=child_context,
        parent=parent_context,  # Has parent, not a root span
        attributes={
            "mlflow.traceRequestId": json.dumps(trace_id),
            "mlflow.spanType": json.dumps("LLM", cls=TraceJSONEncoder),
        },
        start_time=2000000000,
        end_time=3000000000,
        status=trace_api.Status(trace_api.StatusCode.OK),
        resource=_OTelResource.get_empty(),
    )
    child_span = create_mlflow_span(child_otel_span, trace_id, "LLM")
    # Log only the child span (no root span)
    if is_async:
        await store.log_spans_async(experiment_id, [child_span])
    else:
        store.log_spans(experiment_id, [child_span])
    # Check trace was created with IN_PROGRESS status (default when no root span)
    traces, _ = store.search_traces([experiment_id])
    trace = next(t for t in traces if t.request_id == trace_id)
    assert trace.state.value == "IN_PROGRESS"
@pytest.mark.asyncio
@pytest.mark.parametrize("is_async", [False, True])
@pytest.mark.parametrize(
    ("span_status_code", "expected_trace_status"),
    [
        (trace_api.StatusCode.OK, "OK"),
        (trace_api.StatusCode.ERROR, "ERROR"),
    ],
)
async def test_log_spans_sets_trace_status_from_root_span(
    store: SqlAlchemyStore,
    is_async: bool,
    span_status_code: trace_api.StatusCode,
    expected_trace_status: str,
):
    """Test that trace status is correctly set from root span status."""
    experiment_id = store.create_experiment("test_trace_status_from_root")
    # Generate a proper MLflow trace ID in the format "tr-<32-char-hex>"
    trace_id = f"tr-{uuid.uuid4().hex}"
    # Create root span with specified status; a description is only meaningful
    # (and only supplied) for ERROR statuses.
    description = (
        f"Root span {span_status_code.name}"
        if span_status_code == trace_api.StatusCode.ERROR
        else None
    )
    # Offset the numeric IDs per parametrized case so spans don't collide.
    root_otel_span = create_test_otel_span(
        trace_id=trace_id,
        name=f"root_span_{span_status_code.name}",
        status_code=span_status_code,
        status_description=description,
        trace_id_num=12345 + span_status_code.value,
        span_id_num=111 + span_status_code.value,
    )
    root_span = create_mlflow_span(root_otel_span, trace_id, "LLM")
    # Log the span
    if is_async:
        await store.log_spans_async(experiment_id, [root_span])
    else:
        store.log_spans(experiment_id, [root_span])
    # Verify trace has expected status from root span
    traces, _ = store.search_traces([experiment_id])
    trace = next(t for t in traces if t.request_id == trace_id)
    assert trace.state.value == expected_trace_status
@pytest.mark.asyncio
@pytest.mark.parametrize("is_async", [False, True])
async def test_log_spans_unset_root_span_status_defaults_to_ok(
    store: SqlAlchemyStore, is_async: bool
):
    """Test that UNSET root span status (unexpected) defaults to OK trace status."""
    experiment_id = store.create_experiment("test_unset_root_span")
    # Generate a proper MLflow trace ID in the format "tr-<32-char-hex>"
    trace_id = f"tr-{uuid.uuid4().hex}"
    # Create root span with UNSET status (this is unexpected in practice)
    root_unset_span = create_test_otel_span(
        trace_id=trace_id,
        name="root_span_unset",
        status_code=trace_api.StatusCode.UNSET,  # Unexpected in practice
        start_time=3000000000,
        end_time=4000000000,
        trace_id_num=23456,
        span_id_num=333,
    )
    root_span = create_mlflow_span(root_unset_span, trace_id, "LLM")
    # Exercise both the sync and async logging paths.
    if is_async:
        await store.log_spans_async(experiment_id, [root_span])
    else:
        store.log_spans(experiment_id, [root_span])
    # Verify trace defaults to OK status when root span has UNSET status
    traces, _ = store.search_traces([experiment_id])
    trace = next(t for t in traces if t.request_id == trace_id)
    assert trace.state.value == "OK"
@pytest.mark.asyncio
@pytest.mark.parametrize("is_async", [False, True])
async def test_log_spans_updates_in_progress_trace_status_from_root_span(
    store: SqlAlchemyStore, is_async: bool
):
    """Test that IN_PROGRESS trace status is updated from root span on subsequent logs."""
    experiment_id = store.create_experiment("test_trace_status_update")
    # Generate a proper MLflow trace ID in the format "tr-<32-char-hex>"
    trace_id = f"tr-{uuid.uuid4().hex}"
    # First, log a non-root span which will create trace with default IN_PROGRESS status
    parent_context = create_mock_span_context(45678, 555)  # Will be root span later
    child_otel_span = create_test_otel_span(
        trace_id=trace_id,
        name="child_span",
        parent=parent_context,  # Has parent, not a root span
        status_code=trace_api.StatusCode.OK,
        start_time=1100000000,
        end_time=1900000000,
        trace_id_num=45678,
        span_id_num=666,
    )
    child_span = create_mlflow_span(child_otel_span, trace_id, "LLM")
    if is_async:
        await store.log_spans_async(experiment_id, [child_span])
    else:
        store.log_spans(experiment_id, [child_span])
    # Verify trace was created with IN_PROGRESS status (default when no root span)
    traces, _ = store.search_traces([experiment_id])
    trace = next(t for t in traces if t.request_id == trace_id)
    assert trace.state.value == "IN_PROGRESS"
    # Now log root span with ERROR status
    # (span_id_num=555 matches the parent_context used by the child above).
    root_otel_span = create_test_otel_span(
        trace_id=trace_id,
        name="root_span",
        parent=None,  # Root span
        status_code=trace_api.StatusCode.ERROR,
        status_description="Root span error",
        trace_id_num=45678,
        span_id_num=555,
    )
    root_span = create_mlflow_span(root_otel_span, trace_id, "LLM")
    if is_async:
        await store.log_spans_async(experiment_id, [root_span])
    else:
        store.log_spans(experiment_id, [root_span])
    # Check trace status was updated to ERROR from root span
    traces, _ = store.search_traces([experiment_id])
    trace = next(t for t in traces if t.request_id == trace_id)
    assert trace.state.value == "ERROR"
@pytest.mark.asyncio
@pytest.mark.parametrize("is_async", [False, True])
async def test_log_spans_updates_state_unspecified_trace_status_from_root_span(
    store: SqlAlchemyStore, is_async: bool
):
    """Logging an earlier-starting root span onto an existing trace keeps the OK state.

    NOTE(review): despite the name, this test starts from an OK trace rather
    than a STATE_UNSPECIFIED one — confirm whether the initial span was meant
    to use UNSET status.
    """
    experiment_id = store.create_experiment("test_unspecified_update")
    # Generate a proper MLflow trace ID in the format "tr-<32-char-hex>"
    trace_id = f"tr-{uuid.uuid4().hex}"
    # First, create a trace with OK status by logging a root span with OK status
    initial_span = create_test_span(
        trace_id=trace_id,
        name="initial_unset_span",
        span_id=999,
        status=trace_api.StatusCode.OK,
        trace_num=67890,
    )
    if is_async:
        await store.log_spans_async(experiment_id, [initial_span])
    else:
        store.log_spans(experiment_id, [initial_span])
    # Verify trace was created with OK status
    trace = store.get_trace_info(trace_id)
    assert trace.state.value == "OK"
    # Now log a new root span with OK status (earlier start time makes it the new root)
    new_root_span = create_test_span(
        trace_id=trace_id,
        name="new_root_span",
        span_id=1000,
        status=trace_api.StatusCode.OK,
        start_ns=500000000,  # Earlier than initial span
        end_ns=2500000000,
        trace_num=67890,
    )
    if is_async:
        await store.log_spans_async(experiment_id, [new_root_span])
    else:
        store.log_spans(experiment_id, [new_root_span])
    # Check trace status was updated to OK from root span
    traces, _ = store.search_traces([experiment_id])
    trace = next(t for t in traces if t.request_id == trace_id)
    assert trace.state.value == "OK"
@pytest.mark.asyncio
@pytest.mark.parametrize("is_async", [False, True])
async def test_log_spans_does_not_update_finalized_trace_status(
    store: SqlAlchemyStore, is_async: bool
):
    """Test that finalized trace statuses (OK, ERROR) are not updated by root span."""
    experiment_id = store.create_experiment("test_no_update_finalized")
    # Test that OK status is not updated
    # Generate a proper MLflow trace ID in the format "tr-<32-char-hex>"
    trace_id_ok = f"tr-{uuid.uuid4().hex}"
    # Create initial root span with OK status
    ok_span = create_test_span(
        trace_id=trace_id_ok,
        name="ok_root_span",
        span_id=1111,
        status=trace_api.StatusCode.OK,
        trace_num=78901,
    )
    if is_async:
        await store.log_spans_async(experiment_id, [ok_span])
    else:
        store.log_spans(experiment_id, [ok_span])
    # Verify trace has OK status
    traces, _ = store.search_traces([experiment_id])
    trace_ok = next(t for t in traces if t.request_id == trace_id_ok)
    assert trace_ok.state.value == "OK"
    # Now log a new root span with ERROR status
    # (earlier start time makes it the new root span of the trace).
    error_span = create_test_span(
        trace_id=trace_id_ok,
        name="error_root_span",
        span_id=2222,
        status=trace_api.StatusCode.ERROR,
        status_desc="New error",
        start_ns=500000000,
        end_ns=2500000000,
        trace_num=78901,
    )
    if is_async:
        await store.log_spans_async(experiment_id, [error_span])
    else:
        store.log_spans(experiment_id, [error_span])
    # Verify trace status is still OK (not updated to ERROR)
    traces, _ = store.search_traces([experiment_id])
    trace_ok = next(t for t in traces if t.request_id == trace_id_ok)
    assert trace_ok.state.value == "OK"
def _create_trace_info(trace_id: str, experiment_id: str):
    """Build a minimal OK-state TraceInfo fixture bound to *experiment_id*."""
    location = trace_location.TraceLocation.from_experiment_id(experiment_id)
    return TraceInfo(
        trace_id=trace_id,
        trace_location=location,
        state=TraceState.OK,
        request_time=1234,
        execution_duration=100,
        trace_metadata={"rq1": "foo", "rq2": "bar"},
        tags={"tag1": "apple", "tag2": "orange"},
    )
def test_link_traces_to_run(store: SqlAlchemyStore):
    """Traces linked to a run become searchable via the run_id filter."""
    exp_id = store.create_experiment(f"exp-{uuid.uuid4()}")
    run = store.create_run(exp_id, user_id="user", start_time=0, tags=[], run_name="test_run")
    trace_ids = []
    for idx in range(5):
        info = _create_trace_info(f"trace-{idx}", exp_id)
        store.start_trace(info)
        trace_ids.append(info.trace_id)
    store.link_traces_to_run(trace_ids, run.info.run_id)
    # All five linked traces are returned by a run_id-filtered search.
    linked, _ = store.search_traces(
        experiment_ids=[exp_id], filter_string=f"run_id = '{run.info.run_id}'"
    )
    assert len(linked) == 5
def test_link_traces_to_run_100_limit(store: SqlAlchemyStore):
    """Linking more than 100 traces to a run in one call is rejected."""
    exp_id = store.create_experiment(f"exp-{uuid.uuid4()}")
    run = store.create_run(exp_id, user_id="user", start_time=0, tags=[], run_name="test_run")
    # Create 101 traces — one past the allowed maximum.
    trace_ids = []
    for idx in range(101):
        info = _create_trace_info(f"trace-{idx}", exp_id)
        store.start_trace(info)
        trace_ids.append(info.trace_id)
    with pytest.raises(MlflowException, match="Cannot link more than 100 traces to a run"):
        store.link_traces_to_run(trace_ids, run.info.run_id)
def test_link_traces_to_run_duplicate_trace_ids(store: SqlAlchemyStore):
    """Re-linking already-linked trace ids is idempotent, not an error."""
    exp_id = store.create_experiment(f"exp-{uuid.uuid4()}")
    trace_ids = ["trace-1", "trace-2", "trace-3", "trace-4"]
    for tid in trace_ids:
        store.start_trace(_create_trace_info(tid, exp_id))
    run = store.create_run(exp_id, user_id="user", start_time=0, tags=[], run_name="test_run")
    search_args = {"experiment_ids": [exp_id], "filter_string": f"run_id = '{run.info.run_id}'"}

    def linked_count():
        # Number of traces currently associated with the run.
        return len(store.search_traces(**search_args)[0])

    store.link_traces_to_run(["trace-1", "trace-2", "trace-3"], run.info.run_id)
    assert linked_count() == 3
    # "trace-3" is already linked; only "trace-4" should actually be added.
    store.link_traces_to_run(["trace-3", "trace-4"], run.info.run_id)
    assert linked_count() == 4
    # A call containing only duplicates changes nothing.
    store.link_traces_to_run(["trace-1", "trace-2"], run.info.run_id)
    assert linked_count() == 4
def test_scorer_operations(store: SqlAlchemyStore) -> None:
    """
    Test the scorer operations: register_scorer, list_scorers, get_scorer, and delete_scorer.
    This test covers:
    1. Registering multiple scorers with different names
    2. Registering multiple versions of the same scorer
    3. Listing scorers (should return latest version for each name)
    4. Getting specific scorer versions
    5. Getting latest scorer version when version is not specified
    6. Deleting scorers and verifying they are deleted
    7. Listing all versions of a scorer via list_scorer_versions
    """
    # Create an experiment for testing
    experiment_id = store.create_experiment("test_scorer_experiment")
    # Step 1: registering the same name repeatedly bumps its version
    # (accuracy ends at v3, safety at v2, relevance at v1).
    store.register_scorer(experiment_id, "accuracy_scorer", "serialized_accuracy_scorer1")
    store.register_scorer(experiment_id, "accuracy_scorer", "serialized_accuracy_scorer2")
    store.register_scorer(experiment_id, "accuracy_scorer", "serialized_accuracy_scorer3")
    store.register_scorer(experiment_id, "safety_scorer", "serialized_safety_scorer1")
    store.register_scorer(experiment_id, "safety_scorer", "serialized_safety_scorer2")
    store.register_scorer(experiment_id, "relevance_scorer", "relevance_scorer_scorer1")
    # Step 2: Test list_scorers - should return latest version for each scorer name
    scorers = store.list_scorers(experiment_id)
    # Should return 3 scorers (one for each unique name)
    assert len(scorers) == 3, f"Expected 3 scorers, got {len(scorers)}"
    scorer_names = [scorer.scorer_name for scorer in scorers]
    # Verify the order is sorted by scorer_name
    assert scorer_names == ["accuracy_scorer", "relevance_scorer", "safety_scorer"], (
        f"Expected sorted order, got {scorer_names}"
    )
    # Verify versions are the latest and check serialized_scorer content
    for scorer in scorers:
        if scorer.scorer_name == "accuracy_scorer":
            assert scorer.scorer_version == 3, (
                f"Expected version 3 for accuracy_scorer, got {scorer.scorer_version}"
            )
            assert scorer._serialized_scorer == "serialized_accuracy_scorer3"
        elif scorer.scorer_name == "safety_scorer":
            assert scorer.scorer_version == 2, (
                f"Expected version 2 for safety_scorer, got {scorer.scorer_version}"
            )
            assert scorer._serialized_scorer == "serialized_safety_scorer2"
        elif scorer.scorer_name == "relevance_scorer":
            assert scorer.scorer_version == 1, (
                f"Expected version 1 for relevance_scorer, got {scorer.scorer_version}"
            )
            assert scorer._serialized_scorer == "relevance_scorer_scorer1"
    # Test list_scorer_versions
    accuracy_scorer_versions = store.list_scorer_versions(experiment_id, "accuracy_scorer")
    assert len(accuracy_scorer_versions) == 3, (
        f"Expected 3 versions, got {len(accuracy_scorer_versions)}"
    )
    # Verify versions are ordered by version number
    assert accuracy_scorer_versions[0].scorer_version == 1
    assert accuracy_scorer_versions[0]._serialized_scorer == "serialized_accuracy_scorer1"
    assert accuracy_scorer_versions[1].scorer_version == 2
    assert accuracy_scorer_versions[1]._serialized_scorer == "serialized_accuracy_scorer2"
    assert accuracy_scorer_versions[2].scorer_version == 3
    assert accuracy_scorer_versions[2]._serialized_scorer == "serialized_accuracy_scorer3"
    # Step 3: Test get_scorer with specific versions
    # Get accuracy_scorer version 1
    accuracy_v1 = store.get_scorer(experiment_id, "accuracy_scorer", version=1)
    assert accuracy_v1._serialized_scorer == "serialized_accuracy_scorer1"
    assert accuracy_v1.scorer_version == 1
    # Get accuracy_scorer version 2
    accuracy_v2 = store.get_scorer(experiment_id, "accuracy_scorer", version=2)
    assert accuracy_v2._serialized_scorer == "serialized_accuracy_scorer2"
    assert accuracy_v2.scorer_version == 2
    # Get accuracy_scorer version 3 (latest)
    accuracy_v3 = store.get_scorer(experiment_id, "accuracy_scorer", version=3)
    assert accuracy_v3._serialized_scorer == "serialized_accuracy_scorer3"
    assert accuracy_v3.scorer_version == 3
    # Step 4: Test get_scorer without version (should return latest)
    accuracy_latest = store.get_scorer(experiment_id, "accuracy_scorer")
    assert accuracy_latest._serialized_scorer == "serialized_accuracy_scorer3"
    assert accuracy_latest.scorer_version == 3
    safety_latest = store.get_scorer(experiment_id, "safety_scorer")
    assert safety_latest._serialized_scorer == "serialized_safety_scorer2"
    assert safety_latest.scorer_version == 2
    relevance_latest = store.get_scorer(experiment_id, "relevance_scorer")
    assert relevance_latest._serialized_scorer == "relevance_scorer_scorer1"
    assert relevance_latest.scorer_version == 1
    # Step 5: Test error cases for get_scorer
    # Try to get non-existent scorer
    with pytest.raises(MlflowException, match="Scorer with name 'non_existent' not found"):
        store.get_scorer(experiment_id, "non_existent")
    # Try to get non-existent version
    with pytest.raises(
        MlflowException, match="Scorer with name 'accuracy_scorer' and version 999 not found"
    ):
        store.get_scorer(experiment_id, "accuracy_scorer", version=999)
    # Step 6: Test delete_scorer - delete specific version of accuracy_scorer
    # Delete version 1 of accuracy_scorer
    store.delete_scorer(experiment_id, "accuracy_scorer", version=1)
    # Verify version 1 is deleted but other versions still exist
    with pytest.raises(
        MlflowException, match="Scorer with name 'accuracy_scorer' and version 1 not found"
    ):
        store.get_scorer(experiment_id, "accuracy_scorer", version=1)
    # Verify versions 2 and 3 still exist
    accuracy_v2 = store.get_scorer(experiment_id, "accuracy_scorer", version=2)
    assert accuracy_v2._serialized_scorer == "serialized_accuracy_scorer2"
    assert accuracy_v2.scorer_version == 2
    accuracy_v3 = store.get_scorer(experiment_id, "accuracy_scorer", version=3)
    assert accuracy_v3._serialized_scorer == "serialized_accuracy_scorer3"
    assert accuracy_v3.scorer_version == 3
    # Verify latest version still works
    accuracy_latest_after_partial_delete = store.get_scorer(experiment_id, "accuracy_scorer")
    assert accuracy_latest_after_partial_delete._serialized_scorer == "serialized_accuracy_scorer3"
    assert accuracy_latest_after_partial_delete.scorer_version == 3
    # Step 7: Test delete_scorer - delete all versions of accuracy_scorer
    # Omitting `version` deletes every remaining version of the named scorer.
    store.delete_scorer(experiment_id, "accuracy_scorer")
    # Verify accuracy_scorer is completely deleted
    with pytest.raises(MlflowException, match="Scorer with name 'accuracy_scorer' not found"):
        store.get_scorer(experiment_id, "accuracy_scorer")
    # Verify other scorers still exist
    safety_latest_after_delete = store.get_scorer(experiment_id, "safety_scorer")
    assert safety_latest_after_delete._serialized_scorer == "serialized_safety_scorer2"
    assert safety_latest_after_delete.scorer_version == 2
    relevance_latest_after_delete = store.get_scorer(experiment_id, "relevance_scorer")
    assert relevance_latest_after_delete._serialized_scorer == "relevance_scorer_scorer1"
    assert relevance_latest_after_delete.scorer_version == 1
    # Step 8: Test list_scorers after deletion
    scorers_after_delete = store.list_scorers(experiment_id)
    assert len(scorers_after_delete) == 2, (
        f"Expected 2 scorers after deletion, got {len(scorers_after_delete)}"
    )
    scorer_names_after_delete = [scorer.scorer_name for scorer in scorers_after_delete]
    assert "accuracy_scorer" not in scorer_names_after_delete
    assert "safety_scorer" in scorer_names_after_delete
    assert "relevance_scorer" in scorer_names_after_delete
    # Step 9: Test delete_scorer for non-existent scorer
    with pytest.raises(MlflowException, match="Scorer with name 'non_existent' not found"):
        store.delete_scorer(experiment_id, "non_existent")
    # Step 10: Test delete_scorer for non-existent version
    with pytest.raises(
        MlflowException, match="Scorer with name 'safety_scorer' and version 999 not found"
    ):
        store.delete_scorer(experiment_id, "safety_scorer", version=999)
    # Step 11: Test delete_scorer for remaining scorers
    store.delete_scorer(experiment_id, "safety_scorer")
    store.delete_scorer(experiment_id, "relevance_scorer")
    # Verify all scorers are deleted
    final_scorers = store.list_scorers(experiment_id)
    assert len(final_scorers) == 0, (
        f"Expected 0 scorers after all deletions, got {len(final_scorers)}"
    )
    # Step 12: Test list_scorer_versions
    # Re-register so the name exists again, then query a name that never existed.
    store.register_scorer(experiment_id, "accuracy_scorer", "serialized_accuracy_scorer1")
    store.register_scorer(experiment_id, "accuracy_scorer", "serialized_accuracy_scorer2")
    store.register_scorer(experiment_id, "accuracy_scorer", "serialized_accuracy_scorer3")
    # Test list_scorer_versions for non-existent scorer
    with pytest.raises(MlflowException, match="Scorer with name 'non_existent_scorer' not found"):
        store.list_scorer_versions(experiment_id, "non_existent_scorer")
def test_dataset_experiment_associations(store):
    """Add/remove experiment associations on a dataset, including no-op removals."""
    with mock.patch("mlflow.tracking._tracking_service.utils._get_store", return_value=store):
        exp1, exp2, exp3, exp4 = _create_experiments(
            store, ["exp_assoc_1", "exp_assoc_2", "exp_assoc_3", "exp_assoc_4"]
        )
        dataset = store.create_dataset(
            name="test_dataset_associations", experiment_ids=[exp1], tags={"test": "associations"}
        )
        assert dataset.experiment_ids == [exp1]
        # First add two new experiments.
        after_add = store.add_dataset_to_experiments(
            dataset_id=dataset.dataset_id, experiment_ids=[exp2, exp3]
        )
        assert set(after_add.experiment_ids) == {exp1, exp2, exp3}
        # Adding a mix of an existing and a new experiment is allowed.
        after_second_add = store.add_dataset_to_experiments(
            dataset_id=dataset.dataset_id, experiment_ids=[exp2, exp4]
        )
        assert set(after_second_add.experiment_ids) == {exp1, exp2, exp3, exp4}
        after_remove = store.remove_dataset_from_experiments(
            dataset_id=dataset.dataset_id, experiment_ids=[exp2, exp3]
        )
        assert set(after_remove.experiment_ids) == {exp1, exp4}
        # Removing already-removed experiments only warns (once per id).
        with mock.patch("mlflow.store.tracking.sqlalchemy_store._logger.warning") as mock_warning:
            after_noop = store.remove_dataset_from_experiments(
                dataset_id=dataset.dataset_id, experiment_ids=[exp2, exp3]
            )
            assert mock_warning.call_count == 2
            assert "was not associated" in mock_warning.call_args_list[0][0][0]
            assert set(after_noop.experiment_ids) == {exp1, exp4}
        # Unknown dataset ids or experiment ids raise "not found".
        with pytest.raises(MlflowException, match="not found"):
            store.add_dataset_to_experiments(dataset_id="d-nonexistent", experiment_ids=[exp1])
        with pytest.raises(MlflowException, match="not found"):
            store.add_dataset_to_experiments(
                dataset_id=dataset.dataset_id, experiment_ids=["999999"]
            )
        with pytest.raises(MlflowException, match="not found"):
            store.remove_dataset_from_experiments(dataset_id="d-nonexistent", experiment_ids=[exp1])
def _create_simple_trace(store, experiment_id, tags=None):
    """Start a minimal OK trace in *experiment_id* with the given tags."""
    now_ms = time.time_ns() // 1_000_000
    info = TraceInfo(
        trace_id=f"tr-{uuid.uuid4()}",
        trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
        request_time=now_ms,
        execution_duration=100,
        state=TraceState.OK,
        tags=tags or {},
    )
    return store.start_trace(info)
def _create_trace_for_correlation(store, experiment_id, spans=None, assessments=None, tags=None):
    """Create an OK trace whose tags summarize ``spans``; optionally log assessments.

    Derived tags (string-valued, computed from the span configs):
      - ``primary_span_type``: "TOOL" if any TOOL span exists, else "LLM" if any LLM span
      - ``has_llm`` / ``has_tool``: "true" when a span of that type exists
      - ``has_error``: "true" if any span status is "ERROR", else "false"
      - ``tool_count``: number of TOOL spans, set only when greater than zero

    Returns the new trace id.
    """
    trace_id = f"tr-{uuid.uuid4()}"
    timestamp_ms = time.time_ns() // 1_000_000
    # BUG FIX: the original used `tags or {}`, which aliases the caller's dict and
    # then mutates it below, leaking the derived tags back into the caller's object.
    # Copy instead so the caller's dict is never modified.
    trace_tags = dict(tags) if tags else {}
    if spans:
        span_types = [span.get("type", "LLM") for span in spans]
        span_statuses = [span.get("status", "OK") for span in spans]
        # TOOL takes precedence over LLM as the primary span type.
        if "TOOL" in span_types:
            trace_tags["primary_span_type"] = "TOOL"
        elif "LLM" in span_types:
            trace_tags["primary_span_type"] = "LLM"
        if "LLM" in span_types:
            trace_tags["has_llm"] = "true"
        if "TOOL" in span_types:
            trace_tags["has_tool"] = "true"
        trace_tags["has_error"] = "true" if "ERROR" in span_statuses else "false"
        tool_count = sum(1 for t in span_types if t == "TOOL")
        if tool_count > 0:
            trace_tags["tool_count"] = str(tool_count)
    trace_info = TraceInfo(
        trace_id=trace_id,
        trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
        request_time=timestamp_ms,
        execution_duration=100,
        state=TraceState.OK,
        tags=trace_tags,
    )
    store.start_trace(trace_info)
    if assessments:
        # Log each requested assessment as HUMAN-sourced feedback on the trace.
        for assessment_data in assessments:
            assessment = Feedback(
                assessment_id=assessment_data.get("assessment_id", f"fb-{uuid.uuid4()}"),
                trace_id=trace_id,
                name=assessment_data.get("name", "quality"),
                assessment_type=assessment_data.get("assessment_type", "feedback"),
                source=AssessmentSource(
                    source_type=AssessmentSourceType.HUMAN,
                    source_id=assessment_data.get("source_id", "user123"),
                ),
                value=FeedbackValue(assessment_data.get("value", 0.8)),
                created_timestamp=timestamp_ms,
                last_updated_timestamp=timestamp_ms,
            )
            store.log_assessments([assessment])
    return trace_id
def _create_trace_with_spans_for_correlation(store, experiment_id, span_configs):
    """Convenience wrapper: create a correlation trace from span configs only."""
    return _create_trace_for_correlation(store=store, experiment_id=experiment_id, spans=span_configs)
def test_calculate_trace_filter_correlation_basic(store):
    """Filters that select exactly the same traces yield an NPMI of 1.0."""
    exp_id = _create_experiments(store, "correlation_test")
    # 10 TOOL traces that errored, 5 LLM traces that succeeded.
    for _ in range(10):
        _create_trace_with_spans_for_correlation(
            store,
            exp_id,
            span_configs=[{"name": "tool_operation", "type": "TOOL", "status": "ERROR"}],
        )
    for _ in range(5):
        _create_trace_with_spans_for_correlation(
            store,
            exp_id,
            span_configs=[{"name": "llm_call", "type": "LLM", "status": "OK"}],
        )
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id],
        filter_string1='tags.primary_span_type = "TOOL"',
        filter_string2='tags.has_error = "true"',
    )
    # Both filters match exactly the same 10 traces out of 15.
    assert result.npmi == pytest.approx(1.0)
    assert result.filter1_count == 10
    assert result.filter2_count == 10
    assert result.joint_count == 10
    assert result.total_count == 15
def test_calculate_trace_filter_correlation_perfect(store):
    """Perfectly co-occurring filters give NPMI 1.0; smoothing stays close."""
    exp_id = _create_experiments(store, "correlation_test")
    for _ in range(8):
        _create_trace_with_spans_for_correlation(
            store,
            exp_id,
            span_configs=[{"name": "operation", "type": "TOOL", "status": "ERROR"}],
        )
    for _ in range(7):
        _create_trace_with_spans_for_correlation(
            store,
            exp_id,
            span_configs=[{"name": "operation", "type": "LLM", "status": "OK"}],
        )
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id],
        filter_string1='tags.primary_span_type = "TOOL"',
        filter_string2='tags.has_error = "true"',
    )
    assert result.npmi == pytest.approx(1.0)
    # The smoothed estimate is pulled slightly toward 0 but stays strongly positive.
    assert result.npmi_smoothed > 0.8
    assert result.filter1_count == 8
    assert result.filter2_count == 8
    assert result.joint_count == 8
    assert result.total_count == 15
def test_calculate_trace_filter_correlation_count_expressions(store):
    """Tags derived from span counts can be used in correlation filters."""
    exp_id = _create_experiments(store, "correlation_test")
    # First 10 traces get five TOOL spans each, the last 5 only two; all get an LLM span.
    for idx in range(15):
        tool_calls = 5 if idx < 10 else 2
        configs = [{"type": "TOOL", "name": f"tool_{j}"} for j in range(tool_calls)]
        configs.append({"type": "LLM", "name": "llm_call"})
        _create_trace_with_spans_for_correlation(store, exp_id, span_configs=configs)
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id],
        filter_string1='tags.tool_count = "5"',
        filter_string2='tags.has_llm = "true"',
    )
    assert result.filter1_count == 10
    assert result.filter2_count == 15
    assert result.joint_count == 10
    assert result.total_count == 15
def test_calculate_trace_filter_correlation_negative_correlation(store):
    """Mutually exclusive filters produce an NPMI of -1.0."""
    exp_id = _create_experiments(store, "negative_correlation_test")
    # v1 traces always error; v2 traces never do.
    for _ in range(10):
        _create_trace_for_correlation(
            store, exp_id, spans=[{"type": "LLM", "status": "ERROR"}], tags={"version": "v1"}
        )
    for _ in range(10):
        _create_trace_for_correlation(
            store, exp_id, spans=[{"type": "LLM", "status": "OK"}], tags={"version": "v2"}
        )
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id],
        filter_string1='tags.version = "v1"',
        filter_string2='tags.has_error = "false"',
    )
    # No v1 trace is error-free, so the joint count is zero.
    assert result.total_count == 20
    assert result.filter1_count == 10
    assert result.filter2_count == 10
    assert result.joint_count == 0
    assert result.npmi == pytest.approx(-1.0)
def test_calculate_trace_filter_correlation_zero_counts(store):
    """NPMI is NaN when one of the filters matches no traces."""
    exp_id = _create_experiments(store, "zero_counts_test")
    for _ in range(5):
        _create_trace_for_correlation(store, exp_id, spans=[{"type": "LLM", "status": "OK"}])
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id],
        filter_string1='tags.has_error = "true"',
        filter_string2='tags.has_llm = "true"',
    )
    # Every trace succeeded, so the error filter is empty and NPMI is undefined.
    assert result.total_count == 5
    assert result.filter1_count == 0
    assert result.filter2_count == 5
    assert result.joint_count == 0
    assert math.isnan(result.npmi)
def test_calculate_trace_filter_correlation_multiple_experiments(store):
    """Correlation is computed across all requested experiments combined."""
    exp_id1 = _create_experiments(store, "multi_exp_1")
    exp_id2 = _create_experiments(store, "multi_exp_2")
    # exp1: four prod TOOL traces plus one prod LLM trace.
    for _ in range(4):
        _create_trace_for_correlation(
            store, exp_id1, spans=[{"type": "TOOL", "status": "OK"}], tags={"env": "prod"}
        )
    _create_trace_for_correlation(
        store, exp_id1, spans=[{"type": "LLM", "status": "OK"}], tags={"env": "prod"}
    )
    # exp2: one dev TOOL trace plus four dev LLM traces.
    _create_trace_for_correlation(
        store, exp_id2, spans=[{"type": "TOOL", "status": "OK"}], tags={"env": "dev"}
    )
    for _ in range(4):
        _create_trace_for_correlation(
            store, exp_id2, spans=[{"type": "LLM", "status": "OK"}], tags={"env": "dev"}
        )
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id1, exp_id2],
        filter_string1='tags.env = "prod"',
        filter_string2='tags.primary_span_type = "TOOL"',
    )
    # 4 of the 5 prod traces are TOOL traces -> positive correlation.
    assert result.total_count == 10
    assert result.filter1_count == 5
    assert result.filter2_count == 5
    assert result.joint_count == 4
    assert result.npmi > 0.4
def test_calculate_trace_filter_correlation_independent_events(store):
    """Statistically independent filters yield an NPMI near zero."""
    exp_id = _create_experiments(store, "independent_test")
    # Five traces per TOOL/LLM x ERROR/OK quadrant -> perfect independence.
    for span_type in ("TOOL", "LLM"):
        for status in ("ERROR", "OK"):
            for _ in range(5):
                _create_trace_for_correlation(
                    store, exp_id, spans=[{"type": span_type, "status": status}]
                )
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id],
        filter_string1='tags.primary_span_type = "TOOL"',
        filter_string2='tags.has_error = "true"',
    )
    assert result.total_count == 20
    assert result.filter1_count == 10
    assert result.filter2_count == 10
    assert result.joint_count == 5
    # P(TOOL) = P(ERROR) = 0.5 and P(TOOL & ERROR) = 0.25 = P(TOOL) * P(ERROR),
    # i.e. the events are independent, so the normalized PMI should be ~0.
    assert abs(result.npmi) < 0.1
def test_calculate_trace_filter_correlation_simplified_example(store):
    """Counts line up with a small hand-computed contingency table."""
    exp_id = _create_experiments(store, "simple_correlation_test")
    populations = [
        (5, {"category": "A", "status": "success"}),
        (3, {"category": "A", "status": "failure"}),
        (7, {"category": "B", "status": "success"}),
    ]
    for count, trace_tags in populations:
        for _ in range(count):
            _create_simple_trace(store, exp_id, dict(trace_tags))
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id],
        filter_string1='tags.category = "A"',
        filter_string2='tags.status = "success"',
    )
    # |A| = 8, |success| = 12, |A and success| = 5, total = 15.
    assert result.filter1_count == 8
    assert result.filter2_count == 12
    assert result.joint_count == 5
    assert result.total_count == 15
def test_calculate_trace_filter_correlation_empty_experiment_list(store):
    """An empty experiment list yields all-zero counts and a NaN NPMI."""
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[],
        filter_string1='tags.has_error = "true"',
        filter_string2='tags.primary_span_type = "TOOL"',
    )
    for count_field in ("total_count", "filter1_count", "filter2_count", "joint_count"):
        assert getattr(result, count_field) == 0
    assert math.isnan(result.npmi)
def test_calculate_trace_filter_correlation_with_base_filter(store) -> None:
    """base_filter restricts the trace population before correlation is computed."""
    exp_id = _create_experiments(store, "base_filter_test")
    # Early traces fall outside the base_filter time window and must be ignored.
    early_time = 1000000000000
    for i in range(5):
        trace_info = TraceInfo(
            trace_id=f"tr-early-{i}",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=early_time + i,
            execution_duration=100,
            state=TraceState.OK,
            tags={
                "has_error": "true" if i < 3 else "false",
                "has_tool": "true" if i % 2 == 0 else "false",
            },
        )
        store.start_trace(trace_info)
    later_time = 2000000000000
    # Create traces in the later period:
    # - 10 total traces in the time window
    # - 6 with has_error=true
    # - 4 with has_tool=true
    # - 3 with both has_error=true AND has_tool=true
    for i in range(10):
        tags = {}
        if i < 6:
            tags["has_error"] = "true"
        if i < 3 or i == 6:
            tags["has_tool"] = "true"
        trace_info = TraceInfo(
            trace_id=f"tr-later-{i}",
            trace_location=trace_location.TraceLocation.from_experiment_id(exp_id),
            request_time=later_time + i,
            execution_duration=100,
            state=TraceState.OK,
            tags=tags,
        )
        store.start_trace(trace_info)
    # Only the 10 "later" traces match this timestamp window.
    base_filter = f"timestamp_ms >= {later_time} and timestamp_ms < {later_time + 100}"
    result = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id],
        filter_string1='tags.has_error = "true"',
        filter_string2='tags.has_tool = "true"',
        base_filter=base_filter,
    )
    assert result.total_count == 10
    assert result.filter1_count == 6
    assert result.filter2_count == 4
    assert result.joint_count == 3
    # Calculate expected NPMI
    # P(error) = 6/10 = 0.6
    # P(tool) = 4/10 = 0.4
    # P(error AND tool) = 3/10 = 0.3
    # PMI = log(P(error AND tool) / (P(error) * P(tool))) = log(0.3 / (0.6 * 0.4)) = log(1.25)
    # NPMI = PMI / -log(P(error AND tool)) = log(1.25) / -log(0.3)
    p_error = 6 / 10
    p_tool = 4 / 10
    p_joint = 3 / 10
    # NOTE(review): p_joint is the constant 0.3 here, so this guard is always
    # taken; it only mirrors the NaN-avoidance of the production formula.
    if p_joint > 0:
        pmi = math.log(p_joint / (p_error * p_tool))
        npmi = pmi / -math.log(p_joint)
        assert abs(result.npmi - npmi) < 0.001
    # Without base_filter, all 15 traces (early + later) are included.
    result_no_base = store.calculate_trace_filter_correlation(
        experiment_ids=[exp_id],
        filter_string1='tags.has_error = "true"',
        filter_string2='tags.has_tool = "true"',
    )
    assert result_no_base.total_count == 15
    assert result_no_base.filter1_count == 9
    assert result_no_base.filter2_count == 7
    assert result_no_base.joint_count == 5
def test_batch_get_traces_basic(store: SqlAlchemyStore) -> None:
    """Spans round-trip through log_spans/batch_get_traces with ids and times intact."""
    experiment_id = store.create_experiment("test_batch_get_traces")
    trace_id = f"tr-{uuid.uuid4().hex}"
    root = create_test_span(
        trace_id=trace_id,
        name="root_span",
        span_id=111,
        status=trace_api.StatusCode.OK,
        start_ns=1_000_000_000,
        end_ns=2_000_000_000,
        trace_num=12345,
    )
    child = create_test_span(
        trace_id=trace_id,
        name="child_span",
        span_id=222,
        parent_id=111,
        status=trace_api.StatusCode.UNSET,
        start_ns=1_500_000_000,
        end_ns=1_800_000_000,
        trace_num=12345,
    )
    store.log_spans(experiment_id, [root, child])
    traces = store.batch_get_traces([trace_id])
    assert len(traces) == 1
    loaded = traces[0].data.spans
    assert len(loaded) == 2
    by_name = {s.name: s for s in loaded}
    root_span = by_name["root_span"]
    child_span = by_name["child_span"]
    # Numeric span ids come back as zero-padded hex: 111 -> ...6f, 222 -> ...de.
    assert root_span.trace_id == trace_id
    assert root_span.span_id == "000000000000006f"
    assert root_span.parent_id is None
    assert root_span.start_time_ns == 1_000_000_000
    assert root_span.end_time_ns == 2_000_000_000
    assert child_span.trace_id == trace_id
    assert child_span.span_id == "00000000000000de"
    assert child_span.parent_id == "000000000000006f"
    assert child_span.start_time_ns == 1_500_000_000
    assert child_span.end_time_ns == 1_800_000_000
def test_batch_get_traces_empty_trace(store: SqlAlchemyStore) -> None:
    """Requesting a trace id with no stored data returns an empty list."""
    unknown_trace_id = f"tr-{uuid.uuid4().hex}"
    assert store.batch_get_traces([unknown_trace_id]) == []
def test_batch_get_traces_ordering(store: SqlAlchemyStore) -> None:
    """Loaded spans are ordered by start time, not by insertion order."""
    experiment_id = store.create_experiment("test_load_spans_ordering")
    trace_id = f"tr-{uuid.uuid4().hex}"
    # Deliberately log out of chronological order: second, first, third.
    span_specs = [
        ("second_span", 222, 2_000_000_000, 3_000_000_000),
        ("first_span", 111, 1_000_000_000, 2_000_000_000),
        ("third_span", 333, 3_000_000_000, 4_000_000_000),
    ]
    spans = [
        create_test_span(
            trace_id=trace_id,
            name=name,
            span_id=sid,
            start_ns=start,
            end_ns=end,
            trace_num=12345,
        )
        for name, sid, start, end in span_specs
    ]
    store.log_spans(experiment_id, spans)
    traces = store.batch_get_traces([trace_id])
    assert len(traces) == 1
    loaded = traces[0].data.spans
    assert len(loaded) == 3
    assert [s.name for s in loaded] == ["first_span", "second_span", "third_span"]
    assert [s.start_time_ns for s in loaded] == [
        1_000_000_000,
        2_000_000_000,
        3_000_000_000,
    ]
def test_batch_get_traces_with_complex_attributes(store: SqlAlchemyStore) -> None:
    """Error status and mixed-type span attributes survive a storage round trip."""
    experiment_id = store.create_experiment("test_load_spans_complex")
    trace_id = f"tr-{uuid.uuid4().hex}"
    otel_span = create_test_otel_span(
        trace_id=trace_id,
        name="complex_span",
        status_code=trace_api.StatusCode.ERROR,
        status_description="Test error",
        start_time=1_000_000_000,
        end_time=2_000_000_000,
        trace_id_num=12345,
        span_id_num=111,
    )
    # Mix string and integer attribute values alongside the trace-request id.
    otel_span._attributes = {
        "llm.model_name": "gpt-4",
        "llm.input_tokens": 100,
        "llm.output_tokens": 50,
        "custom.key": "custom_value",
        "mlflow.traceRequestId": json.dumps(trace_id, cls=TraceJSONEncoder),
    }
    store.log_spans(experiment_id, [create_mlflow_span(otel_span, trace_id, "LLM")])
    traces = store.batch_get_traces([trace_id])
    assert len(traces) == 1
    loaded = traces[0].data.spans
    assert len(loaded) == 1
    loaded_span = loaded[0]
    assert loaded_span.status.status_code == "ERROR"
    assert loaded_span.status.description == "Test error"
    # Both the string- and int-valued attributes should be preserved as-is.
    assert loaded_span.attributes.get("llm.model_name") == "gpt-4"
    assert loaded_span.attributes.get("llm.input_tokens") == 100
    assert loaded_span.attributes.get("llm.output_tokens") == 50
    assert loaded_span.attributes.get("custom.key") == "custom_value"
def test_batch_get_traces_multiple_traces(store: SqlAlchemyStore) -> None:
    """batch_get_traces returns each requested trace with only its own spans."""
    experiment_id = store.create_experiment("test_load_spans_multiple")
    trace_id_1 = f"tr-{uuid.uuid4().hex}"
    trace_id_2 = f"tr-{uuid.uuid4().hex}"
    spans_trace_1 = [
        create_test_span(
            trace_id=trace_id_1,
            name="trace1_span1",
            span_id=111,
            trace_num=12345,
        ),
        create_test_span(
            trace_id=trace_id_1,
            name="trace1_span2",
            span_id=112,
            trace_num=12345,
        ),
    ]
    spans_trace_2 = [
        create_test_span(
            trace_id=trace_id_2,
            name="trace2_span1",
            span_id=221,
            trace_num=67890,
        ),
    ]
    store.log_spans(experiment_id, spans_trace_1)
    store.log_spans(experiment_id, spans_trace_2)
    traces = store.batch_get_traces([trace_id_1, trace_id_2])
    assert len(traces) == 2
    # Find traces by ID since order might not be guaranteed
    trace_1 = next(t for t in traces if t.info.trace_id == trace_id_1)
    trace_2 = next(t for t in traces if t.info.trace_id == trace_id_2)
    loaded_spans_1 = trace_1.data.spans
    loaded_spans_2 = trace_2.data.spans
    assert len(loaded_spans_1) == 2
    assert len(loaded_spans_2) == 1
    # BUG FIX: the original compared each loaded span list's dicts against a copy
    # of themselves, which is vacuously true. Verify the actual span contents:
    # each trace must contain exactly its own spans, not the other trace's.
    assert {s.name for s in loaded_spans_1} == {"trace1_span1", "trace1_span2"}
    assert all(s.trace_id == trace_id_1 for s in loaded_spans_1)
    assert loaded_spans_2[0].name == "trace2_span1"
    assert loaded_spans_2[0].trace_id == trace_id_2
def test_batch_get_traces_preserves_json_serialization(store: SqlAlchemyStore) -> None:
    """Core span fields are identical after storage and reload."""
    experiment_id = store.create_experiment("test_load_spans_json")
    trace_id = f"tr-{uuid.uuid4().hex}"
    original_span = create_test_span(
        trace_id=trace_id,
        name="json_test_span",
        span_id=111,
        status=trace_api.StatusCode.OK,
        start_ns=1_000_000_000,
        end_ns=2_000_000_000,
        trace_num=12345,
    )
    store.log_spans(experiment_id, [original_span])
    traces = store.batch_get_traces([trace_id])
    assert len(traces) == 1
    loaded = traces[0].data.spans
    assert len(loaded) == 1
    loaded_span = loaded[0]
    # Every identifying and timing field must match the span that was logged.
    for field in ("name", "trace_id", "span_id", "start_time_ns", "end_time_ns"):
        assert getattr(loaded_span, field) == getattr(original_span, field)
def test_batch_get_traces_integration_with_trace_handler(store: SqlAlchemyStore) -> None:
    """log_spans marks the trace's spans location and the spans load back."""
    experiment_id = store.create_experiment("test_integration")
    trace_id = f"tr-{uuid.uuid4().hex}"
    span = create_test_span(
        trace_id=trace_id,
        name="integration_span",
        span_id=111,
        status=trace_api.StatusCode.OK,
        trace_num=12345,
    )
    store.log_spans(experiment_id, [span])
    # The spans-location tag records that payloads live in the tracking store.
    trace_info = store.get_trace_info(trace_id)
    assert trace_info.tags.get(TraceTagKey.SPANS_LOCATION) == SpansLocation.TRACKING_STORE.value
    traces = store.batch_get_traces([trace_id])
    assert len(traces) == 1
    loaded = traces[0].data.spans
    assert len(loaded) == 1
    assert loaded[0].name == "integration_span"
def test_batch_get_traces_with_incomplete_trace(store: SqlAlchemyStore) -> None:
    """Traces whose size stats claim more spans than stored are filtered out."""
    experiment_id = store.create_experiment("test_incomplete_trace")

    def _log_single_span(tid):
        # Log exactly one OK span named "incomplete_span" for the given trace.
        store.log_spans(
            experiment_id,
            [
                create_test_span(
                    trace_id=tid,
                    name="incomplete_span",
                    span_id=111,
                    status=trace_api.StatusCode.OK,
                    trace_num=12345,
                )
            ],
        )

    trace_id = f"tr-{uuid.uuid4().hex}"
    _log_single_span(trace_id)
    # Metadata claims 2 spans but only one was logged -> trace is incomplete.
    store.start_trace(
        TraceInfo(
            trace_id=trace_id,
            trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
            request_time=1234,
            execution_duration=100,
            state=TraceState.OK,
            trace_metadata={
                TraceMetadataKey.SIZE_STATS: json.dumps(
                    {
                        TraceSizeStatsKey.NUM_SPANS: 2,
                    }
                ),
            },
        )
    )
    assert store.batch_get_traces([trace_id]) == []
    # A second, complete trace should still come back even when requested
    # together with the incomplete one.
    trace_id_2 = f"tr-{uuid.uuid4().hex}"
    _log_single_span(trace_id_2)
    store.start_trace(
        TraceInfo(
            trace_id=trace_id_2,
            trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
            request_time=1234,
            execution_duration=100,
            state=TraceState.OK,
        )
    )
    traces = store.batch_get_traces([trace_id, trace_id_2])
    assert len(traces) == 1
    complete = traces[0]
    assert complete.info.trace_id == trace_id_2
    assert complete.info.status == TraceState.OK
    assert len(complete.data.spans) == 1
    assert complete.data.spans[0].name == "incomplete_span"
    assert complete.data.spans[0].status.status_code == "OK"
def test_log_spans_token_usage(store: SqlAlchemyStore) -> None:
    """CHAT_USAGE span attributes surface as trace-level token usage."""
    experiment_id = store.create_experiment("test_log_spans_token_usage")
    trace_id = f"tr-{uuid.uuid4().hex}"
    expected_usage = {
        "input_tokens": 100,
        "output_tokens": 50,
        "total_tokens": 150,
    }
    otel_span = create_test_otel_span(
        trace_id=trace_id,
        name="llm_call",
        start_time=1_000_000_000,
        end_time=2_000_000_000,
        trace_id_num=12345,
        span_id_num=111,
    )
    otel_span._attributes = {
        "mlflow.traceRequestId": json.dumps(trace_id, cls=TraceJSONEncoder),
        SpanAttributeKey.CHAT_USAGE: json.dumps(expected_usage),
    }
    store.log_spans(experiment_id, [create_mlflow_span(otel_span, trace_id, "LLM")])
    # Token usage should be visible on the bare TraceInfo...
    assert store.get_trace_info(trace_id).token_usage == expected_usage
    # ...and on the fully loaded trace as well.
    traces = store.batch_get_traces([trace_id])
    assert len(traces) == 1
    loaded_usage = traces[0].info.token_usage
    assert loaded_usage is not None
    assert loaded_usage == expected_usage
def test_log_spans_update_token_usage_incrementally(store: SqlAlchemyStore) -> None:
    """Token usage accumulates across successive log_spans calls on one trace."""
    experiment_id = store.create_experiment("test_log_spans_update_token_usage")
    trace_id = f"tr-{uuid.uuid4().hex}"

    def _log_llm_span(name, start, end, span_num, usage):
        # Log one LLM span carrying the given CHAT_USAGE payload.
        otel_span = create_test_otel_span(
            trace_id=trace_id,
            name=name,
            start_time=start,
            end_time=end,
            trace_id_num=12345,
            span_id_num=span_num,
        )
        otel_span._attributes = {
            "mlflow.traceRequestId": json.dumps(trace_id, cls=TraceJSONEncoder),
            SpanAttributeKey.CHAT_USAGE: json.dumps(usage),
        }
        store.log_spans(experiment_id, [create_mlflow_span(otel_span, trace_id, "LLM")])

    def _current_usage():
        traces = store.batch_get_traces([trace_id])
        assert len(traces) == 1
        return traces[0].info.token_usage

    _log_llm_span(
        "first_llm_call",
        1_000_000_000,
        2_000_000_000,
        111,
        {"input_tokens": 100, "output_tokens": 50, "total_tokens": 150},
    )
    assert _current_usage() == {"input_tokens": 100, "output_tokens": 50, "total_tokens": 150}
    # A second span's usage is added to the running totals, not replaced.
    _log_llm_span(
        "second_llm_call",
        3_000_000_000,
        4_000_000_000,
        222,
        {"input_tokens": 200, "output_tokens": 75, "total_tokens": 275},
    )
    assert _current_usage() == {"input_tokens": 300, "output_tokens": 125, "total_tokens": 425}
def test_batch_get_traces_token_usage(store: SqlAlchemyStore) -> None:
experiment_id = store.create_experiment("test_batch_get_traces_token_usage")
trace_id_1 = f"tr-{uuid.uuid4().hex}"
otel_span1 = create_test_otel_span(
trace_id=trace_id_1,
name="trace1_span",
start_time=1_000_000_000,
end_time=2_000_000_000,
trace_id_num=12345,
span_id_num=111,
)
otel_span1._attributes = {
"mlflow.traceRequestId": json.dumps(trace_id_1, cls=TraceJSONEncoder),
SpanAttributeKey.CHAT_USAGE: json.dumps(
{
"input_tokens": 100,
"output_tokens": 50,
"total_tokens": 150,
}
),
}
span1 = create_mlflow_span(otel_span1, trace_id_1, "LLM")
store.log_spans(experiment_id, [span1])
trace_id_2 = f"tr-{uuid.uuid4().hex}"
otel_span2 = create_test_otel_span(
trace_id=trace_id_2,
name="trace2_span",
start_time=3_000_000_000,
end_time=4_000_000_000,
trace_id_num=67890,
span_id_num=222,
)
otel_span2._attributes = {
"mlflow.traceRequestId": json.dumps(trace_id_2, cls=TraceJSONEncoder),
SpanAttributeKey.CHAT_USAGE: json.dumps(
{
"input_tokens": 200,
"output_tokens": 100,
"total_tokens": 300,
}
),
}
span2 = create_mlflow_span(otel_span2, trace_id_2, "LLM")
store.log_spans(experiment_id, [span2])
trace_id_3 = f"tr-{uuid.uuid4().hex}"
otel_span3 = create_test_otel_span(
trace_id=trace_id_3,
name="trace3_span",
start_time=5_000_000_000,
end_time=6_000_000_000,
trace_id_num=11111,
span_id_num=333,
)
otel_span3._attributes = {
"mlflow.traceRequestId": json.dumps(trace_id_3, cls=TraceJSONEncoder),
}
span3 = create_mlflow_span(otel_span3, trace_id_3, "UNKNOWN")
store.log_spans(experiment_id, [span3])
trace_infos = [
store.get_trace_info(trace_id) for trace_id in [trace_id_1, trace_id_2, trace_id_3]
]
assert trace_infos[0].token_usage == {
"input_tokens": 100,
"output_tokens": 50,
"total_tokens": 150,
}
assert trace_infos[1].token_usage == {
"input_tokens": 200,
"output_tokens": 100,
"total_tokens": 300,
}
assert trace_infos[2].token_usage is None
traces = store.batch_get_traces([trace_id_1, trace_id_2, trace_id_3])
assert len(traces) == 3
traces_by_id = {trace.info.trace_id: trace for trace in traces}
trace1 = traces_by_id[trace_id_1]
assert trace1.info.token_usage is not None
assert trace1.info.token_usage["input_tokens"] == 100
assert trace1.info.token_usage["output_tokens"] == 50
assert trace1.info.token_usage["total_tokens"] == 150
trace2 = traces_by_id[trace_id_2]
assert trace2.info.token_usage is not None
assert trace2.info.token_usage["input_tokens"] == 200
assert trace2.info.token_usage["output_tokens"] == 100
assert trace2.info.token_usage["total_tokens"] == 300
trace3 = traces_by_id[trace_id_3]
assert trace3.info.token_usage is None
def test_get_trace_basic(store: SqlAlchemyStore) -> None:
experiment_id = store.create_experiment("test_get_trace")
trace_id = f"tr-{uuid.uuid4().hex}"
spans = [
create_test_span(
trace_id=trace_id,
name="root_span",
span_id=111,
status=trace_api.StatusCode.OK,
start_ns=1_000_000_000,
end_ns=2_000_000_000,
trace_num=12345,
),
create_test_span(
trace_id=trace_id,
name="child_span",
span_id=222,
parent_id=111,
status=trace_api.StatusCode.UNSET,
start_ns=1_500_000_000,
end_ns=1_800_000_000,
trace_num=12345,
),
]
store.log_spans(experiment_id, spans)
trace = store.get_trace(trace_id)
assert trace is not None
loaded_spans = trace.data.spans
assert len(loaded_spans) == 2
root_span = next(s for s in loaded_spans if s.name == "root_span")
child_span = next(s for s in loaded_spans if s.name == "child_span")
assert root_span.trace_id == trace_id
assert root_span.span_id == "000000000000006f"
assert root_span.parent_id is None
assert root_span.start_time_ns == 1_000_000_000
assert root_span.end_time_ns == 2_000_000_000
assert child_span.trace_id == trace_id
assert child_span.span_id == "00000000000000de"
assert child_span.parent_id == "000000000000006f"
assert child_span.start_time_ns == 1_500_000_000
assert child_span.end_time_ns == 1_800_000_000
def test_get_trace_not_found(store: SqlAlchemyStore) -> None:
trace_id = f"tr-{uuid.uuid4().hex}"
with pytest.raises(MlflowException, match=f"Trace with ID {trace_id} is not found."):
store.get_trace(trace_id)
@pytest.mark.parametrize("allow_partial", [True, False])
def test_get_trace_with_partial_trace(store: SqlAlchemyStore, allow_partial: bool) -> None:
experiment_id = store.create_experiment("test_partial_trace")
trace_id = f"tr-{uuid.uuid4().hex}"
# Log only 1 span but indicate trace should have 2 spans
spans = [
create_test_span(
trace_id=trace_id,
name="span_1",
span_id=111,
status=trace_api.StatusCode.OK,
trace_num=12345,
),
]
store.log_spans(experiment_id, spans)
store.start_trace(
TraceInfo(
trace_id=trace_id,
trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
request_time=1234,
execution_duration=100,
state=TraceState.OK,
trace_metadata={
TraceMetadataKey.SIZE_STATS: json.dumps(
{
TraceSizeStatsKey.NUM_SPANS: 2, # Expecting 2 spans
}
),
},
)
)
if allow_partial:
trace = store.get_trace(trace_id, allow_partial=allow_partial)
assert trace is not None
assert len(trace.data.spans) == 1
assert trace.data.spans[0].name == "span_1"
else:
with pytest.raises(
MlflowException,
match=f"Trace with ID {trace_id} is not fully exported yet",
):
store.get_trace(trace_id, allow_partial=allow_partial)
@pytest.mark.parametrize("allow_partial", [True, False])
def test_get_trace_with_complete_trace(store: SqlAlchemyStore, allow_partial: bool) -> None:
experiment_id = store.create_experiment("test_complete_trace")
trace_id = f"tr-{uuid.uuid4().hex}"
# Log 2 spans matching the expected count
spans = [
create_test_span(
trace_id=trace_id,
name="span_1",
span_id=111,
status=trace_api.StatusCode.OK,
trace_num=12345,
),
create_test_span(
trace_id=trace_id,
name="span_2",
span_id=222,
parent_id=111,
status=trace_api.StatusCode.OK,
trace_num=12345,
),
]
store.log_spans(experiment_id, spans)
store.start_trace(
TraceInfo(
trace_id=trace_id,
trace_location=trace_location.TraceLocation.from_experiment_id(experiment_id),
request_time=1234,
execution_duration=100,
state=TraceState.OK,
trace_metadata={
TraceMetadataKey.SIZE_STATS: json.dumps(
{
TraceSizeStatsKey.NUM_SPANS: 2, # Expecting 2 spans
}
),
},
)
)
# should always return the trace
trace = store.get_trace(trace_id, allow_partial=allow_partial)
assert trace is not None
assert len(trace.data.spans) == 2
| DummyDataset |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 238326,
"end": 238827
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of FollowOrganization"""
__schema__ = github_schema
__field_names__ = ("organization_id", "client_mutation_id")
organization_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="organizationId")
"""ID of the organization to follow."""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| FollowOrganizationInput |
python | tornadoweb__tornado | tornado/test/httpserver_test.py | {
"start": 45625,
"end": 47028
} | class ____(AsyncHTTPTestCase):
def get_app(self):
return Application([("/", HelloWorldRequestHandler)])
def get_httpserver_options(self):
return dict(idle_connection_timeout=0.1)
def setUp(self):
super().setUp()
self.streams = [] # type: List[IOStream]
def tearDown(self):
super().tearDown()
for stream in self.streams:
stream.close()
@gen.coroutine
def connect(self):
stream = IOStream(socket.socket())
yield stream.connect(("127.0.0.1", self.get_http_port()))
self.streams.append(stream)
raise gen.Return(stream)
@gen_test
def test_unused_connection(self):
stream = yield self.connect()
event = Event()
stream.set_close_callback(event.set)
yield event.wait()
@gen_test
def test_idle_after_use(self):
stream = yield self.connect()
event = Event()
stream.set_close_callback(event.set)
# Use the connection twice to make sure keep-alives are working
for i in range(2):
stream.write(b"GET / HTTP/1.1\r\nHost: 127.0.0.1\r\n\r\n")
yield stream.read_until(b"\r\n\r\n")
data = yield stream.read_bytes(11)
self.assertEqual(data, b"Hello world")
# Now let the timeout trigger and close the connection.
yield event.wait()
| IdleTimeoutTest |
python | django__django | tests/admin_utils/models.py | {
"start": 244,
"end": 872
} | class ____(models.Model):
"""
A simple Article model for testing
"""
site = models.ForeignKey(Site, models.CASCADE, related_name="admin_articles")
title = models.CharField(max_length=100)
hist = models.CharField(
max_length=100,
verbose_name=_("History"),
help_text=_("History help text"),
)
created = models.DateTimeField(null=True)
def __str__(self):
return self.title
def test_from_model(self):
return "nothing"
@admin.display(description="not What you Expect")
def test_from_model_with_override(self):
return "nothing"
| Article |
python | Lightning-AI__lightning | examples/pytorch/domain_templates/semantic_segmentation.py | {
"start": 8675,
"end": 10004
} | class ____(nn.Module):
"""Upsampling (by either bilinear interpolation or transpose convolutions) followed by concatenation of feature map
from contracting path, followed by double 3x3 convolution.
>>> Up(8, 4) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
Up(
(upsample): ConvTranspose2d(8, 4, kernel_size=(2, 2), stride=(2, 2))
(conv): DoubleConv(
(net): Sequential(...)
)
)
"""
def __init__(self, in_ch: int, out_ch: int, bilinear: bool = False):
super().__init__()
self.upsample = None
if bilinear:
self.upsample = nn.Sequential(
nn.Upsample(scale_factor=2, mode="bilinear", align_corners=True),
nn.Conv2d(in_ch, in_ch // 2, kernel_size=1),
)
else:
self.upsample = nn.ConvTranspose2d(in_ch, in_ch // 2, kernel_size=2, stride=2)
self.conv = DoubleConv(in_ch, out_ch)
def forward(self, x1, x2):
x1 = self.upsample(x1)
# Pad x1 to the size of x2
diff_h = x2.shape[2] - x1.shape[2]
diff_w = x2.shape[3] - x1.shape[3]
x1 = F.pad(x1, [diff_w // 2, diff_w - diff_w // 2, diff_h // 2, diff_h - diff_h // 2])
# Concatenate along the channels axis
x = torch.cat([x2, x1], dim=1)
return self.conv(x)
| Up |
python | django__django | tests/queries/tests.py | {
"start": 160293,
"end": 162146
} | class ____(TestCase):
def test_exclude_many_to_many(self):
i_extra = Identifier.objects.create(name="extra")
i_program = Identifier.objects.create(name="program")
program = Program.objects.create(identifier=i_program)
i_channel = Identifier.objects.create(name="channel")
channel = Channel.objects.create(identifier=i_channel)
channel.programs.add(program)
# channel contains 'program1', so all Identifiers except that one
# should be returned
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=channel).order_by("name"),
[i_channel, i_extra],
)
self.assertSequenceEqual(
Identifier.objects.exclude(program__channel=None).order_by("name"),
[i_program],
)
def test_ticket_12823(self):
pg3 = Page.objects.create(text="pg3")
pg2 = Page.objects.create(text="pg2")
pg1 = Page.objects.create(text="pg1")
pa1 = Paragraph.objects.create(text="pa1")
pa1.page.set([pg1, pg2])
pa2 = Paragraph.objects.create(text="pa2")
pa2.page.set([pg2, pg3])
pa3 = Paragraph.objects.create(text="pa3")
ch1 = Chapter.objects.create(title="ch1", paragraph=pa1)
ch2 = Chapter.objects.create(title="ch2", paragraph=pa2)
ch3 = Chapter.objects.create(title="ch3", paragraph=pa3)
b1 = Book.objects.create(title="b1", chapter=ch1)
b2 = Book.objects.create(title="b2", chapter=ch2)
b3 = Book.objects.create(title="b3", chapter=ch3)
q = Book.objects.exclude(chapter__paragraph__page__text="pg1")
self.assertNotIn("IS NOT NULL", str(q.query))
self.assertEqual(len(q), 2)
self.assertNotIn(b1, q)
self.assertIn(b2, q)
self.assertIn(b3, q)
| ManyToManyExcludeTest |
python | arrow-py__arrow | tests/test_locales.py | {
"start": 24814,
"end": 25491
} | class ____:
def test_format_timeframe(self):
assert self.locale._format_timeframe("hours", 2) == "2 घंटे"
assert self.locale._format_timeframe("hour", 0) == "एक घंटा"
def test_format_relative_now(self):
result = self.locale._format_relative("अभी", "now", 0)
assert result == "अभी"
def test_format_relative_past(self):
result = self.locale._format_relative("एक घंटा", "hour", 1)
assert result == "एक घंटा बाद"
def test_format_relative_future(self):
result = self.locale._format_relative("एक घंटा", "hour", -1)
assert result == "एक घंटा पहले"
@pytest.mark.usefixtures("lang_locale")
| TestHindiLocale |
python | scipy__scipy | doc/source/tutorial/stats/plots/hinv_plot.py | {
"start": 170,
"end": 752
} | class ____:
def pdf(self, x):
return 1/np.sqrt(2*np.pi) * np.exp(-x**2 / 2)
def cdf(self, x):
return ndtr(x)
dist = StandardNormal()
urng = np.random.default_rng()
rng = NumericalInverseHermite(dist, random_state=urng)
rvs = rng.rvs(10000)
x = np.linspace(rvs.min()-0.1, rvs.max()+0.1, 1000)
fx = norm.pdf(x)
plt.plot(x, fx, 'r-', lw=2, label='true distribution')
plt.hist(rvs, bins=20, density=True, alpha=0.8, label='random variates')
plt.xlabel('x')
plt.ylabel('PDF(x)')
plt.title('Numerical Inverse Hermite Samples')
plt.legend()
plt.show()
| StandardNormal |
python | tensorflow__tensorflow | tensorflow/dtensor/python/tests/device_test.py | {
"start": 24688,
"end": 28487
} | class ____(test_util.DTensorBaseTest):
def setUp(self):
super().setUp()
self.skipForDeviceType(["TPU"],
"all tests require 8 TPU cores.",
unless_device_count_equals_to=8)
global_ids = test_util.create_device_ids_array((2, 4))
local_ids = np.ravel(global_ids).tolist()
mesh_dict = { # pylint: disable=g-complex-comprehension
device: Mesh(
[_BATCH_DIM, _MESH_DIM_X],
global_ids,
local_ids,
test_util.create_device_list((2, 4), device),
)
for device in ("CPU", "GPU", "TPU")
}
self.mesh = self.configTestMesh(mesh_dict)
@parameterized.named_parameters(
test_util.product([("Replicated", "replicated"), ("Sharded", "batch")], [(
"RankTwo",
[2, 4],
), (
"RankThree",
[2, 2, 3],
)]))
def testPackUnpackReturnsCorrectValuesAndDevices(self, sharding, shape):
a = sparse_ops.from_dense(
stateless_random_ops.stateless_random_uniform(shape, seed=[0, 1])
)
b = sparse_ops.from_dense(
stateless_random_ops.stateless_random_uniform(shape, seed=[0, 1])
)
if sharding == "replicated":
layout = Layout(["unsharded"] * len(shape), self.mesh)
input_tensor = 8 * [a]
expected = 8 * [sparse_ops.sparse_tensor_to_dense(a)]
expected_shape = shape
else:
layout = Layout([_BATCH_DIM] + ["unsharded"] * (len(shape) - 1),
self.mesh)
input_tensor = 4 * [a] + 4 * [b]
expected = 4 * [sparse_ops.sparse_tensor_to_dense(a)] + 4 * [
sparse_ops.sparse_tensor_to_dense(b)
]
expected_shape = [shape[0] * 2] + shape[1:]
with ops.device_v2(api._dtensor_device().name):
packed_tensor = api.pack(input_tensor, layout)
api.check_layout(packed_tensor, layout)
unpacked_tensor = api.unpack(packed_tensor)
got = [sparse_ops.sparse_tensor_to_dense(t) for t in unpacked_tensor]
# Check shape of packed tensor.
self.assertAllEqual(expected_shape, packed_tensor.shape)
# Check values.
self.assertAllClose(expected, got)
# Check devices.
self.assertAllEqual(self.mesh.local_devices(),
[t.indices.device for t in unpacked_tensor])
self.assertAllEqual(self.mesh.local_devices(),
[t.values.device for t in unpacked_tensor])
def testPackingMixedTensorTypesRaisesTypeError(self):
tensor = stateless_random_ops.stateless_random_uniform([2, 4], seed=[0, 1])
sparse_tensor = sparse_ops.from_dense(tensor)
with ops.device_v2(api.device_name()):
with self.assertRaisesRegex(TypeError,
"Cannot Pack SparseTensors with Tensors."):
api.pack(
4 * [tensor] + 4 * [sparse_tensor],
Layout.replicated(self.mesh, rank=2),
)
def testPackingTensorsWithDifferentShapesRaisesTypeError(self):
a = sparse_ops.from_dense(
stateless_random_ops.stateless_random_uniform([2, 2], seed=[0, 1])
)
b = sparse_ops.from_dense(
stateless_random_ops.stateless_random_uniform([4, 4], seed=[0, 1])
)
with ops.device_v2(api.device_name()):
with self.assertRaisesRegex(
TypeError, "All input SparseTensors to Pack must be same shape."):
api.pack(4 * [a] + 4 * [b], Layout.replicated(self.mesh, rank=2))
def testPackingSparseTensorsReturnsCorrectLayout(self):
layout = Layout.replicated(self.mesh, 2)
a = sparse_ops.from_dense(
stateless_random_ops.stateless_random_uniform([16, 16], seed=[0, 1])
)
with ops.device_v2(api.device_name()):
api.check_layout(api.pack(8 * [a], layout), layout)
if __name__ == "__main__":
test.main()
| DTensorSparse |
python | apache__airflow | providers/oracle/src/airflow/providers/oracle/operators/oracle.py | {
"start": 1124,
"end": 2726
} | class ____(BaseOperator):
"""
Executes stored procedure in a specific Oracle database.
:param procedure: name of stored procedure to call (templated)
:param oracle_conn_id: The :ref:`Oracle connection id <howto/connection:oracle>`
reference to a specific Oracle database.
:param parameters: (optional, templated) the parameters provided in the call
If *do_xcom_push* is *True*, the numeric exit code emitted by
the database is pushed to XCom under key ``ORA`` in case of failure.
"""
template_fields: Sequence[str] = (
"parameters",
"procedure",
)
ui_color = "#ededed"
def __init__(
self,
*,
procedure: str,
oracle_conn_id: str = "oracle_default",
parameters: dict | list | None = None,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.oracle_conn_id = oracle_conn_id
self.procedure = procedure
self.parameters = parameters
def execute(self, context: Context):
self.log.info("Executing: %s", self.procedure)
hook = OracleHook(oracle_conn_id=self.oracle_conn_id)
try:
return hook.callproc(self.procedure, autocommit=True, parameters=self.parameters)
except oracledb.DatabaseError as e:
if not self.do_xcom_push or not context:
raise
ti = context["ti"]
code_match = re.search("^ORA-(\\d+):.+", str(e))
if code_match:
ti.xcom_push(key="ORA", value=code_match.group(1))
raise
| OracleStoredProcedureOperator |
python | google__python-fire | fire/console/text.py | {
"start": 1548,
"end": 2313
} | class ____(object):
"""Text with a semantic type that will be used for styling."""
def __init__(self, texts, text_type=None):
"""String of text and a corresponding type to use to style that text.
Args:
texts: (list[str]), list of strs or TypedText objects
that should be styled using text_type.
text_type: (TextTypes), the semantic type of the text that
will be used to style text.
"""
self.texts = texts
self.text_type = text_type
def __len__(self):
length = 0
for text in self.texts:
length += len(text)
return length
def __add__(self, other):
texts = [self, other]
return TypedText(texts)
def __radd__(self, other):
texts = [other, self]
return TypedText(texts)
| TypedText |
python | PyCQA__pylint | tests/functional/n/no/no_classmethod_decorator.py | {
"start": 221,
"end": 812
} | class ____:
"""Some class"""
def __init__(self):
pass
def cmethod(cls):
"""class method-to-be"""
cmethod = classmethod(cmethod) # [no-classmethod-decorator]
if True:
cmethod = classmethod(cmethod) # [no-classmethod-decorator]
@classmethod
def my_second_method(cls):
"""correct class method definition"""
def other_method(cls):
"""some method"""
cmethod2 = classmethod(other_method) # [no-classmethod-decorator]
def helloworld():
"""says hello"""
MyClass.new_class_method = classmethod(helloworld)
| MyClass |
python | django__django | django/contrib/gis/gdal/geomtype.py | {
"start": 58,
"end": 4582
} | class ____:
"Encapsulate OGR Geometry Types."
wkb25bit = -2147483648
# Dictionary of acceptable OGRwkbGeometryType s and their string names.
_types = {
0: "Unknown",
1: "Point",
2: "LineString",
3: "Polygon",
4: "MultiPoint",
5: "MultiLineString",
6: "MultiPolygon",
7: "GeometryCollection",
8: "CircularString",
9: "CompoundCurve",
10: "CurvePolygon",
11: "MultiCurve",
12: "MultiSurface",
15: "PolyhedralSurface",
16: "TIN",
17: "Triangle",
100: "None",
101: "LinearRing",
102: "PointZ",
1008: "CircularStringZ",
1009: "CompoundCurveZ",
1010: "CurvePolygonZ",
1011: "MultiCurveZ",
1012: "MultiSurfaceZ",
1013: "CurveZ",
1014: "SurfaceZ",
1015: "PolyhedralSurfaceZ",
1016: "TINZ",
1017: "TriangleZ",
2001: "PointM",
2002: "LineStringM",
2003: "PolygonM",
2004: "MultiPointM",
2005: "MultiLineStringM",
2006: "MultiPolygonM",
2007: "GeometryCollectionM",
2008: "CircularStringM",
2009: "CompoundCurveM",
2010: "CurvePolygonM",
2011: "MultiCurveM",
2012: "MultiSurfaceM",
2015: "PolyhedralSurfaceM",
2016: "TINM",
2017: "TriangleM",
3001: "PointZM",
3002: "LineStringZM",
3003: "PolygonZM",
3004: "MultiPointZM",
3005: "MultiLineStringZM",
3006: "MultiPolygonZM",
3007: "GeometryCollectionZM",
3008: "CircularStringZM",
3009: "CompoundCurveZM",
3010: "CurvePolygonZM",
3011: "MultiCurveZM",
3012: "MultiSurfaceZM",
3015: "PolyhedralSurfaceZM",
3016: "TINZM",
3017: "TriangleZM",
1 + wkb25bit: "Point25D",
2 + wkb25bit: "LineString25D",
3 + wkb25bit: "Polygon25D",
4 + wkb25bit: "MultiPoint25D",
5 + wkb25bit: "MultiLineString25D",
6 + wkb25bit: "MultiPolygon25D",
7 + wkb25bit: "GeometryCollection25D",
}
# Reverse type dictionary, keyed by lowercase of the name.
_str_types = {v.lower(): k for k, v in _types.items()}
def __init__(self, type_input):
"Figure out the correct OGR Type based upon the input."
if isinstance(type_input, OGRGeomType):
num = type_input.num
elif isinstance(type_input, str):
type_input = type_input.lower()
if type_input == "geometry":
type_input = "unknown"
num = self._str_types.get(type_input)
if num is None:
raise GDALException('Invalid OGR String Type "%s"' % type_input)
elif isinstance(type_input, int):
if type_input not in self._types:
raise GDALException("Invalid OGR Integer Type: %d" % type_input)
num = type_input
else:
raise TypeError("Invalid OGR input type given.")
# Setting the OGR geometry type number.
self.num = num
def __str__(self):
"Return the value of the name property."
return self.name
def __repr__(self):
return f"<{self.__class__.__qualname__}: {self.name}>"
def __eq__(self, other):
"""
Do an equivalence test on the OGR type with the given
other OGRGeomType, the short-hand string, or the integer.
"""
if isinstance(other, OGRGeomType):
return self.num == other.num
elif isinstance(other, str):
return self.name.lower() == other.lower()
elif isinstance(other, int):
return self.num == other
else:
return False
@property
def name(self):
"Return a short-hand string form of the OGR Geometry type."
return self._types[self.num]
@property
def django(self):
"Return the Django GeometryField for this OGR Type."
s = self.name.replace("25D", "")
if s in ("LinearRing", "None"):
return None
elif s == "Unknown":
s = "Geometry"
elif s == "PointZ":
s = "Point"
return s + "Field"
def to_multi(self):
"""
Transform Point, LineString, Polygon, and their 25D equivalents
to their Multi... counterpart.
"""
if self.name.startswith(("Point", "LineString", "Polygon")):
self.num += 3
| OGRGeomType |
python | gevent__gevent | src/gevent/tests/test__pool.py | {
"start": 17508,
"end": 18351
} | class ____(greentest.TestCase):
error_fatal = False
def test_map(self):
p = gevent.pool.Pool(3)
self.assertRaises(ZeroDivisionError, p.map, divide_by, [1, 0, 2])
def test_imap(self):
p = gevent.pool.Pool(1)
it = p.imap(divide_by, [1, 0, 2])
self.assertEqual(next(it), 1.0)
self.assertRaises(ZeroDivisionError, next, it)
self.assertEqual(next(it), 0.5)
self.assertRaises(StopIteration, next, it)
def test_imap_unordered(self):
p = gevent.pool.Pool(1)
it = p.imap_unordered(divide_by, [1, 0, 2])
self.assertEqual(next(it), 1.0)
self.assertRaises(ZeroDivisionError, next, it)
self.assertEqual(next(it), 0.5)
self.assertRaises(StopIteration, next, it)
if __name__ == '__main__':
greentest.main()
| TestErrorInHandler |
python | eventlet__eventlet | eventlet/wsgi.py | {
"start": 9973,
"end": 31203
} | class ____(BaseHTTPServer.BaseHTTPRequestHandler):
"""This class is used to handle the HTTP requests that arrive
at the server.
The handler will parse the request and the headers, then call a method
specific to the request type.
:param conn_state: The given connection status.
:param server: The server accessible by the request handler.
"""
protocol_version = 'HTTP/1.1'
minimum_chunk_size = MINIMUM_CHUNK_SIZE
capitalize_response_headers = True
reject_bad_requests = True
# https://github.com/eventlet/eventlet/issues/295
# Stdlib default is 0 (unbuffered), but then `wfile.writelines()` looses data
# so before going back to unbuffered, remove any usage of `writelines`.
wbufsize = 16 << 10
def __init__(self, conn_state, server):
self.request = conn_state[1]
self.client_address = conn_state[0]
self.conn_state = conn_state
self.server = server
# Want to allow some overrides from the server before running setup
if server.minimum_chunk_size is not None:
self.minimum_chunk_size = server.minimum_chunk_size
self.capitalize_response_headers = server.capitalize_response_headers
self.setup()
try:
self.handle()
finally:
self.finish()
def setup(self):
# overriding SocketServer.setup to correctly handle SSL.Connection objects
conn = self.connection = self.request
# TCP_QUICKACK is a better alternative to disabling Nagle's algorithm
# https://news.ycombinator.com/item?id=10607422
if getattr(socket, 'TCP_QUICKACK', None):
try:
conn.setsockopt(socket.IPPROTO_TCP, socket.TCP_QUICKACK, True)
except OSError:
pass
try:
self.rfile = conn.makefile('rb', self.rbufsize)
self.wfile = conn.makefile('wb', self.wbufsize)
except (AttributeError, NotImplementedError):
if hasattr(conn, 'send') and hasattr(conn, 'recv'):
# it's an SSL.Connection
self.rfile = socket._fileobject(conn, "rb", self.rbufsize)
self.wfile = socket._fileobject(conn, "wb", self.wbufsize)
else:
# it's a SSLObject, or a martian
raise NotImplementedError(
'''eventlet.wsgi doesn't support sockets of type {}'''.format(type(conn)))
def handle(self):
self.close_connection = True
while True:
self.handle_one_request()
if self.conn_state[2] == STATE_CLOSE:
self.close_connection = 1
else:
self.conn_state[2] = STATE_IDLE
if self.close_connection:
break
def _read_request_line(self):
if self.rfile.closed:
self.close_connection = 1
return ''
try:
sock = self.connection
if self.server.keepalive and not isinstance(self.server.keepalive, bool):
sock.settimeout(self.server.keepalive)
line = self.rfile.readline(self.server.url_length_limit)
sock.settimeout(self.server.socket_timeout)
return line
except greenio.SSL.ZeroReturnError:
pass
except OSError as e:
last_errno = support.get_errno(e)
if last_errno in BROKEN_SOCK:
self.server.log.debug('({}) connection reset by peer {!r}'.format(
self.server.pid,
self.client_address))
elif last_errno not in BAD_SOCK:
raise
return ''
def handle_one_request(self):
if self.server.max_http_version:
self.protocol_version = self.server.max_http_version
self.raw_requestline = self._read_request_line()
self.conn_state[2] = STATE_REQUEST
if not self.raw_requestline:
self.close_connection = 1
return
if len(self.raw_requestline) >= self.server.url_length_limit:
self.wfile.write(RESPONSE_414)
self.close_connection = 1
return
orig_rfile = self.rfile
try:
self.rfile = FileObjectForHeaders(self.rfile)
if not self.parse_request():
return
except HeaderLineTooLong:
self.wfile.write(
b"HTTP/1.0 400 Header Line Too Long\r\n"
b"Connection: close\r\nContent-length: 0\r\n\r\n")
self.close_connection = 1
return
except HeadersTooLarge:
self.wfile.write(
b"HTTP/1.0 400 Headers Too Large\r\n"
b"Connection: close\r\nContent-length: 0\r\n\r\n")
self.close_connection = 1
return
finally:
self.rfile = orig_rfile
content_length = self.headers.get('content-length')
transfer_encoding = self.headers.get('transfer-encoding')
if content_length is not None:
try:
if int(content_length) < 0:
raise ValueError
except ValueError:
# Negative, or not an int at all
self.wfile.write(
b"HTTP/1.0 400 Bad Request\r\n"
b"Connection: close\r\nContent-length: 0\r\n\r\n")
self.close_connection = 1
return
if transfer_encoding is not None:
if self.reject_bad_requests:
msg = b"Content-Length and Transfer-Encoding are not allowed together\n"
self.wfile.write(
b"HTTP/1.0 400 Bad Request\r\n"
b"Connection: close\r\n"
b"Content-Length: %d\r\n"
b"\r\n%s" % (len(msg), msg))
self.close_connection = 1
return
self.environ = self.get_environ()
self.application = self.server.app
try:
self.server.outstanding_requests += 1
try:
self.handle_one_response()
except OSError as e:
# Broken pipe, connection reset by peer
if support.get_errno(e) not in BROKEN_SOCK:
raise
finally:
self.server.outstanding_requests -= 1
def handle_one_response(self):
start = time.time()
headers_set = []
headers_sent = []
# Grab the request input now; app may try to replace it in the environ
request_input = self.environ['eventlet.input']
# Push the headers-sent state into the Input so it won't send a
# 100 Continue response if we've already started a response.
request_input.headers_sent = headers_sent
wfile = self.wfile
result = None
use_chunked = [False]
length = [0]
status_code = [200]
# Status code of 1xx or 204 or 2xx to CONNECT request MUST NOT send body and related headers
# https://httpwg.org/specs/rfc7230.html#rfc.section.3.3.1
bodyless = [False]
def write(data):
towrite = []
if not headers_set:
raise AssertionError("write() before start_response()")
elif not headers_sent:
status, response_headers = headers_set
headers_sent.append(1)
header_list = [header[0].lower() for header in response_headers]
towrite.append(('%s %s\r\n' % (self.protocol_version, status)).encode())
for header in response_headers:
towrite.append(('%s: %s\r\n' % header).encode('latin-1'))
# send Date header?
if 'date' not in header_list:
towrite.append(('Date: %s\r\n' % (format_date_time(time.time()),)).encode())
client_conn = self.headers.get('Connection', '').lower()
send_keep_alive = False
if self.close_connection == 0 and \
self.server.keepalive and (client_conn == 'keep-alive' or
(self.request_version == 'HTTP/1.1' and
not client_conn == 'close')):
# only send keep-alives back to clients that sent them,
# it's redundant for 1.1 connections
send_keep_alive = (client_conn == 'keep-alive')
self.close_connection = 0
else:
self.close_connection = 1
if 'content-length' not in header_list:
if bodyless[0]:
pass # client didn't expect a body anyway
elif self.request_version == 'HTTP/1.1':
use_chunked[0] = True
towrite.append(b'Transfer-Encoding: chunked\r\n')
else:
# client is 1.0 and therefore must read to EOF
self.close_connection = 1
if self.close_connection:
towrite.append(b'Connection: close\r\n')
elif send_keep_alive:
towrite.append(b'Connection: keep-alive\r\n')
# Spec says timeout must be an integer, but we allow sub-second
int_timeout = int(self.server.keepalive or 0)
if not isinstance(self.server.keepalive, bool) and int_timeout:
towrite.append(b'Keep-Alive: timeout=%d\r\n' % int_timeout)
towrite.append(b'\r\n')
# end of header writing
if use_chunked[0]:
# Write the chunked encoding
towrite.append(("%x" % (len(data),)).encode() + b"\r\n" + data + b"\r\n")
else:
towrite.append(data)
wfile.writelines(towrite)
wfile.flush()
length[0] = length[0] + sum(map(len, towrite))
def start_response(status, response_headers, exc_info=None):
status_code[0] = int(status.split(" ", 1)[0])
if exc_info:
try:
if headers_sent:
# Re-raise original exception if headers sent
raise exc_info[1].with_traceback(exc_info[2])
finally:
# Avoid dangling circular ref
exc_info = None
bodyless[0] = (
status_code[0] in (204, 304)
or self.command == "HEAD"
or (100 <= status_code[0] < 200)
or (self.command == "CONNECT" and 200 <= status_code[0] < 300)
)
# Response headers capitalization
# CONTent-TYpe: TExt/PlaiN -> Content-Type: TExt/PlaiN
# Per HTTP RFC standard, header name is case-insensitive.
# Please, fix your client to ignore header case if possible.
if self.capitalize_response_headers:
def cap(x):
return x.encode('latin1').capitalize().decode('latin1')
response_headers = [
('-'.join([cap(x) for x in key.split('-')]), value)
for key, value in response_headers]
headers_set[:] = [status, response_headers]
return write
try:
try:
WSGI_LOCAL.already_handled = False
result = self.application(self.environ, start_response)
# Set content-length if possible
if headers_set and not headers_sent and hasattr(result, '__len__'):
# We've got a complete final response
if not bodyless[0] and 'Content-Length' not in [h for h, _v in headers_set[1]]:
headers_set[1].append(('Content-Length', str(sum(map(len, result)))))
if request_input.should_send_hundred_continue:
# We've got a complete final response, and never sent a 100 Continue.
# There's no chance we'll need to read the body as we stream out the
# response, so we can be nice and send a Connection: close header.
self.close_connection = 1
towrite = []
towrite_size = 0
just_written_size = 0
minimum_write_chunk_size = int(self.environ.get(
'eventlet.minimum_write_chunk_size', self.minimum_chunk_size))
for data in result:
if len(data) == 0:
continue
if isinstance(data, str):
data = data.encode('ascii')
towrite.append(data)
towrite_size += len(data)
if towrite_size >= minimum_write_chunk_size:
write(b''.join(towrite))
towrite = []
just_written_size = towrite_size
towrite_size = 0
if WSGI_LOCAL.already_handled:
self.close_connection = 1
return
if towrite:
just_written_size = towrite_size
write(b''.join(towrite))
if not headers_sent or (use_chunked[0] and just_written_size):
write(b'')
except (Exception, eventlet.Timeout):
self.close_connection = 1
tb = traceback.format_exc()
self.server.log.info(tb)
if not headers_sent:
err_body = tb.encode() if self.server.debug else b''
start_response("500 Internal Server Error",
[('Content-type', 'text/plain'),
('Content-length', len(err_body))])
write(err_body)
finally:
if hasattr(result, 'close'):
result.close()
if request_input.should_send_hundred_continue:
# We just sent the final response, no 100 Continue. Client may or
# may not have started to send a body, and if we keep the connection
# open we've seen clients either
# * send a body, then start a new request
# * skip the body and go straight to a new request
# Looks like the most broadly compatible option is to close the
# connection and let the client retry.
# https://curl.se/mail/lib-2004-08/0002.html
# Note that we likely *won't* send a Connection: close header at this point
self.close_connection = 1
if (request_input.chunked_input or
request_input.position < (request_input.content_length or 0)):
# Read and discard body if connection is going to be reused
if self.close_connection == 0:
try:
request_input.discard()
except ChunkReadError as e:
self.close_connection = 1
self.server.log.error((
'chunked encoding error while discarding request body.'
+ ' client={0} request="{1}" error="{2}"').format(
self.get_client_address()[0], self.requestline, e,
))
except OSError as e:
self.close_connection = 1
self.server.log.error((
'I/O error while discarding request body.'
+ ' client={0} request="{1}" error="{2}"').format(
self.get_client_address()[0], self.requestline, e,
))
finish = time.time()
for hook, args, kwargs in self.environ['eventlet.posthooks']:
hook(self.environ, *args, **kwargs)
if self.server.log_output:
client_host, client_port = self.get_client_address()
self.server.log.info(self.server.log_format % {
'client_ip': client_host,
'client_port': client_port,
'date_time': self.log_date_time_string(),
'request_line': self.requestline,
'status_code': status_code[0],
'body_length': length[0],
'wall_seconds': finish - start,
})
def get_client_address(self):
host, port = addr_to_host_port(self.client_address)
if self.server.log_x_forwarded_for:
forward = self.headers.get('X-Forwarded-For', '').replace(' ', '')
if forward:
host = forward + ',' + host
return (host, port)
def formalize_key_naming(self, k):
"""
Headers containing underscores are permitted by RFC9110,
but evenlet joining headers of different names into
the same environment variable will dangerously confuse applications as to which is which.
Cf.
- Nginx: http://nginx.org/en/docs/http/ngx_http_core_module.html#underscores_in_headers
- Django: https://www.djangoproject.com/weblog/2015/jan/13/security/
- Gunicorn: https://github.com/benoitc/gunicorn/commit/72b8970dbf2bf3444eb2e8b12aeff1a3d5922a9a
- Werkzeug: https://github.com/pallets/werkzeug/commit/5ee439a692dc4474e0311de2496b567eed2d02cf
- ...
"""
if "_" in k:
return
return k.replace('-', '_').upper()
def get_environ(self):
env = self.server.get_environ()
env['REQUEST_METHOD'] = self.command
env['SCRIPT_NAME'] = ''
pq = self.path.split('?', 1)
env['RAW_PATH_INFO'] = pq[0]
env['PATH_INFO'] = urllib.parse.unquote(pq[0], encoding='latin1')
if len(pq) > 1:
env['QUERY_STRING'] = pq[1]
ct = self.headers.get('content-type')
if ct is None:
try:
ct = self.headers.type
except AttributeError:
ct = self.headers.get_content_type()
env['CONTENT_TYPE'] = ct
length = self.headers.get('content-length')
if length:
env['CONTENT_LENGTH'] = length
env['SERVER_PROTOCOL'] = 'HTTP/1.0'
sockname = self.request.getsockname()
server_addr = addr_to_host_port(sockname)
env['SERVER_NAME'] = server_addr[0]
env['SERVER_PORT'] = str(server_addr[1])
client_addr = addr_to_host_port(self.client_address)
env['REMOTE_ADDR'] = client_addr[0]
env['REMOTE_PORT'] = str(client_addr[1])
env['GATEWAY_INTERFACE'] = 'CGI/1.1'
try:
headers = self.headers.headers
except AttributeError:
headers = self.headers._headers
else:
headers = [h.split(':', 1) for h in headers]
env['headers_raw'] = headers_raw = tuple((k, v.strip(' \t\n\r')) for k, v in headers)
for k, v in headers_raw:
k = self.formalize_key_naming(k)
if not k:
continue
if k in ('CONTENT_TYPE', 'CONTENT_LENGTH'):
# These do not get the HTTP_ prefix and were handled above
continue
envk = 'HTTP_' + k
if envk in env:
env[envk] += ',' + v
else:
env[envk] = v
if env.get('HTTP_EXPECT', '').lower() == '100-continue':
wfile = self.wfile
wfile_line = b'HTTP/1.1 100 Continue\r\n'
else:
wfile = None
wfile_line = None
chunked = env.get('HTTP_TRANSFER_ENCODING', '').lower() == 'chunked'
if not chunked and length is None:
# https://www.rfc-editor.org/rfc/rfc9112#section-6.3-2.7
# "If this is a request message and none of the above are true, then
# the message body length is zero (no message body is present)."
length = '0'
env['wsgi.input'] = env['eventlet.input'] = Input(
self.rfile, length, self.connection, wfile=wfile, wfile_line=wfile_line,
chunked_input=chunked)
env['eventlet.posthooks'] = []
# WebSocketWSGI needs a way to flag the connection as idle,
# since it may never fall out of handle_one_request
def set_idle():
self.conn_state[2] = STATE_IDLE
env['eventlet.set_idle'] = set_idle
return env
def finish(self):
try:
BaseHTTPServer.BaseHTTPRequestHandler.finish(self)
except OSError as e:
# Broken pipe, connection reset by peer
if support.get_errno(e) not in BROKEN_SOCK:
raise
greenio.shutdown_safe(self.connection)
self.connection.close()
def handle_expect_100(self):
return True
| HttpProtocol |
python | getsentry__sentry | src/sentry/apidocs/extensions.py | {
"start": 1266,
"end": 2146
} | class ____(OpenApiSerializerExtension):
"""
This extension will register any Sentry Response Serializer as a component that can be used
in an OpenAPI schema. To have the serializer schema be mapped, you must type the
`serialize` function with a TypedDict / List.
"""
priority = 0
target_class = "sentry.api.serializers.base.Serializer"
match_subclasses = True
def get_name(self, auto_schema: AutoSchema, direction: Direction) -> str | None:
return self.target.__name__
def map_serializer(self, auto_schema: AutoSchema, direction: Direction) -> Any:
type_hints = get_type_hints(self.target.serialize)
if "return" not in type_hints:
raise TypeError("Please type the return value of the serializer with a TypedDict")
return resolve_type_hint(type_hints["return"])
| SentryResponseSerializerExtension |
python | tiangolo__fastapi | fastapi/security/oauth2.py | {
"start": 21248,
"end": 22574
} | class ____:
"""
This is a special class that you can define in a parameter in a dependency to
obtain the OAuth2 scopes required by all the dependencies in the same chain.
This way, multiple dependencies can have different scopes, even when used in the
same *path operation*. And with this, you can access all the scopes required in
all those dependencies in a single place.
Read more about it in the
[FastAPI docs for OAuth2 scopes](https://fastapi.tiangolo.com/advanced/security/oauth2-scopes/).
"""
def __init__(
self,
scopes: Annotated[
Optional[List[str]],
Doc(
"""
This will be filled by FastAPI.
"""
),
] = None,
):
self.scopes: Annotated[
List[str],
Doc(
"""
The list of all the scopes required by dependencies.
"""
),
] = scopes or []
self.scope_str: Annotated[
str,
Doc(
"""
All the scopes required by all the dependencies in a single string
separated by spaces, as defined in the OAuth2 specification.
"""
),
] = " ".join(self.scopes)
| SecurityScopes |
python | PrefectHQ__prefect | src/prefect/blocks/abstract.py | {
"start": 2175,
"end": 2350
} | class ____(Exception):
"""Raised if a notification block fails to send a notification."""
def __init__(self, log: str) -> None:
self.log = log
| NotificationError |
python | google__jax | tests/pytorch_interoperability_test.py | {
"start": 1503,
"end": 5799
} | class ____(jtu.JaxTestCase):
def setUp(self):
super().setUp()
if not jtu.test_device_matches(["cpu", "gpu"]):
self.skipTest("DLPack only supported on CPU and GPU")
def testTorchToJaxFailure(self):
x = torch.arange(6).reshape((2, 3))
x = x.cuda() if jtu.test_device_matches(["gpu"]) else x
y = torch.utils.dlpack.to_dlpack(x[:, :2])
backend = xla_bridge.get_backend()
client = getattr(backend, "client", backend)
regex_str = (r'UNIMPLEMENTED: Only DLPack tensors with trivial \(compact\) '
r'striding are supported')
with self.assertRaisesRegex(RuntimeError, regex_str):
xla_client._xla.dlpack_managed_tensor_to_buffer(
y, client.devices()[0], None)
@jtu.sample_product(shape=all_shapes, dtype=torch_dtypes)
def testJaxToTorch(self, shape, dtype):
if not config.enable_x64.value and dtype in [
jnp.int64,
jnp.float64,
jnp.complex128,
]:
self.skipTest("x64 types are disabled by jax_enable_x64")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
x = jnp.array(np)
y = torch.utils.dlpack.from_dlpack(x)
if dtype == jnp.bfloat16:
# .numpy() doesn't work on Torch bfloat16 tensors.
self.assertAllClose(np,
y.cpu().view(torch.int16).numpy().view(jnp.bfloat16))
else:
self.assertAllClose(np, y.cpu().numpy())
@jtu.sample_product(shape=all_shapes, dtype=torch_dtypes)
def testJaxArrayToTorch(self, shape, dtype):
if not config.enable_x64.value and dtype in [
jnp.int64,
jnp.float64,
jnp.complex128,
]:
self.skipTest("x64 types are disabled by jax_enable_x64")
rng = jtu.rand_default(self.rng())
np = rng(shape, dtype)
# Test across all devices
for device in jax.local_devices():
x = jax.device_put(np, device)
y = torch.utils.dlpack.from_dlpack(x)
if dtype == jnp.bfloat16:
# .numpy() doesn't work on Torch bfloat16 tensors.
self.assertAllClose(
np, y.cpu().view(torch.int16).numpy().view(jnp.bfloat16)
)
else:
self.assertAllClose(np, y.cpu().numpy())
def testTorchToJaxInt64(self):
# See https://github.com/jax-ml/jax/issues/11895
x = jax.dlpack.from_dlpack(
torch.ones((2, 3), dtype=torch.int64))
dtype_expected = jnp.int64 if config.enable_x64.value else jnp.int32
self.assertEqual(x.dtype, dtype_expected)
def testTorchToJaxNondefaultLayout(self):
x = torch.arange(4).reshape(2, 2).T
x = x.cuda() if jtu.test_device_matches(["gpu"]) else x
self.assertAllClose(x.cpu().numpy(), jax.dlpack.from_dlpack(x))
@jtu.sample_product(shape=all_shapes, dtype=torch_dtypes)
def testTorchToJax(self, shape, dtype):
if not config.enable_x64.value and dtype in [
jnp.int64,
jnp.float64,
jnp.complex128,
]:
self.skipTest("x64 types are disabled by jax_enable_x64")
rng = jtu.rand_default(self.rng())
x_np = rng(shape, dtype)
if dtype == jnp.bfloat16:
x = torch.tensor(x_np.view(jnp.int16)).view(torch.bfloat16)
else:
x = torch.tensor(x_np)
x = x.cuda() if jtu.test_device_matches(["gpu"]) else x
y = jax.dlpack.from_dlpack(x)
self.assertAllClose(x_np, y)
# Verify the resulting value can be passed to a jit computation.
z = jax.jit(lambda x: x + 1)(y)
self.assertAllClose(x_np + dtype(1), z)
@jtu.sample_product(shape=all_shapes, dtype=torch_dtypes)
def testTorchToJaxArray(self, shape, dtype):
if not config.enable_x64.value and dtype in [
jnp.int64,
jnp.float64,
jnp.complex128,
]:
self.skipTest("x64 types are disabled by jax_enable_x64")
rng = jtu.rand_default(self.rng())
x_np = rng(shape, dtype)
if dtype == jnp.bfloat16:
x = torch.tensor(x_np.view(jnp.int16)).view(torch.bfloat16)
else:
x = torch.tensor(x_np)
x = x.cuda() if jtu.test_device_matches(["gpu"]) else x
y = jax.dlpack.from_dlpack(x)
self.assertAllClose(x_np, y)
# Verify the resulting value can be passed to a jit computation.
z = jax.jit(lambda x: x + 1)(y)
self.assertAllClose(x_np + dtype(1), z)
if __name__ == "__main__":
absltest.main(testLoader=jtu.JaxTestLoader())
| DLPackTest |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/multinomial_op_test.py | {
"start": 1841,
"end": 9219
} | class ____(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def testSmallEntropy(self):
random_seed.set_random_seed(1618)
for output_dtype in [np.int32, np.int64]:
with test_util.device(use_gpu=True):
# A logit value of -10 corresponds to a probability of ~5e-5.
logits = constant_op.constant([[-10., 10., -10.], [-10., -10., 10.]])
num_samples = 1000
samples = self.evaluate(random_ops.multinomial(
logits, num_samples, output_dtype=output_dtype))
self.assertAllEqual([[1] * num_samples, [2] * num_samples], samples)
@test_util.run_deprecated_v1
def testOneOpMultipleStepsIndependent(self):
with test_util.use_gpu():
sample_op1, _ = self._make_ops(10)
# Consecutive runs shouldn't yield identical output.
sample1a = self.evaluate(sample_op1)
sample1b = self.evaluate(sample_op1)
self.assertFalse(np.equal(sample1a, sample1b).all())
def testEagerOneOpMultipleStepsIndependent(self):
with context.eager_mode(), test_util.device(use_gpu=True):
sample1, sample2 = self._make_ops(10)
# Consecutive runs shouldn't yield identical output.
self.assertFalse(np.equal(sample1.numpy(), sample2.numpy()).all())
@test_util.run_deprecated_v1
def testBfloat16(self):
with test_util.use_gpu():
sample_op1, _ = self._make_ops(10, dtype=dtypes.bfloat16)
self.evaluate(sample_op1)
def testEagerBfloat16(self):
with context.eager_mode(), test_util.device(use_gpu=True):
self._make_ops(10, dtype=dtypes.bfloat16)
def testTwoOpsIndependent(self):
with test_util.use_gpu():
sample_op1, sample_op2 = self._make_ops(32)
sample1, sample2 = self.evaluate([sample_op1, sample_op2])
# We expect sample1 and sample2 to be independent.
# 1 in 2^32 chance of this assertion failing.
self.assertFalse(np.equal(sample1, sample2).all())
@test_util.run_deprecated_v1
def testTwoOpsSameSeedDrawSameSequences(self):
with test_util.use_gpu():
sample_op1, sample_op2 = self._make_ops(1000, seed=1)
sample1, sample2 = self.evaluate([sample_op1, sample_op2])
self.assertAllEqual(sample1, sample2)
def testLargeLogits(self):
for neg in [True, False]:
with test_util.use_gpu():
logits = np.array([[1000.] * 5])
if neg:
logits *= -1
samples = self.evaluate(random_ops.multinomial(logits, 10))
# Sampled classes should be in-range.
self.assertTrue((samples >= 0).all())
self.assertTrue((samples < 5).all())
def testSamplingCorrectness(self):
np.random.seed(1618) # Make it reproducible.
num_samples = 21000
rand_probs = self._normalize(np.random.random_sample((10,)))
rand_probs2 = self._normalize(np.random.random_sample((3, 5))) # batched
for probs in [[.5, .5], [.85, .05, .1], rand_probs, rand_probs2]:
probs = np.asarray(probs)
if len(probs.shape) == 1:
probs = probs.reshape(1, probs.size) # singleton batch
logits = np.log(probs).astype(np.float32)
composed_freqs = self._do_sampling(logits, num_samples, composed_sampler)
native_freqs = self._do_sampling(logits, num_samples, native_sampler)
# the test here is similar to core/lib/random/distribution_sampler_test.cc
composed_chi2 = self._chi2(probs, composed_freqs)
native_chi2 = self._chi2(probs, native_freqs)
composed_native_chi2 = self._chi2(composed_freqs, native_freqs)
def check(chi2s):
for chi2 in chi2s:
self.assertLess(chi2, 1e-3)
check(composed_chi2)
check(native_chi2)
check(composed_native_chi2)
def _make_ops(self, num_samples, seed=None, dtype=dtypes.float32):
prob_dist = constant_op.constant([[0.15, 0.5, 0.3, 0.05]], dtype=dtype)
logits = math_ops.log(prob_dist)
# Two independent sets of samples from the same distribution
sample_op1 = random_ops.multinomial(logits, num_samples, seed)
sample_op2 = random_ops.multinomial(logits, num_samples, seed)
return (sample_op1, sample_op2)
def _normalize(self, vec):
batched = (len(vec.shape) == 2)
return vec / vec.sum(axis=1, keepdims=True) if batched else vec / vec.sum()
def _do_sampling(self, logits, num_samples, sampler):
"""Samples using the supplied sampler and inputs.
Args:
logits: Numpy ndarray of shape [batch_size, num_classes].
num_samples: Int; number of samples to draw.
sampler: A sampler function that takes (1) a [batch_size, num_classes]
Tensor, (2) num_samples and returns a [batch_size, num_samples] Tensor.
Returns:
Frequencies from sampled classes; shape [batch_size, num_classes].
"""
with test_util.use_gpu():
random_seed.set_random_seed(1618)
op = sampler(constant_op.constant(logits), num_samples)
d = self.evaluate(op)
batch_size, num_classes = logits.shape
freqs_mat = []
for i in range(batch_size):
cnts = dict(collections.Counter(d[i, :]))
# Requires drawn class labels be in range.
self.assertLess(max(cnts.keys()), num_classes)
self.assertGreaterEqual(min(cnts.keys()), 0)
freqs = [(cnts[k] * 1. / num_samples if k in cnts else 0)
for k in range(num_classes)]
freqs_mat.append(freqs)
return freqs_mat
def _chi2(self, expected, actual):
actual = np.asarray(actual)
expected = np.asarray(expected)
diff = actual - expected
chi2 = np.sum(diff * diff / expected, axis=0)
return chi2
def testEmpty(self):
classes = 5
with test_util.use_gpu():
for batch in 0, 3:
for samples in 0, 7:
x = self.evaluate(
random_ops.multinomial(
array_ops.zeros([batch, classes]), samples))
self.assertEqual(x.shape, (batch, samples))
@test_util.run_deprecated_v1
def testEmptyClasses(self):
with test_util.use_gpu():
x = random_ops.multinomial(array_ops.zeros([5, 0]), 7)
with self.assertRaisesOpError("num_classes should be positive"):
self.evaluate(x)
def testNegativeMinLogits(self):
random_seed.set_random_seed(78844)
with test_util.use_gpu():
logits = constant_op.constant([[np.finfo(np.float32).min] * 1023 + [0]])
num_samples = 1000
samples = self.evaluate(random_ops.multinomial(logits, num_samples))
self.assertAllEqual([[1023] * num_samples], samples)
# Benchmarking code
def native_op_vs_composed_ops(batch_size, num_classes, num_samples, num_iters):
np.random.seed(1618) # Make it reproducible.
shape = [batch_size, num_classes]
logits_np = np.random.randn(*shape).astype(np.float32)
# No CSE/CF.
optimizer_options = config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L0)
config = config_pb2.ConfigProto(graph_options=config_pb2.GraphOptions(
optimizer_options=optimizer_options))
with session.Session(config=config) as sess:
logits = constant_op.constant(logits_np, shape=shape)
native_op = control_flow_ops.group(native_sampler(logits, num_samples))
composed_op = control_flow_ops.group(composed_sampler(logits, num_samples))
native_dt = timeit.timeit(lambda: sess.run(native_op), number=num_iters)
composed_dt = timeit.timeit(lambda: sess.run(composed_op), number=num_iters)
return native_dt, composed_dt
| MultinomialTest |
python | apache__airflow | airflow-core/tests/unit/dag_processing/bundles/test_dag_bundle_manager.py | {
"start": 3502,
"end": 7233
} | class ____(BaseDagBundle):
def refresh(self):
pass
def get_current_version(self):
pass
def path(self):
pass
BASIC_BUNDLE_CONFIG = [
{
"name": "my-test-bundle",
"classpath": "unit.dag_processing.bundles.test_dag_bundle_manager.BasicBundle",
"kwargs": {"refresh_interval": 1},
}
]
def test_get_bundle():
"""Test that get_bundle builds and returns a bundle."""
with patch.dict(
os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": json.dumps(BASIC_BUNDLE_CONFIG)}
):
bundle_manager = DagBundlesManager()
with pytest.raises(ValueError, match="'bundle-that-doesn't-exist' is not configured"):
bundle_manager.get_bundle(name="bundle-that-doesn't-exist", version="hello")
bundle = bundle_manager.get_bundle(name="my-test-bundle", version="hello")
assert isinstance(bundle, BasicBundle)
assert bundle.name == "my-test-bundle"
assert bundle.version == "hello"
assert bundle.refresh_interval == 1
# And none for version also works!
with patch.dict(
os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": json.dumps(BASIC_BUNDLE_CONFIG)}
):
bundle = bundle_manager.get_bundle(name="my-test-bundle")
assert isinstance(bundle, BasicBundle)
assert bundle.name == "my-test-bundle"
assert bundle.version is None
@pytest.fixture
def clear_db():
clear_db_dag_bundles()
yield
clear_db_dag_bundles()
@pytest.mark.db_test
@conf_vars({("core", "LOAD_EXAMPLES"): "False"})
def test_sync_bundles_to_db(clear_db, session):
def _get_bundle_names_and_active():
return session.query(DagBundleModel.name, DagBundleModel.active).order_by(DagBundleModel.name).all()
# Initial add
with patch.dict(
os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": json.dumps(BASIC_BUNDLE_CONFIG)}
):
manager = DagBundlesManager()
manager.sync_bundles_to_db()
assert _get_bundle_names_and_active() == [("my-test-bundle", True)]
session.add(
ParseImportError(
bundle_name="my-test-bundle", # simulate import error for this bundle
filename="some_file.py",
stacktrace="some error",
)
)
session.flush()
# simulate bundle config change (now 'dags-folder' is active, 'my-test-bundle' becomes inactive)
manager = DagBundlesManager()
manager.sync_bundles_to_db()
assert _get_bundle_names_and_active() == [
("dags-folder", True),
("my-test-bundle", False),
]
# Since my-test-bundle is inactive, the associated import errors should be deleted
assert session.query(ParseImportError).count() == 0
# Re-enable one that reappears in config
with patch.dict(
os.environ, {"AIRFLOW__DAG_PROCESSOR__DAG_BUNDLE_CONFIG_LIST": json.dumps(BASIC_BUNDLE_CONFIG)}
):
manager = DagBundlesManager()
manager.sync_bundles_to_db()
assert _get_bundle_names_and_active() == [
("dags-folder", False),
("my-test-bundle", True),
]
@conf_vars({("dag_processor", "dag_bundle_config_list"): json.dumps(BASIC_BUNDLE_CONFIG)})
@pytest.mark.parametrize("version", [None, "hello"])
def test_view_url(version):
"""Test that view_url calls the bundle's view_url method."""
bundle_manager = DagBundlesManager()
with patch.object(BaseDagBundle, "view_url") as view_url_mock:
# Test that deprecation warning is raised
with pytest.warns(DeprecationWarning, match="'view_url' method is deprecated"):
bundle_manager.view_url("my-test-bundle", version=version)
view_url_mock.assert_called_once_with(version=version)
| BasicBundle |
python | aio-libs__aiohttp | aiohttp/client_reqrep.py | {
"start": 2299,
"end": 2418
} | class ____:
type: str | None
parameters: "MappingProxyType[str, str]"
filename: str | None
| ContentDisposition |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/readers.py | {
"start": 48481,
"end": 50365
} | class ____(dataset_ops.DatasetSource):
"""A `Dataset` consisting of the results from a SQL query.
`SqlDataset` allows a user to read data from the result set of a SQL query.
For example:
```python
dataset = tf.data.experimental.SqlDataset("sqlite", "/foo/bar.sqlite3",
"SELECT name, age FROM people",
(tf.string, tf.int32))
# Prints the rows of the result set of the above query.
for element in dataset:
print(element)
```
"""
def __init__(self, driver_name, data_source_name, query, output_types):
"""Creates a `SqlDataset`.
Args:
driver_name: A 0-D `tf.string` tensor containing the database type.
Currently, the only supported value is 'sqlite'.
data_source_name: A 0-D `tf.string` tensor containing a connection string
to connect to the database.
query: A 0-D `tf.string` tensor containing the SQL query to execute.
output_types: A tuple of `tf.DType` objects representing the types of the
columns returned by `query`.
"""
self._driver_name = ops.convert_to_tensor(
driver_name, dtype=dtypes.string, name="driver_name")
self._data_source_name = ops.convert_to_tensor(
data_source_name, dtype=dtypes.string, name="data_source_name")
self._query = ops.convert_to_tensor(
query, dtype=dtypes.string, name="query")
self._element_spec = nest.map_structure(
lambda dtype: tensor_spec.TensorSpec([], dtype), output_types)
variant_tensor = gen_experimental_dataset_ops.sql_dataset(
self._driver_name, self._data_source_name, self._query,
**self._flat_structure)
super(SqlDatasetV2, self).__init__(variant_tensor)
@property
def element_spec(self):
return self._element_spec
@tf_export(v1=["data.experimental.SqlDataset"])
| SqlDatasetV2 |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-aws-bedrock-agentcore/llama_index/tools/aws_bedrock_agentcore/browser/browser_session_manager.py | {
"start": 342,
"end": 10936
} | class ____:
"""
Manages browser sessions for different threads.
This class maintains separate browser sessions for different threads,
enabling concurrent usage of browsers in multi-threaded environments.
Browsers are created lazily only when needed by tools.
Concurrency protection is also implemented. Each browser session is tied
to a specific thread_id and includes protection against concurrent usage.
When a browser is obtained via get_async_browser() or get_sync_browser(),
it is marked as "in use", and subsequent attempts to access the same
browser session will raise a RuntimeError until it is released.
"""
def __init__(self, region: str = "us-west-2"):
"""
Initialize the browser session manager.
Args:
region: AWS region for browser client
"""
self.region = region
self._async_sessions: Dict[str, Tuple[BrowserClient, AsyncBrowser, bool]] = {}
self._sync_sessions: Dict[str, Tuple[BrowserClient, SyncBrowser, bool]] = {}
async def get_async_browser(self, thread_id: str) -> AsyncBrowser:
"""
Get or create an async browser for the specified thread.
Args:
thread_id: Unique identifier for the thread requesting the browser
Returns:
An async browser instance specific to the thread
Raises:
RuntimeError: If the browser session is already in use by another caller
"""
if thread_id in self._async_sessions:
client, browser, in_use = self._async_sessions[thread_id]
if in_use:
raise RuntimeError(
f"Browser session for thread {thread_id} is already in use. "
"Use a different thread_id for concurrent operations."
)
self._async_sessions[thread_id] = (client, browser, True)
return browser
return await self._create_async_browser_session(thread_id)
def get_sync_browser(self, thread_id: str) -> SyncBrowser:
"""
Get or create a sync browser for the specified thread.
Args:
thread_id: Unique identifier for the thread requesting the browser
Returns:
A sync browser instance specific to the thread
Raises:
RuntimeError: If the browser session is already in use by another caller
"""
if thread_id in self._sync_sessions:
client, browser, in_use = self._sync_sessions[thread_id]
if in_use:
raise RuntimeError(
f"Browser session for thread {thread_id} is already in use. "
"Use a different thread_id for concurrent operations."
)
self._sync_sessions[thread_id] = (client, browser, True)
return browser
return self._create_sync_browser_session(thread_id)
async def _create_async_browser_session(self, thread_id: str) -> AsyncBrowser:
"""
Create a new async browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
Returns:
The newly created async browser instance
Raises:
Exception: If browser session creation fails
"""
browser_client = BrowserClient(region=self.region)
try:
# Start browser session
browser_client.start()
# Get WebSocket connection info
ws_url, headers = browser_client.generate_ws_headers()
logger.info(
f"Connecting to async WebSocket endpoint for thread {thread_id}: {ws_url}"
)
from playwright.async_api import async_playwright
# Connect to browser using Playwright
playwright = await async_playwright().start()
browser = await playwright.chromium.connect_over_cdp(
endpoint_url=ws_url, headers=headers, timeout=30000
)
logger.info(
f"Successfully connected to async browser for thread {thread_id}"
)
# Store session resources
self._async_sessions[thread_id] = (browser_client, browser, True)
return browser
except Exception as e:
logger.error(
f"Failed to create async browser session for thread {thread_id}: {e}"
)
# Clean up resources if session creation fails
if browser_client:
try:
browser_client.stop()
except Exception as cleanup_error:
logger.warning(f"Error cleaning up browser client: {cleanup_error}")
raise
def _create_sync_browser_session(self, thread_id: str) -> SyncBrowser:
"""
Create a new sync browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
Returns:
The newly created sync browser instance
Raises:
Exception: If browser session creation fails
"""
browser_client = BrowserClient(region=self.region)
try:
# Start browser session
browser_client.start()
# Get WebSocket connection info
ws_url, headers = browser_client.generate_ws_headers()
logger.info(
f"Connecting to sync WebSocket endpoint for thread {thread_id}: {ws_url}"
)
from playwright.sync_api import sync_playwright
# Connect to browser using Playwright
playwright = sync_playwright().start()
browser = playwright.chromium.connect_over_cdp(
endpoint_url=ws_url, headers=headers, timeout=30000
)
logger.info(
f"Successfully connected to sync browser for thread {thread_id}"
)
# Store session resources
self._sync_sessions[thread_id] = (browser_client, browser, True)
return browser
except Exception as e:
logger.error(
f"Failed to create sync browser session for thread {thread_id}: {e}"
)
# Clean up resources if session creation fails
if browser_client:
try:
browser_client.stop()
except Exception as cleanup_error:
logger.warning(f"Error cleaning up browser client: {cleanup_error}")
raise
async def release_async_browser(self, thread_id: str) -> None:
"""
Release the async browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
"""
if thread_id not in self._async_sessions:
logger.warning(f"No async browser session found for thread {thread_id}")
return
client, browser, in_use = self._async_sessions[thread_id]
if in_use:
self._async_sessions[thread_id] = (client, browser, False)
logger.info(f"Released async browser for thread {thread_id}")
def release_sync_browser(self, thread_id: str) -> None:
"""
Release the sync browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
"""
if thread_id not in self._sync_sessions:
logger.warning(f"No sync browser session found for thread {thread_id}")
return
client, browser, in_use = self._sync_sessions[thread_id]
if in_use:
self._sync_sessions[thread_id] = (client, browser, False)
logger.info(f"Released sync browser for thread {thread_id}")
async def close_async_browser(self, thread_id: str) -> None:
"""
Close the async browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
"""
if thread_id not in self._async_sessions:
logger.warning(f"No async browser session found for thread {thread_id}")
return
client, browser, _ = self._async_sessions[thread_id]
# Close browser
if browser:
try:
await browser.close()
except Exception as e:
logger.warning(
f"Error closing async browser for thread {thread_id}: {e}"
)
# Stop browser client
if client:
try:
client.stop()
except Exception as e:
logger.warning(
f"Error stopping browser client for thread {thread_id}: {e}"
)
# Remove session from dictionary
del self._async_sessions[thread_id]
logger.info(f"Async browser session cleaned up for thread {thread_id}")
def close_sync_browser(self, thread_id: str) -> None:
"""
Close the sync browser session for the specified thread.
Args:
thread_id: Unique identifier for the thread
"""
if thread_id not in self._sync_sessions:
logger.warning(f"No sync browser session found for thread {thread_id}")
return
client, browser, _ = self._sync_sessions[thread_id]
# Close browser
if browser:
try:
browser.close()
except Exception as e:
logger.warning(
f"Error closing sync browser for thread {thread_id}: {e}"
)
# Stop browser client
if client:
try:
client.stop()
except Exception as e:
logger.warning(
f"Error stopping browser client for thread {thread_id}: {e}"
)
# Remove session from dictionary
del self._sync_sessions[thread_id]
logger.info(f"Sync browser session cleaned up for thread {thread_id}")
async def close_all_browsers(self) -> None:
"""Close all browser sessions."""
# Close all async browsers
async_thread_ids = list(self._async_sessions.keys())
for thread_id in async_thread_ids:
await self.close_async_browser(thread_id)
# Close all sync browsers
sync_thread_ids = list(self._sync_sessions.keys())
for thread_id in sync_thread_ids:
self.close_sync_browser(thread_id)
logger.info("All browser sessions closed")
| BrowserSessionManager |
python | getsentry__sentry | src/sentry/flags/endpoints/logs.py | {
"start": 4915,
"end": 5512
} | class ____(OrganizationEndpoint):
owner = ApiOwner.FLAG
publish_status = {"GET": ApiPublishStatus.PRIVATE}
def get(self, request: Request, organization: Organization, flag_log_id: int) -> Response:
try:
model = FlagAuditLogModel.objects.filter(
id=flag_log_id,
organization_id=organization.id,
).get()
except FlagAuditLogModel.DoesNotExist:
raise ResourceDoesNotExist
return self.respond({"data": serialize(model, request.user, FlagAuditLogModelSerializer())})
| OrganizationFlagLogDetailsEndpoint |
python | huggingface__transformers | examples/modular-transformers/modeling_test_detr.py | {
"start": 31240,
"end": 36104
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: TestDetrConfig):
super().__init__()
self.embed_dim = config.d_model
# self-attention
self.self_attn = TestDetrMultiheadAttention(
embed_dim=self.embed_dim,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
# cross-attention
self.encoder_attn = TestDetrMultiscaleDeformableAttention(
config,
num_heads=config.decoder_attention_heads,
n_points=config.decoder_n_points,
)
self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
# feedforward neural networks
self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim)
self.final_layer_norm = nn.LayerNorm(self.embed_dim)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
):
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(seq_len, batch, embed_dim)`.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings that are added to the queries and keys in the self-attention layer.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
position_embeddings=position_embeddings,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
second_residual = hidden_states
# Cross-Attention
cross_attn_weights = None
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
attention_mask=encoder_attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = second_residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
| TestDetrDecoderLayer |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/tryceratops/TRY201.py | {
"start": 146,
"end": 1196
} | class ____(Exception):
pass
def bad():
try:
process()
except MyException as e:
logger.exception("process failed")
raise e
def good():
try:
process()
except MyException:
logger.exception("process failed")
raise
def still_good():
try:
process()
except MyException as e:
print(e)
raise
def still_good_too():
try:
process()
except MyException as e:
print(e)
raise e from None
def still_actually_good():
try:
process()
except MyException as e:
try:
pass
except TypeError:
raise e
def bad_that_needs_recursion():
try:
process()
except MyException as e:
logger.exception("process failed")
if True:
raise e
def bad_that_needs_recursion_2():
try:
process()
except MyException as e:
logger.exception("process failed")
if True:
def foo():
raise e
| MyException |
python | PrefectHQ__prefect | examples/ai_database_cleanup_with_approval.py | {
"start": 4660,
"end": 9981
} | class ____(BaseModel):
"""Structured AI decision."""
approved: bool
confidence: float = Field(ge=0.0, le=1.0)
reasoning: str
concerns: list[str] | None = None
def create_cleanup_agent() -> PrefectAgent[None, CleanupDecision]:
"""Create AI agent with Prefect MCP tools for autonomous approval."""
# Connect to Prefect MCP server - provides read-only Prefect tools
mcp_server = MCPServerStdio(
"prefect", "uvx", args=["--from", "prefect-mcp", "prefect-mcp-server"]
)
agent = Agent(
model=AnthropicModel("claude-sonnet-4-5-20250929"),
output_type=CleanupDecision,
system_prompt=AGENT_PROMPT,
mcp_servers=[mcp_server],
)
# Wrap with PrefectAgent for retry/timeout handling
return PrefectAgent(
agent,
model_task_config=TaskConfig(retries=2, timeout_seconds=120.0),
)
@flow(name="ai-approval", log_prints=True)
async def get_ai_approval(
preview: str, count: int, config: RetentionConfig
) -> tuple[bool, str]:
"""Use AI agent to autonomously decide approval."""
print("🤖 requesting ai agent decision...")
agent = create_cleanup_agent()
context = f"""
proposed cleanup:
- retention: {config.retention_period}
- states: {", ".join(config.states_to_clean)}
- count: {count} flow runs
preview:
{preview}
investigate using your prefect mcp tools and decide if safe to proceed.
"""
result = await agent.run(context)
decision = result.output
print(f"decision: {'✅ approved' if decision.approved else '❌ rejected'}")
print(f"confidence: {decision.confidence:.0%}")
print(f"reasoning: {decision.reasoning}")
return decision.approved, decision.reasoning
# </Accordion>
#
# <Accordion title="Main Cleanup Flow">
@flow(name="database-cleanup", log_prints=True)
async def database_cleanup_flow(config: RetentionConfig | None = None) -> dict:
"""Database cleanup with configurable approval workflow."""
if config is None:
config = RetentionConfig()
print(f"🚀 starting cleanup (approval: {config.approval_type})")
# Fetch flow runs matching retention policy
async with get_client() as client:
cutoff = datetime.now(timezone.utc) - config.retention_period
flow_runs = await client.read_flow_runs(
flow_run_filter=FlowRunFilter(
start_time=FlowRunFilterStartTime(before_=cutoff),
state=FlowRunFilterStateName(any_=config.states_to_clean),
),
limit=config.batch_size * 5,
)
if not flow_runs:
print("✨ nothing to clean")
return {"status": "no_action", "deleted": 0}
# Preview what will be deleted
preview = "\n".join(
f"- {r.name} ({r.state.type.value}) - {r.start_time}" for r in flow_runs[:5]
)
if len(flow_runs) > 5:
preview += f"\n... and {len(flow_runs) - 5} more"
print(f"\n📋 preview:\n{preview}\n")
# Get approval (human or AI based on config)
if config.approval_type == "human":
approved, notes = get_human_approval(preview, len(flow_runs))
else:
approved, notes = await get_ai_approval(preview, len(flow_runs), config)
if not approved:
print(f"❌ cleanup rejected: {notes}")
return {"status": "rejected", "reason": notes}
print(f"✅ cleanup approved: {notes}")
if config.dry_run:
print("🔍 dry run - no deletions")
return {"status": "dry_run", "would_delete": len(flow_runs)}
# Perform deletion with batching and rate limiting
deleted = 0
async with get_client() as client:
for i in range(0, len(flow_runs), config.batch_size):
batch = flow_runs[i : i + config.batch_size]
for run in batch:
try:
await client.delete_flow_run(run.id)
deleted += 1
except Exception as e:
print(f"failed to delete {run.id}: {e}")
await asyncio.sleep(0.1) # rate limiting
print(f"✅ deleted {deleted}/{len(flow_runs)} flow runs")
return {"status": "completed", "deleted": deleted}
# </Accordion>
#
# </AccordionGroup>
#
# ## Deployment Examples
if __name__ == "__main__":
# Start with human approval in production
prod_config = RetentionConfig(
retention_period=timedelta(days=30),
dry_run=False,
approval_type="human",
)
# Graduate to AI approval in dev/staging
dev_config = RetentionConfig(
retention_period=timedelta(minutes=5),
dry_run=False,
approval_type="ai", # requires ANTHROPIC_API_KEY
)
database_cleanup_flow.serve(
name="database-cleanup-deployment",
tags=["database-maintenance", "cleanup"],
)
# ## Related Documentation
#
# - [Database Maintenance Guide](/v3/advanced/database-maintenance) - SQL queries, retention strategies, VACUUM
# - [Form Building](/v3/advanced/form-building) - Create validated UI forms from Pydantic models
# - [Interactive Workflows](/v3/advanced/interactive) - Pause flows and wait for human input
# - [Prefect MCP Server](/v3/how-to-guides/ai/use-prefect-mcp-server) - Connect AI agents to Prefect
# - [pydantic-ai + Prefect](https://ai.pydantic.dev/durable_execution/prefect/) - Durable AI agents with retries
| CleanupDecision |
python | crytic__slither | slither/detectors/statements/return_bomb.py | {
"start": 500,
"end": 4254
} | class ____(AbstractDetector):
ARGUMENT = "return-bomb"
HELP = "A low level callee may consume all callers gas unexpectedly."
IMPACT = DetectorClassification.LOW
CONFIDENCE = DetectorClassification.MEDIUM
WIKI = "https://github.com/crytic/slither/wiki/Detector-Documentation#return-bomb"
WIKI_TITLE = "Return Bomb"
WIKI_DESCRIPTION = "A low level callee may consume all callers gas unexpectedly."
WIKI_EXPLOIT_SCENARIO = """
```solidity
//Modified from https://github.com/nomad-xyz/ExcessivelySafeCall
contract BadGuy {
function youveActivateMyTrapCard() external pure returns (bytes memory) {
assembly{
revert(0, 1000000)
}
}
}
contract Mark {
function oops(address badGuy) public{
bool success;
bytes memory ret;
// Mark pays a lot of gas for this copy
//(success, ret) = badGuy.call{gas:10000}(
(success, ret) = badGuy.call(
abi.encodeWithSelector(
BadGuy.youveActivateMyTrapCard.selector
)
);
// Mark may OOG here, preventing local state changes
//importantCleanup();
}
}
```
After Mark calls BadGuy bytes are copied from returndata to memory, the memory expansion cost is paid. This means that when using a standard solidity call, the callee can "returnbomb" the caller, imposing an arbitrary gas cost.
Callee unexpectedly makes the caller OOG.
"""
WIKI_RECOMMENDATION = "Avoid unlimited implicit decoding of returndata."
@staticmethod
def is_dynamic_type(ty: Type) -> bool:
# ty.is_dynamic ?
name = str(ty)
if "[]" in name or name in ("bytes", "string"):
return True
return False
def get_nodes_for_function(self, function: Function, contract: Contract) -> List[Node]:
nodes = []
for ir in [ir for _, ir in function.high_level_calls] + function.low_level_calls:
if not is_tainted(ir.destination, contract): # type:ignore
# Only interested if the target address is controlled/tainted
continue
if isinstance(ir, HighLevelCall) and isinstance(ir.function, Function):
# in normal highlevel calls return bombs are _possible_
# if the return type is dynamic and the caller tries to copy and decode large data
has_dyn = False
if ir.function.return_type:
has_dyn = any(self.is_dynamic_type(ty) for ty in ir.function.return_type)
if not has_dyn:
continue
# If a gas budget was specified then the
# user may not know about the return bomb
if ir.call_gas is None:
# if a gas budget was NOT specified then the caller
# may already suspect the call may spend all gas?
continue
nodes.append(ir.node)
# TODO: check that there is some state change after the call
return nodes
def _detect(self) -> List[Output]:
results = []
for contract in self.compilation_unit.contracts:
for function in contract.functions_declared:
nodes = self.get_nodes_for_function(function, contract)
if nodes:
info: DETECTOR_INFO = [
function,
" tries to limit the gas of an external call that controls implicit decoding\n",
]
for node in sorted(nodes, key=lambda x: x.node_id):
info += ["\t", node, "\n"]
res = self.generate_result(info)
results.append(res)
return results
| ReturnBomb |
python | neetcode-gh__leetcode | python/0701-insert-into-a-binary-search-tree.py | {
"start": 192,
"end": 530
} | class ____:
def insertIntoBST(self, root: Optional[TreeNode], val: int) -> Optional[TreeNode]:
if not root:
return TreeNode(val)
if val > root.val:
root.right = self.insertIntoBST(root.right, val)
else:
root.left = self.insertIntoBST(root.left, val)
return root
| Solution |
python | astropy__astropy | astropy/nddata/flag_collection.py | {
"start": 117,
"end": 1491
} | class ____(dict):
"""
The purpose of this class is to provide a dictionary for
containing arrays of flags for the `NDData` class. Flags should be
stored in Numpy arrays that have the same dimensions as the parent
data, so the `FlagCollection` class adds shape checking to a
dictionary.
The `FlagCollection` should be initialized like a
dict, but with the addition of a ``shape=``
keyword argument used to pass the NDData shape.
"""
def __init__(self, *args, **kwargs):
if "shape" in kwargs:
self.shape = kwargs.pop("shape")
if not np.iterable(self.shape):
raise ValueError("FlagCollection shape should be an iterable object")
else:
raise Exception(
"FlagCollection should be initialized with the shape of the data"
)
super().__init__(self, *args, **kwargs)
def __setitem__(self, item, value, **kwargs):
if isinstance(value, np.ndarray):
if value.shape == self.shape:
super().__setitem__(item, value)
else:
raise ValueError(
f"flags array shape {value.shape} does not match data shape "
f"{self.shape}"
)
else:
raise TypeError("flags should be given as a Numpy array")
| FlagCollection |
python | PyCQA__isort | isort/exceptions.py | {
"start": 3337,
"end": 3632
} | class ____(ISortError):
"""Raised when a formatting plugin is set by the user that doesn't exist"""
def __init__(self, formatter: str):
super().__init__(f"Specified formatting plugin of {formatter} does not exist. ")
self.formatter = formatter
| FormattingPluginDoesNotExist |
python | cython__cython | Cython/Compiler/ExprNodes.py | {
"start": 70134,
"end": 73813
} | class ____(ConstNode):
# A char* or bytes literal
#
# value BytesLiteral
is_string_literal = True
# start off as Python 'bytes' to support len() in O(1)
type = bytes_type
def calculate_constant_result(self):
self.constant_result = self.value
def as_sliced_node(self, start, stop, step=None):
value = StringEncoding.bytes_literal(self.value[start:stop:step], self.value.encoding)
return BytesNode(self.pos, value=value, constant_result=value)
def compile_time_value(self, denv):
return self.value.byteencode()
def analyse_as_type(self, env):
return _analyse_name_as_type(self.value.decode('ISO8859-1'), self.pos, env)
def can_coerce_to_char_literal(self):
return len(self.value) == 1
def coerce_to_boolean(self, env):
# This is special because testing a C char* for truth directly
# would yield the wrong result.
return BoolNode(self.pos, value=bool(self.value))
def coerce_to(self, dst_type, env):
if self.type == dst_type:
return self
if dst_type.is_int:
if not self.can_coerce_to_char_literal():
error(self.pos, "Only single-character string literals can be coerced into ints.")
return self
if dst_type.is_unicode_char:
error(self.pos, "Bytes literals cannot coerce to Py_UNICODE/Py_UCS4, use a unicode literal instead.")
return self
return CharNode(self.pos, value=self.value,
constant_result=ord(self.value))
node = BytesNode(self.pos, value=self.value, constant_result=self.constant_result)
if dst_type.is_pyobject:
if dst_type in (py_object_type, Builtin.bytes_type):
node.type = Builtin.bytes_type
else:
self.check_for_coercion_error(dst_type, env, fail=True)
return node
elif dst_type in (PyrexTypes.c_char_ptr_type, PyrexTypes.c_const_char_ptr_type):
node.type = dst_type
return node
elif dst_type in (PyrexTypes.c_uchar_ptr_type, PyrexTypes.c_const_uchar_ptr_type, PyrexTypes.c_void_ptr_type):
node.type = (PyrexTypes.c_const_char_ptr_type if dst_type.base_type.is_const
else PyrexTypes.c_char_ptr_type)
return CastNode(node, dst_type)
elif dst_type.assignable_from(PyrexTypes.c_char_ptr_type):
# Exclude the case of passing a C string literal into a non-const C++ string.
if not dst_type.is_cpp_class or dst_type.is_const:
node.type = dst_type
return node
# We still need to perform normal coerce_to processing on the
# result, because we might be coercing to an extension type,
# in which case a type test node will be needed.
return ConstNode.coerce_to(node, dst_type, env)
def generate_evaluation_code(self, code):
if self.type.is_pyobject:
result = code.get_py_string_const(self.value)
elif (self.type.is_ptr or self.type.is_array) and self.type.base_type.is_const:
result = code.get_string_const(self.value)
else:
# not const => use plain C string literal and cast to mutable type
literal = self.value.as_c_string_literal()
# C++ may require a cast
result = typecast(self.type, PyrexTypes.c_void_ptr_type, literal)
self.result_code = result
def get_constant_c_result_code(self):
return None # FIXME
def calculate_result_code(self):
return self.result_code
| BytesNode |
python | eventlet__eventlet | eventlet/support/greendns.py | {
"start": 5157,
"end": 6070
} | class ____(dns.resolver.Answer):
"""Answer class for HostsResolver object"""
def __init__(self, qname, rdtype, rdclass, rrset, raise_on_no_answer=True):
"""Create a new answer
:qname: A dns.name.Name instance of the query name
:rdtype: The rdatatype of the query
:rdclass: The rdataclass of the query
:rrset: The dns.rrset.RRset with the response, must have ttl attribute
:raise_on_no_answer: Whether to raise dns.resolver.NoAnswer if no
answer.
"""
self.response = None
self.qname = qname
self.rdtype = rdtype
self.rdclass = rdclass
self.canonical_name = qname
if not rrset and raise_on_no_answer:
raise dns.resolver.NoAnswer()
self.rrset = rrset
self.expiration = (time.time() +
rrset.ttl if hasattr(rrset, 'ttl') else 0)
| HostsAnswer |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 4505,
"end": 4642
} | class ____(IncrementalShopifyGraphQlBulkStream):
bulk_query: MetafieldProductVariant = MetafieldProductVariant
| MetafieldProductVariants |
python | huggingface__transformers | src/transformers/models/sam3_tracker/modular_sam3_tracker.py | {
"start": 3247,
"end": 4681
} | class ____(Sam2Config):
def __init__(
self,
vision_config=None,
prompt_encoder_config=None,
mask_decoder_config=None,
initializer_range=0.02,
**kwargs,
):
vision_config = (
vision_config
if vision_config is not None
else {"backbone_feature_sizes": [[288, 288], [144, 144], [72, 72]]}
)
prompt_encoder_config = prompt_encoder_config if prompt_encoder_config is not None else {}
mask_decoder_config = mask_decoder_config if mask_decoder_config is not None else {}
if isinstance(vision_config, dict):
vision_config["model_type"] = vision_config.get("model_type", "sam3_vision_model")
vision_config = CONFIG_MAPPING[vision_config["model_type"]](**vision_config)
if isinstance(prompt_encoder_config, Sam3TrackerPromptEncoderConfig):
prompt_encoder_config = prompt_encoder_config.to_dict()
if isinstance(mask_decoder_config, Sam3TrackerMaskDecoderConfig):
mask_decoder_config = mask_decoder_config.to_dict()
self.vision_config = vision_config
self.prompt_encoder_config = Sam3TrackerPromptEncoderConfig(**prompt_encoder_config)
self.mask_decoder_config = Sam3TrackerMaskDecoderConfig(**mask_decoder_config)
self.initializer_range = initializer_range
PreTrainedConfig.__init__(**kwargs)
| Sam3TrackerConfig |
python | ethereum__web3.py | web3/_utils/windows.py | {
"start": 191,
"end": 960
} | class ____:
def __init__(self, ipc_path: str) -> None:
try:
self.handle = win32file.CreateFile(
ipc_path,
win32file.GENERIC_READ | win32file.GENERIC_WRITE,
0,
None,
win32file.OPEN_EXISTING,
0,
None,
)
except pywintypes.error as err:
raise OSError(err)
def recv(self, max_length: int) -> str:
(err, data) = win32file.ReadFile(self.handle, max_length)
if err:
raise OSError(err)
return data
def sendall(self, data: str) -> tuple[int, int]:
return win32file.WriteFile(self.handle, data)
def close(self) -> None:
self.handle.close()
| NamedPipe |
python | pydata__xarray | xarray/tests/test_backends.py | {
"start": 269551,
"end": 296605
} | class ____:
def skip_if_zarr_python_3_and_zip_store(self, store) -> None:
if has_zarr_v3 and isinstance(store, zarr.storage.ZipStore):
pytest.skip(
reason="zarr-python 3.x doesn't support reopening ZipStore with a new mode."
)
def test_dataarray_to_zarr_no_name(self, tmp_store) -> None:
self.skip_if_zarr_python_3_and_zip_store(tmp_store)
original_da = DataArray(np.arange(12).reshape((3, 4)))
original_da.to_zarr(tmp_store)
with open_dataarray(tmp_store, engine="zarr") as loaded_da:
assert_identical(original_da, loaded_da)
def test_dataarray_to_zarr_with_name(self, tmp_store) -> None:
self.skip_if_zarr_python_3_and_zip_store(tmp_store)
original_da = DataArray(np.arange(12).reshape((3, 4)), name="test")
original_da.to_zarr(tmp_store)
with open_dataarray(tmp_store, engine="zarr") as loaded_da:
assert_identical(original_da, loaded_da)
def test_dataarray_to_zarr_coord_name_clash(self, tmp_store) -> None:
self.skip_if_zarr_python_3_and_zip_store(tmp_store)
original_da = DataArray(
np.arange(12).reshape((3, 4)), dims=["x", "y"], name="x"
)
original_da.to_zarr(tmp_store)
with open_dataarray(tmp_store, engine="zarr") as loaded_da:
assert_identical(original_da, loaded_da)
def test_open_dataarray_options(self, tmp_store) -> None:
self.skip_if_zarr_python_3_and_zip_store(tmp_store)
data = DataArray(np.arange(5), coords={"y": ("x", range(1, 6))}, dims=["x"])
data.to_zarr(tmp_store)
expected = data.drop_vars("y")
with open_dataarray(tmp_store, engine="zarr", drop_variables=["y"]) as loaded:
assert_identical(expected, loaded)
@requires_dask
def test_dataarray_to_zarr_compute_false(self, tmp_store) -> None:
from dask.delayed import Delayed
skip_if_zarr_format_3(tmp_store)
original_da = DataArray(np.arange(12).reshape((3, 4)))
output = original_da.to_zarr(tmp_store, compute=False)
assert isinstance(output, Delayed)
output.compute()
with open_dataarray(tmp_store, engine="zarr") as loaded_da:
assert_identical(original_da, loaded_da)
@requires_dask
def test_dataarray_to_zarr_align_chunks_true(self, tmp_store) -> None:
# TODO: Improve data integrity checks when using Dask.
# Detecting automatic alignment issues in Dask can be tricky,
# as unintended misalignment might lead to subtle data corruption.
# For now, ensure that the parameter is present, but explore
# more robust verification methods to confirm data consistency.
skip_if_zarr_format_3(tmp_store)
arr = DataArray(
np.arange(4), dims=["a"], coords={"a": np.arange(4)}, name="foo"
).chunk(a=(2, 1, 1))
arr.to_zarr(
tmp_store,
align_chunks=True,
encoding={"foo": {"chunks": (3,)}},
)
with open_dataarray(tmp_store, engine="zarr") as loaded_da:
assert_identical(arr, loaded_da)
@requires_scipy_or_netCDF4
def test_no_warning_from_dask_effective_get() -> None:
with create_tmp_file() as tmpfile:
with assert_no_warnings():
ds = Dataset()
ds.to_netcdf(tmpfile)
@requires_scipy_or_netCDF4
def test_source_encoding_always_present() -> None:
# Test for GH issue #2550.
rnddata = np.random.randn(10)
original = Dataset({"foo": ("x", rnddata)})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(tmp) as ds:
assert ds.encoding["source"] == tmp
@requires_scipy_or_netCDF4
def test_source_encoding_always_present_with_pathlib() -> None:
# Test for GH issue #5888.
rnddata = np.random.randn(10)
original = Dataset({"foo": ("x", rnddata)})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
with open_dataset(Path(tmp)) as ds:
assert ds.encoding["source"] == tmp
@requires_h5netcdf
@requires_fsspec
def test_source_encoding_always_present_with_fsspec() -> None:
import fsspec
rnddata = np.random.randn(10)
original = Dataset({"foo": ("x", rnddata)})
with create_tmp_file() as tmp:
original.to_netcdf(tmp)
fs = fsspec.filesystem("file")
with fs.open(tmp) as f, open_dataset(f) as ds:
assert ds.encoding["source"] == tmp
with fs.open(tmp) as f, open_mfdataset([f]) as ds:
assert "foo" in ds
def _assert_no_dates_out_of_range_warning(record):
undesired_message = "dates out of range"
for warning in record:
assert undesired_message not in str(warning.message)
@requires_scipy_or_netCDF4
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
def test_use_cftime_standard_calendar_default_in_range(calendar) -> None:
x = [0, 1]
time = [0, 720]
units_date = "2000-01-01"
units = "days since 2000-01-01"
original = DataArray(x, [("time", time)], name="x").to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
x_timedeltas = np.array(x).astype("timedelta64[D]")
time_timedeltas = np.array(time).astype("timedelta64[D]")
decoded_x = np.datetime64(units_date, "ns") + x_timedeltas
decoded_time = np.datetime64(units_date, "ns") + time_timedeltas
expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with warnings.catch_warnings(record=True) as record:
with open_dataset(tmp_file) as ds:
assert_identical(expected_x, ds.x)
assert_identical(expected_time, ds.time)
_assert_no_dates_out_of_range_warning(record)
@requires_cftime
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", ["standard", "gregorian"])
def test_use_cftime_standard_calendar_default_out_of_range(calendar) -> None:
# todo: check, if we still need to test for two dates
import cftime
x = [0, 1]
time = [0, 720]
units = "days since 1582-01-01"
original = DataArray(x, [("time", time)], name="x").to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True)
decoded_time = cftime.num2date(
time, units, calendar, only_use_cftime_datetimes=True
)
expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with pytest.warns(SerializationWarning):
with open_dataset(tmp_file) as ds:
assert_identical(expected_x, ds.x)
assert_identical(expected_time, ds.time)
@requires_cftime
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _ALL_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2000, 2500])
def test_use_cftime_true(calendar, units_year) -> None:
import cftime
x = [0, 1]
time = [0, 720]
units = f"days since {units_year}-01-01"
original = DataArray(x, [("time", time)], name="x").to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
decoded_x = cftime.num2date(x, units, calendar, only_use_cftime_datetimes=True)
decoded_time = cftime.num2date(
time, units, calendar, only_use_cftime_datetimes=True
)
expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with warnings.catch_warnings(record=True) as record:
decoder = CFDatetimeCoder(use_cftime=True)
with open_dataset(tmp_file, decode_times=decoder) as ds:
assert_identical(expected_x, ds.x)
assert_identical(expected_time, ds.time)
_assert_no_dates_out_of_range_warning(record)
@requires_scipy_or_netCDF4
@pytest.mark.parametrize("calendar", _STANDARD_CALENDARS)
@pytest.mark.xfail(
has_numpy_2, reason="https://github.com/pandas-dev/pandas/issues/56996"
)
def test_use_cftime_false_standard_calendar_in_range(calendar) -> None:
x = [0, 1]
time = [0, 720]
units_date = "2000-01-01"
units = "days since 2000-01-01"
original = DataArray(x, [("time", time)], name="x").to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
x_timedeltas = np.array(x).astype("timedelta64[D]")
time_timedeltas = np.array(time).astype("timedelta64[D]")
decoded_x = np.datetime64(units_date, "ns") + x_timedeltas
decoded_time = np.datetime64(units_date, "ns") + time_timedeltas
expected_x = DataArray(decoded_x, [("time", decoded_time)], name="x")
expected_time = DataArray(decoded_time, [("time", decoded_time)], name="time")
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
with warnings.catch_warnings(record=True) as record:
coder = xr.coders.CFDatetimeCoder(use_cftime=False)
with open_dataset(tmp_file, decode_times=coder) as ds:
assert_identical(expected_x, ds.x)
assert_identical(expected_time, ds.time)
_assert_no_dates_out_of_range_warning(record)
@requires_scipy_or_netCDF4
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
@pytest.mark.parametrize("calendar", ["standard", "gregorian"])
def test_use_cftime_false_standard_calendar_out_of_range(calendar) -> None:
x = [0, 1]
time = [0, 720]
units = "days since 1582-01-01"
original = DataArray(x, [("time", time)], name="x").to_dataset()
for v in ["x", "time"]:
original[v].attrs["units"] = units
original[v].attrs["calendar"] = calendar
with create_tmp_file() as tmp_file:
original.to_netcdf(tmp_file)
decoder = CFDatetimeCoder(use_cftime=False)
with pytest.raises((OutOfBoundsDatetime, ValueError)):
open_dataset(tmp_file, decode_times=decoder)
@requires_scipy_or_netCDF4
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
@pytest.mark.parametrize("calendar", _NON_STANDARD_CALENDARS)
@pytest.mark.parametrize("units_year", [1500, 2000, 2500])
def test_use_cftime_false_nonstandard_calendar(calendar, units_year) -> None:
    """Non-standard calendars must fail to decode with use_cftime=False."""
    values = [0, 1]
    times = [0, 720]
    units = f"days since {units_year}"
    original = DataArray(values, [("time", times)], name="x").to_dataset()
    for name in ["x", "time"]:
        original[name].attrs["units"] = units
        original[name].attrs["calendar"] = calendar
    with create_tmp_file() as tmp_file:
        original.to_netcdf(tmp_file)
        decoder = CFDatetimeCoder(use_cftime=False)
        # Non-standard calendars have no datetime64 representation at all.
        with pytest.raises((OutOfBoundsDatetime, ValueError)):
            open_dataset(tmp_file, decode_times=decoder)
@pytest.mark.parametrize("engine", ["netcdf4", "scipy"])
def test_invalid_netcdf_raises(engine) -> None:
    """Engines without invalid_netcdf support must reject the option explicitly."""
    ds = create_test_data()
    with pytest.raises(ValueError, match=r"unrecognized option 'invalid_netcdf'"):
        ds.to_netcdf("foo.nc", engine=engine, invalid_netcdf=True)
@requires_zarr
def test_encode_zarr_attr_value() -> None:
    """encode_zarr_attr_value converts numpy values to JSON-friendly equivalents."""
    # An ndarray becomes a plain Python list.
    encoded_list = backends.zarr.encode_zarr_attr_value(np.array([1, 2, 3]))
    assert isinstance(encoded_list, list)
    assert encoded_list == [1, 2, 3]
    # A scalar array becomes a Python scalar.
    encoded_scalar = backends.zarr.encode_zarr_attr_value(np.array(1)[()])
    assert isinstance(encoded_scalar, int)
    assert encoded_scalar == 1
    # A string passes through unchanged.
    encoded_str = backends.zarr.encode_zarr_attr_value("foo")
    assert isinstance(encoded_str, str)
    assert encoded_str == "foo"
@requires_zarr
def test_extract_zarr_variable_encoding() -> None:
    """extract_zarr_variable_encoding handles defaults, overrides, and invalid keys."""
    # Default chunking when nothing is specified in the encoding.
    plain = xr.Variable("x", [1, 2])
    encoding = backends.zarr.extract_zarr_variable_encoding(plain, zarr_format=3)
    assert "chunks" in encoding
    assert encoding["chunks"] == ("auto" if has_zarr_v3 else None)
    # Explicit chunks in the encoding are preserved.
    chunked = xr.Variable("x", [1, 2], encoding={"chunks": (1,)})
    encoding = backends.zarr.extract_zarr_variable_encoding(chunked, zarr_format=3)
    assert encoding["chunks"] == (1,)
    # Unknown encoding keys are tolerated by default ...
    invalid = xr.Variable("x", [1, 2], encoding={"foo": (1,)})
    backends.zarr.extract_zarr_variable_encoding(invalid, zarr_format=3)
    # ... but rejected when raise_on_invalid is set.
    invalid = xr.Variable("x", [1, 2], encoding={"foo": (1,)})
    with pytest.raises(ValueError, match=r"unexpected encoding parameters"):
        backends.zarr.extract_zarr_variable_encoding(
            invalid, raise_on_invalid=True, zarr_format=3
        )
@requires_zarr
@requires_fsspec
@pytest.mark.filterwarnings("ignore:deallocating CachingFileManager")
def test_open_fsspec() -> None:
    """Open zarr stores through fsspec's in-memory filesystem.

    Covers single stores, the ``simplecache::`` chained protocol, and (when
    dask is available) multi-store globbing via ``open_mfdataset``.
    """
    import fsspec

    # Require a zarr whose storage layer supports fsspec-style mappers.
    if not (
        (
            hasattr(zarr.storage, "FSStore")
            and hasattr(zarr.storage.FSStore, "getitems")
        )  # zarr v2
        or hasattr(zarr.storage, "FsspecStore")  # zarr v3
    ):
        pytest.skip("zarr too old")
    ds = open_dataset(os.path.join(os.path.dirname(__file__), "data", "example_1.nc"))
    m = fsspec.filesystem("memory")
    mm = m.get_mapper("out1.zarr")
    ds.to_zarr(mm)  # old interface
    ds0 = ds.copy()
    # pd.to_timedelta returns ns-precision, but the example data is in second precision
    # so we need to fix this
    # NOTE(review): the comment above mentions pd.to_timedelta but the code uses
    # np.timedelta64 — it may be stale; confirm against the original intent.
    ds0["time"] = ds.time + np.timedelta64(1, "D")
    mm = m.get_mapper("out2.zarr")
    ds0.to_zarr(mm)  # old interface
    # single dataset
    url = "memory://out2.zarr"
    ds2 = open_dataset(url, engine="zarr")
    xr.testing.assert_equal(ds0, ds2)
    # single dataset with caching
    url = "simplecache::memory://out2.zarr"
    ds2 = open_dataset(url, engine="zarr")
    xr.testing.assert_equal(ds0, ds2)
    # open_mfdataset requires dask
    if has_dask:
        # multi dataset: the glob matches both out1.zarr and out2.zarr above
        url = "memory://out*.zarr"
        ds2 = open_mfdataset(url, engine="zarr")
        xr.testing.assert_equal(xr.concat([ds, ds0], dim="time"), ds2)
        # multi dataset with caching
        url = "simplecache::memory://out*.zarr"
        ds2 = open_mfdataset(url, engine="zarr")
        xr.testing.assert_equal(xr.concat([ds, ds0], dim="time"), ds2)
@requires_h5netcdf
@requires_netCDF4
def test_load_single_value_h5netcdf(tmp_path: Path) -> None:
    """Loading a single value through h5netcdf must not fail on vector attributes.

    h5netcdf (v0.8.1) exposes single-valued numeric variable attributes as
    length-1 arrays, whereas the netCDF4 backend exposes scalars; this used to
    raise a ValueError when loading a single element (see #4471).
    """
    da = xr.DataArray(
        np.array([0]), dims=("x",), attrs={"scale_factor": 1, "add_offset": 0}
    )
    ds = xr.Dataset({"test": da})
    nc_path = tmp_path / "test.nc"
    ds.to_netcdf(nc_path)
    with xr.open_dataset(nc_path, engine="h5netcdf") as reopened:
        reopened["test"][0].load()
@requires_zarr
@requires_dask
@pytest.mark.parametrize(
    "chunks", ["auto", -1, {}, {"x": "auto"}, {"x": -1}, {"x": "auto", "y": -1}]
)
def test_open_dataset_chunking_zarr(chunks, tmp_path: Path) -> None:
    """Chunks requested at open time match Dataset.chunk for a zarr store."""
    encoded_chunks = 100
    dask_arr = da.from_array(
        np.ones((500, 500), dtype="float64"), chunks=encoded_chunks
    )
    ds = xr.Dataset({"test": xr.DataArray(dask_arr, dims=("x", "y"))})
    ds["test"].encoding["chunks"] = encoded_chunks
    ds.to_zarr(tmp_path / "test.zarr")
    # Pin the dask chunk-size config so "auto" chunking is deterministic.
    with dask.config.set({"array.chunk-size": "1MiB"}):
        expected = ds.chunk(chunks)
        with open_dataset(
            tmp_path / "test.zarr", engine="zarr", chunks=chunks
        ) as actual:
            xr.testing.assert_chunks_equal(actual, expected)
@requires_zarr
@requires_dask
@pytest.mark.parametrize(
    "chunks", ["auto", -1, {}, {"x": "auto"}, {"x": -1}, {"x": "auto", "y": -1}]
)
@pytest.mark.filterwarnings("ignore:The specified chunks separate")
def test_chunking_consintency(chunks, tmp_path: Path) -> None:
    # NOTE(review): the name has a typo ("consintency" -> "consistency"); left
    # unchanged here so the pytest node id is not silently altered.
    """Opening with ``chunks`` yields the same chunking for zarr and netCDF files."""
    encoded_chunks: dict[str, Any] = {}
    dask_arr = da.from_array(
        np.ones((500, 500), dtype="float64"), chunks=encoded_chunks
    )
    ds = xr.Dataset(
        {
            "test": xr.DataArray(
                dask_arr,
                dims=("x", "y"),
            )
        }
    )
    ds["test"].encoding["chunks"] = encoded_chunks
    # Write the same data through both backends.
    ds.to_zarr(tmp_path / "test.zarr")
    ds.to_netcdf(tmp_path / "test.nc")
    # Pin dask's chunk-size config so "auto" chunking is deterministic.
    with dask.config.set({"array.chunk-size": "1MiB"}):
        expected = ds.chunk(chunks)
        with xr.open_dataset(
            tmp_path / "test.zarr", engine="zarr", chunks=chunks
        ) as actual:
            xr.testing.assert_chunks_equal(actual, expected)
        with xr.open_dataset(tmp_path / "test.nc", chunks=chunks) as actual:
            xr.testing.assert_chunks_equal(actual, expected)
def _check_guess_can_open_and_open(entrypoint, obj, engine, expected):
    """Assert *entrypoint* claims *obj* and that opening it yields *expected*."""
    assert entrypoint.guess_can_open(obj)
    with open_dataset(obj, engine=engine) as opened:
        assert_identical(expected, opened)
@requires_netCDF4
def test_netcdf4_entrypoint(tmp_path: Path) -> None:
    """NetCDF4BackendEntrypoint claims local netCDF files, bytes, and remote URLs."""
    entrypoint = NetCDF4BackendEntrypoint()
    ds = create_test_data()

    # Files in either classic format are claimed and readable, whether
    # referenced by Path or by str.
    for name, fmt in [("foo", "NETCDF3_CLASSIC"), ("bar", "NETCDF4_CLASSIC")]:
        path = tmp_path / name
        ds.to_netcdf(path, format=fmt)
        _check_guess_can_open_and_open(entrypoint, path, engine="netcdf4", expected=ds)
        _check_guess_can_open_and_open(
            entrypoint, str(path), engine="netcdf4", expected=ds
        )

    # Remote URLs without extensions return True (backward compatibility)
    assert entrypoint.guess_can_open("http://something/remote")
    # Remote URLs with netCDF extensions are also claimed
    assert entrypoint.guess_can_open("http://something/remote.nc")
    assert entrypoint.guess_can_open("something-local.nc")
    assert entrypoint.guess_can_open("something-local.nc4")
    assert entrypoint.guess_can_open("something-local.cdf")
    assert not entrypoint.guess_can_open("not-found-and-no-extension")

    # In-memory bytes produced by to_netcdf are recognized too.
    contents = ds.to_netcdf(engine="netcdf4")
    _check_guess_can_open_and_open(entrypoint, contents, engine="netcdf4", expected=ds)

    # A file that exists but lacks the netCDF magic bytes is rejected.
    path = tmp_path / "baz"
    path.write_bytes(b"not-a-netcdf-file")
    assert not entrypoint.guess_can_open(path)
@requires_scipy
def test_scipy_entrypoint(tmp_path: Path) -> None:
    """ScipyBackendEntrypoint claims netCDF3 paths, file objects, bytes, and gzip."""
    entrypoint = ScipyBackendEntrypoint()
    ds = create_test_data()

    # On-disk file, referenced by Path, str, and an open binary file object.
    path = tmp_path / "foo"
    ds.to_netcdf(path, engine="scipy")
    _check_guess_can_open_and_open(entrypoint, path, engine="scipy", expected=ds)
    _check_guess_can_open_and_open(entrypoint, str(path), engine="scipy", expected=ds)
    with open(path, "rb") as f:
        _check_guess_can_open_and_open(entrypoint, f, engine="scipy", expected=ds)

    # In-memory bytes and a BytesIO wrapper around them.
    contents = ds.to_netcdf(engine="scipy")
    _check_guess_can_open_and_open(entrypoint, contents, engine="scipy", expected=ds)
    _check_guess_can_open_and_open(
        entrypoint, BytesIO(contents), engine="scipy", expected=ds
    )

    # Gzipped netCDF3 files are also supported.
    gz_path = tmp_path / "foo.nc.gz"
    with gzip.open(gz_path, mode="wb") as f:
        f.write(contents)
    _check_guess_can_open_and_open(entrypoint, gz_path, engine="scipy", expected=ds)
    _check_guess_can_open_and_open(
        entrypoint, str(gz_path), engine="scipy", expected=ds
    )

    assert entrypoint.guess_can_open("something-local.nc")
    assert entrypoint.guess_can_open("something-local.nc.gz")
    assert not entrypoint.guess_can_open("not-found-and-no-extension")
    assert not entrypoint.guess_can_open(b"not-a-netcdf-file")
    # Should not claim .gz files that aren't netCDF
    assert not entrypoint.guess_can_open("something.zarr.gz")
    assert not entrypoint.guess_can_open("something.tar.gz")
    assert not entrypoint.guess_can_open("something.txt.gz")
@requires_h5netcdf
def test_h5netcdf_entrypoint(tmp_path: Path) -> None:
    """H5netcdfBackendEntrypoint claims netCDF4/HDF5 data in all supported forms."""
    entrypoint = H5netcdfBackendEntrypoint()
    ds = create_test_data()

    # On-disk file, referenced by Path, str, and an open binary file object.
    path = tmp_path / "foo"
    ds.to_netcdf(path, engine="h5netcdf")
    _check_guess_can_open_and_open(entrypoint, path, engine="h5netcdf", expected=ds)
    _check_guess_can_open_and_open(
        entrypoint, str(path), engine="h5netcdf", expected=ds
    )
    with open(path, "rb") as f:
        _check_guess_can_open_and_open(entrypoint, f, engine="h5netcdf", expected=ds)

    # In-memory bytes produced by to_netcdf are recognized too.
    contents = ds.to_netcdf(engine="h5netcdf")
    _check_guess_can_open_and_open(entrypoint, contents, engine="h5netcdf", expected=ds)

    assert entrypoint.guess_can_open("something-local.nc")
    assert entrypoint.guess_can_open("something-local.nc4")
    assert entrypoint.guess_can_open("something-local.cdf")
    assert not entrypoint.guess_can_open("not-found-and-no-extension")
@requires_zarr
def test_zarr_entrypoint(tmp_path: Path) -> None:
    """ZarrBackendEntrypoint claims .zarr stores, with or without a trailing slash."""
    from xarray.backends.zarr import ZarrBackendEntrypoint

    entrypoint = ZarrBackendEntrypoint()
    ds = create_test_data()

    store = tmp_path / "foo.zarr"
    ds.to_zarr(store)
    _check_guess_can_open_and_open(entrypoint, store, engine="zarr", expected=ds)
    _check_guess_can_open_and_open(entrypoint, str(store), engine="zarr", expected=ds)
    # add a trailing slash to the path and check again
    _check_guess_can_open_and_open(
        entrypoint, str(store) + "/", engine="zarr", expected=ds
    )

    # Test the new functionality: .zarr with trailing slash
    assert entrypoint.guess_can_open("something-local.zarr")
    assert entrypoint.guess_can_open("something-local.zarr/")  # With trailing slash
    assert not entrypoint.guess_can_open("something-local.nc")
    assert not entrypoint.guess_can_open("not-found-and-no-extension")
    assert not entrypoint.guess_can_open("something.zarr.txt")
@requires_h5netcdf
@requires_netCDF4
@requires_zarr
def test_remote_url_backend_auto_detection() -> None:
    """
    Test that remote URLs are correctly selected by the backend resolution system.

    This tests the fix for issue where netCDF4, h5netcdf, and pydap backends were
    claiming ALL remote URLs, preventing remote Zarr stores from being
    auto-detected.

    See: https://github.com/pydata/xarray/issues/10801
    """
    from xarray.backends.plugins import guess_engine

    # Test cases: (url, expected_backend)
    test_cases = [
        # Remote Zarr URLs
        ("https://example.com/store.zarr", "zarr"),
        ("http://example.com/data.zarr/", "zarr"),
        ("s3://bucket/path/to/data.zarr", "zarr"),
        # Remote netCDF URLs (non-DAP) - netcdf4 wins (first in order, no query params)
        ("https://example.com/file.nc", "netcdf4"),
        ("http://example.com/data.nc4", "netcdf4"),
        ("https://example.com/test.cdf", "netcdf4"),
        ("s3://bucket/path/to/data.nc", "netcdf4"),
        # Remote netCDF URLs with query params - netcdf4 wins
        # Note: Query params are typically indicative of DAP URLs (e.g., OPeNDAP constraint expressions),
        # so we prefer netcdf4 (which has DAP support) over h5netcdf (which doesn't)
        ("https://example.com/data.nc?var=temperature&time=0", "netcdf4"),
        (
            "http://test.opendap.org/opendap/dap4/StaggeredGrid.nc4?dap4.ce=/time[0:1:0]",
            "netcdf4",
        ),
        # DAP URLs with .nc extensions (no query params) - netcdf4 wins (first in order)
        ("http://test.opendap.org/opendap/dap4/StaggeredGrid.nc4", "netcdf4"),
        ("https://example.com/DAP4/data.nc", "netcdf4"),
        ("http://example.com/data/Dap4/file.nc", "netcdf4"),
    ]
    for url, expected_backend in test_cases:
        engine = guess_engine(url)
        assert engine == expected_backend, (
            f"URL {url!r} should select {expected_backend!r} but got {engine!r}"
        )

    # DAP URLs - netcdf4 should handle these (it comes first in backend order)
    # Both netcdf4 and pydap can open DAP URLs, but netcdf4 has priority
    expected_dap_backend = "netcdf4"
    dap_urls = [
        # Explicit DAP protocol schemes
        "dap2://opendap.earthdata.nasa.gov/collections/dataset",
        "dap4://opendap.earthdata.nasa.gov/collections/dataset",
        "dap://example.com/dataset",
        "DAP2://example.com/dataset",  # uppercase scheme
        "DAP4://example.com/dataset",  # uppercase scheme
        # DAP path indicators
        "https://example.com/services/DAP2/dataset",  # uppercase in path
        "http://test.opendap.org/opendap/data/nc/file.nc",  # /opendap/ path
        "https://coastwatch.pfeg.noaa.gov/erddap/griddap/erdMH1chla8day",  # ERDDAP
        "http://thredds.ucar.edu/thredds/dodsC/grib/NCEP/GFS/",  # THREDDS dodsC
        "https://disc2.gesdisc.eosdis.nasa.gov/dods/TRMM_3B42",  # GrADS /dods/
    ]
    for url in dap_urls:
        engine = guess_engine(url)
        assert engine == expected_dap_backend, (
            f"URL {url!r} should select {expected_dap_backend!r} but got {engine!r}"
        )

    # URLs with .dap suffix are claimed by netcdf4 (backward compatibility fallback)
    # Note: .dap suffix is intentionally NOT recognized as a DAP dataset URL
    fallback_urls = [
        ("http://test.opendap.org/opendap/data/nc/coads_climatology.nc.dap", "netcdf4"),
        ("https://example.com/data.dap", "netcdf4"),
    ]
    for url, expected_backend in fallback_urls:
        engine = guess_engine(url)
        assert engine == expected_backend
@requires_netCDF4
@pytest.mark.parametrize("str_type", (str, np.str_))
def test_write_file_from_np_str(str_type: type[str | np.str_], tmpdir: str) -> None:
    """Writing data indexed by numpy str_ labels must not fail.

    Regression test, see https://github.com/pydata/xarray/pull/5264.
    """
    scenarios = [str_type(v) for v in ["scenario_a", "scenario_b", "scenario_c"]]
    years = range(2015, 2100 + 1)
    frame = pd.DataFrame(
        np.random.random((len(scenarios), len(years))),
        index=scenarios,
        columns=years,
    )
    frame.index.name = "scenario"
    frame.columns.name = "year"
    stacked = cast(pd.DataFrame, frame.stack())
    stacked.name = "tas"
    stacked.to_xarray().to_netcdf(tmpdir.join("test.nc"))
@requires_zarr
@requires_netCDF4
| TestDataArrayToZarr |
python | kamyu104__LeetCode-Solutions | Python/minimum-absolute-difference-queries.py | {
"start": 926,
"end": 1736
} | class ____(object):
def minDifference(self, nums, queries):
"""
:type nums: List[int]
:type queries: List[List[int]]
:rtype: List[int]
"""
INF = float("inf")
idxs = [[] for _ in xrange(max(nums)+1)]
for i, num in enumerate(nums):
idxs[num].append(i)
result = []
for l, r in queries:
min_diff, prev = INF, -1
for num in xrange(len(idxs)):
i = bisect.bisect_left(idxs[num], l)
if not (i < len(idxs[num]) and idxs[num][i] <= r):
continue
if prev != -1:
min_diff = min(min_diff, num-prev)
prev = num
result.append(min_diff if min_diff != INF else -1)
return result
| Solution2 |
python | huggingface__transformers | src/transformers/models/internvl/modular_internvl.py | {
"start": 10927,
"end": 11048
} | class ____(CLIPMLP):
pass
NORM2FN = {"layer_norm": nn.LayerNorm, "rms_norm": InternVLVisionRMSNorm}
| InternVLVisionMLP |
python | walkccc__LeetCode | solutions/359. Logger Rate Limiter/359.py | {
"start": 0,
"end": 654
} | class ____:
def __init__(self):
# [(timestamp, message)]
self.messageQueue = collections.deque()
self.messageSet = set()
def shouldPrintMessage(self, timestamp: int, message: str) -> bool:
# Remove the messages that are 10 seconds from the current timestamp.
while self.messageQueue:
headTimestamp, headMessage = self.messageQueue[0]
if timestamp < headTimestamp + 10:
break
self.messageQueue.popleft()
self.messageSet.remove(headMessage)
if message in self.messageSet:
return False
self.messageQueue.append((timestamp, message))
self.messageSet.add(message)
return True
| Logger |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/cursor_shapes.py | {
"start": 1628,
"end": 1977
} | class ____(CursorShapeConfig):
"""
Always show the given cursor shape.
"""
def __init__(self, cursor_shape: CursorShape = CursorShape._NEVER_CHANGE) -> None:
self.cursor_shape = cursor_shape
def get_cursor_shape(self, application: Application[Any]) -> CursorShape:
return self.cursor_shape
| SimpleCursorShapeConfig |
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/mappedoperator.py | {
"start": 11432,
"end": 32754
} | class ____(AbstractOperator):
"""Object representing a mapped operator in a Dag."""
operator_class: type[BaseOperator]
_is_mapped: bool = attrs.field(init=False, default=True)
expand_input: ExpandInput
partial_kwargs: dict[str, Any]
# Needed for serialization.
task_id: str
params: ParamsDict | dict
operator_extra_links: Collection[BaseOperatorLink]
template_ext: Sequence[str]
template_fields: Collection[str]
template_fields_renderers: dict[str, str]
ui_color: str
ui_fgcolor: str
_is_empty: bool = attrs.field(alias="is_empty")
_can_skip_downstream: bool = attrs.field(alias="can_skip_downstream")
_is_sensor: bool = attrs.field(alias="is_sensor", default=False)
_task_module: str
task_type: str
_operator_name: str
start_trigger_args: StartTriggerArgs | None
start_from_trigger: bool
_needs_expansion: bool = True
dag: DAG | None
task_group: TaskGroup | None
start_date: pendulum.DateTime | None
end_date: pendulum.DateTime | None
upstream_task_ids: set[str] = attrs.field(factory=set, init=False)
downstream_task_ids: set[str] = attrs.field(factory=set, init=False)
_disallow_kwargs_override: bool
"""Whether execution fails if ``expand_input`` has duplicates to ``partial_kwargs``.
If *False*, values from ``expand_input`` under duplicate keys override those
under corresponding keys in ``partial_kwargs``.
"""
_expand_input_attr: str
"""Where to get kwargs to calculate expansion length against.
This should be a name to call ``getattr()`` on.
"""
HIDE_ATTRS_FROM_UI: ClassVar[frozenset[str]] = AbstractOperator.HIDE_ATTRS_FROM_UI | frozenset(
("parse_time_mapped_ti_count", "operator_class", "start_trigger_args", "start_from_trigger")
)
def __hash__(self):
return id(self)
def __repr__(self):
return f"<Mapped({self.task_type}): {self.task_id}>"
def __attrs_post_init__(self):
from airflow.sdk.definitions.xcom_arg import XComArg
if self.get_closest_mapped_task_group() is not None:
raise NotImplementedError("operator expansion in an expanded task group is not yet supported")
if self.task_group:
self.task_group.add(self)
if self.dag:
self.dag.add_task(self)
XComArg.apply_upstream_relationship(self, self._get_specified_expand_input().value)
for k, v in self.partial_kwargs.items():
if k in self.template_fields:
XComArg.apply_upstream_relationship(self, v)
@methodtools.lru_cache(maxsize=None)
@classmethod
def get_serialized_fields(cls):
# Not using 'cls' here since we only want to serialize base fields.
return (frozenset(attrs.fields_dict(MappedOperator))) - {
"_is_empty",
"_can_skip_downstream",
"dag",
"deps",
"expand_input", # This is needed to be able to accept XComArg.
"task_group",
"upstream_task_ids",
"_is_setup",
"_is_teardown",
"_on_failure_fail_dagrun",
"operator_class",
"_needs_expansion",
"partial_kwargs",
"operator_extra_links",
}
@property
def operator_name(self) -> str:
return self._operator_name
@property
def roots(self) -> Sequence[AbstractOperator]:
"""Implementing DAGNode."""
return [self]
@property
def leaves(self) -> Sequence[AbstractOperator]:
"""Implementing DAGNode."""
return [self]
@property
def task_display_name(self) -> str:
return self.partial_kwargs.get("task_display_name") or self.task_id
@property
def owner(self) -> str:
return self.partial_kwargs.get("owner", DEFAULT_OWNER)
@owner.setter
def owner(self, value: str) -> None:
self.partial_kwargs["owner"] = value
@property
def email(self) -> None | str | Iterable[str]:
return self.partial_kwargs.get("email")
@property
def email_on_failure(self) -> bool:
return self.partial_kwargs.get("email_on_failure", True)
@property
def email_on_retry(self) -> bool:
return self.partial_kwargs.get("email_on_retry", True)
@property
def map_index_template(self) -> None | str:
return self.partial_kwargs.get("map_index_template")
@map_index_template.setter
def map_index_template(self, value: str | None) -> None:
self.partial_kwargs["map_index_template"] = value
@property
def trigger_rule(self) -> TriggerRule:
return self.partial_kwargs.get("trigger_rule", DEFAULT_TRIGGER_RULE)
@trigger_rule.setter
def trigger_rule(self, value):
self.partial_kwargs["trigger_rule"] = value
@property
def is_setup(self) -> bool:
return bool(self.partial_kwargs.get("is_setup"))
@is_setup.setter
def is_setup(self, value: bool) -> None:
self.partial_kwargs["is_setup"] = value
@property
def is_teardown(self) -> bool:
return bool(self.partial_kwargs.get("is_teardown"))
@is_teardown.setter
def is_teardown(self, value: bool) -> None:
self.partial_kwargs["is_teardown"] = value
@property
def depends_on_past(self) -> bool:
return bool(self.partial_kwargs.get("depends_on_past"))
@depends_on_past.setter
def depends_on_past(self, value: bool) -> None:
self.partial_kwargs["depends_on_past"] = value
@property
def ignore_first_depends_on_past(self) -> bool:
value = self.partial_kwargs.get("ignore_first_depends_on_past", DEFAULT_IGNORE_FIRST_DEPENDS_ON_PAST)
return bool(value)
@ignore_first_depends_on_past.setter
def ignore_first_depends_on_past(self, value: bool) -> None:
self.partial_kwargs["ignore_first_depends_on_past"] = value
@property
def wait_for_past_depends_before_skipping(self) -> bool:
value = self.partial_kwargs.get(
"wait_for_past_depends_before_skipping", DEFAULT_WAIT_FOR_PAST_DEPENDS_BEFORE_SKIPPING
)
return bool(value)
@wait_for_past_depends_before_skipping.setter
def wait_for_past_depends_before_skipping(self, value: bool) -> None:
self.partial_kwargs["wait_for_past_depends_before_skipping"] = value
@property
def wait_for_downstream(self) -> bool:
return bool(self.partial_kwargs.get("wait_for_downstream"))
@wait_for_downstream.setter
def wait_for_downstream(self, value: bool) -> None:
self.partial_kwargs["wait_for_downstream"] = value
@property
def retries(self) -> int:
return self.partial_kwargs.get("retries", DEFAULT_RETRIES)
@retries.setter
def retries(self, value: int) -> None:
self.partial_kwargs["retries"] = value
@property
def queue(self) -> str:
return self.partial_kwargs.get("queue", DEFAULT_QUEUE)
@queue.setter
def queue(self, value: str) -> None:
self.partial_kwargs["queue"] = value
@property
def pool(self) -> str:
return self.partial_kwargs.get("pool", DEFAULT_POOL_NAME)
@pool.setter
def pool(self, value: str) -> None:
self.partial_kwargs["pool"] = value
@property
def pool_slots(self) -> int:
return self.partial_kwargs.get("pool_slots", DEFAULT_POOL_SLOTS)
@pool_slots.setter
def pool_slots(self, value: int) -> None:
self.partial_kwargs["pool_slots"] = value
@property
def execution_timeout(self) -> datetime.timedelta | None:
return self.partial_kwargs.get("execution_timeout")
@execution_timeout.setter
def execution_timeout(self, value: datetime.timedelta | None) -> None:
self.partial_kwargs["execution_timeout"] = value
@property
def max_retry_delay(self) -> datetime.timedelta | None:
return self.partial_kwargs.get("max_retry_delay")
@max_retry_delay.setter
def max_retry_delay(self, value: datetime.timedelta | None) -> None:
self.partial_kwargs["max_retry_delay"] = value
@property
def retry_delay(self) -> datetime.timedelta:
return self.partial_kwargs.get("retry_delay", DEFAULT_RETRY_DELAY)
@retry_delay.setter
def retry_delay(self, value: datetime.timedelta) -> None:
self.partial_kwargs["retry_delay"] = value
@property
def retry_exponential_backoff(self) -> float:
value = self.partial_kwargs.get("retry_exponential_backoff", 0)
if value is True:
return 2.0
if value is False:
return 0.0
return float(value)
@retry_exponential_backoff.setter
def retry_exponential_backoff(self, value: float) -> None:
self.partial_kwargs["retry_exponential_backoff"] = value
@property
def priority_weight(self) -> int:
return self.partial_kwargs.get("priority_weight", DEFAULT_PRIORITY_WEIGHT)
@priority_weight.setter
def priority_weight(self, value: int) -> None:
self.partial_kwargs["priority_weight"] = value
@property
def weight_rule(self) -> PriorityWeightStrategy:
return validate_and_load_priority_weight_strategy(
self.partial_kwargs.get("weight_rule", DEFAULT_WEIGHT_RULE)
)
@weight_rule.setter
def weight_rule(self, value: str | PriorityWeightStrategy) -> None:
self.partial_kwargs["weight_rule"] = validate_and_load_priority_weight_strategy(value)
@property
def max_active_tis_per_dag(self) -> int | None:
return self.partial_kwargs.get("max_active_tis_per_dag")
@max_active_tis_per_dag.setter
def max_active_tis_per_dag(self, value: int | None) -> None:
self.partial_kwargs["max_active_tis_per_dag"] = value
@property
def max_active_tis_per_dagrun(self) -> int | None:
return self.partial_kwargs.get("max_active_tis_per_dagrun")
@max_active_tis_per_dagrun.setter
def max_active_tis_per_dagrun(self, value: int | None) -> None:
self.partial_kwargs["max_active_tis_per_dagrun"] = value
@property
def resources(self) -> Resources | None:
return self.partial_kwargs.get("resources")
@property
def on_execute_callback(self) -> TaskStateChangeCallbackAttrType:
return self.partial_kwargs.get("on_execute_callback") or []
@on_execute_callback.setter
def on_execute_callback(self, value: TaskStateChangeCallbackAttrType) -> None:
self.partial_kwargs["on_execute_callback"] = value or []
@property
def on_failure_callback(self) -> TaskStateChangeCallbackAttrType:
return self.partial_kwargs.get("on_failure_callback") or []
@on_failure_callback.setter
def on_failure_callback(self, value: TaskStateChangeCallbackAttrType) -> None:
self.partial_kwargs["on_failure_callback"] = value or []
@property
def on_retry_callback(self) -> TaskStateChangeCallbackAttrType:
return self.partial_kwargs.get("on_retry_callback") or []
@on_retry_callback.setter
def on_retry_callback(self, value: TaskStateChangeCallbackAttrType) -> None:
self.partial_kwargs["on_retry_callback"] = value or []
@property
def on_success_callback(self) -> TaskStateChangeCallbackAttrType:
return self.partial_kwargs.get("on_success_callback") or []
@on_success_callback.setter
def on_success_callback(self, value: TaskStateChangeCallbackAttrType) -> None:
self.partial_kwargs["on_success_callback"] = value or []
@property
def on_skipped_callback(self) -> TaskStateChangeCallbackAttrType:
return self.partial_kwargs.get("on_skipped_callback") or []
@on_skipped_callback.setter
def on_skipped_callback(self, value: TaskStateChangeCallbackAttrType) -> None:
self.partial_kwargs["on_skipped_callback"] = value or []
@property
def has_on_execute_callback(self) -> bool:
return bool(self.on_execute_callback)
@property
def has_on_failure_callback(self) -> bool:
return bool(self.on_failure_callback)
@property
def has_on_retry_callback(self) -> bool:
return bool(self.on_retry_callback)
@property
def has_on_success_callback(self) -> bool:
return bool(self.on_success_callback)
@property
def has_on_skipped_callback(self) -> bool:
return bool(self.on_skipped_callback)
@property
def run_as_user(self) -> str | None:
return self.partial_kwargs.get("run_as_user")
@property
def executor(self) -> str | None:
return self.partial_kwargs.get("executor", DEFAULT_EXECUTOR)
@property
def executor_config(self) -> dict:
return self.partial_kwargs.get("executor_config", {})
@property
def inlets(self) -> list[Any]:
return self.partial_kwargs.get("inlets", [])
@inlets.setter
def inlets(self, value: list[Any]) -> None:
self.partial_kwargs["inlets"] = value
@property
def outlets(self) -> list[Any]:
return self.partial_kwargs.get("outlets", [])
@outlets.setter
def outlets(self, value: list[Any]) -> None:
self.partial_kwargs["outlets"] = value
@property
def doc(self) -> str | None:
return self.partial_kwargs.get("doc")
@property
def doc_md(self) -> str | None:
return self.partial_kwargs.get("doc_md")
@property
def doc_json(self) -> str | None:
return self.partial_kwargs.get("doc_json")
@property
def doc_yaml(self) -> str | None:
return self.partial_kwargs.get("doc_yaml")
@property
def doc_rst(self) -> str | None:
return self.partial_kwargs.get("doc_rst")
@property
def allow_nested_operators(self) -> bool:
return bool(self.partial_kwargs.get("allow_nested_operators"))
def get_dag(self) -> DAG | None:
"""Implement Operator."""
return self.dag
@property
def output(self) -> XComArg:
"""Return reference to XCom pushed by current operator."""
from airflow.sdk.definitions.xcom_arg import XComArg
return XComArg(operator=self)
def serialize_for_task_group(self) -> tuple[DagAttributeTypes, Any]:
"""Implement DAGNode."""
return DagAttributeTypes.OP, self.task_id
def _expand_mapped_kwargs(self, context: Mapping[str, Any]) -> tuple[Mapping[str, Any], set[int]]:
"""
Get the kwargs to create the unmapped operator.
This exists because taskflow operators expand against op_kwargs, not the
entire operator kwargs dict.
"""
return self._get_specified_expand_input().resolve(context)
def _get_unmap_kwargs(self, mapped_kwargs: Mapping[str, Any], *, strict: bool) -> dict[str, Any]:
"""
Get init kwargs to unmap the underlying operator class.
:param mapped_kwargs: The dict returned by ``_expand_mapped_kwargs``.
"""
if strict:
prevent_duplicates(
self.partial_kwargs,
mapped_kwargs,
fail_reason="unmappable or already specified",
)
# If params appears in the mapped kwargs, we need to merge it into the
# partial params, overriding existing keys.
params = copy.copy(self.params)
with contextlib.suppress(KeyError):
params.update(mapped_kwargs["params"])
# Ordering is significant; mapped kwargs should override partial ones,
# and the specially handled params should be respected.
return {
"task_id": self.task_id,
"dag": self.dag,
"task_group": self.task_group,
"start_date": self.start_date,
"end_date": self.end_date,
**self.partial_kwargs,
**mapped_kwargs,
"params": params,
}
def unmap(self, resolve: None | Mapping[str, Any]) -> BaseOperator:
"""
Get the "normal" Operator after applying the current mapping.
:meta private:
"""
if isinstance(resolve, Mapping):
kwargs = resolve
elif resolve is not None:
kwargs, _ = self._expand_mapped_kwargs(*resolve)
else:
raise RuntimeError("cannot unmap a non-serialized operator without context")
kwargs = self._get_unmap_kwargs(kwargs, strict=self._disallow_kwargs_override)
is_setup = kwargs.pop("is_setup", False)
is_teardown = kwargs.pop("is_teardown", False)
on_failure_fail_dagrun = kwargs.pop("on_failure_fail_dagrun", False)
kwargs["task_id"] = self.task_id
op = self.operator_class(**kwargs, _airflow_from_mapped=True)
op.is_setup = is_setup
op.is_teardown = is_teardown
op.on_failure_fail_dagrun = on_failure_fail_dagrun
op.downstream_task_ids = self.downstream_task_ids
op.upstream_task_ids = self.upstream_task_ids
return op
def _get_specified_expand_input(self) -> ExpandInput:
"""Input received from the expand call on the operator."""
return getattr(self, self._expand_input_attr)
def prepare_for_execution(self) -> MappedOperator:
# Since a mapped operator cannot be used for execution, and an unmapped
# BaseOperator needs to be created later (see render_template_fields),
# we don't need to create a copy of the MappedOperator here.
return self
# TODO (GH-52141): Do we need this in the SDK?
def iter_mapped_dependencies(self) -> Iterator[AbstractOperator]:
"""Upstream dependencies that provide XComs used by this task for task mapping."""
from airflow.sdk.definitions.xcom_arg import XComArg
for operator, _ in XComArg.iter_xcom_references(self._get_specified_expand_input()):
yield operator
@methodtools.lru_cache(maxsize=None)
def get_parse_time_mapped_ti_count(self) -> int:
current_count = self._get_specified_expand_input().get_parse_time_mapped_ti_count()
try:
# The use of `methodtools` interferes with the zero-arg super
parent_count = super(MappedOperator, self).get_parse_time_mapped_ti_count() # noqa: UP008
except NotMapped:
return current_count
return parent_count * current_count
def render_template_fields(
self,
context: Context,
jinja_env: jinja2.Environment | None = None,
) -> None:
"""
Template all attributes listed in *self.template_fields*.
This updates *context* to reference the map-expanded task and relevant
information, without modifying the mapped operator. The expanded task
in *context* is then rendered in-place.
:param context: Context dict with values to apply on content.
:param jinja_env: Jinja environment to use for rendering.
"""
from airflow.sdk.execution_time.context import context_update_for_unmapped
if not jinja_env:
jinja_env = self.get_template_env()
mapped_kwargs, seen_oids = self._expand_mapped_kwargs(context)
unmapped_task = self.unmap(mapped_kwargs)
context_update_for_unmapped(context, unmapped_task)
# Since the operators that extend `BaseOperator` are not subclasses of
# `MappedOperator`, we need to call `_do_render_template_fields` from
# the unmapped task in order to call the operator method when we override
# it to customize the parsing of nested fields.
unmapped_task._do_render_template_fields(
parent=unmapped_task,
template_fields=self.template_fields,
context=context,
jinja_env=jinja_env,
seen_oids=seen_oids,
)
def expand_start_trigger_args(self, *, context: Context) -> StartTriggerArgs | None:
"""
Get the kwargs to create the unmapped start_trigger_args.
This method is for allowing mapped operator to start execution from triggerer.
"""
from airflow.triggers.base import StartTriggerArgs
if not self.start_trigger_args:
return None
mapped_kwargs, _ = self._expand_mapped_kwargs(context)
if self._disallow_kwargs_override:
prevent_duplicates(
self.partial_kwargs,
mapped_kwargs,
fail_reason="unmappable or already specified",
)
# Ordering is significant; mapped kwargs should override partial ones.
trigger_kwargs = mapped_kwargs.get(
"trigger_kwargs",
self.partial_kwargs.get("trigger_kwargs", self.start_trigger_args.trigger_kwargs),
)
next_kwargs = mapped_kwargs.get(
"next_kwargs",
self.partial_kwargs.get("next_kwargs", self.start_trigger_args.next_kwargs),
)
timeout = mapped_kwargs.get(
"trigger_timeout", self.partial_kwargs.get("trigger_timeout", self.start_trigger_args.timeout)
)
return StartTriggerArgs(
trigger_cls=self.start_trigger_args.trigger_cls,
trigger_kwargs=trigger_kwargs,
next_method=self.start_trigger_args.next_method,
next_kwargs=next_kwargs,
timeout=timeout,
)
| MappedOperator |
python | getsentry__sentry | tests/sentry/partnerships/test_base.py | {
"start": 95,
"end": 356
} | class ____(TestCase):
def setUp(self) -> None:
self.backend = Partnership()
def test_get_inbound_filters(self) -> None:
org = self.create_organization()
assert self.backend.get_inbound_filters(organization=org) == []
| PartnershipTest |
python | astropy__astropy | astropy/cosmology/_src/tests/flrw/test_parameters.py | {
"start": 18880,
"end": 20267
} | class ____(ParameterOde0TestMixin):
"""Tests for `astropy.cosmology.Parameter` Ode0 on a flat Cosmology.
This will augment or override some tests in ``ParameterOde0TestMixin``.
Ode0 is a descriptor, which are tested by mixin, here with ``TestFLRW``.
These tests expect dicts ``_cls_args`` and ``cls_kwargs`` which give the
args and kwargs for the cosmology class, respectively. See ``TestFLRW``.
"""
def test_Parameter_Ode0(self, cosmo_cls: type[Cosmology]):
"""Test Parameter ``Ode0`` on the class."""
super().test_Parameter_Ode0(cosmo_cls)
Ode0 = cosmo_cls.parameters.get("Ode0", cosmo_cls._derived_parameters["Ode0"])
assert Ode0.derived in (True, np.True_)
def test_Ode0(self, cosmo: Cosmology):
"""Test no-longer-Parameter ``Ode0``."""
assert cosmo.Ode0 is cosmo.__dict__["Ode0"]
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0)
def test_init_Ode0(self, cosmo_cls: type[Cosmology], ba: BoundArguments):
"""Test initialization for values of ``Ode0``."""
cosmo = cosmo_cls(*ba.args, **ba.kwargs)
assert cosmo.Ode0 == 1.0 - (cosmo.Om0 + cosmo.Ogamma0 + cosmo.Onu0 + cosmo.Ok0)
# Ode0 is not in the signature
with pytest.raises(TypeError, match="Ode0"):
cosmo_cls(*ba.args, **ba.kwargs, Ode0=1)
| ParameterFlatOde0TestMixin |
python | pytorch__pytorch | test/jit/test_tensor_creation_ops.py | {
"start": 355,
"end": 2930
} | class ____(JitTestCase):
"""
A suite of tests for ops that create tensors.
"""
def test_randperm_default_dtype(self):
def randperm(x: int):
perm = torch.randperm(x)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.int64
self.checkScript(randperm, (3,))
def test_randperm_specifed_dtype(self):
def randperm(x: int):
perm = torch.randperm(x, dtype=torch.float)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.float
self.checkScript(randperm, (3,))
def test_triu_indices_default_dtype(self):
def triu_indices(rows: int, cols: int):
indices = torch.triu_indices(rows, cols)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert indices.dtype == torch.int64
self.checkScript(triu_indices, (3, 3))
def test_triu_indices_specified_dtype(self):
def triu_indices(rows: int, cols: int):
indices = torch.triu_indices(rows, cols, dtype=torch.int32)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert indices.dtype == torch.int32
self.checkScript(triu_indices, (3, 3))
def test_tril_indices_default_dtype(self):
def tril_indices(rows: int, cols: int):
indices = torch.tril_indices(rows, cols)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert indices.dtype == torch.int64
self.checkScript(tril_indices, (3, 3))
def test_tril_indices_specified_dtype(self):
def tril_indices(rows: int, cols: int):
indices = torch.tril_indices(rows, cols, dtype=torch.int32)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert indices.dtype == torch.int32
self.checkScript(tril_indices, (3, 3))
if __name__ == "__main__":
raise_on_run_directly("test/test_jit.py")
| TestTensorCreationOps |
python | numba__numba | numba/tests/test_ir_inlining.py | {
"start": 4686,
"end": 13938
} | class ____(MemoryLeakMixin, InliningBase):
def test_basic_inline_never(self):
@njit(inline='never')
def foo():
return
def impl():
return foo()
self.check(impl, inline_expect={'foo': False})
def test_basic_inline_always(self):
@njit(inline='always')
def foo():
return
def impl():
return foo()
self.check(impl, inline_expect={'foo': True})
def test_basic_inline_combos(self):
def impl():
x = foo()
y = bar()
z = baz()
return x, y, z
opts = (('always'), ('never'))
for inline_foo, inline_bar, inline_baz in product(opts, opts, opts):
@njit(inline=inline_foo)
def foo():
return
@njit(inline=inline_bar)
def bar():
return
@njit(inline=inline_baz)
def baz():
return
inline_expect = {'foo': self.inline_opt_as_bool[inline_foo],
'bar': self.inline_opt_as_bool[inline_bar],
'baz': self.inline_opt_as_bool[inline_baz]}
self.check(impl, inline_expect=inline_expect)
@unittest.skip("Need to work out how to prevent this")
def test_recursive_inline(self):
@njit(inline='always')
def foo(x):
if x == 0:
return 12
else:
foo(x - 1)
a = 3
def impl():
b = 0
if a > 1:
b += 1
foo(5)
if b < a:
b -= 1
self.check(impl, inline_expect={'foo': True})
def test_freevar_bindings(self):
def factory(inline, x, y):
z = x + 12
@njit(inline=inline)
def func():
return (x, y + 3, z)
return func
def impl():
x = foo()
y = bar()
z = baz()
return x, y, z
opts = (('always'), ('never'))
for inline_foo, inline_bar, inline_baz in product(opts, opts, opts):
foo = factory(inline_foo, 10, 20)
bar = factory(inline_bar, 30, 40)
baz = factory(inline_baz, 50, 60)
inline_expect = {'foo': self.inline_opt_as_bool[inline_foo],
'bar': self.inline_opt_as_bool[inline_bar],
'baz': self.inline_opt_as_bool[inline_baz]}
self.check(impl, inline_expect=inline_expect)
def test_global_binding(self):
def impl():
x = 19
return _global_func(x)
self.check(impl, inline_expect={'_global_func': True})
def test_inline_from_another_module(self):
from .inlining_usecases import bar
def impl():
z = _GLOBAL1 + 2
return bar(), z
self.check(impl, inline_expect={'bar': True})
def test_inline_from_another_module_w_getattr(self):
import numba.tests.inlining_usecases as iuc
def impl():
z = _GLOBAL1 + 2
return iuc.bar(), z
self.check(impl, inline_expect={'bar': True})
def test_inline_from_another_module_w_2_getattr(self):
import numba.tests.inlining_usecases # noqa forces registration
import numba.tests as nt
def impl():
z = _GLOBAL1 + 2
return nt.inlining_usecases.bar(), z
self.check(impl, inline_expect={'bar': True})
def test_inline_from_another_module_as_freevar(self):
def factory():
from .inlining_usecases import bar
@njit(inline='always')
def tmp():
return bar()
return tmp
baz = factory()
def impl():
z = _GLOBAL1 + 2
return baz(), z
self.check(impl, inline_expect={'bar': True})
def test_inline_w_freevar_from_another_module(self):
from .inlining_usecases import baz_factory
def gen(a, b):
bar = baz_factory(a)
def impl():
z = _GLOBAL1 + a * b
return bar(), z, a
return impl
impl = gen(10, 20)
self.check(impl, inline_expect={'bar': True})
def test_inlining_models(self):
def s17_caller_model(expr, caller_info, callee_info):
self.assertIsInstance(expr, ir.Expr)
self.assertEqual(expr.op, "call")
return self.sentinel_17_cost_model(caller_info)
def s17_callee_model(expr, caller_info, callee_info):
self.assertIsInstance(expr, ir.Expr)
self.assertEqual(expr.op, "call")
return self.sentinel_17_cost_model(callee_info)
# caller has sentinel
for caller, callee in ((11, 17), (17, 11)):
@njit(inline=s17_caller_model)
def foo():
return callee
def impl(z):
x = z + caller
y = foo()
return y + 3, x
self.check(impl, 10, inline_expect={'foo': caller == 17})
# callee has sentinel
for caller, callee in ((11, 17), (17, 11)):
@njit(inline=s17_callee_model)
def bar():
return callee
def impl(z):
x = z + caller
y = bar()
return y + 3, x
self.check(impl, 10, inline_expect={'bar': callee == 17})
def test_inline_inside_loop(self):
@njit(inline='always')
def foo():
return 12
def impl():
acc = 0.0
for i in range(5):
acc += foo()
return acc
self.check(impl, inline_expect={'foo': True}, block_count=4)
def test_inline_inside_closure_inside_loop(self):
@njit(inline='always')
def foo():
return 12
def impl():
acc = 0.0
for i in range(5):
def bar():
return foo() + 7
acc += bar()
return acc
self.check(impl, inline_expect={'foo': True}, block_count=4)
def test_inline_closure_inside_inlinable_inside_closure(self):
@njit(inline='always')
def foo(a):
def baz():
return 12 + a
return baz() + 8
def impl():
z = 9
def bar(x):
return foo(z) + 7 + x
return bar(z + 2)
self.check(impl, inline_expect={'foo': True}, block_count=1)
def test_inline_involved(self):
fortran = njit(inline='always')(_gen_involved())
@njit(inline='always')
def boz(j):
acc = 0
def biz(t):
return t + acc
for x in range(j):
acc += biz(8 + acc) + fortran(2., acc, 1, 12j, biz(acc))
return acc
@njit(inline='always')
def foo(a):
acc = 0
for p in range(12):
tmp = fortran(1, 1, 1, 1, 1)
def baz(x):
return 12 + a + x + tmp
acc += baz(p) + 8 + boz(p) + tmp
return acc + baz(2)
def impl():
z = 9
def bar(x):
return foo(z) + 7 + x
return bar(z + 2)
# block count changes with Python version due to bytecode differences.
if utils.PYVERSION in ((3, 12), (3, 13), (3, 14)):
bc = 39
elif utils.PYVERSION in ((3, 10), (3, 11)):
bc = 35
else:
raise NotImplementedError(utils.PYVERSION)
self.check(impl, inline_expect={'foo': True, 'boz': True,
'fortran': True}, block_count=bc)
def test_inline_renaming_scheme(self):
# See #7380, this checks that inlined variables have a name derived from
# the function they were defined in.
@njit(inline="always")
def bar(z):
x = 5
y = 10
return x + y + z
@njit(pipeline_class=IRPreservingTestPipeline)
def foo(a, b):
return bar(a), bar(b)
self.assertEqual(foo(10, 20), (25, 35))
# check IR. Look for the `x = 5`... there should be
# Two lots of `const(int, 5)`, one for each inline
# The LHS of the assignment will have a name like:
# TestFunctionInlining_test_inline_renaming_scheme__locals__bar_v2.x
# Ensure that this is the case!
func_ir = foo.overloads[foo.signatures[0]].metadata['preserved_ir']
store = []
for blk in func_ir.blocks.values():
for stmt in blk.body:
if isinstance(stmt, ir.Assign):
if isinstance(stmt.value, ir.Const):
if stmt.value.value == 5:
store.append(stmt)
self.assertEqual(len(store), 2)
for i in store:
name = i.target.name
basename = self.id().lstrip(self.__module__)
regex = rf'{basename}__locals__bar_v[0-9]+.x'
self.assertRegex(name, regex)
| TestFunctionInlining |
python | rapidsai__cudf | python/cudf/cudf/core/column/datetime.py | {
"start": 27767,
"end": 32378
} | class ____(DatetimeColumn):
def _clear_cache(self) -> None:
super()._clear_cache()
try:
del self._local_time
except AttributeError:
pass
@staticmethod
def _validate_dtype_instance(
dtype: pd.DatetimeTZDtype,
) -> pd.DatetimeTZDtype:
if not isinstance(dtype, pd.DatetimeTZDtype):
raise ValueError("dtype must be a pandas.DatetimeTZDtype")
return get_compatible_timezone(dtype)
def to_pandas(
self,
*,
nullable: bool = False,
arrow_type: bool = False,
) -> pd.Index:
if (
arrow_type
or nullable
or (
cudf.get_option("mode.pandas_compatible")
and isinstance(self.dtype, pd.ArrowDtype)
)
):
return super().to_pandas(nullable=nullable, arrow_type=arrow_type)
else:
return self._local_time.to_pandas().tz_localize(
self.dtype.tz, # type: ignore[union-attr]
ambiguous="NaT",
nonexistent="NaT",
)
def to_arrow(self) -> pa.Array:
# Cast to expected timestamp array type for assume_timezone
local_array = cast(pa.TimestampArray, self._local_time.to_arrow())
return pa.compute.assume_timezone(local_array, str(self.dtype.tz)) # type: ignore[union-attr]
@functools.cached_property
def time_unit(self) -> str:
return self.dtype.unit # type: ignore[union-attr]
@property
def _utc_time(self) -> DatetimeColumn:
"""Return UTC time as naive timestamps."""
return DatetimeColumn(
plc_column=self.plc_column,
size=self.size,
dtype=_get_base_dtype(self.dtype),
offset=self.offset,
null_count=self.null_count,
exposed=False,
)
@functools.cached_property
def _local_time(self) -> DatetimeColumn:
"""Return the local time as naive timestamps."""
transition_times, offsets = get_tz_data(str(self.dtype.tz)) # type: ignore[union-attr]
base_dtype = _get_base_dtype(self.dtype)
indices = (
transition_times.astype(base_dtype).searchsorted(
self.astype(base_dtype), side="right"
)
- 1
)
offsets_from_utc = offsets.take(indices, nullify=True)
return self + offsets_from_utc
def as_string_column(self, dtype: DtypeObj) -> StringColumn:
return self._local_time.as_string_column(dtype)
def as_datetime_column(
self, dtype: np.dtype | pd.DatetimeTZDtype
) -> DatetimeColumn:
if isinstance(dtype, pd.DatetimeTZDtype) and dtype != self.dtype:
if dtype.unit != self.time_unit:
# TODO: Doesn't check that new unit is valid.
casted = self._with_type_metadata(dtype)
else:
casted = self
return casted.tz_convert(str(dtype.tz))
return super().as_datetime_column(dtype)
@acquire_spill_lock()
def _get_dt_field(
self, field: plc.datetime.DatetimeComponent
) -> ColumnBase:
return type(self).from_pylibcudf(
plc.datetime.extract_datetime_component(
self._local_time.to_pylibcudf(mode="read"),
field,
)
)
def __repr__(self) -> str:
# Arrow prints the UTC timestamps, but we want to print the
# local timestamps:
arr = self._local_time.to_arrow().cast(
pa.timestamp(self.dtype.unit, str(self.dtype.tz)) # type: ignore[union-attr]
)
return (
f"{object.__repr__(self)}\n{arr.to_string()}\ndtype: {self.dtype}"
)
def tz_localize(
self,
tz: str | None,
ambiguous: Literal["NaT"] = "NaT",
nonexistent: Literal["NaT"] = "NaT",
) -> DatetimeColumn:
if tz is None:
return self._local_time
ambiguous, nonexistent = check_ambiguous_and_nonexistent(
ambiguous, nonexistent
)
raise ValueError(
"Already localized. "
"Use `tz_convert` to convert between time zones."
)
def tz_convert(self, tz: str | None) -> DatetimeColumn:
if tz is None:
return self._utc_time
elif tz == str(self.dtype.tz): # type: ignore[union-attr]
return self.copy()
utc_time = self._utc_time
return utc_time._with_type_metadata(
pd.DatetimeTZDtype(self.time_unit, tz)
)
| DatetimeTZColumn |
python | rapidsai__cudf | python/cudf_polars/cudf_polars/dsl/expressions/base.py | {
"start": 679,
"end": 782
} | class ____(NamedTuple):
requests: list[tuple[Expr | None, plc.aggregation.Aggregation, Expr]]
| AggInfo |
python | spack__spack | lib/spack/spack/package_base.py | {
"start": 4827,
"end": 11069
} | class ____(type):
"""Check if a package is detectable and add default implementations
for the detection function.
"""
TAG = "detectable"
def __init__(cls, name, bases, attr_dict):
if hasattr(cls, "executables") and hasattr(cls, "libraries"):
msg = "a package can have either an 'executables' or 'libraries' attribute"
raise spack.error.SpackError(f"{msg} [package '{name}' defines both]")
# On windows, extend the list of regular expressions to look for
# filenames ending with ".exe"
# (in some cases these regular expressions include "$" to avoid
# pulling in filenames with unexpected suffixes, but this allows
# for example detecting "foo.exe" when the package writer specified
# that "foo" was a possible executable.
# If a package has the executables or libraries attribute then it's
# assumed to be detectable. Add a tag, so finding them is faster
if hasattr(cls, "executables") or hasattr(cls, "libraries"):
# To add the tag, we need to copy the tags attribute, and attach it to
# the current class. We don't use append, since it might modify base classes,
# if "tags" is retrieved following the MRO.
cls.tags = getattr(cls, "tags", []) + [DetectablePackageMeta.TAG]
@classmethod
def platform_executables(cls):
def to_windows_exe(exe):
if exe.endswith("$"):
exe = exe.replace("$", "%s$" % spack.util.path.win_exe_ext())
else:
exe += spack.util.path.win_exe_ext()
return exe
plat_exe = []
if hasattr(cls, "executables"):
for exe in cls.executables:
if sys.platform == "win32":
exe = to_windows_exe(exe)
plat_exe.append(exe)
return plat_exe
@classmethod
def determine_spec_details(cls, prefix, objs_in_prefix):
"""Allow ``spack external find ...`` to locate installations.
Args:
prefix (str): the directory containing the executables
or libraries
objs_in_prefix (set): the executables or libraries that
match the regex
Returns:
The list of detected specs for this package
"""
objs_by_version = collections.defaultdict(list)
# The default filter function is the identity function for the
# list of executables
filter_fn = getattr(cls, "filter_detected_exes", lambda x, exes: exes)
objs_in_prefix = filter_fn(prefix, objs_in_prefix)
for obj in objs_in_prefix:
try:
version_str = cls.determine_version(obj)
if version_str:
objs_by_version[version_str].append(obj)
except Exception as e:
tty.debug(f"Cannot detect the version of '{obj}' [{str(e)}]")
specs = []
for version_str, objs in objs_by_version.items():
variants = cls.determine_variants(objs, version_str)
# Normalize output to list
if not isinstance(variants, list):
variants = [variants]
for variant in variants:
if isinstance(variant, str):
variant = (variant, {})
variant_str, extra_attributes = variant
spec_str = f"{cls.name}@{version_str} {variant_str}"
# Pop a few reserved keys from extra attributes, since
# they have a different semantics
external_path = extra_attributes.pop("prefix", None)
external_modules = extra_attributes.pop("modules", None)
try:
spec = spack.spec.Spec.from_detection(
spec_str,
external_path=external_path,
external_modules=external_modules,
extra_attributes=extra_attributes,
)
except Exception as e:
tty.debug(f'Parsing failed [spec_str="{spec_str}", error={str(e)}]')
else:
specs.append(spec)
return sorted(specs)
@classmethod
def determine_variants(cls, objs, version_str):
return ""
# Register the class as a detectable package
detectable_packages[cls.namespace].append(cls.name)
# Attach function implementations to the detectable class
default = False
if not hasattr(cls, "determine_spec_details"):
default = True
cls.determine_spec_details = determine_spec_details
if default and not hasattr(cls, "determine_version"):
msg = (
'the package "{0}" in the "{1}" repo needs to define'
' the "determine_version" method to be detectable'
)
NotImplementedError(msg.format(cls.name, cls.namespace))
if default and not hasattr(cls, "determine_variants"):
cls.determine_variants = determine_variants
# This function should not be overridden by subclasses,
# as it is not designed for bespoke pkg detection but rather
# on a per-platform basis
if "platform_executables" in cls.__dict__.keys():
raise PackageError("Packages should not override platform_executables")
cls.platform_executables = platform_executables
super(DetectablePackageMeta, cls).__init__(name, bases, attr_dict)
| DetectablePackageMeta |
python | Textualize__textual | src/textual/widgets/_tabs.py | {
"start": 2700,
"end": 5490
} | class ____(Static):
"""A Widget to manage a single tab within a Tabs widget."""
DEFAULT_CSS = """
Tab {
width: auto;
height: 1;
padding: 0 1;
text-align: center;
color: $foreground 50%;
&:hover {
color: $foreground;
}
&:disabled {
color: $foreground 25%;
}
&.-active {
color: $foreground;
}
&.-hidden {
display: none;
}
}
"""
@dataclass
class TabMessage(Message):
"""Tab-related messages.
These are mostly intended for internal use when interacting with `Tabs`.
"""
tab: Tab
"""The tab that is the object of this message."""
@property
def control(self) -> Tab:
"""The tab that is the object of this message.
This is an alias for the attribute `tab` and is used by the
[`on`][textual.on] decorator.
"""
return self.tab
class Clicked(TabMessage):
"""A tab was clicked."""
class Disabled(TabMessage):
"""A tab was disabled."""
class Enabled(TabMessage):
"""A tab was enabled."""
class Relabelled(TabMessage):
"""A tab was relabelled."""
def __init__(
self,
label: ContentText,
*,
id: str | None = None,
classes: str | None = None,
disabled: bool = False,
) -> None:
"""Initialise a Tab.
Args:
label: The label to use in the tab.
id: Optional ID for the widget.
classes: Space separated list of class names.
disabled: Whether the tab is disabled or not.
"""
super().__init__(id=id, classes=classes, disabled=disabled)
self._label: Content
# Setter takes Text or str
self.label = Content.from_text(label)
@property
def label(self) -> Content:
"""The label for the tab."""
return self._label
@label.setter
def label(self, label: ContentText) -> None:
self._label = Content.from_text(label)
self.update(self._label)
def update(self, content: VisualType = "") -> None:
self.post_message(self.Relabelled(self))
return super().update(content)
@property
def label_text(self) -> str:
"""Undecorated text of the label."""
return self.label.plain
def _on_click(self):
"""Inform the message that the tab was clicked."""
self.post_message(self.Clicked(self))
def _watch_disabled(self, disabled: bool) -> None:
"""Notify the parent `Tabs` that a tab was enabled/disabled."""
self.post_message(self.Disabled(self) if disabled else self.Enabled(self))
| Tab |
python | pytorch__pytorch | functorch/dim/_getsetitem.py | {
"start": 897,
"end": 18978
} | class ____:
can_call_original: bool = False
advanced_indexing: bool = False
self_tensor: Optional[torch.Tensor] = None
flat_inputs: list[Any] = field(default_factory=list)
result_levels: list[DimEntry] = field(default_factory=list)
has_device: bool = False
def has_dims(obj: Any) -> bool:
"""
Check if an object has first-class dimensions.
This function checks if the object is either a Dim or a functorch Tensor
that has first-class dimensions, using the proper check_exact methods.
"""
from . import Dim, Tensor
return Dim.check_exact(obj) or Tensor.check_exact(obj)
def _bind_dims_to_size(sz: int, sd: int, dims: list, nsz: list, nsd: list) -> None:
"""
Bind dimensions to size and calculate proper strides for dim packs.
"""
from . import DimensionBindError
rhs_prod = 1
for i, dim in enumerate(dims):
if not dim.is_bound:
# Check for multiple unbound dimensions
for j in range(i + 1, len(dims)):
if not dims[j].is_bound:
raise DimensionBindError(
f"cannot infer the sizes of two dimensions at once {dim!r} and {dims[j]!r}"
)
rhs_prod *= dims[j].size
# Calculate the size for this unbound dimension
if sz % rhs_prod != 0:
tup = tuple(dim.size if dim.is_bound else "?" for dim in dims)
raise DimensionBindError(
f"inferred dimension does not evenly fit into larger dimension: {sz} vs {tup}"
)
inferred_size = sz // rhs_prod
dim.size = inferred_size
rhs_prod = sz
break
else:
rhs_prod *= dim.size
# Final validation that dimensions match
if rhs_prod != sz:
tup = tuple(dims)
raise DimensionBindError(
f"Dimension sizes to do not match ({sz} != {rhs_prod}) when matching dimension pack {tup}"
)
# Calculate new sizes and strides for each dimension in the pack
# First calculate all strides by iterating in reverse
new_strides = [0] * len(dims)
current_stride = sd
for i in reversed(range(len(dims))):
new_strides[i] = current_stride
current_stride *= dims[i].size
# Then append sizes and strides in forward order
for i in range(len(dims)):
nsz.append(dims[i].size)
nsd.append(new_strides[i])
def slice_to_tuple(flat_inputs: list) -> tuple:
return tuple(flat_inputs)
def extractIndices(index: Any, indices: list) -> bool:
if isinstance(index, tuple): # mpy::tuple_view::check
indices.extend(index)
return True
elif isinstance(index, torch.Tensor): # THPVariable_Check
indices.append(index)
return False
elif not hasattr(index, "__iter__") or isinstance(
index, (str, bytes)
): # !mpy::is_sequence
indices.append(index)
return False
# Handle sequence case (list)
if isinstance(index, list):
if len(index) >= 32:
indices.extend(index)
return True
# Check each item in the sequence
for item in index:
if (
isinstance(item, (torch.Tensor, slice))
or hasattr(item, "__iter__")
or item is ...
or item is None
or has_dims(item)
):
indices.extend(index)
return True
# If we got here, treat as single index
indices.append(index)
return False
# Default case
indices.append(index)
return False
def getitem(cls: Any, func: Any, types: Any, args: Any, kwargs: Any) -> Any:
self = args[0]
index = args[1]
iinfo = getsetitem(self, index, has_dims(self))
if iinfo.can_call_original:
# Call original tensor __getitem__ directly, bypassing __torch_function__
return torch.Tensor.__getitem__(self, index)
return invoke_getitem(iinfo)
def setitem(self: Any, index: Any, rhs: Any) -> None:
"""Set values in tensor using first-class dimensions."""
from . import DimensionBindError, TensorInfo
iinfo = getsetitem(self, index, has_dims(self) or has_dims(rhs))
if iinfo.can_call_original:
# Call original tensor __setitem__ directly, bypassing __torch_function__
torch._C.TensorBase.__setitem__(self, index, rhs)
return
# Handle RHS tensor with dimensions
rhs_info = TensorInfo.create(rhs, False, False)
if rhs_info:
# Check that rhs dimensions are compatible with result dimensions
for l in rhs_info.levels:
if not l.is_positional():
# Find this dimension in result levels
found = False
for result_level in iinfo.result_levels:
if (
not result_level.is_positional()
and result_level.dim() is l.dim()
):
found = True
break
if not found:
# Create tuple representation of result levels for error message
result_dims: list[Union[int, Dim]] = []
for rl in iinfo.result_levels:
if rl.is_positional():
result_dims.append(rl.position())
else:
result_dims.append(rl.dim())
raise DimensionBindError(
f"rhs of setitem contains dimension {l.dim()!r} which is not in the dimension on the left "
f"({tuple(result_dims)!r})"
)
# Match RHS tensor to result levels
assert rhs_info.tensor is not None, "Cannot match levels on None tensor"
matched_rhs = _match_levels(
rhs_info.tensor, rhs_info.levels, iinfo.result_levels
)
else:
matched_rhs = rhs
# For advanced indexing with dimensions, we need special handling
if iinfo.advanced_indexing:
# Use advanced indexing - the flat_inputs already contain matched tensors
tup = slice_to_tuple(iinfo.flat_inputs)
if iinfo.self_tensor is None:
raise RuntimeError("Cannot setitem on None tensor")
torch._C.TensorBase.__setitem__(iinfo.self_tensor, tup, matched_rhs)
else:
# Simple copy operation
if iinfo.self_tensor is None:
raise RuntimeError("Cannot copy to None tensor")
iinfo.self_tensor.copy_(matched_rhs)
def invoke_getitem(iinfo: IndexingInfo) -> Any:
if iinfo.advanced_indexing:
self_tensor = iinfo.self_tensor
tup = slice_to_tuple(iinfo.flat_inputs)
if self_tensor is None:
raise RuntimeError("Cannot getitem on None tensor")
rtensor = self_tensor[tup]
else:
rtensor = iinfo.self_tensor # type: ignore[assignment]
if rtensor is None:
raise RuntimeError("Cannot getitem on None tensor")
# rtensor is now guaranteed to be not None
# Create a Tensor with the proper dimensions using the class method
from . import Tensor
return Tensor.from_positional(rtensor, iinfo.result_levels, iinfo.has_device)
def getsetitem(self: Any, index: Any, tensors_have_dims: bool) -> IndexingInfo:
from . import DimList # Import DimList for type checking
can_call_original_getitem = not tensors_have_dims
input_list = []
if has_dims(index):
input_list.append(index)
else:
is_sequence = extractIndices(index, input_list)
# nothing about first class dims here, fallback to getitem
if can_call_original_getitem and not is_sequence:
return IndexingInfo(can_call_original=True)
# Calculate how many dimensions have been indexed in order to compute the
# size of ... or expand a potentially unbound dimension list.
dims_indexed = 0
expanding_object = -1
unbound_dim_list = None
dimlists = [] # Track DimList positions for later processing
def check_expanding(i: int) -> None:
nonlocal expanding_object
if expanding_object != -1:
from . import DimensionBindError
raise DimensionBindError(
f"at most one ... or unbound dimension list can exist in indexing list but found 2 at offsets "
f"{expanding_object} and {i}"
)
expanding_object = i
def is_dimpack(s: Any) -> bool:
from . import Dim
return (
isinstance(s, (tuple, list))
and len(s) > 0
and all(Dim.check_exact(item) for item in s)
)
has_dimpacks_or_none = False
for i, s in enumerate(input_list):
if has_dims(s):
can_call_original_getitem = False
dims_indexed += 1
elif s is ...:
check_expanding(i)
elif isinstance(s, DimList):
can_call_original_getitem = False
if not s.is_bound:
check_expanding(i)
unbound_dim_list = s
else:
dims_indexed += len(s._dims)
dimlists.append(i)
elif s is None:
has_dimpacks_or_none = True
elif is_dimpack(s):
can_call_original_getitem = False
has_dimpacks_or_none = True
dims_indexed += 1
else:
dims_indexed += 1
# Early return if we can use original getitem
if can_call_original_getitem:
return IndexingInfo(can_call_original=True)
self_info = TensorInfo.create(self, False, True)
total_dims = len(self_info.levels) # Total dimensions (positional + named)
if dims_indexed > total_dims:
raise ValueError(
f"at least {dims_indexed} indices were supplied but the tensor only has {total_dims} dimensions"
)
# Expand any unbound dimension list, or expand ... into individual : slices.
expanding_dims = total_dims - dims_indexed
if expanding_object != -1:
if unbound_dim_list is not None:
# Bind unbound dimension list to the expanding dimensions
unbound_dim_list.bind_len(expanding_dims)
else:
# Expand ... into slice(None) objects
no_slices = [slice(None)] * expanding_dims
input_list = (
input_list[:expanding_object]
+ no_slices
+ input_list[expanding_object + 1 :]
)
# Flatten out any dimensions stored in dimlist elements directly into the inputs
# Process in reverse order to maintain indices
for i in range(len(dimlists) - 1, -1, -1):
idx = dimlists[i]
# We added more elements to input because of ...
# so we need to also adjust the index to get back to where the
# dimlist existed
if (
unbound_dim_list is None
and expanding_object != -1
and idx > expanding_object
):
idx += expanding_dims
dl = input_list[idx]
# PRIVATE here naughty
input_list = input_list[:idx] + dl._dims + input_list[idx + 1 :]
return getsetitem_flat(self_info, input_list, [], [], has_dimpacks_or_none)
def getsetitem_flat(
self_info: TensorInfo,
input_list: list,
keys: list[DimEntry],
values: list,
has_dimpacks_or_none: bool,
) -> IndexingInfo:
from . import Dim
# Track dimension usage
seen_dims: list[Any] = []
seen_dims_nuses: list[int] = []
def add_dim(dim: Any) -> None:
# Use safe indexing to avoid triggering __torch_function__ on Dim objects
idx = _safe_index(seen_dims, dim)
if idx is not None:
seen_dims_nuses[idx] += 1
else:
seen_dims.append(dim)
seen_dims_nuses.append(1)
flat_inputs = []
tensor_inputs: list[Any] = []
device_holding_tensor = None
def append_flat_handle(handle: Any) -> None:
flat_inputs.append(handle)
tensor_inputs.append(None)
def append_tensor_input(ti: TensorInfo) -> None:
flat_inputs.append(None)
tensor_inputs.append(ti)
nonlocal device_holding_tensor
if ti.has_device and device_holding_tensor is None:
device_holding_tensor = ti.tensor
nsz = []
nsd = []
if self_info.tensor is None:
raise RuntimeError("Cannot get size/stride on None tensor")
sz = self_info.tensor.size()
sd = self_info.tensor.stride()
def append_size(i: int) -> None:
if has_dimpacks_or_none:
nsz.append(sz[i])
nsd.append(sd[i])
input_it = input_list[:]
def parse_nones() -> None:
nonlocal input_it
while input_it and input_it[0] is None:
append_flat_handle(slice(None))
nsz.append(1)
nsd.append(0)
input_it = input_it[1:]
def append_item(i: int, arg: Any) -> None:
if Dim.check_exact(arg):
d = arg
if d._size == -1:
d.size = sz[i]
add_dim(d)
append_size(i)
append_flat_handle(arg)
return
info = TensorInfo.create(arg, False, False)
if info:
append_size(i)
append_tensor_input(info)
for level in info.levels:
if not level.is_positional():
add_dim(level.dim())
return
if has_dimpacks_or_none:
if isinstance(arg, (tuple, list)) and all(Dim.check_exact(d) for d in arg):
# dim pack
dim_pack = list(arg)
for d in dim_pack:
add_dim(d)
append_flat_handle(d)
_bind_dims_to_size(sz[i], sd[i], dim_pack, nsz, nsd)
return
append_size(i)
append_flat_handle(arg)
# Match indexing expressions with tensor dimensions
for i, level in enumerate(self_info.levels):
# Use safe indexing to avoid triggering __torch_function__ on DimEntry comparisons
idx = _safe_index(keys, level)
if idx is not None:
append_item(i, values[idx])
else:
if level.is_positional():
parse_nones()
if not input_it:
append_flat_handle(slice(None))
append_size(i)
else:
arg = input_it[0]
input_it = input_it[1:]
append_item(i, arg)
else:
add_dim(level.dim())
append_flat_handle(level.dim())
append_size(i)
parse_nones()
# Restride tensor if needed
if has_dimpacks_or_none and nsz:
if self_info.tensor is None:
raise RuntimeError("Cannot restride None tensor")
self_tensor = self_info.tensor.as_strided(
nsz, nsd, self_info.tensor.storage_offset()
)
else:
self_tensor = self_info.tensor
# Determine result shape and indexing requirements
result_levels: list[Any] = []
index_levels = []
tensor_insert_point = -1
requires_getindex = False
def mark_tensor_index() -> None:
nonlocal tensor_insert_point
if tensor_insert_point == -1:
tensor_insert_point = len(result_levels)
elif tensor_insert_point != len(result_levels):
tensor_insert_point = 0
for i, inp in enumerate(flat_inputs):
if tensor_inputs[i] is not None:
requires_getindex = True
mark_tensor_index()
for level in tensor_inputs[i].levels:
if level not in index_levels:
index_levels.append(level)
elif Dim.check_exact(inp):
d = inp
# Use safe indexing to avoid triggering __torch_function__
dim_idx = _safe_index(seen_dims, d)
assert dim_idx is not None, f"Dim {d} not found in seen_dims"
if seen_dims_nuses[dim_idx] == 1:
flat_inputs[i] = slice(None)
result_levels.append(DimEntry(d))
else:
requires_getindex = True
flat_inputs[i] = None
tensor_inputs[i] = TensorInfo(
d._get_range(), [DimEntry(d)], False, None
)
if DimEntry(d) not in index_levels:
index_levels.append(DimEntry(d))
mark_tensor_index()
else:
if inp != slice(None):
requires_getindex = True
if not isinstance(inp, int):
result_levels.append(DimEntry(-1))
# Insert indexing dimensions at first tensor use point
if tensor_insert_point != -1:
for level in reversed(index_levels):
result_levels.insert(tensor_insert_point, level)
# Match tensors to indexing shape
if requires_getindex:
for i in range(len(flat_inputs)):
if tensor_inputs[i] is not None:
t = tensor_inputs[i].tensor
assert t is not None, "TensorInfo should have valid tensor data"
if (
not tensor_inputs[i].has_device
and device_holding_tensor is not None
):
t = t.to(device_holding_tensor.device)
flat_inputs[i] = _match_levels(t, tensor_inputs[i].levels, index_levels)
# Number positional dimensions correctly
seen_positionals = 0
for i in reversed(range(len(result_levels))):
if result_levels[i].is_positional():
seen_positionals += 1
result_levels[i] = DimEntry(-seen_positionals)
return IndexingInfo(
can_call_original=False,
advanced_indexing=requires_getindex,
self_tensor=self_tensor,
flat_inputs=flat_inputs,
result_levels=result_levels,
has_device=self_info.has_device,
)
| IndexingInfo |
python | pypa__warehouse | warehouse/rate_limiting/__init__.py | {
"start": 3731,
"end": 4823
} | class ____:
def __init__(self, limit, identifiers=None, limiter_class=RateLimiter):
self.limit = limit
self.identifiers = identifiers
self.limiter_class = limiter_class
def __call__(self, context, request):
return self.limiter_class(
request.registry["ratelimiter.storage"],
limit=self.limit,
identifiers=self.identifiers,
metrics=request.find_service(IMetricsService, context=None),
)
def __repr__(self):
return (
f'RateLimit("{self.limit}", identifiers={self.identifiers}, '
f"limiter_class={self.limiter_class})"
)
def __eq__(self, other):
if not isinstance(other, RateLimit):
return NotImplemented
return (self.limit, self.identifiers, self.limiter_class) == (
other.limit,
other.identifiers,
other.limiter_class,
)
def includeme(config):
config.registry["ratelimiter.storage"] = storage_from_string(
config.registry.settings["ratelimit.url"]
)
| RateLimit |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/layout/processors.py | {
"start": 3928,
"end": 4890
} | class ____:
"""
Transformation result, as returned by :meth:`.Processor.apply_transformation`.
Important: Always make sure that the length of `document.text` is equal to
the length of all the text in `fragments`!
:param fragments: The transformed fragments. To be displayed, or to pass to
the next processor.
:param source_to_display: Cursor position transformation from original
string to transformed string.
:param display_to_source: Cursor position transformed from source string to
original string.
"""
def __init__(
self,
fragments: StyleAndTextTuples,
source_to_display: SourceToDisplay | None = None,
display_to_source: DisplayToSource | None = None,
) -> None:
self.fragments = fragments
self.source_to_display = source_to_display or (lambda i: i)
self.display_to_source = display_to_source or (lambda i: i)
| Transformation |
python | keras-team__keras | keras/src/export/openvino_test.py | {
"start": 2549,
"end": 7884
} | class ____(testing.TestCase):
@parameterized.named_parameters(
named_product(
model_type=["sequential", "functional", "subclass", "lstm"]
)
)
def test_standard_model_export(self, model_type):
if model_type == "lstm":
self.skipTest(
"LSTM export not supported - unimplemented QR operation"
)
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.xml")
model = get_model(model_type)
batch_size = 3
if model_type == "lstm":
ref_input = np.random.normal(size=(batch_size, 4, 10))
else:
ref_input = np.random.normal(size=(batch_size, 10))
ref_input = ref_input.astype("float32")
ref_output = model(ref_input)
openvino.export_openvino(model, temp_filepath)
# Load and run inference with OpenVINO
core = ov.Core()
ov_model = core.read_model(temp_filepath)
compiled_model = core.compile_model(ov_model, "CPU")
ov_output = compiled_model([ref_input])[compiled_model.output(0)]
self.assertAllClose(ref_output, ov_output)
larger_input = np.concatenate([ref_input, ref_input], axis=0)
compiled_model([larger_input])
@parameterized.named_parameters(
named_product(struct_type=["tuple", "array", "dict"])
)
def test_model_with_input_structure(self, struct_type):
class TupleModel(models.Model):
def call(self, inputs):
x, y = inputs
return ops.add(x, y)
class ArrayModel(models.Model):
def call(self, inputs):
x = inputs[0]
y = inputs[1]
return ops.add(x, y)
class DictModel(models.Model):
def call(self, inputs):
x = inputs["x"]
y = inputs["y"]
return ops.add(x, y)
batch_size = 3
ref_input = np.random.normal(size=(batch_size, 10)).astype("float32")
if struct_type == "tuple":
model = TupleModel()
ref_input = (ref_input, ref_input * 2)
elif struct_type == "array":
model = ArrayModel()
ref_input = [ref_input, ref_input * 2]
elif struct_type == "dict":
model = DictModel()
ref_input = {"x": ref_input, "y": ref_input * 2}
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.xml")
ref_output = model(tree.map_structure(ops.convert_to_tensor, ref_input))
openvino.export_openvino(model, temp_filepath)
# Load and run inference with OpenVINO
core = ov.Core()
ov_model = core.read_model(temp_filepath)
compiled_model = core.compile_model(ov_model, "CPU")
if isinstance(ref_input, dict):
ov_inputs = [ref_input[key] for key in ref_input.keys()]
else:
ov_inputs = list(ref_input)
ov_output = compiled_model(ov_inputs)[compiled_model.output(0)]
self.assertAllClose(ref_output, ov_output)
# Test with keras.saving_lib
temp_filepath = os.path.join(
self.get_temp_dir(), "exported_model.keras"
)
saving_lib.save_model(model, temp_filepath)
revived_model = saving_lib.load_model(
temp_filepath,
{
"TupleModel": TupleModel,
"ArrayModel": ArrayModel,
"DictModel": DictModel,
},
)
self.assertAllClose(ref_output, revived_model(ref_input))
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model2.xml")
openvino.export_openvino(revived_model, temp_filepath)
bigger_ref_input = tree.map_structure(
lambda x: np.concatenate([x, x], axis=0), ref_input
)
if isinstance(bigger_ref_input, dict):
bigger_ov_inputs = [
bigger_ref_input[key] for key in bigger_ref_input.keys()
]
else:
bigger_ov_inputs = list(bigger_ref_input)
compiled_model(bigger_ov_inputs)
def test_model_with_multiple_inputs(self):
class TwoInputsModel(models.Model):
def call(self, x, y):
return x + y
def build(self, y_shape, x_shape):
self.built = True
temp_filepath = os.path.join(self.get_temp_dir(), "exported_model.xml")
model = TwoInputsModel()
batch_size = 3
ref_input_x = np.random.normal(size=(batch_size, 10)).astype("float32")
ref_input_y = np.random.normal(size=(batch_size, 10)).astype("float32")
ref_output = model(ref_input_x, ref_input_y)
openvino.export_openvino(model, temp_filepath)
# Load and run inference with OpenVINO
core = ov.Core()
ov_model = core.read_model(temp_filepath)
compiled_model = core.compile_model(ov_model, "CPU")
ov_output = compiled_model([ref_input_x, ref_input_y])[
compiled_model.output(0)
]
self.assertAllClose(ref_output, ov_output)
larger_input_x = np.concatenate([ref_input_x, ref_input_x], axis=0)
larger_input_y = np.concatenate([ref_input_y, ref_input_y], axis=0)
compiled_model([larger_input_x, larger_input_y])
| ExportOpenVINOTest |
python | getsentry__sentry | tests/sentry/api/test_paginator.py | {
"start": 1337,
"end": 3226
} | class ____(TestCase):
cls = Paginator
def test_max_limit(self) -> None:
self.create_user("foo@example.com")
self.create_user("bar@example.com")
self.create_user("baz@example.com")
queryset = User.objects.all()
paginator = self.cls(queryset, "id", max_limit=10)
result = paginator.get_result(limit=2, cursor=None)
assert len(result) == 2
paginator = self.cls(queryset, "id", max_limit=1)
result = paginator.get_result(limit=2, cursor=None)
assert len(result) == 1
def test_count_hits(self) -> None:
self.create_user("foo@example.com")
self.create_user("bar@example.com")
queryset = User.objects.filter(email="foo@example.com")
paginator = self.cls(queryset, "id")
result = paginator.count_hits(1000)
assert result == 1
queryset = User.objects.all()
paginator = self.cls(queryset, "id")
result = paginator.count_hits(1000)
assert result == 2
queryset = User.objects.none()
paginator = self.cls(queryset, "id")
result = paginator.count_hits(1000)
assert result == 0
queryset = User.objects.all()
paginator = self.cls(queryset, "id")
result = paginator.count_hits(1)
assert result == 1
def test_prev_emptyset(self) -> None:
queryset = User.objects.all()
paginator = self.cls(queryset, "id")
result1 = paginator.get_result(limit=1, cursor=None)
res1 = self.create_user("foo@example.com")
result2 = paginator.get_result(limit=1, cursor=result1.prev)
assert len(result2) == 1, (result2, list(result2))
assert result2[0] == res1
result3 = paginator.get_result(limit=1, cursor=result2.prev)
assert len(result3) == 0, (result3, list(result3))
@control_silo_test
| PaginatorTest |
python | getsentry__sentry | src/sentry/api/endpoints/project_trace_item_details.py | {
"start": 8773,
"end": 11834
} | class ____(ProjectEndpoint):
owner = ApiOwner.DATA_BROWSING
publish_status = {
"GET": ApiPublishStatus.EXPERIMENTAL,
}
@staticmethod
def get(request: Request, project: Project, item_id: str) -> Response:
"""
Retrieve a Trace Item for a project.
For example, you might ask 'give me all the details about the span/log with id 01234567'
"""
serializer = ProjectTraceItemDetailsEndpointSerializer(data=request.GET)
if not serializer.is_valid():
return Response(serializer.errors, status=400)
serialized = serializer.validated_data
trace_id = serialized.get("trace_id")
item_type = serialized.get("item_type")
referrer = serialized.get("referrer", Referrer.API_ORGANIZATION_TRACE_ITEM_DETAILS.value)
trace_item_type = None
if item_type is not None:
trace_item_type = constants.SUPPORTED_TRACE_ITEM_TYPE_MAP.get(
SupportedTraceItemType(item_type), None
)
if trace_item_type is None:
raise BadRequest(detail=f"Unknown trace item type: {item_type}")
start_timestamp_proto = ProtoTimestamp()
start_timestamp_proto.FromSeconds(0)
end_timestamp_proto = ProtoTimestamp()
# due to clock drift, the end time can be in the future - add a week to be safe
end_timestamp_proto.FromSeconds(int(time.time()) + 60 * 60 * 24 * 7)
trace_id = request.GET.get("trace_id")
if not trace_id:
raise BadRequest(detail="Missing required query parameter 'trace_id'")
req = TraceItemDetailsRequest(
item_id=item_id,
meta=RequestMeta(
organization_id=project.organization.id,
cogs_category="events_analytics_platform",
referrer=referrer,
project_ids=[project.id],
start_timestamp=start_timestamp_proto,
end_timestamp=end_timestamp_proto,
trace_item_type=trace_item_type,
request_id=str(uuid.uuid4()),
),
trace_id=trace_id,
)
resp = MessageToDict(snuba_rpc.trace_item_details_rpc(req))
use_sentry_conventions = features.has(
"organizations:performance-sentry-conventions-fields",
project.organization,
actor=request.user,
)
include_internal = is_active_superuser(request) or is_active_staff(request)
resp_dict = {
"itemId": serialize_item_id(resp["itemId"], item_type),
"timestamp": resp["timestamp"],
"attributes": convert_rpc_attribute_to_json(
resp["attributes"],
item_type,
use_sentry_conventions,
include_internal=include_internal,
),
"meta": serialize_meta(resp["attributes"], item_type),
"links": serialize_links(resp["attributes"]),
}
return Response(resp_dict)
| ProjectTraceItemDetailsEndpoint |
python | pennersr__django-allauth | allauth/usersessions/apps.py | {
"start": 125,
"end": 736
} | class ____(AppConfig):
name = "allauth.usersessions"
verbose_name = _("User Sessions")
default_auto_field = (
app_settings.DEFAULT_AUTO_FIELD or "django.db.models.BigAutoField"
)
def ready(self):
from allauth.account.signals import (
password_changed,
password_set,
user_logged_in,
)
from allauth.usersessions import signals
user_logged_in.connect(receiver=signals.on_user_logged_in)
for sig in [password_set, password_changed]:
sig.connect(receiver=signals.on_password_changed)
| UserSessionsConfig |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 50148,
"end": 50487
} | class ____(FieldValues):
"""
Values for `DateTimeField` with a custom output format.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
datetime.datetime(2001, 1, 1, 13, 00): '01:00PM, 01 Jan 2001',
}
field = serializers.DateTimeField(format='%I:%M%p, %d %b %Y')
| TestCustomOutputFormatDateTimeField |
python | kamyu104__LeetCode-Solutions | Python/maximal-network-rank.py | {
"start": 2211,
"end": 3464
} | class ____(object):
def maximalNetworkRank(self, n, roads):
"""
:type n: int
:type roads: List[List[int]]
:rtype: int
"""
degree = [0]*n
adj = collections.defaultdict(set)
for a, b in roads:
degree[a] += 1
degree[b] += 1
adj[a].add(b)
adj[b].add(a)
sorted_idx = range(n)
sorted_idx.sort(key=lambda x:-degree[x])
m = 2
while m < n:
if degree[sorted_idx[m]] != degree[sorted_idx[1]]:
break
m += 1
result = degree[sorted_idx[0]] + degree[sorted_idx[1]] - 1 # at least sum(top2 values) - 1
for i in xrange(m-1): # only need to check pairs of top2 values
for j in xrange(i+1, m):
if degree[sorted_idx[i]]+degree[sorted_idx[j]]-int(sorted_idx[i] in adj and sorted_idx[j] in adj[sorted_idx[i]]) > result: # if equal to ideal sum of top2 values, break
return degree[sorted_idx[i]]+degree[sorted_idx[j]]-int(sorted_idx[i] in adj and sorted_idx[j] in adj[sorted_idx[i]])
return result
# Time: O(m + n^2)
# Space: O(m + n)
import collections
| Solution2 |
python | automl__auto-sklearn | autosklearn/pipeline/components/regression/gradient_boosting.py | {
"start": 655,
"end": 8105
} | class ____(
IterativeComponent,
AutoSklearnRegressionAlgorithm,
):
def __init__(
self,
loss,
learning_rate,
min_samples_leaf,
max_depth,
max_leaf_nodes,
max_bins,
l2_regularization,
early_stop,
tol,
scoring,
n_iter_no_change=0,
validation_fraction=None,
random_state=None,
verbose=0,
):
self.loss = loss
self.learning_rate = learning_rate
self.max_iter = self.get_max_iter()
self.min_samples_leaf = min_samples_leaf
self.max_depth = max_depth
self.max_leaf_nodes = max_leaf_nodes
self.max_bins = max_bins
self.l2_regularization = l2_regularization
self.early_stop = early_stop
self.tol = tol
self.scoring = scoring
self.n_iter_no_change = n_iter_no_change
self.validation_fraction = validation_fraction
self.random_state = random_state
self.verbose = verbose
self.estimator = None
self.fully_fit_ = False
@staticmethod
def get_max_iter():
return 512
def get_current_iter(self):
return self.estimator.n_iter_
def iterative_fit(self, X, y, n_iter=2, refit=False):
"""Set n_iter=2 for the same reason as for SGD"""
import sklearn.ensemble
from sklearn.experimental import enable_hist_gradient_boosting # noqa
if refit:
self.estimator = None
if self.estimator is None:
self.fully_fit_ = False
self.learning_rate = float(self.learning_rate)
self.max_iter = int(self.max_iter)
self.min_samples_leaf = int(self.min_samples_leaf)
if check_none(self.max_depth):
self.max_depth = None
else:
self.max_depth = int(self.max_depth)
if check_none(self.max_leaf_nodes):
self.max_leaf_nodes = None
else:
self.max_leaf_nodes = int(self.max_leaf_nodes)
self.max_bins = int(self.max_bins)
self.l2_regularization = float(self.l2_regularization)
self.tol = float(self.tol)
if check_none(self.scoring):
self.scoring = None
if self.early_stop == "off":
self.n_iter_no_change = 0
self.validation_fraction_ = None
elif self.early_stop == "train":
self.n_iter_no_change = int(self.n_iter_no_change)
self.validation_fraction_ = None
elif self.early_stop == "valid":
self.n_iter_no_change = int(self.n_iter_no_change)
self.validation_fraction_ = float(self.validation_fraction)
else:
raise ValueError("early_stop should be either off, train or valid")
self.verbose = int(self.verbose)
n_iter = int(np.ceil(n_iter))
self.estimator = sklearn.ensemble.HistGradientBoostingRegressor(
loss=self.loss,
learning_rate=self.learning_rate,
max_iter=n_iter,
min_samples_leaf=self.min_samples_leaf,
max_depth=self.max_depth,
max_leaf_nodes=self.max_leaf_nodes,
max_bins=self.max_bins,
l2_regularization=self.l2_regularization,
tol=self.tol,
scoring=self.scoring,
n_iter_no_change=self.n_iter_no_change,
validation_fraction=self.validation_fraction_,
verbose=self.verbose,
warm_start=True,
random_state=self.random_state,
)
else:
self.estimator.max_iter += n_iter
self.estimator.max_iter = min(self.estimator.max_iter, self.max_iter)
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
self.estimator.fit(X, y)
if (
self.estimator.max_iter >= self.max_iter
or self.estimator.max_iter > self.estimator.n_iter_
):
self.fully_fit_ = True
return self
def configuration_fully_fitted(self):
if self.estimator is None:
return False
elif not hasattr(self, "fully_fit_"):
return False
else:
return self.fully_fit_
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
@staticmethod
def get_properties(dataset_properties=None):
return {
"shortname": "GB",
"name": "Gradient Boosting Regressor",
"handles_regression": True,
"handles_classification": False,
"handles_multiclass": False,
"handles_multilabel": False,
"handles_multioutput": False,
"is_deterministic": True,
"input": (DENSE, UNSIGNED_DATA),
"output": (PREDICTIONS,),
}
@staticmethod
def get_hyperparameter_search_space(
feat_type: Optional[FEAT_TYPE_TYPE] = None, dataset_properties=None
):
cs = ConfigurationSpace()
loss = CategoricalHyperparameter(
"loss", ["least_squares"], default_value="least_squares"
)
learning_rate = UniformFloatHyperparameter(
name="learning_rate", lower=0.01, upper=1, default_value=0.1, log=True
)
min_samples_leaf = UniformIntegerHyperparameter(
name="min_samples_leaf", lower=1, upper=200, default_value=20, log=True
)
max_depth = UnParametrizedHyperparameter(name="max_depth", value="None")
max_leaf_nodes = UniformIntegerHyperparameter(
name="max_leaf_nodes", lower=3, upper=2047, default_value=31, log=True
)
max_bins = Constant("max_bins", 255)
l2_regularization = UniformFloatHyperparameter(
name="l2_regularization",
lower=1e-10,
upper=1,
default_value=1e-10,
log=True,
)
early_stop = CategoricalHyperparameter(
name="early_stop", choices=["off", "valid", "train"], default_value="off"
)
tol = UnParametrizedHyperparameter(name="tol", value=1e-7)
scoring = UnParametrizedHyperparameter(name="scoring", value="loss")
n_iter_no_change = UniformIntegerHyperparameter(
name="n_iter_no_change", lower=1, upper=20, default_value=10
)
validation_fraction = UniformFloatHyperparameter(
name="validation_fraction", lower=0.01, upper=0.4, default_value=0.1
)
cs.add_hyperparameters(
[
loss,
learning_rate,
min_samples_leaf,
max_depth,
max_leaf_nodes,
max_bins,
l2_regularization,
early_stop,
tol,
scoring,
n_iter_no_change,
validation_fraction,
]
)
n_iter_no_change_cond = InCondition(
n_iter_no_change, early_stop, ["valid", "train"]
)
validation_fraction_cond = EqualsCondition(
validation_fraction, early_stop, "valid"
)
cs.add_conditions([n_iter_no_change_cond, validation_fraction_cond])
return cs
| GradientBoosting |
python | django-extensions__django-extensions | django_extensions/management/modelviz.py | {
"start": 2081,
"end": 19522
} | class ____:
def __init__(self, app_labels, **kwargs):
self.graphs = []
self.cli_options = kwargs.get("cli_options", None)
self.disable_fields = kwargs.get("disable_fields", False)
self.disable_abstract_fields = kwargs.get("disable_abstract_fields", False)
self.include_models = parse_file_or_list(kwargs.get("include_models", ""))
self.all_applications = kwargs.get("all_applications", False)
self.use_subgraph = kwargs.get("group_models", False)
self.verbose_names = kwargs.get("verbose_names", False)
self.inheritance = kwargs.get("inheritance", True)
self.relations_as_fields = kwargs.get("relations_as_fields", True)
self.relation_fields_only = kwargs.get("relation_fields_only", False)
self.sort_fields = kwargs.get("sort_fields", True)
self.language = kwargs.get("language", None)
if self.language is not None:
activate_language(self.language)
self.exclude_columns = parse_file_or_list(kwargs.get("exclude_columns", ""))
self.exclude_models = parse_file_or_list(kwargs.get("exclude_models", ""))
self.hide_edge_labels = kwargs.get("hide_edge_labels", False)
self.arrow_shape = kwargs.get("arrow_shape")
self.color_code_deletions = kwargs.get("color_code_deletions", False)
if self.all_applications:
self.app_labels = [app.label for app in apps.get_app_configs()]
else:
self.app_labels = app_labels
self.rankdir = kwargs.get("rankdir")
self.display_field_choices = kwargs.get("display_field_choices", False)
self.ordering = kwargs.get("ordering")
def generate_graph_data(self):
self.process_apps()
nodes = []
for graph in self.graphs:
nodes.extend([e["name"] for e in graph["models"]])
for graph in self.graphs:
for model in graph["models"]:
for relation in model["relations"]:
if relation is not None:
if relation["target"] in nodes:
relation["needs_node"] = False
def get_graph_data(self, as_json=False):
now = datetime.datetime.now()
graph_data = {
"created_at": now.strftime("%Y-%m-%d %H:%M"),
"cli_options": self.cli_options,
"disable_fields": self.disable_fields,
"disable_abstract_fields": self.disable_abstract_fields,
"display_field_choices": self.display_field_choices,
"use_subgraph": self.use_subgraph,
"rankdir": self.rankdir,
"ordering": self.ordering,
}
if as_json:
# We need to remove the model and field class
# because it is not JSON serializable
graphs = [context.flatten() for context in self.graphs]
for context in graphs:
for model_data in context["models"]:
model_data.pop("model")
for field_data in model_data["fields"]:
field_data.pop("field")
graph_data["graphs"] = graphs
else:
graph_data["graphs"] = self.graphs
return graph_data
def add_attributes(self, field, abstract_fields):
if self.verbose_names and field.verbose_name:
label = force_str(field.verbose_name)
if label.islower():
label = label.capitalize()
else:
label = field.name
t = type(field).__name__
if isinstance(field, (OneToOneField, ForeignKey)):
t += " ({0})".format(field.remote_field.field_name)
if self.display_field_choices and field.choices is not None:
choices = {c for c, _ in field.choices}
t = str(choices)
# TODO: ManyToManyField, GenericRelation
return {
"field": field,
"name": field.name,
"label": label,
"type": t,
"blank": field.blank,
"abstract": any(
field.creation_counter == abstract_field.creation_counter
for abstract_field in abstract_fields
),
"relation": isinstance(field, RelatedField),
"primary_key": field.primary_key,
}
def add_relation(self, field, model, extras="", color=None):
if self.verbose_names and field.verbose_name:
label = force_str(field.verbose_name)
if label.islower():
label = label.capitalize()
else:
label = field.name
# show related field name
if hasattr(field, "related_query_name"):
related_query_name = field.related_query_name()
if self.verbose_names and related_query_name.islower():
related_query_name = related_query_name.replace("_", " ").capitalize()
label = "{} ({})".format(label, force_str(related_query_name))
if self.hide_edge_labels:
label = ""
# handle self-relationships and lazy-relationships
if isinstance(field.remote_field.model, str):
if field.remote_field.model == "self":
target_model = field.model
else:
if "." in field.remote_field.model:
app_label, model_name = field.remote_field.model.split(".", 1)
else:
app_label = field.model._meta.app_label
model_name = field.remote_field.model
target_model = apps.get_model(app_label, model_name)
else:
target_model = field.remote_field.model
if color:
extras = "[{}, color={}]".format(extras[1:-1], color)
_rel = self.get_relation_context(target_model, field, label, extras)
if _rel not in model["relations"] and self.use_model(_rel["target"]):
return _rel
def get_abstract_models(self, appmodels):
abstract_models = []
for appmodel in appmodels:
abstract_models += [
abstract_model
for abstract_model in appmodel.__bases__
if hasattr(abstract_model, "_meta") and abstract_model._meta.abstract
]
abstract_models = list(set(abstract_models)) # remove duplicates
return abstract_models
def get_app_context(self, app):
return Context(
{
"name": '"%s"' % app.name,
"app_name": "%s" % app.name,
"cluster_app_name": "cluster_%s" % app.name.replace(".", "_"),
"models": [],
}
)
def get_appmodel_attributes(self, appmodel):
if self.relations_as_fields:
attributes = [field for field in appmodel._meta.local_fields]
else:
# Find all the 'real' attributes. Relations are depicted as graph edges
# instead of attributes
attributes = [
field
for field in appmodel._meta.local_fields
if not isinstance(field, RelatedField)
]
return attributes
def get_appmodel_abstracts(self, appmodel):
return [
abstract_model.__name__
for abstract_model in appmodel.__bases__
if hasattr(abstract_model, "_meta") and abstract_model._meta.abstract
]
def get_appmodel_context(self, appmodel, appmodel_abstracts):
context = {
"model": appmodel,
"app_name": appmodel.__module__.replace(".", "_"),
"name": appmodel.__name__,
"abstracts": appmodel_abstracts,
"fields": [],
"relations": [],
}
if self.verbose_names and appmodel._meta.verbose_name:
context["label"] = force_str(appmodel._meta.verbose_name)
else:
context["label"] = context["name"]
return context
def get_bases_abstract_fields(self, c):
_abstract_fields = []
for e in c.__bases__:
if hasattr(e, "_meta") and e._meta.abstract:
_abstract_fields.extend(e._meta.fields)
_abstract_fields.extend(self.get_bases_abstract_fields(e))
return _abstract_fields
def get_inheritance_context(self, appmodel, parent):
label = "multi-table"
if parent._meta.abstract:
label = "abstract"
if appmodel._meta.proxy:
label = "proxy"
label += r"\ninheritance"
if self.hide_edge_labels:
label = ""
return {
"target_app": parent.__module__.replace(".", "_"),
"target": parent.__name__,
"type": "inheritance",
"name": "inheritance",
"label": label,
"arrows": "[arrowhead=empty, arrowtail=none, dir=both]",
"needs_node": True,
}
def get_models(self, app):
appmodels = list(app.get_models())
return appmodels
def get_relation_context(self, target_model, field, label, extras):
return {
"target_app": target_model.__module__.replace(".", "_"),
"target": target_model.__name__,
"type": type(field).__name__,
"name": field.name,
"label": label,
"arrows": extras,
"needs_node": True,
}
def process_attributes(self, field, model, pk, abstract_fields):
newmodel = model.copy()
if self.skip_field(field) or pk and field == pk:
return newmodel
newmodel["fields"].append(self.add_attributes(field, abstract_fields))
return newmodel
def process_apps(self):
for app_label in self.app_labels:
app = apps.get_app_config(app_label)
if not app:
continue
app_graph = self.get_app_context(app)
app_models = self.get_models(app)
abstract_models = self.get_abstract_models(app_models)
app_models = abstract_models + app_models
for appmodel in app_models:
if not self.use_model(appmodel._meta.object_name):
continue
appmodel_abstracts = self.get_appmodel_abstracts(appmodel)
abstract_fields = self.get_bases_abstract_fields(appmodel)
model = self.get_appmodel_context(appmodel, appmodel_abstracts)
attributes = self.get_appmodel_attributes(appmodel)
# find primary key and print it first
# ignoring implicit id if other pk exists
pk = appmodel._meta.pk
if pk and not appmodel._meta.abstract and pk in attributes:
model["fields"].append(self.add_attributes(pk, abstract_fields))
for field in attributes:
model = self.process_attributes(field, model, pk, abstract_fields)
if self.sort_fields:
model = self.sort_model_fields(model)
for field in appmodel._meta.local_fields:
model = self.process_local_fields(field, model, abstract_fields)
for field in appmodel._meta.local_many_to_many:
model = self.process_local_many_to_many(field, model)
if self.inheritance:
# add inheritance arrows
for parent in appmodel.__bases__:
model = self.process_parent(parent, appmodel, model)
app_graph["models"].append(model)
if app_graph["models"]:
self.graphs.append(app_graph)
def process_local_fields(self, field, model, abstract_fields):
newmodel = model.copy()
if (
field.attname.endswith("_ptr_id")
or field in abstract_fields
or self.skip_field(field)
):
# excluding field redundant with inheritance relation
# excluding fields inherited from abstract classes.
# they too show as local_fields
return newmodel
color = None
if self.color_code_deletions and isinstance(field, (OneToOneField, ForeignKey)):
field_on_delete = getattr(field.remote_field, "on_delete", None)
color = ON_DELETE_COLORS.get(field_on_delete)
if isinstance(field, OneToOneField):
relation = self.add_relation(
field, newmodel, "[arrowhead=none, arrowtail=none, dir=both]", color
)
elif isinstance(field, ForeignKey):
relation = self.add_relation(
field,
newmodel,
"[arrowhead=none, arrowtail={}, dir=both]".format(self.arrow_shape),
color,
)
else:
relation = None
if relation is not None:
newmodel["relations"].append(relation)
return newmodel
def process_local_many_to_many(self, field, model):
newmodel = model.copy()
if self.skip_field(field):
return newmodel
relation = None
if isinstance(field, ManyToManyField):
if (
hasattr(field.remote_field.through, "_meta")
and field.remote_field.through._meta.auto_created
):
relation = self.add_relation(
field,
newmodel,
"[arrowhead={} arrowtail={}, dir=both]".format(
self.arrow_shape, self.arrow_shape
),
)
elif isinstance(field, GenericRelation):
relation = self.add_relation(
field,
newmodel,
mark_safe(
'[style="dotted", arrowhead=normal, arrowtail=normal, dir=both]'
),
)
if relation is not None:
newmodel["relations"].append(relation)
return newmodel
def process_parent(self, parent, appmodel, model):
newmodel = model.copy()
if hasattr(parent, "_meta"): # parent is a model
_rel = self.get_inheritance_context(appmodel, parent)
# TODO: seems as if abstract models aren't part of models.getModels,
# which is why they are printed by this without any attributes.
if _rel not in newmodel["relations"] and self.use_model(_rel["target"]):
newmodel["relations"].append(_rel)
return newmodel
def sort_model_fields(self, model):
newmodel = model.copy()
newmodel["fields"] = sorted(
newmodel["fields"],
key=lambda field: (
not field["primary_key"],
not field["relation"],
field["label"],
),
)
return newmodel
def use_model(self, model_name):
"""
Decide whether to use a model, based on the model name and the lists of
models to exclude and include.
"""
# Check against include list.
if self.include_models:
for model_pattern in self.include_models:
model_pattern = "^%s$" % model_pattern.replace("*", ".*")
if re.search(model_pattern, model_name):
return True
# Check against exclude list.
if self.exclude_models:
for model_pattern in self.exclude_models:
model_pattern = "^%s$" % model_pattern.replace("*", ".*")
if re.search(model_pattern, model_name):
return False
# Return `True` if `include_models` is falsey, otherwise return `False`.
return not self.include_models
def skip_field(self, field):
if self.exclude_columns:
if self.verbose_names and field.verbose_name:
if field.verbose_name in self.exclude_columns:
return True
if field.name in self.exclude_columns:
return True
if self.relation_fields_only:
if not isinstance(
field,
(
ForeignKey,
ManyToManyField,
OneToOneField,
RelatedField,
OneToOneRel,
ManyToOneRel,
),
):
return True
return False
def generate_dot(graph_data, template="django_extensions/graph_models/digraph.dot"):
    """Render ``graph_data`` to DOT source with a Django template.

    ``template`` may be a template name or an already-loaded template
    object; only templates backed by the default Django engine are
    accepted, since other engines may render the .dot template wrongly.
    """
    if isinstance(template, str):
        template = loader.get_template(template)
    is_django_template = isinstance(template, Template) or (
        hasattr(template, "template") and isinstance(template.template, Template)
    )
    if not is_django_template:
        raise Exception(
            "Default Django template loader isn't used. "
            "This can lead to the incorrect template rendering. "
            "Please, check the settings."
        )
    context = Context(graph_data).flatten()
    return template.render(context)
def generate_graph_data(*args, **kwargs):
    """Build a :class:`ModelGraph`, run it, and return the collected data.

    All arguments are forwarded unchanged to the ``ModelGraph`` constructor.
    """
    graph = ModelGraph(*args, **kwargs)
    graph.generate_graph_data()
    return graph.get_graph_data()
def use_model(model, include_models, exclude_models):
    """Module-level convenience wrapper around ``ModelGraph.use_model``.

    Builds a throwaway ``ModelGraph`` with no app labels just to evaluate
    the include/exclude decision for ``model``.
    """
    checker = ModelGraph(
        [], include_models=include_models, exclude_models=exclude_models
    )
    return checker.use_model(model)
| ModelGraph |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_type_checking/runtime_evaluated_base_classes_1.py | {
"start": 469,
"end": 502
} | class ____(BaseModel):
x: array
| G |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/fixtures/orm.py | {
"start": 811,
"end": 4138
} | class ____(ORMTest, TablesTest, assertions.AssertsExecutionResults):
# 'once', 'each', None
run_setup_classes = "once"
# 'once', 'each', None
run_setup_mappers = "each"
classes: Any = None
@config.fixture(autouse=True, scope="class")
def _setup_tables_test_class(self):
cls = self.__class__
cls._init_class()
if cls.classes is None:
cls.classes = adict()
cls._setup_once_tables()
cls._setup_once_classes()
cls._setup_once_mappers()
cls._setup_once_inserts()
yield
cls._teardown_once_class()
cls._teardown_once_metadata_bind()
@config.fixture(autouse=True, scope="function")
def _setup_tables_test_instance(self):
self._setup_each_tables()
self._setup_each_classes()
self._setup_each_mappers()
self._setup_each_inserts()
yield
orm.session.close_all_sessions()
self._teardown_each_mappers()
self._teardown_each_classes()
self._teardown_each_tables()
@classmethod
def _teardown_once_class(cls):
cls.classes.clear()
@classmethod
def _setup_once_classes(cls):
if cls.run_setup_classes == "once":
cls._with_register_classes(cls.setup_classes)
@classmethod
def _setup_once_mappers(cls):
if cls.run_setup_mappers == "once":
cls.mapper_registry, cls.mapper = cls._generate_registry()
cls._with_register_classes(cls.setup_mappers)
def _setup_each_mappers(self):
if self.run_setup_mappers != "once":
(
self.__class__.mapper_registry,
self.__class__.mapper,
) = self._generate_registry()
if self.run_setup_mappers == "each":
self._with_register_classes(self.setup_mappers)
def _setup_each_classes(self):
if self.run_setup_classes == "each":
self._with_register_classes(self.setup_classes)
@classmethod
def _generate_registry(cls):
decl = registry(metadata=cls._tables_metadata)
return decl, decl.map_imperatively
@classmethod
def _with_register_classes(cls, fn):
"""Run a setup method, framing the operation with a Base class
that will catch new subclasses to be established within
the "classes" registry.
"""
cls_registry = cls.classes
class _Base:
def __init_subclass__(cls) -> None:
assert cls_registry is not None
cls_registry[cls.__name__] = cls
super().__init_subclass__()
class Basic(BasicEntity, _Base):
pass
class Comparable(ComparableEntity, _Base):
pass
cls.Basic = Basic
cls.Comparable = Comparable
fn()
def _teardown_each_mappers(self):
# some tests create mappers in the test bodies
# and will define setup_mappers as None -
# clear mappers in any case
if self.run_setup_mappers != "once":
orm.clear_mappers()
def _teardown_each_classes(self):
if self.run_setup_classes != "once":
self.classes.clear()
@classmethod
def setup_classes(cls):
pass
@classmethod
def setup_mappers(cls):
pass
| MappedTest |
python | tensorflow__tensorflow | tensorflow/examples/speech_commands/input_data_test.py | {
"start": 1004,
"end": 11024
} | class ____(test.TestCase):
def _getWavData(self):
with self.cached_session():
sample_data = tf.zeros([32000, 2])
wav_encoder = tf.audio.encode_wav(sample_data, 16000)
wav_data = self.evaluate(wav_encoder)
return wav_data
def _saveTestWavFile(self, filename, wav_data):
with open(filename, "wb") as f:
f.write(wav_data)
def _saveWavFolders(self, root_dir, labels, how_many):
wav_data = self._getWavData()
for label in labels:
dir_name = os.path.join(root_dir, label)
os.mkdir(dir_name)
for i in range(how_many):
file_path = os.path.join(dir_name, "some_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
def _model_settings(self):
return {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"fingerprint_width": 40,
"preprocess": "mfcc",
}
def _runGetDataTest(self, preprocess, window_length_ms):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
background_dir = os.path.join(wav_dir, "_background_noise_")
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
model_settings = models.prepare_model_settings(
4, 16000, 1000, window_length_ms, 20, 40, preprocess)
with self.cached_session() as sess:
audio_processor = input_data.AudioProcessor(
"", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir)
result_data, result_labels = audio_processor.get_data(
10, 0, model_settings, 0.3, 0.1, 100, "training", sess)
self.assertEqual(10, len(result_data))
self.assertEqual(10, len(result_labels))
def testPrepareWordsList(self):
words_list = ["a", "b"]
self.assertGreater(
len(input_data.prepare_words_list(words_list)), len(words_list))
def testWhichSet(self):
self.assertEqual(
input_data.which_set("foo.wav", 10, 10),
input_data.which_set("foo.wav", 10, 10))
self.assertEqual(
input_data.which_set("foo_nohash_0.wav", 10, 10),
input_data.which_set("foo_nohash_1.wav", 10, 10))
@test_util.run_deprecated_v1
def testPrepareDataIndex(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
["a", "b"], 10, 10,
self._model_settings(), tmp_dir)
self.assertLess(0, audio_processor.set_size("training"))
self.assertIn("training", audio_processor.data_index)
self.assertIn("validation", audio_processor.data_index)
self.assertIn("testing", audio_processor.data_index)
self.assertEqual(input_data.UNKNOWN_WORD_INDEX,
audio_processor.word_to_index["c"])
def testPrepareDataIndexEmpty(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 0)
with self.assertRaises(Exception) as e:
_ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b"], 10, 10,
self._model_settings(), tmp_dir)
self.assertIn("No .wavs found", str(e.exception))
def testPrepareDataIndexMissing(self):
tmp_dir = self.get_temp_dir()
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
with self.assertRaises(Exception) as e:
_ = input_data.AudioProcessor("", tmp_dir, 10, 10, ["a", "b", "d"], 10,
10, self._model_settings(), tmp_dir)
self.assertIn("Expected to find", str(e.exception))
@test_util.run_deprecated_v1
def testPrepareBackgroundData(self):
tmp_dir = self.get_temp_dir()
background_dir = os.path.join(tmp_dir, "_background_noise_")
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
self._saveWavFolders(tmp_dir, ["a", "b", "c"], 100)
audio_processor = input_data.AudioProcessor("", tmp_dir, 10, 10,
["a", "b"], 10, 10,
self._model_settings(), tmp_dir)
self.assertEqual(10, len(audio_processor.background_data))
def testLoadWavFile(self):
tmp_dir = self.get_temp_dir()
file_path = os.path.join(tmp_dir, "load_test.wav")
wav_data = self._getWavData()
self._saveTestWavFile(file_path, wav_data)
sample_data = input_data.load_wav_file(file_path)
self.assertIsNotNone(sample_data)
def testSaveWavFile(self):
tmp_dir = self.get_temp_dir()
file_path = os.path.join(tmp_dir, "load_test.wav")
save_data = np.zeros([16000, 1])
input_data.save_wav_file(file_path, save_data, 16000)
loaded_data = input_data.load_wav_file(file_path)
self.assertIsNotNone(loaded_data)
self.assertEqual(16000, len(loaded_data))
@test_util.run_deprecated_v1
def testPrepareProcessingGraph(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
background_dir = os.path.join(wav_dir, "_background_noise_")
os.mkdir(background_dir)
wav_data = self._getWavData()
for i in range(10):
file_path = os.path.join(background_dir, "background_audio_%d.wav" % i)
self._saveTestWavFile(file_path, wav_data)
model_settings = {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"fingerprint_width": 40,
"preprocess": "mfcc",
}
audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
10, 10, model_settings, tmp_dir)
self.assertIsNotNone(audio_processor.wav_filename_placeholder_)
self.assertIsNotNone(audio_processor.foreground_volume_placeholder_)
self.assertIsNotNone(audio_processor.time_shift_padding_placeholder_)
self.assertIsNotNone(audio_processor.time_shift_offset_placeholder_)
self.assertIsNotNone(audio_processor.background_data_placeholder_)
self.assertIsNotNone(audio_processor.background_volume_placeholder_)
self.assertIsNotNone(audio_processor.output_)
@test_util.run_deprecated_v1
def testGetDataAverage(self):
self._runGetDataTest("average", 10)
@test_util.run_deprecated_v1
def testGetDataAverageLongWindow(self):
self._runGetDataTest("average", 30)
@test_util.run_deprecated_v1
def testGetDataMfcc(self):
self._runGetDataTest("mfcc", 30)
@test_util.run_deprecated_v1
def testGetDataMicro(self):
self._runGetDataTest("micro", 20)
@test_util.run_deprecated_v1
def testGetUnprocessedData(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 100)
model_settings = {
"desired_samples": 160,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"fingerprint_width": 40,
"preprocess": "mfcc",
}
audio_processor = input_data.AudioProcessor("", wav_dir, 10, 10, ["a", "b"],
10, 10, model_settings, tmp_dir)
result_data, result_labels = audio_processor.get_unprocessed_data(
10, model_settings, "training")
self.assertEqual(10, len(result_data))
self.assertEqual(10, len(result_labels))
@test_util.run_deprecated_v1
def testGetFeaturesForWav(self):
tmp_dir = self.get_temp_dir()
wav_dir = os.path.join(tmp_dir, "wavs")
os.mkdir(wav_dir)
self._saveWavFolders(wav_dir, ["a", "b", "c"], 1)
desired_samples = 1600
model_settings = {
"desired_samples": desired_samples,
"fingerprint_size": 40,
"label_count": 4,
"window_size_samples": 100,
"window_stride_samples": 100,
"fingerprint_width": 40,
"average_window_width": 6,
"preprocess": "average",
}
with self.cached_session() as sess:
audio_processor = input_data.AudioProcessor(
"", wav_dir, 10, 10, ["a", "b"], 10, 10, model_settings, tmp_dir)
sample_data = np.zeros([desired_samples, 1])
for i in range(desired_samples):
phase = i % 4
if phase == 0:
sample_data[i, 0] = 0
elif phase == 1:
sample_data[i, 0] = -1
elif phase == 2:
sample_data[i, 0] = 0
elif phase == 3:
sample_data[i, 0] = 1
test_wav_path = os.path.join(tmp_dir, "test_wav.wav")
input_data.save_wav_file(test_wav_path, sample_data, 16000)
results = audio_processor.get_features_for_wav(test_wav_path,
model_settings, sess)
spectrogram = results[0]
self.assertEqual(1, spectrogram.shape[0])
self.assertEqual(16, spectrogram.shape[1])
self.assertEqual(11, spectrogram.shape[2])
self.assertNear(0, spectrogram[0, 0, 0], 0.1)
self.assertNear(200, spectrogram[0, 0, 5], 0.1)
def testGetFeaturesRange(self):
model_settings = {
"preprocess": "average",
}
features_min, _ = input_data.get_features_range(model_settings)
self.assertNear(0.0, features_min, 1e-5)
def testGetMfccFeaturesRange(self):
model_settings = {
"preprocess": "mfcc",
}
features_min, features_max = input_data.get_features_range(model_settings)
self.assertLess(features_min, features_max)
if __name__ == "__main__":
test.main()
| InputDataTest |
python | charliermarsh__ruff | python/ruff-ecosystem/ruff_ecosystem/projects.py | {
"start": 1674,
"end": 5071
} | class ____(Serializable):
"""
A collection of key, value pairs to override in the Ruff configuration file.
The key describes a member to override in the toml file; '.' may be used to indicate a
nested value e.g. `format.quote-style`.
If a Ruff configuration file does not exist and overrides are provided, it will be created.
"""
always: dict[str, Any] = field(default_factory=dict)
when_preview: dict[str, Any] = field(default_factory=dict)
when_no_preview: dict[str, Any] = field(default_factory=dict)
def __hash__(self) -> int:
# Avoid computing this hash repeatedly since this object is intended
# to be immutable and serializing to toml is not necessarily cheap
@cache
def as_string():
return tomli_w.dumps(
{
"always": self.always,
"when_preview": self.when_preview,
"when_no_preview": self.when_no_preview,
}
)
return hash(as_string())
@contextlib.contextmanager
def patch_config(
self,
dirpath: Path,
preview: bool,
) -> None:
"""
Temporarily patch the Ruff configuration file in the given directory.
"""
dot_ruff_toml = dirpath / ".ruff.toml"
ruff_toml = dirpath / "ruff.toml"
pyproject_toml = dirpath / "pyproject.toml"
# Prefer `ruff.toml` over `pyproject.toml`
if dot_ruff_toml.exists():
path = dot_ruff_toml
base = []
elif ruff_toml.exists():
path = ruff_toml
base = []
else:
path = pyproject_toml
base = ["tool", "ruff"]
overrides = {
**ALWAYS_CONFIG_OVERRIDES,
**self.always,
**(self.when_preview if preview else self.when_no_preview),
}
if not overrides:
yield
return
# Read the existing content if the file is present
if path.exists():
contents = path.read_text()
toml = tomli.loads(contents)
else:
contents = None
toml = {}
# Do not write a toml file if it does not exist and we're just nulling values
if all((value is None for value in overrides.values())):
yield
return
# Update the TOML, using `.` to descend into nested keys
for key, value in overrides.items():
if value is not None:
logger.debug(f"Setting {key}={value!r} in {path}")
else:
logger.debug(f"Restoring {key} to default in {path}")
target = toml
names = base + key.split(".")
for name in names[:-1]:
if name not in target:
target[name] = {}
target = target[name]
if value is None:
# Remove null values i.e. restore to default
target.pop(names[-1], None)
else:
target[names[-1]] = value
tomli_w.dump(toml, path.open("wb"))
try:
yield
finally:
# Restore the contents or delete the file
if contents is None:
path.unlink()
else:
path.write_text(contents)
| ConfigOverrides |
python | dask__distributed | distributed/dashboard/components/scheduler.py | {
"start": 64131,
"end": 66625
} | class ____(DashboardComponent):
def __init__(self, scheduler, name, height=150, **kwargs):
self.scheduler = scheduler
self.action_ys = dict()
self.last = 0
self.name = name
self.source = ColumnDataSource(
{"time": [], "action": [], "hover": [], "y": [], "color": []}
)
x_range = DataRange1d(follow="end", follow_interval=200000)
self.root = figure(
title=name,
x_axis_type="datetime",
height=height,
tools="",
x_range=x_range,
**kwargs,
)
self.root.scatter(
source=self.source,
x="time",
y="y",
color="color",
size=50,
alpha=0.5,
legend_field="action",
)
self.root.yaxis.axis_label = "Action"
self.root.legend.location = "top_left"
hover = HoverTool()
hover.tooltips = "@action<br>@hover"
hover.point_policy = "follow_mouse"
self.root.add_tools(
hover,
ResetTool(),
PanTool(dimensions="width"),
WheelZoomTool(dimensions="width"),
)
@without_property_validation
@log_errors
def update(self):
topic = self.scheduler._broker._topics[self.name]
log = topic.events
n = min(topic.count - self.last, len(log))
if log:
log = [log[-i] for i in range(1, n + 1)]
self.last = topic.count
if log:
actions = []
times = []
hovers = []
ys = []
colors = []
for msg_time, msg in log:
times.append(msg_time * 1000)
action = msg["action"]
actions.append(action)
try:
ys.append(self.action_ys[action])
except KeyError:
self.action_ys[action] = len(self.action_ys)
ys.append(self.action_ys[action])
colors.append(color_of(action))
hovers.append("TODO")
new = {
"time": times,
"action": actions,
"hover": hovers,
"y": ys,
"color": colors,
}
if PROFILING:
curdoc().add_next_tick_callback(lambda: self.source.stream(new, 10000))
else:
self.source.stream(new, 10000)
| Events |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/inputs.py | {
"start": 13579,
"end": 13894
} | class ____(graphene.InputObjectType):
stepKey = graphene.NonNull(graphene.String)
marshalledInputs = graphene.List(graphene.NonNull(GrapheneMarshalledInput))
marshalledOutputs = graphene.List(graphene.NonNull(GrapheneMarshalledOutput))
class Meta:
name = "StepExecution"
| GrapheneStepExecution |
python | joke2k__faker | faker/providers/ssn/__init__.py | {
"start": 62,
"end": 240
} | class ____(BaseProvider):
ssn_formats: ElementsType[str] = ("###-##-####",)
def ssn(self) -> str:
return self.bothify(self.random_element(self.ssn_formats))
| Provider |
python | keon__algorithms | tests/test_strings.py | {
"start": 17951,
"end": 18159
} | class ____(unittest.TestCase):
def test_first_unique_char(self):
self.assertEqual(0, first_unique_char("leetcode"))
self.assertEqual(2, first_unique_char("loveleetcode"))
| TestFirstUniqueChar |
python | ipython__ipython | IPython/core/compilerop.py | {
"start": 2529,
"end": 6990
} | class ____(codeop.Compile):
"""A compiler that caches code compiled from interactive statements.
"""
def __init__(self):
codeop.Compile.__init__(self)
# Caching a dictionary { filename: execution_count } for nicely
# rendered tracebacks. The filename corresponds to the filename
# argument used for the builtins.compile function.
self._filename_map = {}
def ast_parse(self, source, filename='<unknown>', symbol='exec'):
"""Parse code to an AST with the current compiler flags active.
Arguments are exactly the same as ast.parse (in the standard library),
and are passed to the built-in compile function."""
return compile(source, filename, symbol, self.flags | PyCF_ONLY_AST, 1)
def reset_compiler_flags(self):
"""Reset compiler flags to default state."""
# This value is copied from codeop.Compile.__init__, so if that ever
# changes, it will need to be updated.
self.flags = codeop.PyCF_DONT_IMPLY_DEDENT
@property
def compiler_flags(self):
"""Flags currently active in the compilation process.
"""
return self.flags
def get_code_name(self, raw_code, transformed_code, number):
"""Compute filename given the code, and the cell number.
Parameters
----------
raw_code : str
The raw cell code.
transformed_code : str
The executable Python source code to cache and compile.
number : int
A number which forms part of the code's name. Used for the execution
counter.
Returns
-------
The computed filename.
"""
return code_name(transformed_code, number)
def format_code_name(self, name):
"""Return a user-friendly label and name for a code block.
Parameters
----------
name : str
The name for the code block returned from get_code_name
Returns
-------
A (label, name) pair that can be used in tracebacks, or None if the default formatting should be used.
"""
if name in self._filename_map:
return "Cell", "In[%s]" % self._filename_map[name]
def cache(self, transformed_code, number=0, raw_code=None):
"""Make a name for a block of code, and cache the code.
Parameters
----------
transformed_code : str
The executable Python source code to cache and compile.
number : int
A number which forms part of the code's name. Used for the execution
counter.
raw_code : str
The raw code before transformation, if None, set to `transformed_code`.
Returns
-------
The name of the cached code (as a string). Pass this as the filename
argument to compilation, so that tracebacks are correctly hooked up.
"""
if raw_code is None:
raw_code = transformed_code
name = self.get_code_name(raw_code, transformed_code, number)
# Save the execution count
self._filename_map[name] = number
# Since Python 2.5, setting mtime to `None` means the lines will
# never be removed by `linecache.checkcache`. This means all the
# monkeypatching has *never* been necessary, since this code was
# only added in 2010, at which point IPython had already stopped
# supporting Python 2.4.
#
# Note that `linecache.clearcache` and `linecache.updatecache` may
# still remove our code from the cache, but those show explicit
# intent, and we should not try to interfere. Normally the former
# is never called except when out of memory, and the latter is only
# called for lines *not* in the cache.
entry = (
len(transformed_code),
None,
[line + "\n" for line in transformed_code.splitlines()],
name,
)
linecache.cache[name] = entry
return name
@contextmanager
def extra_flags(self, flags):
## bits that we'll set to 1
turn_on_bits = ~self.flags & flags
self.flags = self.flags | flags
try:
yield
finally:
# turn off only the bits we turned on so that something like
# __future__ that set flags stays.
self.flags &= ~turn_on_bits
| CachingCompiler |
python | django__django | tests/null_fk_ordering/models.py | {
"start": 855,
"end": 992
} | class ____(models.Model):
forum = models.ForeignKey(Forum, models.SET_NULL, null=True)
title = models.CharField(max_length=32)
| Post |
python | dask__dask | dask/dataframe/dask_expr/_expr.py | {
"start": 49718,
"end": 49891
} | class ____(Elemwise):
_parameters = ["frame", "unit", "errors"]
_defaults = {"unit": None, "errors": "raise"}
operation = staticmethod(pd.to_timedelta)
| ToTimedelta |
python | getsentry__sentry-python | sentry_sdk/integrations/celery/utils.py | {
"start": 1004,
"end": 1208
} | class ____:
def __enter__(self):
# type: () -> None
return None
def __exit__(self, exc_type, exc_value, traceback):
# type: (Any, Any, Any) -> None
return None
| NoOpMgr |
python | pandas-dev__pandas | pandas/tests/series/test_npfuncs.py | {
"start": 203,
"end": 1338
} | class ____:
def test_ptp(self):
# GH#21614
N = 1000
arr = np.random.default_rng(2).standard_normal(N)
ser = Series(arr)
assert np.ptp(ser) == np.ptp(arr)
def test_numpy_unique(datetime_series):
# it works!
np.unique(datetime_series)
@pytest.mark.parametrize("index", [["a", "b", "c", "d", "e"], None])
def test_numpy_argwhere(index):
# GH#35331
s = Series(range(5), index=index, dtype=np.int64)
result = np.argwhere(s > 2).astype(np.int64)
expected = np.array([[3], [4]], dtype=np.int64)
tm.assert_numpy_array_equal(result, expected)
@td.skip_if_no("pyarrow")
def test_log_arrow_backed_missing_value(using_nan_is_na):
# GH#56285
ser = Series([1, 2, None], dtype="float64[pyarrow]")
if using_nan_is_na:
result = np.log(ser)
expected = np.log(Series([1, 2, None], dtype="float64[pyarrow]"))
tm.assert_series_equal(result, expected)
else:
# we get cast to object which raises
msg = "loop of ufunc does not support argument"
with pytest.raises(TypeError, match=msg):
np.log(ser)
| TestPtp |
python | getsentry__sentry | src/sentry/db/models/manager/base.py | {
"start": 2095,
"end": 23289
} | class ____(_base_manager_base[M]):
lookup_handlers = {"iexact": lambda x: x.upper()}
use_for_related_fields = True
_queryset_class = BaseQuerySet
def __init__(
self,
*args: Any,
cache_fields: Sequence[str] | None = None,
cache_ttl: int = 60 * 5,
**kwargs: Any,
) -> None:
#: Model fields for which we should build up a cache to be used with
#: Model.objects.get_from_cache(fieldname=value)`.
#:
#: Note that each field by its own needs to be a potential primary key
#: (uniquely identify a row), so for example organization slug is ok,
#: project slug is not.
self.cache_fields = cache_fields if cache_fields is not None else ()
self.cache_ttl = cache_ttl
self._cache_version: str | None = kwargs.pop("cache_version", None)
self.__local_cache = threading.local()
self._triggers: dict[
object, tuple[ModelManagerTriggerCondition, ModelManagerTriggerAction]
] = {}
super().__init__(*args, **kwargs)
@staticmethod
@contextmanager
def local_cache() -> Generator[None]:
"""Enables local caching for the entire process."""
global _local_cache_enabled, _local_cache_generation
if _local_cache_enabled:
raise RuntimeError("nested use of process global local cache")
_local_cache_enabled = True
try:
yield
finally:
_local_cache_enabled = False
_local_cache_generation += 1
def _get_local_cache(self) -> MutableMapping[str, M] | None:
if not _local_cache_enabled:
return None
gen = _local_cache_generation
cache_gen = getattr(_local_cache, "generation", None)
if cache_gen != gen or not hasattr(_local_cache, "cache"):
_local_cache.cache = {}
_local_cache.generation = gen
return _local_cache.cache
def _get_cache(self) -> MutableMapping[str, Any]:
if not hasattr(self.__local_cache, "value"):
self.__local_cache.value = weakref.WeakKeyDictionary()
return self.__local_cache.value
def _set_cache(self, value: Any) -> None:
self.__local_cache.value = value
@property
def cache_version(self) -> str:
if self._cache_version is None:
self._cache_version = md5_text(
"&".join(sorted(f.attname for f in self.model._meta.fields))
).hexdigest()[:3]
return self._cache_version
__cache = property(_get_cache, _set_cache)
def __getstate__(self) -> Mapping[str, Any]:
d = self.__dict__.copy()
# we can't serialize weakrefs
d.pop("_BaseManager__cache", None)
d.pop("_BaseManager__local_cache", None)
return d
def __setstate__(self, state: Mapping[str, Any]) -> None:
self.__dict__.update(state)
# TODO(typing): Basically everywhere else we set this to `threading.local()`.
self.__local_cache = weakref.WeakKeyDictionary() # type: ignore[assignment]
def __class_prepared(self, sender: Any, **kwargs: Any) -> None:
"""
Given the cache is configured, connects the required signals for invalidation.
"""
post_save.connect(self.post_save, sender=sender, weak=False)
post_delete.connect(self.post_delete, sender=sender, weak=False)
if not self.cache_fields:
return
post_init.connect(self.__post_init, sender=sender, weak=False)
post_save.connect(self.__post_save, sender=sender, weak=False)
post_delete.connect(self.__post_delete, sender=sender, weak=False)
def __cache_state(self, instance: M) -> None:
"""
Updates the tracked state of an instance.
"""
if instance.pk:
self.__cache[instance] = {
f: self.__value_for_field(instance, f) for f in self.cache_fields
}
def __post_init(self, instance: M, **kwargs: Any) -> None:
"""
Stores the initial state of an instance.
"""
self.__cache_state(instance)
def __post_save(self, instance: M, **kwargs: Any) -> None:
"""
Pushes changes to an instance into the cache, and removes invalid (changed)
lookup values.
"""
pk_name = instance._meta.pk.name
pk_names = ("pk", pk_name)
pk_val = instance.pk
for key in self.cache_fields:
if key in pk_names:
continue
# store pointers
value = self.__value_for_field(instance, key)
cache.set(
key=self.__get_lookup_cache_key(**{key: value}),
value=pk_val,
timeout=self.cache_ttl,
version=self.cache_version,
)
# Ensure we don't serialize the database into the cache
db = instance._state.db
instance._state.db = None
# store actual object
try:
cache.set(
key=self.__get_lookup_cache_key(**{pk_name: pk_val}),
value=instance,
timeout=self.cache_ttl,
version=self.cache_version,
)
except Exception as e:
logger.exception(str(e))
instance._state.db = db
# Kill off any keys which are no longer valid
if instance in self.__cache:
for key in self.cache_fields:
if key not in self.__cache[instance]:
continue
value = self.__cache[instance][key]
current_value = self.__value_for_field(instance, key)
if value != current_value:
cache.delete(
key=self.__get_lookup_cache_key(**{key: value}), version=self.cache_version
)
self.__cache_state(instance)
self._execute_triggers(ModelManagerTriggerCondition.SAVE)
def __post_delete(self, instance: M, **kwargs: Any) -> None:
"""
Drops instance from all cache storages.
"""
pk_name = instance._meta.pk.name
for key in self.cache_fields:
if key in ("pk", pk_name):
continue
# remove pointers
value = self.__value_for_field(instance, key)
cache.delete(
key=self.__get_lookup_cache_key(**{key: value}), version=self.cache_version
)
# remove actual object
cache.delete(
key=self.__get_lookup_cache_key(**{pk_name: instance.pk}), version=self.cache_version
)
self._execute_triggers(ModelManagerTriggerCondition.DELETE)
def __get_lookup_cache_key(self, **kwargs: Any) -> str:
return make_key(self.model, "modelcache", kwargs)
def __value_for_field(self, instance: M, key: str) -> Any:
"""
Return the cacheable value for a field.
ForeignKey's will cache via the primary key rather than using an
instance ref. This is needed due to the way lifecycle of models works
as otherwise we end up doing wasteful queries.
"""
if key == "pk":
return instance.pk
field = instance._meta.get_field(key)
assert isinstance(field, Field), field
return getattr(instance, field.attname)
def contribute_to_class(self, model: type[Model], name: str) -> None:
super().contribute_to_class(model, name)
class_prepared.connect(self.__class_prepared, sender=model)
@django_test_transaction_water_mark()
def get_from_cache(
self, use_replica: bool = settings.SENTRY_MODEL_CACHE_USE_REPLICA, **kwargs: Any
) -> M:
"""
Wrapper around QuerySet.get which supports caching of the
intermediate value. Callee is responsible for making sure
the cache key is cleared on save.
"""
if not self.cache_fields:
raise ValueError("We cannot cache this query. Just hit the database.")
key, pk_name, value = self._get_cacheable_kv_from_kwargs(kwargs)
if key not in self.cache_fields and key != pk_name:
raise ValueError("We cannot cache this query. Just hit the database.")
cache_key = self.__get_lookup_cache_key(**{key: value})
local_cache = self._get_local_cache()
def validate_result(inst: Any) -> M:
if isinstance(inst, self.model) and (key != pk_name or int(value) == inst.pk):
return inst
if settings.DEBUG:
raise ValueError("Unexpected value type returned from cache")
logger.error(
"Cache response returned invalid value",
extra={"instance": inst, "key": key, "model": str(self.model)},
)
if local_cache is not None and cache_key in local_cache:
del local_cache[cache_key]
cache.delete(cache_key, version=self.cache_version)
return self.using_replica().get(**kwargs) if use_replica else self.get(**kwargs)
if local_cache is not None and cache_key in local_cache:
return validate_result(local_cache[cache_key])
retval = cache.get(cache_key, version=self.cache_version)
# If we don't have a hit in the django level cache, collect
# the result, and store it both in django and local caches.
if retval is None:
result = self.using_replica().get(**kwargs) if use_replica else self.get(**kwargs)
assert result
# Ensure we're pushing it into the cache
self.__post_save(instance=result)
if local_cache is not None:
local_cache[cache_key] = result
return validate_result(result)
# If we didn't look up by pk we need to hit the reffed
# key
if key != pk_name:
result = self.get_from_cache(**{pk_name: retval})
if local_cache is not None:
local_cache[cache_key] = result
return validate_result(result)
retval = validate_result(retval)
kwargs = {**kwargs, "replica": True} if use_replica else {**kwargs}
retval._state.db = router.db_for_read(self.model, **kwargs)
return retval
def _get_cacheable_kv_from_kwargs(self, kwargs: Mapping[str, Any]) -> tuple[str, str, int]:
if not kwargs or len(kwargs) > 1:
raise ValueError("We cannot cache this query. Just hit the database.")
key, value = next(iter(kwargs.items()))
pk_name = self.model._meta.pk.name
if key == "pk":
key = pk_name
# We store everything by key references (vs instances)
if isinstance(value, Model):
value = value.pk
# Kill __exact since it's the default behavior
if key.endswith("__exact"):
key = key.split("__exact", 1)[0]
return key, pk_name, value
def get_many_from_cache(self, values: Collection[str | int], key: str = "pk") -> Sequence[Any]:
"""
Wrapper around `QuerySet.filter(pk__in=values)` which supports caching of
the intermediate value. Callee is responsible for making sure the
cache key is cleared on save.
NOTE: We can only query by primary key or some other unique identifier.
It is not possible to e.g. run `Project.objects.get_many_from_cache([1,
2, 3], key="organization_id")` and get back all projects belonging to
those orgs. The length of the return value is bounded by the length of
`values`.
For most models, if one attempts to use a non-PK value this will just
degrade to a DB query, like with `get_from_cache`.
"""
pk_name = self.model._meta.pk.name
if key == "pk":
key = pk_name
# Kill __exact since it's the default behavior
if key.endswith("__exact"):
key = key.split("__exact", 1)[0]
if key not in self.cache_fields and key != pk_name:
raise ValueError("We cannot cache this query. Just hit the database.")
final_results = []
cache_lookup_cache_keys = []
cache_lookup_values = []
local_cache = self._get_local_cache()
for value in values:
cache_key = self.__get_lookup_cache_key(**{key: value})
result = local_cache and local_cache.get(cache_key)
if result is not None:
final_results.append(result)
else:
cache_lookup_cache_keys.append(cache_key)
cache_lookup_values.append(value)
if not cache_lookup_cache_keys:
return final_results
cache_results = cache.get_many(cache_lookup_cache_keys, version=self.cache_version)
db_lookup_cache_keys = []
db_lookup_values = []
nested_lookup_cache_keys = []
nested_lookup_values = []
for cache_key, value in zip(cache_lookup_cache_keys, cache_lookup_values):
cache_result = cache_results.get(cache_key)
if cache_result is None:
db_lookup_cache_keys.append(cache_key)
db_lookup_values.append(value)
continue
# If we didn't look up by pk we need to hit the reffed key
if key != pk_name:
nested_lookup_cache_keys.append(cache_key)
nested_lookup_values.append(cache_result)
continue
if not isinstance(cache_result, self.model):
if settings.DEBUG:
raise ValueError("Unexpected value type returned from cache")
logger.error("Cache response returned invalid value %r", cache_result)
db_lookup_cache_keys.append(cache_key)
db_lookup_values.append(value)
continue
if key == pk_name and int(value) != cache_result.pk:
if settings.DEBUG:
raise ValueError("Unexpected value returned from cache")
logger.error("Cache response returned invalid value %r", cache_result)
db_lookup_cache_keys.append(cache_key)
db_lookup_values.append(value)
continue
final_results.append(cache_result)
if nested_lookup_values:
nested_results = self.get_many_from_cache(nested_lookup_values, key=pk_name)
final_results.extend(nested_results)
if local_cache is not None:
for nested_result in nested_results:
value = getattr(nested_result, key)
cache_key = self.__get_lookup_cache_key(**{key: value})
local_cache[cache_key] = nested_result
if not db_lookup_values:
return final_results
cache_writes = []
db_results = {getattr(x, key): x for x in self.filter(**{key + "__in": db_lookup_values})}
for cache_key, value in zip(db_lookup_cache_keys, db_lookup_values):
db_result = db_results.get(value)
if db_result is None:
continue # This model ultimately does not exist
# Ensure we're pushing it into the cache
cache_writes.append(db_result)
if local_cache is not None:
local_cache[cache_key] = db_result
final_results.append(db_result)
# XXX: Should use set_many here, but __post_save code is too complex
for instance in cache_writes:
self.__post_save(instance=instance)
return final_results
def create_or_update(self, **kwargs: Any) -> tuple[Any, bool]:
return create_or_update(self.model, **kwargs)
def uncache_object(self, instance_id: int) -> None:
pk_name = self.model._meta.pk.name
cache_key = self.__get_lookup_cache_key(**{pk_name: instance_id})
cache.delete(cache_key, version=self.cache_version)
def post_save(self, *, instance: M, created: bool, **kwargs: object) -> None: # type: ignore[misc] # python/mypy#6178
"""
Triggered when a model bound to this manager is saved.
"""
def post_delete(self, instance: M, **kwargs: Any) -> None: # type: ignore[misc] # python/mypy#6178
"""
Triggered when a model bound to this manager is deleted.
"""
def get_queryset(self) -> BaseQuerySet[M]:
"""
Returns a new QuerySet object. Subclasses can override this method to
easily customize the behavior of the Manager.
"""
# TODO: This is a quick-and-dirty place to put the trigger hook that won't
# work for all model classes, because some custom managers override
# get_queryset without a `super` call.
self._execute_triggers(ModelManagerTriggerCondition.QUERY)
if hasattr(self, "_hints"):
return self._queryset_class(self.model, using=self._db, hints=self._hints)
return self._queryset_class(self.model, using=self._db)
@contextmanager
def register_trigger(
self, condition: ModelManagerTriggerCondition, action: ModelManagerTriggerAction
) -> Generator[None]:
"""Register a callback for when an operation is executed inside the context.
There is no guarantee whether the action will be called before or after the
triggering operation is executed, nor whether it will or will not be called
if the triggering operation raises an exception.
Both the registration of the trigger and the execution of the action are NOT
THREADSAFE. This is intended for offline use in single-threaded contexts such
as pytest. We must add synchronization if we intend to adapt it for
production use.
"""
key = object()
self._triggers[key] = (condition, action)
try:
yield
finally:
del self._triggers[key]
def _execute_triggers(self, condition: ModelManagerTriggerCondition) -> None:
for next_condition, next_action in self._triggers.values():
if condition == next_condition:
next_action(self.model)
def create_silo_limited_copy(self: BaseManager[M], limit: SiloLimit) -> BaseManager[M]:
"""Create a copy of this manager that enforces silo limitations."""
# Dynamically create a subclass of this manager's class, adding overrides.
cls = type(self)
overrides = {
"get_queryset": limit.create_override(cls.get_queryset),
"bulk_create": limit.create_override(cls.bulk_create),
"bulk_update": limit.create_override(cls.bulk_update),
"create": limit.create_override(cls.create),
"create_or_update": (
limit.create_override(cls.create_or_update)
if hasattr(cls, "create_or_update")
else None
),
"get_or_create": limit.create_override(cls.get_or_create),
"post_delete": (
limit.create_override(cls.post_delete) if hasattr(cls, "post_delete") else None
),
"select_for_update": limit.create_override(cls.select_for_update),
"update": limit.create_override(cls.update),
"update_or_create": limit.create_override(cls.update_or_create),
"get_from_cache": (
limit.create_override(cls.get_from_cache) if hasattr(cls, "get_from_cache") else None
),
"get_many_from_cache": (
limit.create_override(cls.get_many_from_cache)
if hasattr(cls, "get_many_from_cache")
else None
),
}
manager_subclass = type(cls.__name__, (cls,), overrides)
manager_instance = manager_subclass()
# Ordinarily a pointer to the model class is set after the class is defined,
# meaning we can't inherit it. Manually copy it over now.
manager_instance.model = self.model
# Copy over some more stuff that would be set in __init__
# (warning: this is brittle)
if hasattr(self, "cache_fields"):
manager_instance.cache_fields = self.cache_fields
manager_instance.cache_ttl = self.cache_ttl
manager_instance._cache_version = self._cache_version
manager_instance.__local_cache = threading.local()
# Dynamically extend and replace the queryset class. This will affect all
# queryset objects later returned from the new manager.
qs_cls = manager_instance._queryset_class
assert issubclass(qs_cls, BaseQuerySet) or issubclass(qs_cls, models.QuerySet)
queryset_overrides = {
"bulk_create": limit.create_override(qs_cls.bulk_create),
"bulk_update": limit.create_override(qs_cls.bulk_update),
"create": limit.create_override(qs_cls.create),
"delete": limit.create_override(qs_cls.delete),
"get_or_create": limit.create_override(qs_cls.get_or_create),
"update": limit.create_override(qs_cls.update),
"update_or_create": limit.create_override(qs_cls.update_or_create),
}
queryset_subclass = type(qs_cls.__name__, (qs_cls,), queryset_overrides)
manager_instance._queryset_class = queryset_subclass
return manager_instance
| BaseManager |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/parsers/value_parsers.py | {
"start": 4956,
"end": 6061
} | class ____(Parser):
"""
Composite argument parser for connecting to a host using SSH.
Format: user@host[:port]
"""
EXPECTED_FORMAT = '{user}@{host}[:{port}]'
def parse(self, state: ParserState) -> t.Any:
"""Parse the input from the given state and return the result."""
namespace = state.current_namespace
with state.delimit('@'):
user = AnyParser(no_match_message=f'Expected {{user}} from: {self.EXPECTED_FORMAT}').parse(state)
setattr(namespace, 'user', user)
with state.delimit(':', required=False) as colon: # type: ParserBoundary
host = AnyParser(no_match_message=f'Expected {{host}} from: {self.EXPECTED_FORMAT}').parse(state)
setattr(namespace, 'host', host)
if colon.match:
port = IntegerParser(65535).parse(state)
setattr(namespace, 'port', port)
return namespace
def document(self, state: DocumentationState) -> t.Optional[str]:
"""Generate and return documentation for this parser."""
return self.EXPECTED_FORMAT
| SshConnectionParser |
python | django__django | tests/many_to_one/models.py | {
"start": 371,
"end": 646
} | class ____(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter, models.CASCADE)
class Meta:
ordering = ("headline",)
def __str__(self):
return self.headline
| Article |
python | django__django | tests/template_tests/test_loaders.py | {
"start": 4455,
"end": 9143
} | class ____(SimpleTestCase):
@classmethod
def setUpClass(cls):
cls.engine = Engine(
dirs=[TEMPLATE_DIR], loaders=["django.template.loaders.filesystem.Loader"]
)
super().setUpClass()
@contextmanager
def set_dirs(self, dirs):
original_dirs = self.engine.dirs
self.engine.dirs = dirs
try:
yield
finally:
self.engine.dirs = original_dirs
@contextmanager
def source_checker(self, dirs):
loader = self.engine.template_loaders[0]
def check_sources(path, expected_sources):
expected_sources = [os.path.abspath(s) for s in expected_sources]
self.assertEqual(
[origin.name for origin in loader.get_template_sources(path)],
expected_sources,
)
with self.set_dirs(dirs):
yield check_sources
def test_get_template(self):
template = self.engine.get_template("index.html")
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, "index.html"))
self.assertEqual(template.origin.template_name, "index.html")
self.assertEqual(template.origin.loader, self.engine.template_loaders[0])
self.assertEqual(
template.origin.loader_name, "django.template.loaders.filesystem.Loader"
)
def test_loaders_dirs(self):
engine = Engine(
loaders=[("django.template.loaders.filesystem.Loader", [TEMPLATE_DIR])]
)
template = engine.get_template("index.html")
self.assertEqual(template.origin.name, os.path.join(TEMPLATE_DIR, "index.html"))
def test_loaders_dirs_empty(self):
"""An empty dirs list in loaders overrides top level dirs."""
engine = Engine(
dirs=[TEMPLATE_DIR],
loaders=[("django.template.loaders.filesystem.Loader", [])],
)
with self.assertRaises(TemplateDoesNotExist):
engine.get_template("index.html")
def test_directory_security(self):
with self.source_checker(["/dir1", "/dir2"]) as check_sources:
check_sources("index.html", ["/dir1/index.html", "/dir2/index.html"])
check_sources("/etc/passwd", [])
check_sources("etc/passwd", ["/dir1/etc/passwd", "/dir2/etc/passwd"])
check_sources("../etc/passwd", [])
check_sources("../../../etc/passwd", [])
check_sources("/dir1/index.html", ["/dir1/index.html"])
check_sources("../dir2/index.html", ["/dir2/index.html"])
check_sources("/dir1blah", [])
check_sources("../dir1blah", [])
def test_unicode_template_name(self):
with self.source_checker(["/dir1", "/dir2"]) as check_sources:
check_sources("Ångström", ["/dir1/Ångström", "/dir2/Ångström"])
def test_bytestring(self):
loader = self.engine.template_loaders[0]
msg = "Can't mix strings and bytes in path components"
with self.assertRaisesMessage(TypeError, msg):
list(loader.get_template_sources(b"\xc3\x85ngstr\xc3\xb6m"))
def test_unicode_dir_name(self):
with self.source_checker(["/Straße"]) as check_sources:
check_sources("Ångström", ["/Straße/Ångström"])
@unittest.skipUnless(
os.path.normcase("/TEST") == os.path.normpath("/test"),
"This test only runs on case-sensitive file systems.",
)
def test_case_sensitivity(self):
with self.source_checker(["/dir1", "/DIR2"]) as check_sources:
check_sources("index.html", ["/dir1/index.html", "/DIR2/index.html"])
check_sources("/DIR1/index.HTML", ["/DIR1/index.HTML"])
def test_file_does_not_exist(self):
with self.assertRaises(TemplateDoesNotExist):
self.engine.get_template("doesnotexist.html")
@unittest.skipIf(
sys.platform == "win32",
"Python on Windows doesn't have working os.chmod().",
)
def test_permissions_error(self):
with tempfile.NamedTemporaryFile() as tmpfile:
tmpdir = os.path.dirname(tmpfile.name)
tmppath = os.path.join(tmpdir, tmpfile.name)
os.chmod(tmppath, 0o0222)
with self.set_dirs([tmpdir]):
with self.assertRaisesMessage(PermissionError, "Permission denied"):
self.engine.get_template(tmpfile.name)
def test_notafile_error(self):
# Windows raises PermissionError when trying to open a directory.
with self.assertRaises(
PermissionError if sys.platform == "win32" else IsADirectoryError
):
self.engine.get_template("first")
| FileSystemLoaderTests |
python | doocs__leetcode | solution/2100-2199/2132.Stamping the Grid/Solution.py | {
"start": 0,
"end": 1067
} | class ____:
def possibleToStamp(
self, grid: List[List[int]], stampHeight: int, stampWidth: int
) -> bool:
m, n = len(grid), len(grid[0])
s = [[0] * (n + 1) for _ in range(m + 1)]
for i, row in enumerate(grid, 1):
for j, v in enumerate(row, 1):
s[i][j] = s[i - 1][j] + s[i][j - 1] - s[i - 1][j - 1] + v
d = [[0] * (n + 2) for _ in range(m + 2)]
for i in range(1, m - stampHeight + 2):
for j in range(1, n - stampWidth + 2):
x, y = i + stampHeight - 1, j + stampWidth - 1
if s[x][y] - s[x][j - 1] - s[i - 1][y] + s[i - 1][j - 1] == 0:
d[i][j] += 1
d[i][y + 1] -= 1
d[x + 1][j] -= 1
d[x + 1][y + 1] += 1
for i, row in enumerate(grid, 1):
for j, v in enumerate(row, 1):
d[i][j] += d[i - 1][j] + d[i][j - 1] - d[i - 1][j - 1]
if v == 0 and d[i][j] == 0:
return False
return True
| Solution |
python | tiangolo__fastapi | scripts/contributors.py | {
"start": 1967,
"end": 3648
} | class ____(BaseSettings):
github_token: SecretStr
github_repository: str
httpx_timeout: int = 30
def get_graphql_response(
*,
settings: Settings,
query: str,
after: str | None = None,
) -> dict[str, Any]:
headers = {"Authorization": f"token {settings.github_token.get_secret_value()}"}
variables = {"after": after}
response = httpx.post(
github_graphql_url,
headers=headers,
timeout=settings.httpx_timeout,
json={"query": query, "variables": variables, "operationName": "Q"},
)
if response.status_code != 200:
logging.error(f"Response was not 200, after: {after}")
logging.error(response.text)
raise RuntimeError(response.text)
data = response.json()
if "errors" in data:
logging.error(f"Errors in response, after: {after}")
logging.error(data["errors"])
logging.error(response.text)
raise RuntimeError(response.text)
return data
def get_graphql_pr_edges(
*, settings: Settings, after: str | None = None
) -> list[PullRequestEdge]:
data = get_graphql_response(settings=settings, query=prs_query, after=after)
graphql_response = PRsResponse.model_validate(data)
return graphql_response.data.repository.pullRequests.edges
def get_pr_nodes(settings: Settings) -> list[PullRequestNode]:
pr_nodes: list[PullRequestNode] = []
pr_edges = get_graphql_pr_edges(settings=settings)
while pr_edges:
for edge in pr_edges:
pr_nodes.append(edge.node)
last_edge = pr_edges[-1]
pr_edges = get_graphql_pr_edges(settings=settings, after=last_edge.cursor)
return pr_nodes
| Settings |
python | walkccc__LeetCode | solutions/2023. Number of Pairs of Strings With Concatenation Equal to Target/2023.py | {
"start": 0,
"end": 326
} | class ____:
def numOfPairs(self, nums: list[str], target: str) -> int:
ans = 0
count = collections.Counter()
for num in nums:
k = len(num)
if target[:k] == num:
ans += count[target[k:]]
if target[-k:] == num:
ans += count[target[:-k]]
count[num] += 1
return ans
| Solution |
python | tensorflow__tensorflow | tensorflow/python/autograph/converters/conditional_expressions.py | {
"start": 916,
"end": 1601
} | class ____(converter.Base):
"""Converts conditional expressions to functional form."""
def visit_IfExp(self, node):
template = '''
ag__.if_exp(
test,
lambda: true_expr,
lambda: false_expr,
expr_repr)
'''
expr_repr = parser.unparse(node.test, include_encoding_marker=False).strip()
return templates.replace_as_expression(
template,
test=node.test,
true_expr=node.body,
false_expr=node.orelse,
expr_repr=gast.Constant(expr_repr, kind=None))
def transform(node, ctx):
node = ConditionalExpressionTransformer(ctx).visit(node)
return node
| ConditionalExpressionTransformer |
python | fastai__fastai | fastai/text/data.py | {
"start": 1529,
"end": 3167
} | class ____(Transform):
"Reversible transform of tokenized texts to numericalized ids"
def __init__(self, vocab=None, min_freq=3, max_vocab=60000, special_toks=None):
store_attr('vocab,min_freq,max_vocab,special_toks')
self.o2i = None if vocab is None else defaultdict(int, {v:k for k,v in enumerate(vocab)})
def setups(self, dsets):
if dsets is None: return
if self.vocab is None:
count = dsets.counter if getattr(dsets, 'counter', None) is not None else Counter(p for o in dsets for p in o)
if self.special_toks is None and hasattr(dsets, 'special_toks'):
self.special_toks = dsets.special_toks
self.vocab = make_vocab(count, min_freq=self.min_freq, max_vocab=self.max_vocab, special_toks=self.special_toks)
self.o2i = defaultdict(int, {v:k for k,v in enumerate(self.vocab) if v != 'xxfake'})
def encodes(self, o): return TensorText(tensor([self.o2i [o_] for o_ in o]))
def decodes(self, o): return L(self.vocab[o_] for o_ in o)
# %% ../../nbs/31_text.data.ipynb 22
def _maybe_first(o): return o[0] if isinstance(o, tuple) else o
# %% ../../nbs/31_text.data.ipynb 23
def _get_tokenizer(ds):
tok = getattr(ds, 'tokenizer', None)
if isinstance(tok, Tokenizer): return tok
if isinstance(tok, (list,L)):
for t in tok:
if isinstance(t, Tokenizer): return t
# %% ../../nbs/31_text.data.ipynb 24
def _get_lengths(ds):
tok = _get_tokenizer(ds)
if tok is None: return
return tok.get_lengths(ds.items)
# %% ../../nbs/31_text.data.ipynb 25
#TODO: add backward
@delegates()
| Numericalize |
python | allegroai__clearml | clearml/backend_api/services/v2_9/tasks.py | {
"start": 301930,
"end": 302215
} | class ____(BatchRequest):
"""
Updates a batch of tasks.
Headers
Content type should be 'application/json-lines'.
"""
_service = "tasks"
_action = "update_batch"
_version = "2.9"
_batched_request_cls = UpdateRequest
| UpdateBatchRequest |
python | doocs__leetcode | solution/1100-1199/1125.Smallest Sufficient Team/Solution.py | {
"start": 0,
"end": 847
} | class ____:
def smallestSufficientTeam(
self, req_skills: List[str], people: List[List[str]]
) -> List[int]:
d = {s: i for i, s in enumerate(req_skills)}
m, n = len(req_skills), len(people)
p = [0] * n
for i, ss in enumerate(people):
for s in ss:
p[i] |= 1 << d[s]
f = [inf] * (1 << m)
g = [0] * (1 << m)
h = [0] * (1 << m)
f[0] = 0
for i in range(1 << m):
if f[i] == inf:
continue
for j in range(n):
if f[i] + 1 < f[i | p[j]]:
f[i | p[j]] = f[i] + 1
g[i | p[j]] = j
h[i | p[j]] = i
i = (1 << m) - 1
ans = []
while i:
ans.append(g[i])
i = h[i]
return ans
| Solution |
python | joke2k__faker | faker/providers/bank/tl_PH/__init__.py | {
"start": 51,
"end": 219
} | class ____(EnPhBankProvider):
"""Implement bank provider for ``tl_PH`` locale.
There is no difference from the ``en_PH`` implementation.
"""
pass
| Provider |
python | ray-project__ray | python/ray/tests/test_unavailable_actors.py | {
"start": 6363,
"end": 10869
} | class ____:
"""
An actor that is awaiting for a signal to be sent to it during its init. It is used
to test the behavior of the actor when it is killed and restarted.
It also increments a counter during its init to keep track of the number of
restarts.
"""
def __init__(
self,
restart_counter: Counter,
blocking_signal: SignalActor,
restart_death_range: Tuple[int, int],
):
restart_count = ray.get(restart_counter.inc.remote())
ray.get(blocking_signal.wait.remote()) # block on signal
restart_death_lower, restart_death_upper = restart_death_range
if restart_count > restart_death_lower and restart_count < restart_death_upper:
msg = (
f"Failed to restart the actor because the restart count is in the death range [{restart_death_lower}, "
f"{restart_death_upper}]: {restart_count}"
)
print(msg)
raise ValueError(msg)
def ping(self, name):
print(f"ping from {name}")
return f"hello {name}!"
def getpid(self):
return os.getpid()
@pytest.mark.skipif(sys.platform == "win32", reason="does not work on windows")
def test_actor_restart(ray_start_regular):
"""
Test the following actor restart scenarios:
- The actor restarts successfully on being killed.
- The actor emits the right error message during the restart when it is not fully
initialized.
- The actor emits the right error message when it is permanently dead.
"""
counter = Counter.remote()
signal_actor = SignalActor.remote()
actor = ActorAwaitingOnCreation.options(max_restarts=3).remote(
restart_counter=counter,
blocking_signal=signal_actor,
restart_death_range=(2, 10),
)
# unblock actor creation, actor should be created eventually
ray.get(signal_actor.send.remote())
wait_for_condition(
lambda: ray.get(actor.ping.remote("lemon")) == "hello lemon!",
)
# block actor creation and kill it
ray.get(signal_actor.send.remote(clear=True))
sigkill_actor(actor)
with pytest.raises(ActorUnavailableError):
print(ray.get(actor.ping.remote("unavailable")))
# unblock actor creation, actor should be created eventually
ray.get(signal_actor.send.remote())
wait_for_condition(
lambda: ray.get(actor.ping.remote("ok")) == "hello ok!",
)
# block actor creation and kill it
ray.get(signal_actor.send.remote(clear=True))
sigkill_actor(actor)
with pytest.raises(ActorUnavailableError):
print(ray.get(actor.ping.remote("unavailable")))
# unblock actor creation, the actor still dies because it reaches the restart limit
ray.get(signal_actor.send.remote())
wait_for_condition(
lambda: ray.get(signal_actor.cur_num_waiters.remote()) == 0,
)
with pytest.raises(ActorDiedError, match="an error raised in its creation task"):
print(ray.get(actor.ping.remote("actor error")))
@pytest.mark.skipif(sys.platform == "win32", reason="does not work on windows")
def test_actor_inifite_restart(ray_start_regular):
"""
Test that the actor can be restarted inifinitely. We do that by intentionally
cause the actor to fail when its restarting counter is in the death range. We
then test that the restarting counter will eventually go out of the death range
and the actor will be able to restart.
"""
counter = Counter.remote()
signal_actor = SignalActor.remote()
actor = ActorAwaitingOnCreation.options().remote(
restart_counter=counter,
blocking_signal=signal_actor,
restart_death_range=(2, 5),
)
# unblock actor creation
ray.get(signal_actor.send.remote())
wait_for_condition(
lambda: ray.get(actor.ping.remote("lemon")) == "hello lemon!",
)
# block actor creation and kill it
ray.get(signal_actor.send.remote(clear=True))
sigkill_actor(actor)
# When the actor is restarting, any method call raises ActorUnavailableError.
with pytest.raises(ActorUnavailableError):
print(ray.get(actor.ping.remote("unavailable")))
# unblock actor creation, the actor keeps retrying until it gets out of the death
# range
ray.get(signal_actor.send.remote())
assert ray.get(actor.ping.options(max_task_retries=-1).remote("ok")) == "hello ok!"
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
| ActorAwaitingOnCreation |
python | google__jax | tests/hijax_test.py | {
"start": 32368,
"end": 34564
} | class ____(jtu.JaxTestCase):
# with differentiable hijax arguments
def test_hitypes_as_grad_args(self):
tup = make_tup(jnp.array(2.0), jnp.array(3.0))
def loss_fn(tup):
x = get_tuple_element(tup, 0)
return x ** 2
grads = jax.grad(loss_fn)(tup)
self.assertAllClose(get_tuple_element(grads, 0), 4.0)
# with non-differentiable hijax arguments
def test_hitypes_as_nondiff_grad_args(self):
tup = make_tup(jnp.array(2.0), jnp.array(3.0))
x = jnp.array(3.0)
def loss_fn(x, tup):
y = get_tuple_element(tup, 1)
return x ** 2 + y
grad = jax.grad(loss_fn)(x, tup)
self.assertAllClose(grad, 6.0, check_dtypes=False)
# with hijax captured arguments
def test_hitypes_as_captured_args(self):
tup = make_tup(jnp.array(2.0), jnp.array(3.0))
def loss_fn(x):
y = get_tuple_element(tup, 1)
return x ** 2 + y
grad = jax.grad(loss_fn)(jnp.array(4.0))
self.assertAllClose(grad, 8.0, check_dtypes=False)
# with differentiable mutable hijax arguments
@absltest.skip("Not yet implemented")
def test_mutable_hitypes_as_grad_args(self):
box = Box(jnp.array(2.0))
def loss_fn(box):
return box.get() ** 2
grads = jax.grad(loss_fn)(box)
# NOTE: unclear what the tangent type will be here
# with non-differentiable mutable hijax arguments
def test_mutable_hitypes_as_nondiff_grad_args(self):
box = Box(jnp.array(2.0))
x = jnp.array(3.0)
def loss_fn(x, box):
box.set(jax.lax.stop_gradient(x * 2))
return x ** 2 + box.get()
grad = jax.grad(loss_fn)(x, box)
self.assertAllClose(box.get(), 6.0, check_dtypes=False)
self.assertAllClose(grad, 6.0, check_dtypes=False)
# with mutable hijax captured arguments
def test_mutable_hitypes_as_captured_args(self):
box = Box(jnp.array(2.0))
def loss_fn(x):
box.set(jax.lax.stop_gradient(x * 3))
return x ** 2 + box.get()
grad = jax.grad(loss_fn)(jnp.array(4.0))
self.assertAllClose(box.get(), 12.0, check_dtypes=False)
self.assertAllClose(grad, 8.0, check_dtypes=False)
if __name__ == '__main__':
absltest.main(testLoader=jtu.JaxTestLoader())
| HijaxTransformCoverageTest |
python | astropy__astropy | astropy/table/tests/test_init_table.py | {
"start": 6783,
"end": 9194
} | class ____(BaseInitFromListLike):
def _setup(self, table_type):
self.data = [{"a": 1, "b": 2, "c": 3}, {"a": 3, "b": 4, "c": 5}]
self.data_ragged = [{"a": 1, "b": 2}, {"a": 2, "c": 4}]
self.data_acb = [{"a": 2, "c": 4}, {"a": 1, "b": 2}]
def test_names(self, table_type):
self._setup(table_type)
t = table_type(self.data)
assert all(colname in {"a", "b", "c"} for colname in t.colnames)
def test_names_ordered(self, table_type):
self._setup(table_type)
t = table_type(self.data, names=("c", "b", "a"))
assert t.colnames == ["c", "b", "a"]
def test_rows_without_names_args(self, table_type):
# see https://github.com/astropy/astropy/pull/15735
self._setup(table_type)
t1 = table_type(rows=self.data)
assert t1.colnames == ["a", "b", "c"]
t2 = table_type(rows=self.data_acb)
assert t2.colnames == ["a", "c", "b"]
def test_missing_data_init_from_dict(self, table_type):
self._setup(table_type)
dat = self.data_ragged
for rows in [False, True]:
t = table_type(rows=dat) if rows else table_type(dat)
assert np.all(t["a"] == [1, 2])
assert np.all(t["b"].mask == [False, True])
assert np.all(t["b"].data == [2, 2])
assert np.all(t["c"].mask == [True, False])
assert np.all(t["c"].data == [4, 4])
assert type(t["a"]) is (MaskedColumn if t.masked else Column)
assert type(t["b"]) is MaskedColumn
assert type(t["c"]) is MaskedColumn
def test_qtable_uses_masked_quantity_as_needed():
data = [{"a": 1 * u.m, "b": 1}, {"a": 2 * u.Mm, "b": 2}]
data_ragged = [{"a": 1 * u.m, "b": 1}, {"a": 2 * u.Mm}, {"b": 3}]
t = QTable(data)
assert t.colnames == ["a", "b"]
assert isinstance(t["a"], u.Quantity)
assert isinstance(t["b"], Column)
assert t["a"].unit == u.m
assert_array_equal(t["a"], [1, 2000000] * u.m)
assert not t.masked
t2 = QTable(data_ragged)
assert t2.colnames == ["a", "b"]
assert isinstance(t2["a"], Masked(u.Quantity))
assert isinstance(t2["b"], MaskedColumn)
assert t2["a"].unit == u.m
assert np.all(t2["a"] == [1, 2000000, 0] * u.m)
assert_array_equal(t2["a"].mask, [False, False, True])
assert_array_equal(t2["b"].mask, [False, True, False])
| TestInitFromListOfDicts |
python | python-poetry__poetry | src/poetry/console/commands/installer_command.py | {
"start": 357,
"end": 1130
} | class ____(GroupCommand, EnvCommand):
def __init__(self) -> None:
# Set in poetry.console.application.Application.configure_installer
self._installer: Installer | None = None
super().__init__()
def reset_poetry(self) -> None:
super().reset_poetry()
self.installer.set_package(self.poetry.package)
self.installer.set_locker(self.poetry.locker)
@property
def installer(self) -> Installer:
assert self._installer is not None
return self._installer
def set_installer(self, installer: Installer) -> None:
self._installer = installer
def execute(self, io: IO) -> int:
PoetryKeyring.preflight_check(io, self.poetry.config)
return super().execute(io)
| InstallerCommand |
python | pypa__pipenv | pipenv/patched/pip/_vendor/rich/segment.py | {
"start": 22710,
"end": 24788
} | class ____:
def __init__(self, lines: Iterable[List[Segment]], new_lines: bool = False) -> None:
"""A simple renderable containing a number of lines of segments. May be used as an intermediate
in rendering process.
Args:
lines (Iterable[List[Segment]]): Lists of segments forming lines.
new_lines (bool, optional): Insert new lines after each line. Defaults to False.
"""
self.lines = list(lines)
self.new_lines = new_lines
def __rich_console__(
self, console: "Console", options: "ConsoleOptions"
) -> "RenderResult":
if self.new_lines:
new_line = Segment.line()
for line in self.lines:
yield from line
yield new_line
else:
for line in self.lines:
yield from line
if __name__ == "__main__": # pragma: no cover
from pipenv.patched.pip._vendor.rich.console import Console
from pipenv.patched.pip._vendor.rich.syntax import Syntax
from pipenv.patched.pip._vendor.rich.text import Text
code = """from rich.console import Console
console = Console()
text = Text.from_markup("Hello, [bold magenta]World[/]!")
console.print(text)"""
text = Text.from_markup("Hello, [bold magenta]World[/]!")
console = Console()
console.rule("rich.Segment")
console.print(
"A Segment is the last step in the Rich render process before generating text with ANSI codes."
)
console.print("\nConsider the following code:\n")
console.print(Syntax(code, "python", line_numbers=True))
console.print()
console.print(
"When you call [b]print()[/b], Rich [i]renders[/i] the object in to the following:\n"
)
fragments = list(console.render(text))
console.print(fragments)
console.print()
console.print("The Segments are then processed to produce the following output:\n")
console.print(text)
console.print(
"\nYou will only need to know this if you are implementing your own Rich renderables."
)
| SegmentLines |
python | pallets__jinja | tests/test_async.py | {
"start": 6017,
"end": 9517
} | class ____:
def test_context_include(self, test_env_async):
t = test_env_async.from_string('{% include "header" %}')
assert t.render(foo=42) == "[42|23]"
t = test_env_async.from_string('{% include "header" with context %}')
assert t.render(foo=42) == "[42|23]"
t = test_env_async.from_string('{% include "header" without context %}')
assert t.render(foo=42) == "[|23]"
def test_choice_includes(self, test_env_async):
t = test_env_async.from_string('{% include ["missing", "header"] %}')
assert t.render(foo=42) == "[42|23]"
t = test_env_async.from_string(
'{% include ["missing", "missing2"] ignore missing %}'
)
assert t.render(foo=42) == ""
t = test_env_async.from_string('{% include ["missing", "missing2"] %}')
pytest.raises(TemplateNotFound, t.render)
with pytest.raises(TemplatesNotFound) as e:
t.render()
assert e.value.templates == ["missing", "missing2"]
assert e.value.name == "missing2"
def test_includes(t, **ctx):
ctx["foo"] = 42
assert t.render(ctx) == "[42|23]"
t = test_env_async.from_string('{% include ["missing", "header"] %}')
test_includes(t)
t = test_env_async.from_string("{% include x %}")
test_includes(t, x=["missing", "header"])
t = test_env_async.from_string('{% include [x, "header"] %}')
test_includes(t, x="missing")
t = test_env_async.from_string("{% include x %}")
test_includes(t, x="header")
t = test_env_async.from_string("{% include x %}")
test_includes(t, x="header")
t = test_env_async.from_string("{% include [x] %}")
test_includes(t, x="header")
def test_include_ignoring_missing(self, test_env_async):
t = test_env_async.from_string('{% include "missing" %}')
pytest.raises(TemplateNotFound, t.render)
for extra in "", "with context", "without context":
t = test_env_async.from_string(
'{% include "missing" ignore missing ' + extra + " %}"
)
assert t.render() == ""
def test_context_include_with_overrides(self, test_env_async):
env = Environment(
loader=DictLoader(
dict(
main="{% for item in [1, 2, 3] %}{% include 'item' %}{% endfor %}",
item="{{ item }}",
)
)
)
assert env.get_template("main").render() == "123"
def test_unoptimized_scopes(self, test_env_async):
t = test_env_async.from_string(
"""
{% macro outer(o) %}
{% macro inner() %}
{% include "o_printer" %}
{% endmacro %}
{{ inner() }}
{% endmacro %}
{{ outer("FOO") }}
"""
)
assert t.render().strip() == "(FOO)"
def test_unoptimized_scopes_autoescape(self):
env = Environment(
loader=DictLoader({"o_printer": "({{ o }})"}),
autoescape=True,
enable_async=True,
)
t = env.from_string(
"""
{% macro outer(o) %}
{% macro inner() %}
{% include "o_printer" %}
{% endmacro %}
{{ inner() }}
{% endmacro %}
{{ outer("FOO") }}
"""
)
assert t.render().strip() == "(FOO)"
| TestAsyncIncludes |
python | django-guardian__django-guardian | guardian/testapp/tests/test_management.py | {
"start": 911,
"end": 5851
} | class ____(TestCase):
@mock.patch("guardian.management.guardian_settings")
def test_uses_custom_function(self, guardian_settings):
mocked_get_init_anon.reset_mock()
path = "guardian.testapp.tests.test_management.mocked_get_init_anon"
guardian_settings.GET_INIT_ANONYMOUS_USER = path
guardian_settings.ANONYMOUS_USER_NAME = "anonymous"
User = get_user_model()
anon = mocked_get_init_anon.return_value = mock.Mock()
create_anonymous_user("sender", using="default")
mocked_get_init_anon.assert_called_once_with(User)
anon.save.assert_called_once_with(using="default")
@mock.patch("guardian.management.guardian_settings")
@override_settings(AUTH_USER_MODEL="testapp.CustomUsernameUser")
def test_uses_custom_username_field_model(self, guardian_settings):
mocked_get_init_anon.reset_mock()
guardian_settings.GET_INIT_ANONYMOUS_USER = "guardian.testapp.tests.test_management.mocked_get_init_anon"
guardian_settings.ANONYMOUS_USER_NAME = "testuser@example.com"
User = get_user_model()
anon = mocked_get_init_anon.return_value = mock.Mock()
create_anonymous_user("sender", using="default")
mocked_get_init_anon.assert_called_once_with(User)
anon.save.assert_called_once_with(using="default")
def test_get_anonymous_user(self):
anon = get_anonymous_user()
self.assertFalse(anon.has_usable_password())
self.assertEqual(anon.get_username(), "AnonymousUser")
@mock.patch("guardian.management.guardian_settings")
def test_non_migrated_db(self, guardian_settings):
mocked_get_init_anon.reset_mock()
guardian_settings.GET_INIT_ANONYMOUS_USER = "guardian.testapp.tests.test_management.mocked_get_init_anon"
# Suppress the DATABASES override warning for this specific test
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
with override_settings(DATABASE_ROUTERS=[SessionRouter()], DATABASES=multi_db_dict):
create_anonymous_user("sender", using="session")
mocked_get_init_anon.assert_not_called()
@mock.patch("guardian.management.guardian_settings")
def test_database_error_on_user_lookup(self, guardian_settings):
"""Test that DatabaseError is handled gracefully when User table doesn't exist (issue #770)"""
guardian_settings.GET_INIT_ANONYMOUS_USER = "guardian.management.get_init_anonymous_user"
guardian_settings.ANONYMOUS_USER_NAME = "anonymous"
User = get_user_model()
# Mock User.objects.using().get() to raise DatabaseError (simulating missing table)
with mock.patch.object(User.objects, "using") as mock_using:
mock_manager = mock.Mock()
mock_using.return_value = mock_manager
mock_manager.get.side_effect = DatabaseError("relation 'auth_user' does not exist")
# This should not raise an exception - it should handle DatabaseError gracefully
try:
create_anonymous_user("sender", using="default")
# If we reach here, the function handled the DatabaseError correctly
success = True
except DatabaseError:
success = False
self.assertTrue(success, "create_anonymous_user should handle DatabaseError gracefully")
@mock.patch("guardian.management.guardian_settings")
def test_database_error_on_user_save(self, guardian_settings):
"""Test that DatabaseError is handled gracefully when saving fails due to missing table (issue #770)"""
guardian_settings.GET_INIT_ANONYMOUS_USER = "guardian.management.get_init_anonymous_user"
guardian_settings.ANONYMOUS_USER_NAME = "anonymous"
User = get_user_model()
# Mock User.objects.using().get() to raise DoesNotExist (user doesn't exist)
# Then mock save() to raise DatabaseError (table doesn't exist)
with mock.patch.object(User.objects, "using") as mock_using:
mock_manager = mock.Mock()
mock_using.return_value = mock_manager
mock_manager.get.side_effect = User.DoesNotExist()
# Mock user.save() to raise DatabaseError
with mock.patch("guardian.management.get_init_anonymous_user") as mock_get_init:
mock_user = mock.Mock()
mock_user.save.side_effect = DatabaseError("relation 'auth_user' does not exist")
mock_get_init.return_value = mock_user
# This should not raise an exception
try:
create_anonymous_user("sender", using="default")
success = True
except DatabaseError:
success = False
self.assertTrue(success, "create_anonymous_user should handle DatabaseError on save gracefully")
| TestGetAnonymousUser |
python | kubernetes-client__python | kubernetes/client/models/v1_match_condition.py | {
"start": 383,
"end": 7295
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'expression': 'str',
'name': 'str'
}
attribute_map = {
'expression': 'expression',
'name': 'name'
}
def __init__(self, expression=None, name=None, local_vars_configuration=None): # noqa: E501
"""V1MatchCondition - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._expression = None
self._name = None
self.discriminator = None
self.expression = expression
self.name = name
@property
def expression(self):
"""Gets the expression of this V1MatchCondition. # noqa: E501
Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: 'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Required. # noqa: E501
:return: The expression of this V1MatchCondition. # noqa: E501
:rtype: str
"""
return self._expression
@expression.setter
def expression(self, expression):
"""Sets the expression of this V1MatchCondition.
Expression represents the expression which will be evaluated by CEL. Must evaluate to bool. CEL expressions have access to the contents of the AdmissionRequest and Authorizer, organized into CEL variables: 'object' - The object from the incoming request. The value is null for DELETE requests. 'oldObject' - The existing object. The value is null for CREATE requests. 'request' - Attributes of the admission request(/pkg/apis/admission/types.go#AdmissionRequest). 'authorizer' - A CEL Authorizer. May be used to perform authorization checks for the principal (user or service account) of the request. See https://pkg.go.dev/k8s.io/apiserver/pkg/cel/library#Authz 'authorizer.requestResource' - A CEL ResourceCheck constructed from the 'authorizer' and configured with the request resource. Documentation on CEL: https://kubernetes.io/docs/reference/using-api/cel/ Required. # noqa: E501
:param expression: The expression of this V1MatchCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and expression is None: # noqa: E501
raise ValueError("Invalid value for `expression`, must not be `None`") # noqa: E501
self._expression = expression
@property
def name(self):
"""Gets the name of this V1MatchCondition. # noqa: E501
Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') Required. # noqa: E501
:return: The name of this V1MatchCondition. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1MatchCondition.
Name is an identifier for this match condition, used for strategic merging of MatchConditions, as well as providing an identifier for logging purposes. A good name should be descriptive of the associated expression. Name must be a qualified name consisting of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName') Required. # noqa: E501
:param name: The name of this V1MatchCondition. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1MatchCondition):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1MatchCondition):
return True
return self.to_dict() != other.to_dict()
| V1MatchCondition |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/qual_names.py | {
"start": 1227,
"end": 1579
} | class ____(collections.namedtuple('Literal', ['value'])):
"""Represents a Python numeric literal."""
def __str__(self):
if isinstance(self.value, str):
return "'{}'".format(self.value)
return str(self.value)
def __repr__(self):
return str(self)
# TODO(mdan): Use subclasses to remove the has_attr has_subscript booleans.
| Literal |
python | walkccc__LeetCode | solutions/2527. Find Xor-Beauty of Array/2527.py | {
"start": 0,
"end": 111
} | class ____:
def xorBeauty(self, nums: list[int]) -> int:
return functools.reduce(operator.xor, nums)
| Solution |
python | facelessuser__pymdown-extensions | hatch_build.py | {
"start": 515,
"end": 1652
} | class ____(MetadataHookInterface):
"""Our metadata hook."""
def update(self, metadata):
"""See https://ofek.dev/hatch/latest/plugins/metadata-hook/ for more information."""
metadata["classifiers"] = [
f"Development Status :: {get_version_dev_status(self.root)}",
"Environment :: Console",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: 3.14",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Filters",
"Topic :: Text Processing :: Markup :: HTML",
]
| CustomMetadataHook |
python | pytorch__pytorch | test/dynamo/test_structured_trace.py | {
"start": 2615,
"end": 5319
} | class ____(logging.Formatter):
def format(self, record):
metadata = copy.deepcopy(record.metadata)
# Stub out values that are not stable across runs
# TODO: Check that these match schema
if "has_payload" in metadata:
metadata["has_payload"] = "HASH"
if "dynamo_start" in metadata:
metadata["dynamo_start"]["stack"] = "STACK"
if "inductor_output_code" in metadata:
metadata["inductor_output_code"]["filename"] = "FILENAME"
if "file_path" in metadata["inductor_output_code"]:
metadata["inductor_output_code"]["file_path"] = "FILENAME"
if "stack" in metadata:
metadata["stack"] = "STACK"
if "compilation_metrics" in metadata:
metadata["compilation_metrics"] = "METRICS"
if "bwd_compilation_metrics" in metadata:
metadata["bwd_compilation_metrics"] = "METRICS"
if "compilation_metrics_runtime" in metadata:
metadata["compilation_metrics_runtime"] = "METRICS"
if "bwd_compilation_metrics_runtime" in metadata:
metadata["bwd_compilation_metrics_runtime"] = "METRICS"
if "describe_storage" in metadata:
metadata["describe_storage"]["describer_id"] = "ID"
if "describe_tensor" in metadata:
metadata["describe_tensor"]["describer_id"] = "ID"
if "view_func" in metadata["describe_tensor"]:
metadata["describe_tensor"]["view_func"] = "VIEW_FUNC"
if "describe_source" in metadata:
metadata["describe_source"]["describer_id"] = "ID"
if (
(k := "create_symbol") in metadata
or (k := "guard_added_fast") in metadata
or (k := "create_unbacked_symbol") in metadata
):
metadata[k]["user_stack"] = "STACK"
metadata[k]["stack"] = "STACK"
if "dump_file" in metadata:
# Don't include the actually key number, that's sensitive to other
# test runs
metadata["dump_file"]["name"] = "<eval_with_key>"
return (
json.dumps(metadata)
+ "\n"
+ "\n".join(l.rstrip() for l in record.payload.splitlines())
)
return json.dumps(metadata)
trace_log = logging.getLogger("torch.__trace")
chrome_event_filter = ChromiumEventFilter()
def show_chrome_events(fn):
"""
Don't hide chrome events for this test
"""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
self.handler.removeFilter(chrome_event_filter)
return fn(self, *args, **kwargs)
return wrapper
| StructuredTraceTestingFormatter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.