3d_model/tests/test_audit_runner.py
Azan
Clean deployment build (Squashed)
7a87926
import json
from pathlib import Path
import pytest
from ylff.services.audit.audit_runner import load_measurements_json, run_audit
from ylff.services.audit.models import ExternalReferenceMeasurement, OperatingRegime
pytestmark = pytest.mark.spec
def test_load_measurements_json_list(tmp_path: Path):
    """A bare JSON list of measurement dicts loads into one model per entry."""
    payload = [
        {
            "regime": "indoor_constrained",
            "measurement_type": "tag_to_tag",
            "d_pred": 2.0,
            "d_star": 2.0,
            "sigma_d": 0.1,
        }
    ]
    source = tmp_path / "m.json"
    source.write_text(json.dumps(payload))
    ms = load_measurements_json(source)
    assert len(ms) == 1
    assert ms[0].regime == OperatingRegime.INDOOR_CONSTRAINED
def test_load_measurements_json_wrapped(tmp_path: Path):
    """A dict wrapping the list under a "measurements" key is also accepted."""
    payload = {
        "measurements": [
            {
                "regime": "indoor_constrained",
                "measurement_type": "tag_to_tag",
                "d_pred": 2.0,
                "d_star": 2.0,
                "sigma_d": 0.1,
            }
        ]
    }
    source = tmp_path / "m.json"
    source.write_text(json.dumps(payload))
    ms = load_measurements_json(source)
    assert len(ms) == 1
def test_run_audit_with_calibration_sets_summary_fields():
    """run_audit(calibrate=True) populates the calibration-related summary keys."""
    # Slight per-sample variation keeps correlation computations away from NaNs.
    ms = [
        ExternalReferenceMeasurement(
            capture_id=None,
            regime=OperatingRegime.INDOOR_CONSTRAINED,
            measurement_type="tag_to_tag",
            d_pred=2.0 + 0.02 + 0.001 * i,
            d_star=2.0,
            sigma_d=0.05 + 0.001 * i,
        )
        for i in range(20)
    ]
    res = run_audit(ms, calibrate=True, calibration_split_fraction=0.5)
    assert "calibrated" in res.summary
    assert res.summary["calibrated"] in (True, False)
    assert "hard_fail_passed" in res.summary
    if res.summary.get("calibrated"):
        assert "calibration_table" in res.summary
        assert "calibration_version" in res.summary