import importlib
import os
import pytest
@pytest.mark.asyncio
async def test_settings_api_endpoints(tmp_path, monkeypatch, async_server_client):
    """Exercise /api/settings/last and /api/settings/history end to end."""
    # Point SettingsStore and the server at a test-local store file.
    monkeypatch.setenv("LD_SETTINGS_STORE_PATH", str(tmp_path / "settings_store.json"))

    # Load SettingsStore straight from its source file to avoid package-import
    # side effects, then reload the server so it picks up the env var.
    import importlib.util
    store_file = os.path.join(os.getcwd(), 'src', 'Core', 'SettingsStore.py')
    spec = importlib.util.spec_from_file_location('settings_store_module', store_file)
    settings_store = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(settings_store)
    import server
    importlib.reload(server)

    # A fresh store has no persisted seed yet.
    resp = await async_server_client.get("/api/settings/last")
    assert resp.status_code == 200
    assert resp.json().get("seed") is None

    # Write a seed through the store and read it back via the API.
    settings_store.set_last_seed(123456)
    resp = await async_server_client.get("/api/settings/last")
    assert resp.status_code == 200
    assert resp.json().get("seed") == 123456

    # Post a full settings snapshot.
    settings = {
        "prompt": "unit test",
        "width": 64,
        "height": 64,
        "num_images": 1,
        "batch_size": 1,
        "steps": 10,
        "cfg_scale": 7.0,
        "seed": 42,
        "scheduler": "ays",
        "sampler": "dpmpp_sde_cfgpp",
        "model_path": "test-model",
        "img2img_mode": False,
        "img2img_denoise": 0.75,
        "hiresfix": False,
        "adetailer": False,
        "enhance_prompt": False,
        "stable_fast": False,
        "reuse_seed": False,
        "keep_models_loaded": True,
        "enable_preview": False,
    }
    resp = await async_server_client.post("/api/settings/history", json={"settings": settings})
    assert resp.status_code == 200
    snapshot = resp.json().get("snapshot")
    assert snapshot and snapshot.get("id") and snapshot.get("ts")

    # GET history: the prompt must have been stripped by server-side
    # sanitization while the whitelisted parameters survive.
    resp = await async_server_client.get("/api/settings/history")
    assert resp.status_code == 200
    history = resp.json().get("history")
    assert isinstance(history, list) and len(history) >= 1
    stored = history[0]["settings"]
    assert "prompt" not in stored
    preserved = {
        "steps": 10,
        "cfg_scale": 7.0,
        "sampler": "dpmpp_sde_cfgpp",
        "scheduler": "ays",
        "model_path": "test-model",
        "width": 64,
        "height": 64,
        "seed": 42,
    }
    for field, expected in preserved.items():
        assert stored[field] == expected

    # Posting again with include_prompt=True must keep the prompt.
    resp = await async_server_client.post(
        "/api/settings/history",
        json={"settings": settings, "include_prompt": True},
    )
    assert resp.status_code == 200
    snapshot2 = resp.json().get("snapshot")
    assert snapshot2 and snapshot2.get("id")
    resp = await async_server_client.get("/api/settings/history")
    assert resp.status_code == 200
    history = resp.json().get("history")
    # Newest-first ordering: the include_prompt entry leads and retains it.
    assert isinstance(history, list) and len(history) >= 1
    assert history[0]["settings"].get("prompt") == "unit test"
@pytest.mark.asyncio
async def test_settings_preferences_api_and_generate_fallback(tmp_path, monkeypatch, async_server_client):
    """Preferences round-trip through the API and act as /api/generate defaults."""
    monkeypatch.setenv("LD_SETTINGS_STORE_PATH", str(tmp_path / "settings_store.json"))
    import server
    importlib.reload(server)

    # Record invocations of the autotune reset hook instead of running it.
    reset_log = []
    monkeypatch.setattr(server, "_reset_autotune_runtime_state", lambda: reset_log.append("reset"))

    # Defaults: both preferences disabled.
    resp = await async_server_client.get("/api/settings/preferences")
    assert resp.status_code == 200
    assert resp.json() == {"torch_compile": False, "vae_autotune": False}

    # Turning both on triggers exactly one runtime reset.
    resp = await async_server_client.post(
        "/api/settings/preferences",
        json={"torch_compile": True, "vae_autotune": True},
    )
    assert resp.status_code == 200
    assert resp.json() == {"torch_compile": True, "vae_autotune": True}
    assert reset_log == ["reset"]

    # Re-posting identical values does not trigger another reset.
    resp = await async_server_client.post(
        "/api/settings/preferences",
        json={"torch_compile": True, "vae_autotune": True},
    )
    assert resp.status_code == 200
    assert reset_log == ["reset"]

    # Stub the generation queue to capture the effective request flags.
    seen = {}

    async def fake_enqueue(pending):
        seen["torch_compile"] = pending.req.torch_compile
        seen["vae_autotune"] = pending.req.vae_autotune
        return {"ok": True}

    monkeypatch.setattr(server._generation_buffer, "enqueue", fake_enqueue)

    # Without explicit flags, /api/generate falls back to stored preferences.
    resp = await async_server_client.post("/api/generate", json={"prompt": "unit test prompt"})
    assert resp.status_code == 200
    assert resp.json() == {"ok": True}
    assert seen == {"torch_compile": True, "vae_autotune": True}

    # An explicit flag in the request overrides the stored preference.
    resp = await async_server_client.post(
        "/api/generate",
        json={"prompt": "unit test prompt", "torch_compile": False},
    )
    assert resp.status_code == 200
    assert seen == {"torch_compile": False, "vae_autotune": True}
def test_reset_autotune_runtime_state_clears_runtime_caches(tmp_path, monkeypatch):
    """_reset_autotune_runtime_state clears pipeline, model cache, then compiled models."""
    monkeypatch.setenv("LD_SETTINGS_STORE_PATH", str(tmp_path / "settings_store.json"))
    import server
    importlib.reload(server)

    pipeline_mod = importlib.import_module("src.Core.Pipeline")
    device_mod = importlib.import_module("src.Device.Device")
    cache_mod = importlib.import_module("src.Device.ModelCache")

    # Patch each teardown entry point to log its call instead of acting.
    observed = []
    monkeypatch.setattr(pipeline_mod, "reset_default_pipeline", lambda: observed.append("pipeline"))
    monkeypatch.setattr(cache_mod, "clear_model_cache", lambda: observed.append("cache"))
    monkeypatch.setattr(device_mod, "clear_compiled_models", lambda: observed.append("compiled"))

    server._reset_autotune_runtime_state()
    # All three teardowns run, in this exact order.
    assert observed == ["pipeline", "cache", "compiled"]