| import os |
| import pathlib |
| import sys |
| import types |
| import unittest |
| from unittest.mock import patch |
|
|
| import numpy as np |
|
|
|
|
| def _install_gradio_stub() -> None: |
| if "gradio" in sys.modules: |
| return |
|
|
| module = types.ModuleType("gradio") |
|
|
| class _Event: |
| def then(self, *args, **kwargs): |
| return self |
|
|
| class _Component: |
| def __init__(self, *args, **kwargs): |
| self.args = args |
| self.kwargs = kwargs |
|
|
| def click(self, *args, **kwargs): |
| return _Event() |
|
|
| def change(self, *args, **kwargs): |
| return _Event() |
|
|
| class _Context: |
| def __init__(self, *args, **kwargs): |
| self.args = args |
| self.kwargs = kwargs |
|
|
| def __enter__(self): |
| return self |
|
|
| def __exit__(self, exc_type, exc, tb): |
| return False |
|
|
| class _Blocks(_Context): |
| def load(self, *args, **kwargs): |
| return _Event() |
|
|
| def launch(self, *args, **kwargs): |
| return None |
|
|
| class _Themes: |
| class Soft: |
| def __init__(self, *args, **kwargs): |
| self.args = args |
| self.kwargs = kwargs |
|
|
| module.themes = _Themes() |
| module.Blocks = _Blocks |
| module.Row = _Context |
| module.Column = _Context |
| module.Group = _Context |
| module.Tabs = _Context |
| module.Tab = _Context |
| module.Accordion = _Context |
|
|
| module.State = _Component |
| module.Markdown = _Component |
| module.Slider = _Component |
| module.Button = _Component |
| module.Dropdown = _Component |
| module.Checkbox = _Component |
| module.HTML = _Component |
| module.Code = _Component |
| module.Dataframe = _Component |
| module.Textbox = _Component |
| module.File = _Component |
| module.DownloadButton = _Component |
| module.Plot = _Component |
|
|
| sys.modules["gradio"] = module |
|
|
|
|
| def _install_openai_stub() -> None: |
| if "openai" in sys.modules: |
| return |
|
|
| module = types.ModuleType("openai") |
|
|
| class _Completions: |
| def create(self, *args, **kwargs): |
| raise RuntimeError("OpenAI call is stubbed in tests.") |
|
|
| class _Chat: |
| def __init__(self): |
| self.completions = _Completions() |
|
|
| class OpenAI: |
| def __init__(self, *args, **kwargs): |
| self.chat = _Chat() |
|
|
| module.OpenAI = OpenAI |
| sys.modules["openai"] = module |
|
|
|
|
| def _install_export_pdf_stub() -> None: |
| if "quread.export_pdf" in sys.modules: |
| return |
|
|
| module = types.ModuleType("quread.export_pdf") |
|
|
| def md_to_pdf(markdown_text: str, output_path: str): |
| pathlib.Path(output_path).write_text(markdown_text or "", encoding="utf-8") |
|
|
| module.md_to_pdf = md_to_pdf |
| sys.modules["quread.export_pdf"] = module |
|
|
|
|
# The stubs must be registered BEFORE importing ``app``: the app module pulls
# in gradio / openai / quread.export_pdf at import time, and these fakes stand
# in for the real (heavyweight or networked) dependencies.
_install_gradio_stub()
_install_openai_stub()
_install_export_pdf_stub()

# Deliberately imported after the stubs are in sys.modules (hence not at the
# top of the file).
import app
from quread.engine import QuantumStateVector
|
|
|
|
class AppFlowsTest(unittest.TestCase):
    """Behavioural tests for the Gradio callback helpers exposed by ``app``."""

    @staticmethod
    def _sample_metrics() -> dict:
        """Return per-qubit metric arrays for three qubits.

        Previously this dict was duplicated verbatim in both hotspot tests;
        it lives here once now. Qubit 1 carries the highest composite risk
        and hotspot level so that sorting and top-k truncation in
        ``app._hotspot_rows`` are observable.
        """
        return {
            "composite_risk": np.array([0.22, 0.91, 0.45], dtype=float),
            "hotspot_level": np.array([0, 2, 1], dtype=float),
            "activity_count": np.array([1.0, 4.0, 2.0], dtype=float),
            "gate_error": np.array([0.01, 0.04, 0.02], dtype=float),
            "readout_error": np.array([0.02, 0.05, 0.03], dtype=float),
            "state_fidelity": np.array([0.98, 0.82, 0.91], dtype=float),
            "process_fidelity": np.array([0.97, 0.79, 0.9], dtype=float),
            "coherence_health": np.array([0.8, 0.5, 0.7], dtype=float),
            "decoherence_risk": np.array([0.2, 0.6, 0.3], dtype=float),
            "fidelity": np.array([0.99, 0.95, 0.97], dtype=float),
        }

    def test_qubit_count_change_reinitializes_simulator(self):
        """Changing the qubit count yields a fresh simulator and reset UI state."""
        qc, last_counts, selected_gate, _target, _control, _cnot_target, status = app._on_qubit_count_change(3)

        self.assertEqual(qc.n_qubits, 3)
        self.assertEqual(qc.history, [])
        self.assertIsNone(last_counts)
        self.assertEqual(selected_gate, "H")
        self.assertIn("Reinitialized simulator with 3 qubits", status)

    def test_write_tmp_generates_unique_paths(self):
        """Two writes of the same logical filename must not collide on disk."""
        p1 = app._write_tmp("circuit.qasm", "OPENQASM 2.0;")
        p2 = app._write_tmp("circuit.qasm", "OPENQASM 2.0;")
        try:
            self.assertNotEqual(p1, p2)
            self.assertTrue(pathlib.Path(p1).exists())
            self.assertTrue(pathlib.Path(p2).exists())
            self.assertEqual(pathlib.Path(p1).read_text(encoding="utf-8"), "OPENQASM 2.0;")
            self.assertEqual(pathlib.Path(p2).read_text(encoding="utf-8"), "OPENQASM 2.0;")
        finally:
            # Best-effort cleanup of the temp files created above.
            for path in (p1, p2):
                try:
                    os.remove(path)
                except FileNotFoundError:
                    pass

    def test_explain_reuse_preserves_previous_markdown(self):
        """An unchanged circuit hash reuses the cached explanation."""
        qc = QuantumStateVector(2)
        last_hash = app._circuit_hash(qc.history)

        shown, returned_hash, stored_md = app.explain_llm(
            qc=qc,
            n_qubits=2,
            shots=1024,
            last_hash=last_hash,
            previous_explanation="previous explanation",
        )

        self.assertEqual(returned_hash, last_hash)
        self.assertEqual(stored_md, "previous explanation")
        self.assertIn("Reusing previous explanation", shown)

    def test_explain_failure_preserves_previous_markdown(self):
        """A failing LLM call keeps the old markdown and surfaces the error."""
        qc = QuantumStateVector(2)
        qc.apply_single("H", target=0)

        with patch.object(app, "explain_with_gpt4o", side_effect=RuntimeError("boom")):
            shown, returned_hash, stored_md = app.explain_llm(
                qc=qc,
                n_qubits=2,
                shots=1024,
                last_hash="",
                previous_explanation="previous explanation",
            )

        self.assertEqual(returned_hash, "")
        self.assertEqual(stored_md, "previous explanation")
        self.assertIn("Explanation request failed", shown)
        self.assertIn("Showing previous explanation", shown)

    def test_hotspot_rows_sorted_descending(self):
        """Rows come back sorted by risk (descending) and truncated to top_k."""
        rows = app._hotspot_rows(self._sample_metrics(), n_qubits=3, top_k=2)
        self.assertEqual(len(rows), 2)
        self.assertEqual(rows[0][0], 1)
        self.assertEqual(rows[0][1], "critical")
        self.assertGreaterEqual(rows[0][2], rows[1][2])

    def test_hotspot_rows_include_layout_coordinates(self):
        """Supplied qubit coordinates appear as the final two row columns."""
        rows = app._hotspot_rows(
            self._sample_metrics(),
            n_qubits=3,
            top_k=1,
            qubit_coords={1: (2, 3)},
        )
        self.assertEqual(rows[0][-2], 2)
        self.assertEqual(rows[0][-1], 3)

    def test_ideal_vs_noisy_plot_returns_figure(self):
        """The comparison plot is a figure-like object with at least one axes."""
        qc = QuantumStateVector(2)
        qc.apply_single("H", target=0)
        fig = app._ideal_vs_noisy_plot(
            qc=qc,
            shots=64,
            calibration_text='{"qubits":{"0":{"readout_error":0.1},"1":{"readout_error":0.1}}}',
            readout_scale=1.0,
            depolarizing_prob=0.1,
        )
        self.assertTrue(hasattr(fig, "axes"))
        self.assertGreaterEqual(len(fig.axes), 1)
|
|
|
| if __name__ == "__main__": |
| unittest.main() |
|
|