File size: 4,954 Bytes
201cf4d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
#!/usr/bin/env python3
"""Report backend/runtime portability for this repo.

The goal is to answer one question deterministically:
what can this checkout run on this machine right now?
"""

from __future__ import annotations

import json
import platform
import shutil
import sys
from pathlib import Path
from typing import Any, Dict

from training.core.runtime_contract import BackendName, KernelSupportLevel
from training.core.unified_backend import backend_capability_report

ROOT = Path(__file__).resolve().parent.parent


def _command_available(name: str) -> bool:
    return shutil.which(name) is not None


def _import_status(module_name: str) -> Dict[str, Any]:
    try:
        __import__(module_name)
        return {"available": True, "error": ""}
    except Exception as exc:  # pragma: no cover - environment dependent
        return {"available": False, "error": f"{type(exc).__name__}: {exc}"}


def _torch_status() -> Dict[str, Any]:
    status: Dict[str, Any] = {
        "available": False,
        "version": "",
        "cuda": False,
        "mps": False,
    }
    try:
        import torch

        status["available"] = True
        status["version"] = torch.__version__
        status["cuda"] = bool(torch.cuda.is_available())
        status["mps"] = bool(getattr(torch.backends, "mps", None) and torch.backends.mps.is_available())
    except Exception as exc:  # pragma: no cover - environment dependent
        status["error"] = f"{type(exc).__name__}: {exc}"
    return status


def _max_status() -> Dict[str, Any]:
    """Return the MAX backend's entry from the capability matrix, or ``{}``."""
    capability_matrix = backend_capability_report()
    return capability_matrix.get(BackendName.MAX.value, {})


def _repo_paths() -> Dict[str, bool]:
    """Report which expected repo artifacts (Dockerfiles, key dirs) exist."""
    expected = (
        ("docker_cpu", ROOT / "docker" / "Dockerfile.cpu"),
        ("docker_cuda", ROOT / "docker" / "Dockerfile.cuda"),
        ("docker_modular", ROOT / "docker" / "Dockerfile.modular"),
        ("docker_entrypoint", ROOT / "docker" / "entrypoint.sh"),
        ("mojo_dir", ROOT / "mojo"),
        ("kernels_dir", ROOT / "kernels"),
        ("prediction_engine", ROOT / "prediction_engine"),
        ("serving", ROOT / "serving"),
        ("patent_docs", ROOT / "docs" / "patents"),
    )
    return {label: location.exists() for label, location in expected}


def build_runtime_matrix() -> Dict[str, Any]:
    """Assemble the full runtime/portability report for this checkout.

    The report combines interpreter/platform facts, tool availability,
    importability of the repo's top-level packages, torch/MAX status, the
    backend capability matrix (plus a condensed ``support_matrix`` view),
    on-disk repo artifacts, and the canonical commands to run next.
    """
    capabilities = backend_capability_report()

    # Condense each backend's capability payload to the four fields that
    # matter for a quick support decision.
    support_matrix: Dict[str, Any] = {}
    for backend_name, payload in capabilities.items():
        support_matrix[backend_name] = {
            "available": payload.get("available", False),
            "execution_mode": payload.get("execution_mode", "unavailable"),
            "kernel_support": payload.get(
                "kernel_support", KernelSupportLevel.UNSUPPORTED.value
            ),
            "fallback_backend": payload.get("fallback_backend"),
        }

    report: Dict[str, Any] = {"root": str(ROOT)}
    report["python"] = {
        "executable": sys.executable,
        "version": sys.version.split()[0],
    }
    report["platform"] = {
        "system": platform.system(),
        "release": platform.release(),
        "machine": platform.machine(),
        "processor": platform.processor(),
    }
    report["tooling"] = {
        tool: _command_available(tool) for tool in ("uv", "docker", "mojo", "modular")
    }
    report["modules"] = {
        module: _import_status(module)
        for module in ("training", "prediction_engine", "serving")
    }
    report["torch"] = _torch_status()
    report["max"] = _max_status()
    report["capabilities"] = capabilities
    report["support_matrix"] = support_matrix
    report["repo_paths"] = _repo_paths()
    report["recommended_commands"] = {
        "repo_smoke": ".venv/bin/python3 scripts/confirm_pipeline.py --profile smoke",
        "repo_confirm": ".venv/bin/python3 scripts/confirm_pipeline.py --profile full-matrix --max-samples 32 --epochs 1",
        "benchmark_matrix": ".venv/bin/python3 scripts/benchmark_matrix.py --max-samples 16 --epochs 1 --allow-failures",
        "runtime_matrix": ".venv/bin/python3 scripts/runtime_matrix.py",
        "patent_inventory": ".venv/bin/python3 scripts/repo_patent_inventory.py --write",
        "patent_packet": ".venv/bin/python3 scripts/generate_patent_packet.py --write",
        "docker_cpu": "docker build -f docker/Dockerfile.cpu -t ane-cpu . && docker run --rm ane-cpu smoke",
        "docker_cuda": "docker build -f docker/Dockerfile.cuda -t ane-cuda . && docker run --rm --gpus all ane-cuda bench-runtime",
        "docker_modular": "docker build -f docker/Dockerfile.modular -t ane-modular . && docker run --rm ane-modular bench-runtime",
    }
    return report


def main() -> int:
    """Print the runtime matrix as sorted, indented JSON; always exit 0."""
    payload = build_runtime_matrix()
    print(json.dumps(payload, indent=2, sort_keys=True))
    return 0


if __name__ == "__main__":
    # sys.exit raises SystemExit(main()), matching the original guard exactly.
    sys.exit(main())