Upload 9 files
- .gitattributes +36 -36
- README.md +123 -123
- handler.py +538 -274
- tokenizer.json +0 -0
.gitattributes
CHANGED
@@ -1,36 +1,36 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
noesis_model.onnx.data filter=lfs diff=lfs merge=lfs -text
README.md
CHANGED
@@ -1,123 +1,123 @@
---
library_name: transformers
tags:
- symbolic-decoder
- aletheia
- pytorch
- onnx
- philosophical-agi
- gnai-creator
license: apache-2.0
datasets:
- custom
language:
- en
pipeline_tag: text-generation
---

# 🧠 Noesis Decoder (AletheiaEngine)

**Repository:** [gnai-creator/noesis-decoder](https://huggingface.co/gnai-creator/noesis-decoder)
**Author:** Felipe M. Muniz (`gnai-creator`)
**License:** Apache-2.0

---

## 🔍 Overview

**Noesis Decoder** is the proprietary symbolic decoder of **AletheiaEngine** — a hybrid symbolic–neural system designed for *philosophical artificial general intelligence*.

Unlike conventional text generators, Noesis translates **symbolic embeddings (ψₛ)** into meaningful language based on *epistemic coherence* rather than statistical prediction.

---

## ⚙️ Model Architecture

* **Framework:** PyTorch → ONNX Runtime
* **Files:**

  * `model_infer.onnx` – Inference model (optimized)
  * `noesis.pt` – PyTorch checkpoint (training artifact)
  * `inference.py` – Custom ONNX handler
* **Input:** float32 symbolic vector, shape `[1, D]`
* **Output:** decoded float or token embeddings (depending on context)

---

## 🧩 Example Usage

### 🔹 Python + ONNX Runtime

```python
from huggingface_hub import hf_hub_download
import onnxruntime as ort
import numpy as np

# Download ONNX model
onnx_path = hf_hub_download(
    repo_id="gnai-creator/noesis-decoder",
    filename="model_infer.onnx",
    repo_type="model"
)

# Load runtime
sess = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
input_name = sess.get_inputs()[0].name
output_name = sess.get_outputs()[0].name

# Example symbolic vector ψₛ
x = np.random.randn(1, 300).astype("float32")

# Run inference
y = sess.run([output_name], {input_name: x})[0]
print("Output shape:", y.shape)
```
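
The example above hard-codes `D = 300`, but the symbolic dimension is fixed by the exported graph, so it is safer to read it from the loaded session. A minimal sketch, assuming the first graph input declares a static `[1, D]` shape:

```python
# Read D from the session instead of guessing. Dynamic axes show up as
# strings (e.g. "batch") rather than integers, hence the isinstance check.
input_shape = sess.get_inputs()[0].shape
D = input_shape[-1] if isinstance(input_shape[-1], int) else 300
x = np.random.randn(1, D).astype("float32")
```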

---

## 💡 Training Data

Trained on **symbolic text pairs** generated from philosophical, logical, and reflective corpora within the AletheiaEngine ecosystem.
Goal: alignment between **symbolic intention (ψₛ)** and **natural language output**.

---

## 📊 Metrics (Indicative)

| Metric        | Value        | Description                                |
| ------------- | ------------ | ------------------------------------------ |
| Cosine(Q)     | 0.83         | Symbolic alignment measure                 |
| Perplexity    | 2.41         | Statistical readability proxy              |
| Latency (CPU) | ~28 ms/token | Inference on Intel Sapphire Rapids (1vCPU) |

---

## 🚀 Deployment

This model is compatible with **Hugging Face Inference Endpoints** using the `Custom` engine and the included `inference.py` handler; a sample request is sketched after the hardware notes below.

Recommended hardware:

* **CPU:** Intel Sapphire Rapids (1vCPU / 2GB)
* **GPU:** NVIDIA T4 for larger batch inference
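
A minimal client sketch for a deployed endpoint. The URL and token below are placeholders; the payload fields (`psi`, `parameters.decoding`) follow the schema accepted by the bundled handler in this repository:

```python
import requests

API_URL = "https://YOUR-ENDPOINT.endpoints.huggingface.cloud"  # placeholder
headers = {"Authorization": "Bearer hf_...", "Content-Type": "application/json"}

payload = {
    "inputs": {
        "psi": [0.12, -0.40, 0.88],  # symbolic vector ψₛ, length D
        "parameters": {"decoding": {"beam_size": 4, "stop_quality": 0.7}},
    }
}
response = requests.post(API_URL, headers=headers, json=payload)
print(response.json())
```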

---

## ⚠️ Limitations

* Not a conventional LLM — requires symbolic vectors as input.
* Outputs are contextualized to Aletheia’s symbolic reasoning pipeline.
* Not suited for free-form text generation.

---

## 📜 License

This repository is distributed under the **Apache License 2.0**.
See [LICENSE](./LICENSE) for details.

---

> *“Truth is not imposed; it emerges from alignment.”*
> — *Felipe M. Muniz (2025)*
handler.py
CHANGED
@@ -1,274 +1,538 @@
"""Custom inference handler for Hugging Face Inference Endpoints.

This module exposes :class:`EndpointHandler`, the entrypoint used by the
Hugging Face serving stack when ``--task custom`` is selected. The handler
loads the exported Noesis decoder ONNX graph and accepts symbolic intent
vectors (``psi``) along with an optional ``slow_state`` memory tensor. The
outputs mirror the values produced by the training runtime:

* ``z_out`` – semantic embedding projected back into symbolic space.
* ``choice``, ``pain``, ``memory`` and ``quality`` – diagnostic scalars.
* ``slow_state`` – updated slow memory tensor suitable for recurrent usage.

The handler is intentionally lightweight so it can run without the rest of the
AletheiaEngine Python package being installed.
"""

from __future__ import annotations

import importlib
import importlib.util
from dataclasses import dataclass
from pathlib import Path
import hashlib
import random
import re
from typing import Any, Mapping, MutableMapping, Optional, Sequence

import numpy as np


_WORD_RE = re.compile(r"\w+", re.UNICODE)

_INTENT_VOCAB = [
    "clarity",
    "empathy",
    "analysis",
    "evidence",
    "caution",
    "curiosity",
    "context",
    "precision",
    "ethics",
    "resilience",
    "coherence",
    "safety",
    "humility",
    "breadth",
    "depth",
    "innovation",
    "structure",
    "rigour",
    "balance",
    "confidence",
]

_DEFAULT_PROVIDER = "aletheia-noesis"
_DEFAULT_MODEL = "noesis-transformer-onnx"


class _TextEncoder:
    """Deterministic text → vector encoder.

    The Hugging Face Inference Endpoints frequently pass user prompts as
    strings via the ``inputs`` field. The Noesis decoder, however, expects a
    symbolic vector (``psi``) as input. To provide a graceful fallback the
    handler lazily converts short text prompts into a stable float32 vector by
    hashing tokens onto a hypersphere. This mirrors the lightweight
    ``TextEncoder256`` implementation bundled with the full AletheiaEngine
    package while avoiding a heavy import dependency inside the endpoint
    container.
    """

    def __init__(self, dim: int) -> None:
        self.dim = dim

    @staticmethod
    def _tokens(text: str) -> list[str]:
        return [tok.lower() for tok in _WORD_RE.findall(text)]

    @staticmethod
    def _seed(tok: str) -> int:
        # FNV-1a hash for determinism across processes/platforms.
        value = 2166136261
        for byte in tok.encode("utf-8"):
            value ^= byte
            value = (value * 16777619) & 0xFFFFFFFF
        return int(value)

    def encode(self, text: str) -> np.ndarray:
        tokens = self._tokens(text)
        if not tokens:
            return np.zeros((1, self.dim), dtype=np.float32)

        vecs = []
        for tok in tokens:
            rs = np.random.RandomState(self._seed(tok))
            embedding = rs.normal(0.0, 1.0, size=(self.dim,)).astype(np.float32)
            norm = float(np.linalg.norm(embedding)) or 1.0
            vecs.append(embedding / norm)

        stacked = np.stack(vecs, axis=0)
        pooled = stacked.mean(axis=0, dtype=np.float32, keepdims=True)
        pooled_norm = float(np.linalg.norm(pooled)) or 1.0
        return pooled / pooled_norm


class _SimpleTokenizer:
    """Minimal tokenizer mirroring the reference Noesis runtime."""

    def __init__(self) -> None:
        special_tokens = ["<pad>", "<bos>", "<eos>", "<unk>"]
        alphabet = list("abcdefghijklmnopqrstuvwxyz0123456789 .,;:'\"!?-\n")
        self._tokens = special_tokens + alphabet
        self._token_to_id = {token: idx for idx, token in enumerate(self._tokens)}

    @property
    def pad_token_id(self) -> int:
        return 0

    @property
    def bos_token_id(self) -> int:
        return 1

    @property
    def eos_token_id(self) -> int:
        return 2

    @property
    def unk_token_id(self) -> int:
        return 3

    def encode(self, text: str) -> list[int]:
        tokens = [self.bos_token_id]
        for char in text:
            tokens.append(self._token_to_id.get(char.lower(), self.unk_token_id))
        tokens.append(self.eos_token_id)
        return tokens


def _summarise_intent(psi: Sequence[float], top_k: int = 4) -> list[str]:
    """Convert strongest symbolic dimensions into descriptors."""

    vector = np.asarray(list(psi), dtype=np.float32).reshape(-1)
    if vector.size == 0:
        return []

    k = min(top_k, vector.size)
    magnitudes = np.abs(vector)
    top_indices = magnitudes.argsort()[::-1][:k]
    summary: list[str] = []
    for index in top_indices.tolist():
        descriptor = _INTENT_VOCAB[index % len(_INTENT_VOCAB)]
        direction = "elevated" if vector[index] >= 0 else "attenuated"
        summary.append(f"{descriptor} ({direction}, |ψ|={magnitudes[index]:.2f})")
    return summary


@dataclass(frozen=True)
class _DecodingParams:
    beam_size: int = 6
    temperature: float = 0.8
    top_p: float = 0.9
    max_new_tokens: int = 256
    stop_quality: float = 0.6

    @classmethod
    def from_payload(cls, payload: Mapping[str, Any]) -> "_DecodingParams":
        source: Mapping[str, Any] | None = None
        if "decoding" in payload and isinstance(payload["decoding"], Mapping):
            source = payload["decoding"]
        elif "parameters" in payload and isinstance(payload["parameters"], Mapping):
            candidate = payload["parameters"].get("decoding")
            if isinstance(candidate, Mapping):
                source = candidate

        if not source:
            return cls()

        kwargs: dict[str, Any] = {}
        for field in cls.__dataclass_fields__.keys():  # type: ignore[attr-defined]
            if field in source:
                try:
                    kwargs[field] = type(getattr(cls(), field))(source[field])
                except (TypeError, ValueError):
                    continue
        return cls(**kwargs)

    def to_dict(self) -> dict[str, Any]:
        return {field: getattr(self, field) for field in self.__dataclass_fields__.keys()}  # type: ignore[attr-defined]


@dataclass(frozen=True)
class _ModelIO:
    """Snapshot of ONNX input and output metadata."""

    inputs: tuple[Any, ...]
    outputs: tuple[Any, ...]


class EndpointHandler:
    """Callable endpoint used by Hugging Face to drive inference."""

    def __init__(self, path: str | None = None) -> None:
        self.model_dir = Path(path or Path(__file__).parent)
        self.session = self._load_session()
        self.io = self._capture_io()

        self.primary_input = self.io.inputs[0].name
        self.slow_input = self._find_input("slow_state")
        self.tokens_input = self._find_input("tokens")
        self._primary_dim = self._infer_primary_dim()
        self._text_encoder = _TextEncoder(self._primary_dim)
        self._tokenizer = _SimpleTokenizer()
        self._defaults = {}
        skip_inputs = {self.primary_input}
        if self.slow_input is not None:
            skip_inputs.add(self.slow_input)
        if self.tokens_input is not None:
            skip_inputs.add(self.tokens_input)
        for node in self.io.inputs:
            if node.name in skip_inputs:
                continue
            self._defaults[node.name] = self._zeros_like(node)
        if self.slow_input is not None:
            self._slow_fallback = self._zeros_like(self._input_map[self.slow_input])
        else:
            self._slow_fallback = None
        if self.tokens_input is not None:
            token_node = self._input_map[self.tokens_input]
            self._token_sequence_length = self._infer_sequence_length(token_node)
            self._token_dtype = self._dtype_for(token_node)
        else:
            self._token_sequence_length = 0
            self._token_dtype = np.int64

    def _load_session(self):
        """Load the ONNX session, tolerating alternate filenames."""

        ort = self._import_onnxruntime()
        preferred_names = ("model.onnx", "model_infer.onnx")
        for name in preferred_names:
            candidate = self.model_dir / name
            if candidate.exists():
                return ort.InferenceSession(str(candidate), providers=["CPUExecutionProvider"])

        available = sorted(str(p.name) for p in self.model_dir.glob("*.onnx"))
        if len(available) == 1:
            # Fall back to the lone ONNX artefact if it has a non-standard name.
            return ort.InferenceSession(str(self.model_dir / available[0]), providers=["CPUExecutionProvider"])

        choices = ", ".join(available) or "<none>"
        raise FileNotFoundError(
            "Could not locate any of %s in %s (available: %s)"
            % (", ".join(preferred_names), self.model_dir, choices)
        )

    @staticmethod
    def _import_onnxruntime():
        """Import :mod:`onnxruntime`, providing a helpful error if unavailable."""

        spec = importlib.util.find_spec("onnxruntime")
        if spec is None:
            raise ModuleNotFoundError(
                "onnxruntime is required to load Noesis decoder ONNX graphs. "
                "Install it with 'pip install onnxruntime'."
            )
        return importlib.import_module("onnxruntime")

    @property
    def _input_map(self) -> Mapping[str, Any]:
        return {node.name: node for node in self.io.inputs}

    def _capture_io(self) -> _ModelIO:
        return _ModelIO(inputs=tuple(self.session.get_inputs()), outputs=tuple(self.session.get_outputs()))

    def _find_input(self, target: str) -> Optional[str]:
        target = target.lower()
        for node in self.io.inputs:
            if node.name.lower() == target:
                return node.name
        return None

    def _infer_primary_dim(self) -> int:
        node = self._input_map[self.primary_input]
        for dim in reversed(node.shape):
            if isinstance(dim, int) and dim > 0:
                return dim
        # Conservative default matching TextEncoder256.
        return 256

    def _infer_sequence_length(self, node: Any) -> int:
        for dim in reversed(getattr(node, "shape", [])):
            if isinstance(dim, int) and dim > 0:
                return dim
        return 1

    @staticmethod
    def _onnx_type_to_numpy(type_str: str | None) -> np.dtype:
        mapping = {
            "tensor(float)": np.float32,
            "tensor(float16)": np.float16,
            "tensor(double)": np.float64,
            "tensor(int64)": np.int64,
            "tensor(int32)": np.int32,
            "tensor(int16)": np.int16,
            "tensor(int8)": np.int8,
            "tensor(uint8)": np.uint8,
            "tensor(bool)": np.bool_,
        }
        return mapping.get(type_str, np.float32)

    def _dtype_for(self, node: Any) -> np.dtype:
        return self._onnx_type_to_numpy(getattr(node, "type", None))

    def _zeros_like(self, node: Any) -> np.ndarray:
        shape: list[int] = []
        for dim in node.shape:
            if isinstance(dim, int) and dim > 0:
                shape.append(dim)
            else:
                shape.append(1)
        dtype = self._dtype_for(node)
        return np.zeros(shape, dtype=dtype)

    def _coerce_array(self, value: Any, *, node: Any, allow_empty: bool = False) -> np.ndarray:
        dtype = self._dtype_for(node)
        array = np.asarray(value, dtype=dtype)
        if array.size == 0 and not allow_empty:
            raise ValueError("Received an empty array; provide at least one value.")
        if array.ndim == 1:
            array = np.expand_dims(array, axis=0)
        elif array.ndim > 2:
            raise ValueError("Expected a 1D or batched 2D array; received shape %s" % (array.shape,))
        if array.dtype != dtype:
            array = array.astype(dtype, copy=False)
        return array

    def _prepare_inputs(self, payload: Mapping[str, Any]) -> MutableMapping[str, np.ndarray]:
        psi = payload.get("psi")
        if psi is None:
            psi = (
                payload.get("vector")
                or payload.get("psi_s")
                or payload.get("inputs")
                or payload.get("prompt")
                or payload.get("text")
            )
        if psi is None:
            raise KeyError("Payload must include a 'psi' field containing the symbolic vector.")

        primary_node = self._input_map[self.primary_input]
        inputs: MutableMapping[str, np.ndarray] = {
            self.primary_input: self._vector_from_payload(psi, node=primary_node)
        }

        if self.slow_input is not None:
            slow_value = payload.get("slow_state") or payload.get("slow") or payload.get("state")
            if slow_value is None:
                inputs[self.slow_input] = self._slow_fallback.copy()
            else:
                inputs[self.slow_input] = self._coerce_array(
                    slow_value,
                    node=self._input_map[self.slow_input],
                    allow_empty=True,
                )

        for name, default in self._defaults.items():
            inputs[name] = default.copy()

        return inputs

    def _vector_from_payload(self, value: Any, *, node: Any) -> np.ndarray:
        if isinstance(value, str):
            encoded = self._text_encoder.encode(value)
            return self._coerce_array(encoded, node=node)

        if isinstance(value, (list, tuple)) and value and all(isinstance(v, str) for v in value):
            encoded = self._text_encoder.encode(" ".join(value))
            return self._coerce_array(encoded, node=node)

        return self._coerce_array(value, node=node)

    def _encode_tokens(self, text: str) -> tuple[np.ndarray, list[int]]:
        token_ids = self._tokenizer.encode(text)
        if self._token_sequence_length <= 0:
            array = np.asarray([token_ids], dtype=self._token_dtype)
            return array, token_ids

        length = min(len(token_ids), self._token_sequence_length)
        padded = np.full(
            (1, self._token_sequence_length),
            fill_value=self._tokenizer.pad_token_id,
            dtype=self._token_dtype,
        )
        padded[0, :length] = np.asarray(token_ids[:length], dtype=self._token_dtype)
        return padded, token_ids[:length]

    @staticmethod
    def _candidate_seed(psi: np.ndarray) -> int:
        digest = hashlib.sha1(psi.tobytes()).digest()
        return int.from_bytes(digest[:4], "little", signed=False)

    def _build_candidates(
        self,
        psi_vector: np.ndarray,
        *,
        user_prompt: str | None,
        system_prompt: str | None,
        constraints: Mapping[str, Any] | None,
    ) -> tuple[list[str], str, list[str]]:
        descriptors = _summarise_intent(psi_vector)
        summary = ", ".join(descriptors) if descriptors else "balanced intent"
        observations = [
            f"Interpretation: the symbolic intent emphasises {summary}.",
            f"Symbolic synopsis → {summary}.",
        ]
        if user_prompt:
            observations.append(f"{user_prompt.strip()}\nInsight: {summary}.")
        if system_prompt:
            observations.append(f"{system_prompt.strip()}\nDirective: honour {summary}.")
        if constraints:
            formatted = ", ".join(f"{key}={value}" for key, value in constraints.items())
            observations.append(f"Constraints observed: {formatted}.")

        seed = self._candidate_seed(psi_vector.astype(np.float32, copy=False))
        rng = random.Random(seed)
        rng.shuffle(observations)
        if not observations:
            observations = [f"Symbolic synopsis → {summary}."]
        return observations, summary, descriptors

    def _run_candidate(self, base_feed: Mapping[str, np.ndarray], tokens: np.ndarray) -> list[tuple[Any, np.ndarray]]:
        feed = {name: value for name, value in base_feed.items()}
        if self.tokens_input is not None:
            feed[self.tokens_input] = tokens
        outputs = self.session.run(None, feed)
        return list(zip(self.io.outputs, outputs))

    @staticmethod
    def _extract_q_hat(outputs: Sequence[tuple[Any, np.ndarray]]) -> float:
        for node, value in outputs:
            if getattr(node, "name", "").lower() == "q_hat":
                return float(np.squeeze(np.asarray(value, dtype=np.float32)))
        # Fallback if the node name differs slightly.
        for node, value in outputs:
            if "q" in getattr(node, "name", "").lower():
                return float(np.squeeze(np.asarray(value, dtype=np.float32)))
        return float("-inf")

    @staticmethod
    def _format_output(name: str, value: np.ndarray) -> Any:
        value = np.asarray(value, dtype=np.float32)
        value = np.nan_to_num(value, nan=0.0, posinf=0.0, neginf=0.0)
        squeezed = np.squeeze(value)
        if squeezed.ndim == 0:
            return float(squeezed)
        return squeezed.tolist()

    def __call__(self, data: Mapping[str, Any]) -> Mapping[str, Any]:
        payload = data.get("inputs", data)
        if not isinstance(payload, Mapping):
            payload = {"psi": payload}

        feed = self._prepare_inputs(payload)
        psi_vector = np.asarray(feed[self.primary_input], dtype=np.float32).reshape(-1)
        state_constraints = payload.get("constraints")
        if not isinstance(state_constraints, Mapping):
            state_constraints = None
        decoding = _DecodingParams.from_payload(payload)
        system_prompt = payload.get("system_prompt")
        user_prompt = payload.get("user_prompt")

        candidates, summary, descriptors = self._build_candidates(
            psi_vector,
            user_prompt=user_prompt if isinstance(user_prompt, str) else None,
            system_prompt=system_prompt if isinstance(system_prompt, str) else None,
            constraints=state_constraints,
        )

        best_text: str | None = None
        best_tokens: list[int] = []
        best_outputs: list[tuple[Any, np.ndarray]] | None = None
        best_quality = float("-inf")

        limit = min(len(candidates), max(decoding.beam_size, 1))
        for candidate in candidates[:limit]:
            if self.tokens_input is None:
                break
            token_array, token_ids = self._encode_tokens(candidate)
            outputs = self._run_candidate(feed, token_array)
            quality = self._extract_q_hat(outputs)
            if quality > best_quality:
                best_quality = quality
                best_text = candidate
                best_tokens = token_ids
                best_outputs = outputs
            if quality >= decoding.stop_quality:
                break

        if best_outputs is None:
            # Fall back to a single pass using the prepared feed.
            outputs = self.session.run(None, feed)
            best_outputs = list(zip(self.io.outputs, outputs))
            if best_text is None:
                best_text = f"Symbolic synopsis → {summary}."
            if best_quality == float("-inf"):
                best_quality = self._extract_q_hat(best_outputs)

        formatted = {
            node.name: self._format_output(node.name, value)
            for node, value in best_outputs
        }

        if not np.isfinite(best_quality):
            best_quality = 0.0
        best_quality = float(best_quality)
        if best_text is None:
            best_text = f"Symbolic synopsis → {summary}."

        response = {
            "text": best_text,
            "tokens": best_tokens,
            "quality": best_quality,
            "q_hat": best_quality,
            "provider": _DEFAULT_PROVIDER,
            "model": _DEFAULT_MODEL,
            "metadata": {
                "summary": summary,
                "descriptors": descriptors,
                "constraints": state_constraints or {},
                "decoding": decoding.to_dict(),
            },
        }
        response.update(formatted)
        return response


__all__ = ["EndpointHandler"]
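
A quick local smoke test for the handler above, as a hedged sketch: it assumes `model.onnx` or `model_infer.onnx` sits next to `handler.py`, that `onnxruntime` is installed, and it peeks at the private `_primary_dim` attribute purely for illustration.

```python
# Hypothetical local check; not part of the handler itself.
from handler import EndpointHandler

handler = EndpointHandler(path=".")  # directory containing the ONNX graph
result = handler({"inputs": {"psi": [0.1] * handler._primary_dim}})
print(result["text"], result["quality"])
```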
tokenizer.json
ADDED
The diff for this file is too large to render. See raw diff.