AletheionGuard / model_info.json
gnai-creator
Add model weights with Git LFS
1f08279
{
"model_name": "AletheionGuard Trial 012",
"version": "1.0.0",
"training_date": "2025-11-11",
"architecture": "Pyramidal Q1Q2 with Gates",
"framework": "PyTorch Lightning",
"embedding_model": "sentence-transformers/all-MiniLM-L6-v2",
"embedding_dim": 384,
"files": {
"q1_gate.pth": {
"description": "Aleatoric Uncertainty (Q1) Gate - MLP for inherent randomness",
"size_kb": 904,
"parameters": "~235k"
},
"q2_gate.pth": {
"description": "Epistemic Uncertainty (Q2) Gate - MLP for model confidence",
"size_kb": 905,
"parameters": "~235k"
},
"height_gate.pth": {
"description": "Pyramidal Height Gate - MLP for combined confidence score",
"size_kb": 230,
"parameters": "~60k"
},
"base_forces.pth": {
"description": "Base Force Embeddings - Learnable base representation",
"size_kb": 198,
"parameters": "~51k"
},
"q1q2_best.ckpt": {
"description": "Full PyTorch Lightning checkpoint (epoch 24, val_loss=0.2944)",
"size_mb": 6.6,
"parameters": "~580k total"
}
},
"training_info": {
"dataset": "Synthetic dataset with epistemic labels",
"num_samples": 1590,
"train_split": 0.7,
"val_split": 0.15,
"test_split": 0.15,
"epochs_trained": 33,
"best_epoch": 24,
"best_val_loss": 0.2944,
"learning_rate": 0.001,
"optimizer": "Adam",
"batch_size": 32
},
"metrics": {
"q1_mse": 0.0501,
"q2_mse": 0.0499,
"rce": 0.0415,
"height_mse": 0.0521,
"description": "Metrics on synthetic test set. Fine-tuning on real data (TruthfulQA + SQuAD) expected to improve by 10-15%."
},
"usage": {
"python_sdk": "from aletheion_guard import EpistemicAuditor; auditor = EpistemicAuditor(model_path='AletheionAGI/aletheionguard-models')",
"rest_api": "POST https://api.aletheion.com/v1/audit with JSON body {\"text\": \"...\", \"api_key\": \"ag_...\"}",
"byo_hf": "Deploy this Space as PRIVATE and use with AletheionGuard BYO-HF mode"
},
"license": "AGPL-3.0-or-later",
"author": "Felipe Maya Muniz",
"copyright": "2024-2025 AletheionAGI",
"notes": [
"These models were trained on synthetic data and are suitable for MVP/demo purposes.",
"For production use, fine-tune on real datasets (TruthfulQA, SQuAD v2, etc.).",
"Gate and base-force models are small (~2.3MB combined, excluding the 6.6MB Lightning checkpoint) and optimized for fast inference.",
"Expected inference time: <50ms per request on CPU."
]
}