|
|
"""HuggingFace Datasets loader for .causal knowledge graph files.""" |
|
|
|
|
|
import datasets |
|
|
from datasets import DatasetInfo, Features, Value, Sequence |
|
|
|
|
|
|
|
|
class CausalConfig(datasets.BuilderConfig):
    """Builder configuration for loading .causal knowledge graph files.

    Carries two dataset-level filters on top of the standard
    ``datasets.BuilderConfig`` fields (name, version, description, ...).
    """

    def __init__(self, include_inferred: bool = True, min_confidence: float = 0.0, **kwargs):
        """
        Args:
            include_inferred: Include inferred triplets (default: True)
            min_confidence: Minimum confidence threshold (default: 0.0)
        """
        # Forward standard config fields (name, version, ...) to the base class.
        super().__init__(**kwargs)
        # Filters applied per-row in CausalDataset._generate_examples.
        self.include_inferred = include_inferred
        self.min_confidence = min_confidence
|
|
|
|
|
|
|
|
class CausalDataset(datasets.GeneratorBasedBuilder):
    """
    HuggingFace Dataset loader for .causal knowledge graph files.

    The .causal format is a binary knowledge graph with embedded deterministic
    inference. It provides zero-hallucination fact retrieval with full provenance.

    Usage:
        from datasets import load_dataset

        # Load from local file
        ds = load_dataset("chkmie/dotcausal", data_files="knowledge.causal")

        # Load with config
        ds = load_dataset(
            "chkmie/dotcausal",
            data_files="knowledge.causal",
            include_inferred=True,
            min_confidence=0.5,
        )

    Features:
        - trigger (str): The cause/trigger entity
        - mechanism (str): The relationship type
        - outcome (str): The effect/outcome entity
        - confidence (float): Confidence score (0-1)
        - is_inferred (bool): Whether derived or explicit
        - source (str): Original source (e.g., paper)
        - provenance (list): Source triplets for inferred facts

    References:
        - PyPI: https://pypi.org/project/dotcausal/
        - GitHub: https://github.com/DT-Foss/dotcausal
        - Paper: https://doi.org/10.5281/zenodo.18326222
    """

    BUILDER_CONFIG_CLASS = CausalConfig
    BUILDER_CONFIGS = [
        CausalConfig(
            name="default",
            version=datasets.Version("1.0.0"),
            description="Load all triplets from .causal files",
        ),
        CausalConfig(
            name="explicit_only",
            version=datasets.Version("1.0.0"),
            description="Load only explicit triplets (no inferred)",
            include_inferred=False,
        ),
        CausalConfig(
            name="high_confidence",
            version=datasets.Version("1.0.0"),
            description="Load triplets with confidence >= 0.8",
            min_confidence=0.8,
        ),
    ]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return the static schema/metadata for this dataset."""
        return DatasetInfo(
            description="""\
.causal knowledge graph dataset with embedded deterministic inference.
Each row represents a causal triplet (trigger → mechanism → outcome).
""",
            features=Features(
                {
                    "trigger": Value("string"),
                    "mechanism": Value("string"),
                    "outcome": Value("string"),
                    "confidence": Value("float32"),
                    "is_inferred": Value("bool"),
                    "source": Value("string"),
                    "provenance": Sequence(Value("string")),
                }
            ),
            homepage="https://dotcausal.com",
            license="MIT",
            citation="""\
@article{foss2026causal,
  author = {Foss, David Tom},
  title = {The .causal Format: Deterministic Inference for AI-Assisted Hypothesis Amplification},
  journal = {Zenodo},
  year = {2026},
  doi = {10.5281/zenodo.18326222}
}
""",
        )

    def _make_split(self, name, files, dl_manager):
        """Download *files* (a path or list of paths) and wrap them in a SplitGenerator."""
        if isinstance(files, str):
            files = [files]
        downloaded = dl_manager.download_and_extract(files)
        return datasets.SplitGenerator(
            name=name,
            gen_kwargs={"filepaths": downloaded},
        )

    def _split_generators(self, dl_manager):
        """Generate splits from data files.

        Handles the three shapes ``data_files`` may take: a dict mapping
        split names to file(s), a list/tuple of files (single "train"
        split), or a single path (single "train" split).
        """
        data_files = self.config.data_files

        if not data_files:
            raise ValueError(
                "No data_files specified. Use: load_dataset('chkmie/dotcausal', data_files='your_file.causal')"
            )

        if isinstance(data_files, dict):
            # One SplitGenerator per user-defined split name.
            return [
                self._make_split(split_name, files, dl_manager)
                for split_name, files in data_files.items()
            ]
        if isinstance(data_files, (list, tuple)):
            return [self._make_split(datasets.Split.TRAIN, list(data_files), dl_manager)]
        # Single path-like value: wrap in a list, map to the train split.
        return [self._make_split(datasets.Split.TRAIN, [data_files], dl_manager)]

    def _generate_examples(self, filepaths):
        """Yield (key, example) pairs from one or more .causal files.

        Applies the config's ``min_confidence`` and ``include_inferred``
        filters per row and normalizes provenance to a list of strings.

        Raises:
            ImportError: If the ``dotcausal`` reader package is not installed.
        """
        try:
            from dotcausal import CausalReader
        except ImportError as err:
            # Chain the original error so the real import failure is visible.
            raise ImportError(
                "dotcausal package required. Install with: pip install dotcausal"
            ) from err

        if isinstance(filepaths, str):
            filepaths = [filepaths]

        # Hoist config lookups out of the per-row loop.
        min_confidence = self.config.min_confidence
        include_inferred = self.config.include_inferred

        idx = 0
        for filepath in filepaths:
            reader = CausalReader(filepath)

            # Empty query matches all triplets. NOTE(review): the hard-coded
            # limit silently truncates graphs with more than 100k triplets
            # per file — confirm against the dotcausal API whether an
            # unbounded iteration is available.
            results = reader.search("", limit=100000)

            for r in results:
                confidence = r.get("confidence", 1.0)
                is_inferred = r.get("is_inferred", False)

                # Config-level filters.
                if confidence < min_confidence:
                    continue
                if is_inferred and not include_inferred:
                    continue

                # Normalize provenance to list[str] for the Sequence feature.
                provenance = r.get("provenance", [])
                if isinstance(provenance, list):
                    provenance = [str(p) for p in provenance]
                else:
                    provenance = [str(provenance)] if provenance else []

                yield idx, {
                    "trigger": r.get("trigger", ""),
                    "mechanism": r.get("mechanism", ""),
                    "outcome": r.get("outcome", ""),
                    "confidence": float(confidence),
                    "is_inferred": bool(is_inferred),
                    "source": r.get("source", ""),
                    "provenance": provenance,
                }
                idx += 1
|
|