Dataset Viewer
Duplicate
The dataset viewer is not available for this split.
Cannot extract the features (columns) for the split 'train' of the config 'default' of the dataset.
Error code:   FeaturesError
Exception:    ArrowInvalid
Message:      JSON parse error: Column() changed from object to string in row 0
Traceback:    Traceback (most recent call last):
                File "/usr/local/lib/python3.12/site-packages/datasets/packaged_modules/json/json.py", line 183, in _generate_tables
                  df = pandas_read_json(f)
                       ^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/packaged_modules/json/json.py", line 38, in pandas_read_json
                  return pd.read_json(path_or_buf, **kwargs)
                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/io/json/_json.py", line 815, in read_json
                  return json_reader.read()
                         ^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/io/json/_json.py", line 1014, in read
                  obj = self._get_object_parser(self.data)
                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/io/json/_json.py", line 1040, in _get_object_parser
                  obj = FrameParser(json, **kwargs).parse()
                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/io/json/_json.py", line 1176, in parse
                  self._parse()
                File "/usr/local/lib/python3.12/site-packages/pandas/io/json/_json.py", line 1391, in _parse
                  self.obj = DataFrame(
                             ^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/core/frame.py", line 778, in __init__
                  mgr = dict_to_mgr(data, index, columns, dtype=dtype, copy=copy, typ=manager)
                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/core/internals/construction.py", line 503, in dict_to_mgr
                  return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)
                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/core/internals/construction.py", line 114, in arrays_to_mgr
                  index = _extract_index(arrays)
                          ^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/pandas/core/internals/construction.py", line 667, in _extract_index
                  raise ValueError("If using all scalar values, you must pass an index")
              ValueError: If using all scalar values, you must pass an index
              
              During handling of the above exception, another exception occurred:
              
              Traceback (most recent call last):
                File "/src/services/worker/src/worker/job_runners/split/first_rows.py", line 243, in compute_first_rows_from_streaming_response
                  iterable_dataset = iterable_dataset._resolve_features()
                                     ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 3608, in _resolve_features
                  features = _infer_features_from_batch(self.with_format(None)._head())
                                                        ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2368, in _head
                  return next(iter(self.iter(batch_size=n)))
                         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2573, in iter
                  for key, example in iterator:
                                      ^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2060, in __iter__
                  for key, pa_table in self._iter_arrow():
                                       ^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2082, in _iter_arrow
                  yield from self.ex_iterable._iter_arrow()
                File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 544, in _iter_arrow
                  for key, pa_table in iterator:
                                       ^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 383, in _iter_arrow
                  for key, pa_table in self.generate_tables_fn(**gen_kwags):
                                       ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
                File "/usr/local/lib/python3.12/site-packages/datasets/packaged_modules/json/json.py", line 186, in _generate_tables
                  raise e
                File "/usr/local/lib/python3.12/site-packages/datasets/packaged_modules/json/json.py", line 160, in _generate_tables
                  pa_table = paj.read_json(
                             ^^^^^^^^^^^^^^
                File "pyarrow/_json.pyx", line 342, in pyarrow._json.read_json
                File "pyarrow/error.pxi", line 155, in pyarrow.lib.pyarrow_internal_check_status
                File "pyarrow/error.pxi", line 92, in pyarrow.lib.check_status
              pyarrow.lib.ArrowInvalid: JSON parse error: Column() changed from object to string in row 0

Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.

# GetNID — Neurofold Identity Registry v1.0

**A deterministic, cryptographically-immutable identifier registry for 6.7 million Wikipedia concepts.**

GetNID mints a canonical **Neurofold ID (NID)** for every resolved Wikipedia article, bridging human-readable titles and Wikidata QIDs through a trustless, serverless resolution protocol. The registry is designed to function as foundational namespace infrastructure for decentralized knowledge systems.

---

## What is a NID?

A Neurofold ID is a deterministic 13-character identifier derived directly from the SHA-256 hash of a concept's Wikidata QID.

"Mathematics" → Q395 → N5336CE94E17E "Albert Einstein" → Q937 → N6F2A8C14B301


NIDs are **permanent**, **content-addressed**, and **collision-resistant**. They require no central authority to issue or resolve.

---

## Resolution Architecture

The registry implements a **double-sharding scheme** for O(1) lookup with no index scans, no search, and no server-side compute.

    Title lookup (double-hop):
      sha256(normalize(title))[:2] % 256 → router shard → NID
      NID[1:5] % 8192                    → data shard  → record

    QID / NID lookup (single-hop):
      sha256(QID)[:4] % 8192 → data shard → record
      NID[1:5] % 8192        → data shard → record


| Layer | Shards | Purpose |
|---|---|---|
| Data shards | 8,192 | `ledger(nid, qid, title, lang)` |
| Router shards | 256 | `routes(key_hash, target_nid)` |

All shards are static SQLite files (~100KB each). The full registry is ~1.5GB. Every shard is independently verifiable against a cryptographic `manifest.json` generated at genesis time.

---

## Live Demo

**[getnid.org](https://getnid.org)** — resolve any Wikipedia title, QID, or NID in the browser.

Resolution runs entirely client-side via a Web Worker using sql.js (SQLite compiled to WebAssembly). Resolved shards are cached locally via the browser's **Origin Private File System (OPFS)**, enabling zero-latency offline resolution after first access. Users can opt into a full global sync to permanently mirror the entire registry locally.

---

## Dataset Structure

    v1/
    ├── manifest.json        # SHA-256 hash of every shard (trustless verification)
    ├── meta.json            # Version, genesis timestamp, master checksum, metrics
    ├── shards/
    │   ├── shard_0000.db    # SQLite — ledger table
    │   ├── shard_0001.db
    │   └── ... (8,192 total)
    └── routers/
        ├── router_000.db    # SQLite — routes table
        ├── router_001.db
        └── ... (256 total)


### `ledger` schema (data shards)
```sql
-- Data-shard table: one record per minted NID.
CREATE TABLE ledger (
    nid   TEXT PRIMARY KEY,  -- e.g. N5336CE94E17E
    qid   TEXT UNIQUE,       -- e.g. Q395
    title TEXT,              -- e.g. Mathematics
    lang  TEXT               -- e.g. en
);

```

### `routes` schema (router shards)

```sql
-- Router-shard table: first hop of the double-hop title lookup; maps a
-- hashed, normalized title to the NID it resolves to.
CREATE TABLE routes (
    key_hash   TEXT PRIMARY KEY,  -- sha256(norm_title)[:16]
    shard_id   INTEGER,           -- data shard holding the target record
    norm_title TEXT,              -- normalized title (exact normalization not shown here)
    target_nid TEXT               -- NID this key resolves to
);

```

## Usage

### Python (local resolution)

```python
# Example: resolve records against a local copy of the registry
# (the "./v1" directory layout shown under "Dataset Structure" above).
from getnid.registry import LocalRegistryClient
from pathlib import Path

client = LocalRegistryClient(Path("./v1"))

# Resolve by title
client.get_by_title("Mathematics")
# → {"nid": "N5336CE94E17E", "qid": "Q395", "title": "Mathematics", "lang": "en"}

# Resolve by QID
client.get_by_qid("Q42")
# → {"nid": "N...", "qid": "Q42", "title": "Douglas Adams", "lang": "en"}

# Resolve by NID
client.get_by_nid("N5336CE94E17E")
# → {"nid": "N5336CE94E17E", "qid": "Q395", "title": "Mathematics", "lang": "en"}

```

### Direct shard query (any language)

```python
import hashlib, sqlite3

def resolve_qid(qid: str, shard_dir: str) -> dict:
    h = hashlib.sha256(qid.upper().encode()).hexdigest().upper()
    shard_id = int(h[:4], 16) % 8192
    db_path = f"{shard_dir}/shard_{shard_id:04d}.db"
    with sqlite3.connect(db_path) as conn:
        row = conn.execute(
            "SELECT nid, qid, title, lang FROM ledger WHERE qid = ?",
            (qid.upper(),)
        ).fetchone()
    return dict(zip(["nid", "qid", "title", "lang"], row)) if row else None

```

### JavaScript / Browser

```js
const sha256 = async (text) => {
    const buf = await crypto.subtle.digest('SHA-256', new TextEncoder().encode(text));
    return Array.from(new Uint8Array(buf)).map(b => b.toString(16).padStart(2,'0')).join('').toUpperCase();
};

// Single-hop QID lookup: sha256(QID)[:4] % 8192 selects the data shard file.
async function resolveQID(qid) {
    const normalized = qid.toUpperCase();
    const digest = await sha256(normalized);
    const shardId = parseInt(digest.slice(0, 4), 16) % 8192;
    const fileName = `shard_${String(shardId).padStart(4,'0')}.db`;
    // Fetch the shard file, then run the SELECT with sql.js
}

```

## Metrics

| Metric | Value |
|---|---|
| Minted NIDs | ~6.7M |
| Languages | en (v1.0) |
| Data shards | 8,192 |
| Router shards | 256 |
| Avg shard size | ~100 KB |
| Total payload | ~1.5 GB |
| Lookup complexity | O(1) |

## Licensing

| Component | License |
|---|---|
| Registry code & protocol | Apache 2.0 |
| Ledger data (derived from Wikidata) | CC BY-SA 4.0 |

Wikidata content is made available by the Wikimedia Foundation under CC BY-SA 4.0. Use of the registry data is subject to those upstream terms.


## Repository

[github.com/neurofold/getnid](https://github.com/neurofold/getnid)

## Citation

```bibtex
@misc{getnid2026,
  author  = {Larson, JB},
  title   = {GetNID: Neurofold Identity Registry v1.0},
  year    = {2026},
  url     = {https://huggingface.co/datasets/Neurofold/getnid},
  note    = {Deterministic identifier registry for 6.7M Wikipedia concepts}
}
```

Downloads last month: 7,377