language:
- en
- mul
license: cc0-1.0
task_categories:
- text-generation
- feature-extraction
- text-classification
- question-answering
pretty_name: Open Library
tags:
- books
- library
- bibliography
- open-library
- openlibrary
- isbn
- authors
- wikidata
- reading
- catalog
size_categories:
- 100M<n<1B
configs:
- config_name: authors
data_files:
- split: train
path: data/authors/*.parquet
- config_name: works
data_files:
- split: train
path: data/works/*.parquet
- config_name: editions
data_files:
- split: train
path: data/editions/*.parquet
default: true
- config_name: ratings
data_files:
- split: train
path: data/ratings/*.parquet
- config_name: reading-log
data_files:
- split: train
path: data/reading-log/*.parquet
- config_name: redirects
data_files:
- split: train
path: data/redirects/*.parquet
- config_name: deletes
data_files:
- split: train
path: data/deletes/*.parquet
- config_name: lists
data_files:
- split: train
path: data/lists/*.parquet
- config_name: other
data_files:
- split: train
path: data/other/*.parquet
- config_name: covers_metadata
data_files:
- split: train
path: data/covers_metadata/*.parquet
- config_name: wikidata
data_files:
- split: train
path: data/wikidata/*.parquet
dataset_info:
- config_name: authors
features:
- name: key
dtype: string
- name: name
dtype: string
- name: alternate_names
dtype: string
- name: bio
dtype: string
- name: birth_date
dtype: string
- name: death_date
dtype: string
- name: date
dtype: string
- name: entity_type
dtype: string
- name: fuller_name
dtype: string
- name: personal_name
dtype: string
- name: title
dtype: string
- name: photos
dtype: string
- name: links
dtype: string
- name: remote_ids
dtype: string
- name: wikidata_id
dtype: string
- name: viaf_id
dtype: string
- name: isni
dtype: string
- name: source_records
dtype: string
- name: revision
dtype: int32
- name: created
dtype: string
- name: last_modified
dtype: string
splits:
- name: train
num_examples: 16000000
- config_name: works
features:
- name: key
dtype: string
- name: title
dtype: string
- name: subtitle
dtype: string
- name: author_keys
dtype: string
- name: covers
dtype: string
- name: description
dtype: string
- name: notes
dtype: string
- name: subjects
dtype: string
- name: subject_places
dtype: string
- name: subject_people
dtype: string
- name: subject_times
dtype: string
- name: lc_classifications
dtype: string
- name: first_publish_date
dtype: string
- name: revision
dtype: int32
- name: created
dtype: string
- name: last_modified
dtype: string
splits:
- name: train
num_examples: 41000000
- config_name: editions
features:
- name: key
dtype: string
- name: title
dtype: string
- name: subtitle
dtype: string
- name: author_keys
dtype: string
- name: work_keys
dtype: string
- name: isbn_10
dtype: string
- name: isbn_13
dtype: string
- name: lccn
dtype: string
- name: oclc_numbers
dtype: string
- name: ocaid
dtype: string
- name: identifiers
dtype: string
- name: local_id
dtype: string
- name: publishers
dtype: string
- name: publish_date
dtype: string
- name: publish_places
dtype: string
- name: publish_country
dtype: string
- name: edition_name
dtype: string
- name: by_statement
dtype: string
- name: contributions
dtype: string
- name: languages
dtype: string
- name: translated_from
dtype: string
- name: translation_of
dtype: string
- name: number_of_pages
dtype: int32
- name: pagination
dtype: string
- name: physical_format
dtype: string
- name: physical_dimensions
dtype: string
- name: weight
dtype: string
- name: covers
dtype: string
- name: description
dtype: string
- name: first_sentence
dtype: string
- name: notes
dtype: string
- name: table_of_contents
dtype: string
- name: subjects
dtype: string
- name: dewey_decimal_class
dtype: string
- name: lc_classifications
dtype: string
- name: genres
dtype: string
- name: series
dtype: string
- name: links
dtype: string
- name: source_records
dtype: string
- name: copyright_date
dtype: string
- name: other_titles
dtype: string
- name: work_titles
dtype: string
- name: classifications
dtype: string
- name: revision
dtype: int32
- name: created
dtype: string
- name: last_modified
dtype: string
splits:
- name: train
num_examples: 56000000
- config_name: ratings
features:
- name: work_key
dtype: string
- name: edition_key
dtype: string
- name: rating
dtype: int32
- name: date
dtype: string
splits:
- name: train
num_examples: 1000000
- config_name: reading-log
features:
- name: work_key
dtype: string
- name: edition_key
dtype: string
- name: shelf
dtype: string
- name: date
dtype: string
splits:
- name: train
num_examples: 12000000
- config_name: redirects
features:
- name: key
dtype: string
- name: location
dtype: string
- name: type_name
dtype: string
- name: revision
dtype: int32
- name: last_modified
dtype: string
splits:
- name: train
num_examples: 2000000
- config_name: deletes
features:
- name: key
dtype: string
- name: type_name
dtype: string
- name: revision
dtype: int32
- name: last_modified
dtype: string
splits:
- name: train
num_examples: 4000000
- config_name: lists
features:
- name: key
dtype: string
- name: name
dtype: string
- name: description
dtype: string
- name: seeds
dtype: string
- name: seed_count
dtype: int32
- name: revision
dtype: int32
- name: created
dtype: string
- name: last_modified
dtype: string
splits:
- name: train
num_examples: 1000000
- config_name: other
features:
- name: key
dtype: string
- name: type_name
dtype: string
- name: json_data
dtype: string
- name: revision
dtype: int32
- name: last_modified
dtype: string
splits:
- name: train
num_examples: 1000000
- config_name: covers_metadata
features:
- name: id
dtype: int64
- name: width
dtype: int32
- name: height
dtype: int32
- name: created
dtype: string
splits:
- name: train
num_examples: 15000000
- config_name: wikidata
features:
- name: wikidata_id
dtype: string
- name: json_data
dtype: string
splits:
- name: train
num_examples: 1000000
Open Library
The complete Open Library catalog in clean, analysis-ready Parquet. 150.0M+ records across 11 entity types, from ISBNs and author bios to reading logs and Wikidata links.
What is it?
Open Library is a complete snapshot of the Open Library database, an open project of the Internet Archive with the mission of creating "one web page for every book ever published." The catalog is community-edited and contains bibliographic records for millions of authors, works, and physical editions, along with user-contributed star ratings and reading logs.
This dataset converts the official OpenLibrary Data Dumps from their native TSV+JSON format into clean, columnar Apache Parquet files with Zstd compression. Every field from every record type is fully preserved. Nothing is dropped or filtered.
Dump date: 2026-02 | Total records: 150.0M | License: CC0 1.0 (Public Domain)
Why this dataset?
Open Library publishes monthly bulk dumps, but they arrive as multi-gigabyte gzipped TSV files with embedded JSON. They are awkward to query, impossible to stream into a training pipeline, and painful to join across entity types. This dataset takes care of all that:
- Columnar: every field is a named Parquet column with a concrete type, queryable out of the box with DuckDB, Pandas, Polars, or Spark
- Complete: all 11 entity types and all fields, from ISBNs and Dewey Decimal numbers to Wikidata cross-references and reading shelf timestamps
- Normalized: nested JSON structures (text blocks, language references, author role arrays) are unwrapped into clean strings and flat key lists
- Sharded: roughly 1M rows per file, so you can stream or download only what you need
- Joined: the data model connects authors to works to editions, making it straightforward to build knowledge graphs, recommendation systems, or bibliographic search indexes
What is being released?
The dataset ships with 11 configs, one per entity type. Each config has one or more Parquet shards (roughly 1M rows each). The original .txt.gz dumps are also included under raw/ for full reproducibility.
data/ raw/
authors/ authors/
works/ works/
editions/ editions/
ratings/ ratings/
reading-log/ reading-log/
redirects/ redirects/
deletes/ deletes/
lists/ lists/
other/ other/
covers_metadata/ covers_metadata/
wikidata/ wikidata/
Config overview
| Config | Records | Description |
|---|---|---|
authors |
16.0M | Authors with biographies, birth/death dates, and cross-references to Wikidata/VIAF/ISNI |
works |
41.0M | Abstract works linking editions together, with subjects and descriptions |
editions |
56.0M | Physical editions with ISBNs, publishers, page counts, languages, and cover IDs |
ratings |
1.0M | User star ratings (1-5) for works and editions |
reading-log |
12.0M | User reading shelves: want-to-read, currently-reading, already-read |
redirects |
2.0M | Key redirects (merged or moved records) |
deletes |
4.0M | Deleted record stubs |
lists |
1.0M | User-created reading lists with seed references |
other |
1.0M | Miscellaneous types: languages, i18n strings, templates, macros |
covers_metadata |
15.0M | Cover image dimensions (width, height) for cover IDs |
wikidata |
1.0M | Wikidata cross-reference records with full JSON |
Data model
The three core configs form a hierarchy:
Authors ──┐
├──→ Works ──→ Editions
Authors ──┘ │
├──→ Ratings
└──→ Reading Log
An author writes one or more works (abstract creations). Each work has one or more editions (physical or digital manifestations with ISBNs, publishers, and page counts). Users attach ratings and reading log entries to works.
The remaining configs provide supporting infrastructure. Redirects resolve merged records. Deletes track removed keys. Lists are user-curated collections. Covers metadata maps cover IDs to image dimensions. Wikidata links records to the Wikidata knowledge graph. Other catches miscellaneous types like language definitions and i18n strings.
Data instance
Here is an example row from the editions config:
{
"key": "/books/OL1M",
"title": "Fantastic Mr Fox",
"subtitle": null,
"author_keys": "[\"/authors/OL34184A\"]",
"work_keys": "[\"/works/OL45804W\"]",
"isbn_10": "[\"0140328726\"]",
"isbn_13": "[\"9780140328721\"]",
"publishers": "[\"Puffin\"]",
"publish_date": "October 1, 1988",
"languages": "[\"eng\"]",
"number_of_pages": 96,
"physical_format": "Paperback",
"covers": "[6498900]",
"subjects": "[\"Animals\", \"Foxes\", \"Children's fiction\"]",
"revision": 14,
"created": "2008-04-01T03:28:50.625462",
"last_modified": "2024-01-15T10:22:33.891204"
}
Array and object fields are stored as JSON strings (not native Parquet lists) for maximum flexibility. Parse them with json.loads() in Python or json_extract() in DuckDB.
How to download and use
Using datasets
from datasets import load_dataset
# Load a specific config (editions is the default)
editions = load_dataset("open-index/open-library", "editions", split="train")
authors = load_dataset("open-index/open-library", "authors", split="train")
# Stream large configs without downloading everything
for doc in load_dataset("open-index/open-library", "editions", split="train", streaming=True):
print(doc["key"], doc["title"], doc["isbn_13"])
Using huggingface_hub
from huggingface_hub import snapshot_download
# Download just one config
snapshot_download(
"open-index/open-library",
repo_type="dataset",
local_dir="./open-library/",
allow_patterns="data/authors/*",
)
# Download everything
snapshot_download(
"open-index/open-library",
repo_type="dataset",
local_dir="./open-library/",
)
For faster downloads, install the transfer extra (pip install huggingface_hub[hf_transfer]) and set HF_HUB_ENABLE_HF_TRANSFER=1.
Using DuckDB (zero download, remote query)
DuckDB can query the Parquet files directly from HuggingFace without downloading anything to disk. Here are some queries to get you started:
Find all editions of a specific work. Let's look up every printing of Roald Dahl's "Fantastic Mr Fox":
SELECT e.key, e.title, e.isbn_13, e.publishers, e.publish_date
FROM read_parquet('hf://datasets/open-index/open-library/data/editions/*.parquet') e
WHERE e.work_keys LIKE '%OL45804W%'
ORDER BY e.publish_date
LIMIT 20;
| key | title | isbn_13 | publishers | publish_date |
|---|---|---|---|---|
| /books/OL1M | Fantastic Mr Fox | ["9780140328721"] | ["Puffin"] | October 1, 1988 |
| /books/OL2M | Fantastic Mr. Fox | ["9780375822063"] | ["Knopf"] | 2002 |
Discover the highest-rated books. This query finds works with at least 100 ratings, sorted by average score:
SELECT work_key,
ROUND(avg(rating), 2) AS avg_rating,
count(*) AS num_ratings
FROM read_parquet('hf://datasets/open-index/open-library/data/ratings/*.parquet')
GROUP BY work_key
HAVING count(*) >= 100
ORDER BY avg_rating DESC
LIMIT 5;
| work_key | avg_rating | num_ratings |
|---|---|---|
| /works/OL82563W | 4.72 | 142 |
| /works/OL17860744W | 4.68 | 109 |
| /works/OL5735363W | 4.65 | 287 |
| /works/OL362427W | 4.63 | 516 |
| /works/OL15413843W | 4.62 | 153 |
Find the most prolific writers. Join authors with works to see who has the most entries in the catalog:
SELECT a.name,
count(*) AS num_works
FROM read_parquet('hf://datasets/open-index/open-library/data/works/*.parquet') w,
read_parquet('hf://datasets/open-index/open-library/data/authors/*.parquet') a
WHERE w.author_keys LIKE '%' || a.key || '%'
GROUP BY a.name
ORDER BY num_works DESC
LIMIT 5;
| name | num_works |
|---|---|
| United States | 60,657 |
| Anonymous | 33,671 |
| William Shakespeare | 11,210 |
| Various | 8,435 |
| Charles Dickens | 8,206 |
Explore Wikidata cross-references. Over 177K authors are linked to Wikidata, making it easy to enrich records with external knowledge:
SELECT key, name, wikidata_id, birth_date, death_date
FROM read_parquet('hf://datasets/open-index/open-library/data/authors/*.parquet')
WHERE wikidata_id IS NOT NULL
LIMIT 5;
| key | name | wikidata_id | birth_date | death_date |
|---|---|---|---|---|
| /authors/OL34184A | Roald Dahl | Q25161 | 13 September 1916 | 23 November 1990 |
| /authors/OL23919A | J.K. Rowling | Q34660 | 31 July 1965 | |
| /authors/OL2162284A | Stephen King | Q39829 | September 21, 1947 | |
See what people want to read. The reading log captures millions of shelf actions from Open Library users:
SELECT work_key, count(*) AS readers
FROM read_parquet('hf://datasets/open-index/open-library/data/reading-log/*.parquet')
WHERE shelf = 'want-to-read'
GROUP BY work_key
ORDER BY readers DESC
LIMIT 5;
| work_key | readers |
|---|---|
| /works/OL45804W | 8,432 |
| /works/OL82563W | 7,891 |
| /works/OL468516W | 6,234 |
| /works/OL15413843W | 5,987 |
| /works/OL362427W | 5,412 |
Using the raw dumps
The original .txt.gz files are preserved under raw/ for full reproducibility:
from huggingface_hub import hf_hub_download
path = hf_hub_download(
"open-index/open-library",
"raw/authors/ol_dump_authors_2026-02-28.txt.gz",
repo_type="dataset",
)
Dataset card for Open Library
Dataset Description
- Homepage: https://openlibrary.org
- Source Dumps: https://openlibrary.org/developers/dumps
- Repository: https://huggingface.co/datasets/open-index/open-library
- Point of Contact: Please create a discussion on the Community tab
- License: CC0 1.0 Universal (Public Domain)
Dataset Structure
Authors
The authors config contains one row per author record. Open Library identifies authors by keys like /authors/OL1A.
| Column | Type | Description |
|---|---|---|
key |
string | OpenLibrary key, e.g. /authors/OL34184A |
name |
string | Display name, e.g. "Roald Dahl" |
alternate_names |
string | JSON array of alternative name forms |
bio |
string | Biography text (unwrapped from text_block) |
birth_date |
string | Free-form birth date, e.g. "13 September 1916" |
death_date |
string | Free-form death date |
date |
string | General date string (used when birth/death are unclear) |
entity_type |
string | "person", "org", or "event" |
fuller_name |
string | Full legal name |
personal_name |
string | Personal name variant |
title |
string | Honorific (e.g. "Sir", "OBE") |
photos |
string | JSON array of cover image IDs |
links |
string | JSON array of {"title": "...", "url": "..."} objects |
remote_ids |
string | JSON object with wikidata, viaf, isni, goodreads, and more |
wikidata_id |
string | Extracted Wikidata Q-ID for easy cross-referencing |
viaf_id |
string | Extracted VIAF identifier |
isni |
string | Extracted ISNI identifier |
source_records |
string | JSON array of import source identifiers |
revision |
int32 | Record revision number |
created |
string | ISO 8601 creation timestamp |
last_modified |
string | ISO 8601 last modification timestamp |
Works
The works config contains abstract works, the creative unit that links multiple editions together. A "work" is language- and format-independent: "Hamlet" is one work whether it appears as a 1603 quarto or a 2024 Penguin paperback.
| Column | Type | Description |
|---|---|---|
key |
string | OpenLibrary key, e.g. /works/OL45804W |
title |
string | Work title |
subtitle |
string | Subtitle |
author_keys |
string | JSON array of author keys, e.g. ["/authors/OL34184A"] |
covers |
string | JSON array of cover image IDs |
description |
string | Work description (unwrapped from text_block) |
notes |
string | Editorial notes |
subjects |
string | JSON array of subject headings, e.g. ["Fiction", "Foxes"] |
subject_places |
string | JSON array of geographic subjects |
subject_people |
string | JSON array of people subjects |
subject_times |
string | JSON array of time period subjects |
lc_classifications |
string | JSON array of Library of Congress classification numbers |
first_publish_date |
string | Earliest known publication date |
revision |
int32 | Record revision number |
created |
string | ISO 8601 creation timestamp |
last_modified |
string | ISO 8601 last modification timestamp |
Editions
The editions config is the largest and richest, representing physical or digital manifestations of works. This is where you will find ISBNs, publishers, page counts, and cover images.
| Column | Type | Description |
|---|---|---|
key |
string | OpenLibrary key, e.g. /books/OL1M |
title |
string | Edition title |
subtitle |
string | Subtitle |
author_keys |
string | JSON array of author keys |
work_keys |
string | JSON array of parent work keys |
isbn_10 |
string | JSON array of ISBN-10 numbers |
isbn_13 |
string | JSON array of ISBN-13 numbers |
lccn |
string | JSON array of Library of Congress Control Numbers |
oclc_numbers |
string | JSON array of OCLC/WorldCat IDs |
ocaid |
string | Internet Archive identifier (links to digitized full text) |
identifiers |
string | JSON object with goodreads, librarything, amazon, and more |
local_id |
string | JSON array of local URNs |
publishers |
string | JSON array of publisher names |
publish_date |
string | Publication date (free-form, e.g. "1988", "October 1, 1988") |
publish_places |
string | JSON array of publication places |
publish_country |
string | MARC21 country code |
edition_name |
string | Edition descriptor (e.g. "1st ed.", "Rev. ed.") |
by_statement |
string | Authorship statement as printed on the book |
contributions |
string | JSON array of contributors (illustrators, translators, etc.) |
languages |
string | JSON array of language codes, e.g. ["eng"] |
translated_from |
string | JSON array of original language codes |
translation_of |
string | Original title if this edition is a translation |
number_of_pages |
int32 | Page count |
pagination |
string | Pagination description (e.g. "xi, 345 p.") |
physical_format |
string | "Paperback", "Hardcover", "E-book", etc. |
physical_dimensions |
string | Dimensions string |
weight |
string | Weight string |
covers |
string | JSON array of cover image IDs |
description |
string | Edition-specific description |
first_sentence |
string | Opening sentence of the book |
notes |
string | Editorial notes |
table_of_contents |
string | JSON array of TOC entries |
subjects |
string | JSON array of subject headings |
dewey_decimal_class |
string | JSON array of Dewey Decimal numbers |
lc_classifications |
string | JSON array of LC classification numbers |
genres |
string | JSON array of genre labels |
series |
string | JSON array of series names |
links |
string | JSON array of external links |
source_records |
string | JSON array of import sources (e.g. MARC records, Amazon) |
copyright_date |
string | Copyright year |
other_titles |
string | JSON array of alternative titles |
work_titles |
string | JSON array of related work titles |
classifications |
string | JSON object of additional classification schemes |
revision |
int32 | Record revision number |
created |
string | ISO 8601 creation timestamp |
last_modified |
string | ISO 8601 last modification timestamp |
Ratings
Anonymized user star ratings. Each row records one user's rating of a work or edition. No user identifiers are included.
| Column | Type | Description |
|---|---|---|
work_key |
string | Work reference, e.g. /works/OL45804W |
edition_key |
string | Edition reference (may be empty) |
rating |
int32 | Star rating, 1 to 5 |
date |
string | ISO date the rating was submitted |
Reading Log
Anonymized reading shelf data. Users on Open Library can mark books as "want to read", "currently reading", or "already read."
| Column | Type | Description |
|---|---|---|
work_key |
string | Work reference |
edition_key |
string | Edition reference (may be empty) |
shelf |
string | One of: want-to-read, currently-reading, already-read |
date |
string | ISO date of the shelf action |
Redirects
When records are merged or moved, Open Library creates a redirect. Use this config to resolve old keys to their current locations.
| Column | Type | Description |
|---|---|---|
key |
string | Old/redirected key |
location |
string | Target key being redirected to |
type_name |
string | Type key (always /type/redirect) |
revision |
int32 | Record revision number |
last_modified |
string | ISO 8601 timestamp |
Deletes
Stub records for keys that have been deleted from the database.
| Column | Type | Description |
|---|---|---|
key |
string | Deleted record key |
type_name |
string | Type key (always /type/delete) |
revision |
int32 | Record revision number |
last_modified |
string | ISO 8601 timestamp |
Lists
User-created reading lists. Each list has a name, description, and a set of "seeds" (references to works, editions, authors, or subjects).
| Column | Type | Description |
|---|---|---|
key |
string | List key, e.g. /people/user123/lists/OL1L |
name |
string | List name |
description |
string | List description |
seeds |
string | JSON array of seed keys (works, editions, subjects) |
seed_count |
int32 | Number of seeds in the list |
revision |
int32 | Record revision number |
created |
string | ISO 8601 creation timestamp |
last_modified |
string | ISO 8601 timestamp |
Covers Metadata
Dimensions of cover images hosted by Open Library. You can construct cover URLs as https://covers.openlibrary.org/b/id/{id}-L.jpg.
| Column | Type | Description |
|---|---|---|
id |
int64 | Cover image ID |
width |
int32 | Image width in pixels |
height |
int32 | Image height in pixels |
created |
string | Upload timestamp |
Wikidata
Cross-reference records linking Open Library entities to Wikidata. The json_data field contains the full Open Library JSON body of each Wikidata-type record.
| Column | Type | Description |
|---|---|---|
wikidata_id |
string | Wikidata Q-identifier, e.g. Q36322 |
json_data |
string | Full JSON body of the record |
Other
Miscellaneous record types: language definitions, i18n strings, internal templates, macros, and other infrastructure records that do not fit the main entity types.
| Column | Type | Description |
|---|---|---|
key |
string | Record key |
type_name |
string | Type key, e.g. /type/language, /type/i18n |
json_data |
string | Full JSON body |
revision |
int32 | Record revision number |
last_modified |
string | ISO 8601 timestamp |
Data Completeness
Every field is extracted from the source data exactly as provided by OpenLibrary. The population rates below are computed directly during conversion and reflect the natural completeness of the community-edited catalog.
Most author records are auto-imported stubs from library catalogs (just a name and key), so optional fields like bio and birth_date appear in a small fraction of records. Editions are the richest config, with strong coverage of ISBNs, publishers, languages, and page counts. Works sit in the middle, with nearly all having author links and about half having subject headings.
You can verify these numbers yourself with DuckDB. Here are the queries and results for the three core configs:
Authors (15.1M records): most are auto-imported stubs, so rich fields like bio are rare, but 1.9M have birth dates and 177K link to Wikidata.
SELECT COUNT(*) AS total,
COUNT(bio) AS has_bio,
ROUND(COUNT(bio) * 100.0 / COUNT(*), 1) AS bio_pct,
COUNT(birth_date) AS has_birth,
ROUND(COUNT(birth_date) * 100.0 / COUNT(*), 1) AS birth_pct,
COUNT(wikidata_id) AS has_wikidata,
ROUND(COUNT(wikidata_id) * 100.0 / COUNT(*), 1) AS wikidata_pct,
COUNT(remote_ids) AS has_remote_ids,
ROUND(COUNT(remote_ids) * 100.0 / COUNT(*), 1) AS remote_pct
FROM read_parquet('hf://datasets/open-index/open-library/data/authors/*.parquet');
| total | has_bio | bio_pct | has_birth | birth_pct | has_wikidata | wikidata_pct | has_remote_ids | remote_pct |
|---|---|---|---|---|---|---|---|---|
| 15,071,242 | 43,822 | 0.3% | 1,935,260 | 12.8% | 177,233 | 1.2% | 273,801 | 1.8% |
Works (40.7M records): nearly all have author links and about half have subject headings, making this config great for topic-based exploration.
SELECT COUNT(*) AS total,
COUNT(author_keys) AS has_authors,
ROUND(COUNT(author_keys) * 100.0 / COUNT(*), 1) AS authors_pct,
COUNT(subjects) AS has_subjects,
ROUND(COUNT(subjects) * 100.0 / COUNT(*), 1) AS subjects_pct,
COUNT(description) AS has_desc,
ROUND(COUNT(description) * 100.0 / COUNT(*), 1) AS desc_pct,
COUNT(covers) AS has_covers,
ROUND(COUNT(covers) * 100.0 / COUNT(*), 1) AS covers_pct
FROM read_parquet('hf://datasets/open-index/open-library/data/works/*.parquet');
| total | has_authors | authors_pct | has_subjects | subjects_pct | has_desc | desc_pct | has_covers | covers_pct |
|---|---|---|---|---|---|---|---|---|
| 40,718,247 | 38,478,663 | 94.5% | 20,024,694 | 49.2% | 1,828,315 | 4.5% | 9,645,694 | 23.7% |
Editions (55.6M records): the richest config by far, with strong coverage of publishers (94.6%), languages (86.6%), and page counts (62.6%). Over half have ISBN-13 numbers.
SELECT COUNT(*) AS total,
COUNT(isbn_13) AS has_isbn13,
ROUND(COUNT(isbn_13) * 100.0 / COUNT(*), 1) AS isbn13_pct,
COUNT(publishers) AS has_pub,
ROUND(COUNT(publishers) * 100.0 / COUNT(*), 1) AS pub_pct,
COUNT(languages) AS has_lang,
ROUND(COUNT(languages) * 100.0 / COUNT(*), 1) AS lang_pct,
COUNT(number_of_pages) AS has_pages,
ROUND(COUNT(number_of_pages) * 100.0 / COUNT(*), 1) AS pages_pct
FROM read_parquet('hf://datasets/open-index/open-library/data/editions/*.parquet');
| total | has_isbn13 | isbn13_pct | has_pub | pub_pct | has_lang | lang_pct | has_pages | pages_pct |
|---|---|---|---|---|---|---|---|---|
| 55,615,769 | 29,558,726 | 53.1% | 52,626,364 | 94.6% | 48,137,077 | 86.6% | 34,802,286 | 62.6% |
Dataset Creation
Curation Rationale
Open Library is the largest open bibliographic database in the world, but its bulk dumps are challenging to work with directly. They are multi-gigabyte gzipped TSV files with JSON embedded in the fifth column, different record types mixed together in the same file, and nested structures that require custom parsing. Researchers and ML practitioners who want to train on book metadata, build recommendation systems, or create knowledge graphs must write significant boilerplate just to read the data.
This dataset eliminates that friction. By converting every dump to typed Parquet with normalized fields, we make the entire catalog immediately queryable with standard tools. The editions config is set as the default because it is the most commonly used for ISBN lookups and bibliographic search.
Source Data
Open Library is an open, editable library catalog maintained by the Internet Archive. It publishes monthly bulk data dumps at openlibrary.org/developers/dumps covering every record in the database: authors, works, editions, ratings, reading logs, redirects, deleted records, user lists, cover metadata, and Wikidata cross-references.
The data originates from multiple sources: MARC records from libraries worldwide, Amazon product data, user contributions, and automated imports from other bibliographic databases. The catalog is continuously edited by volunteers and library professionals.
Data Processing
The processing pipeline converts each dump type from its native format into clean Parquet. The entire pipeline is written in Go and runs as a single streaming pass per dump type:
- Download the `.txt.gz` dump from Internet Archive (the `_latest` URLs redirect to the most recent monthly snapshot)
- Stream through the gzipped file line-by-line using a 16 MB scanner buffer. No intermediate uncompressed copy is created on disk.
- Parse each TSV line and extract all JSON fields using gjson for zero-allocation field extraction
- Normalize nested structures:
  - Text blocks (`{"type": "/type/text", "value": "..."}`) become plain strings
  - Language references (`[{"key": "/languages/eng"}]`) become `["eng"]`
  - Author role arrays (`[{"author": {"key": "/authors/OL1A"}}]`) become `["/authors/OL1A"]`
  - Identifier objects get extracted into top-level columns where useful (`wikidata_id`, `viaf_id`, `isni`)
- Write to sharded Parquet files with Zstd compression (roughly 1M rows per shard, 500K rows per row group, 4096-row batch size)
Memory usage stays constant regardless of dump size thanks to the fully streaming architecture. All 11 dump types are downloaded and converted in parallel, with results committed to HuggingFace incrementally as each type completes.
Field Encoding
Array and object fields (e.g. subjects, identifiers, remote_ids) are stored as JSON strings, not native Parquet lists or maps. This is a deliberate design choice:
- Flexibility: DuckDB's `json_extract()`, Python's `json.loads()`, and Polars' `str.json_decode()` all work out of the box
- Schema stability: the underlying JSON can contain arbitrary keys without breaking the Parquet schema
- Query friendliness: `LIKE '%search_term%'` works for simple substring searches, while `json_extract()` handles structured access
Frequently-used identifiers (wikidata_id, viaf_id, isni on authors) are also extracted as dedicated top-level columns so you can cross-reference without JSON parsing.
Considerations for Using the Data
Data Quality
Open Library is a community-edited catalog. Keep these characteristics in mind:
- Dates are free-form strings. You will encounter "1899", "March 1980", "c. 1650", "1st millennium BCE", and blank values. There is no single date format.
- Duplicate records exist. The `redirects` config maps old keys to current ones, which can help you deduplicate.
- Coverage varies by era and region. Modern English-language books are well-represented, while older, non-English, and non-Western works may have sparser records.
- Some fields are sparsely populated. Not every edition has a page count, ISBN, or cover image. See the Data Completeness section above for exact population rates.
Personal Information
Author records contain biographical information (names, birth/death dates, biographies, photos) sourced from public library catalogs and community contributions. This is standard bibliographic data, publicly available from libraries worldwide.
Reading logs and ratings are fully anonymized and contain no user identifiers, usernames, or any information that could identify who submitted them.
Known Limitations
- No full text. This dataset contains metadata only. For actual book content, see the Internet Archive's Open Library Lending program or Project Gutenberg.
- No cover images. The `covers` and `photos` fields contain numeric IDs, not images. You can construct URLs like `https://covers.openlibrary.org/b/id/{id}-L.jpg` to fetch them.
- English-centric. While the dataset contains records in many languages (see the `languages` field), the catalog has stronger coverage of English-language publications.
- JSON string columns. Array fields require an extra parsing step compared to native Parquet lists. This is the trade-off for schema flexibility.
Social Impact
By converting Open Library's dumps into a format that standard data tools can read without custom parsers, we aim to make the world's largest open book catalog accessible to researchers, educators, and developers who might not otherwise be able to work with it. Potential applications include:
- Recommendation systems using ratings, reading logs, and subject headings
- Knowledge graphs built from author, work, and edition relationships with Wikidata links
- Bibliographic search over titles, descriptions, subjects, and ISBNs
- Digital humanities research into publishing trends, translation patterns, and subject evolution over time
- Library science studies on cataloging practices, record quality, and coverage gaps
Additional Information
Licensing
Open Library data is released under CC0 1.0 (Public Domain Dedication). You are free to use it for any purpose, commercial or non-commercial, without attribution. Attribution to Open Library and the Internet Archive is appreciated but not required.
Citation
If you use this dataset in research, please cite both Open Library and this conversion:
@misc{open-library-parquet,
title = {Open Library: Complete Catalog in Parquet},
author = {open-index},
year = {2026},
url = {https://huggingface.co/datasets/open-index/open-library},
note = {Converted from OpenLibrary.org data dumps}
}
Contact
Please open a discussion on the Community tab for questions, feedback, or issues.