whiskey1983 tamnd commited on
Commit
e3597fa
·
0 Parent(s):

Duplicate from open-index/hacker-news

Browse files

Co-authored-by: Duc-Tam Nguyen <tamnd@users.noreply.huggingface.co>

This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +60 -0
  2. README.md +561 -0
  3. data/2006/2006-10.parquet +3 -0
  4. data/2006/2006-12.parquet +3 -0
  5. data/2007/2007-02.parquet +3 -0
  6. data/2007/2007-03.parquet +3 -0
  7. data/2007/2007-04.parquet +3 -0
  8. data/2007/2007-05.parquet +3 -0
  9. data/2007/2007-06.parquet +3 -0
  10. data/2007/2007-07.parquet +3 -0
  11. data/2007/2007-08.parquet +3 -0
  12. data/2007/2007-09.parquet +3 -0
  13. data/2007/2007-10.parquet +3 -0
  14. data/2007/2007-11.parquet +3 -0
  15. data/2007/2007-12.parquet +3 -0
  16. data/2008/2008-01.parquet +3 -0
  17. data/2008/2008-02.parquet +3 -0
  18. data/2008/2008-03.parquet +3 -0
  19. data/2008/2008-04.parquet +3 -0
  20. data/2008/2008-05.parquet +3 -0
  21. data/2008/2008-06.parquet +3 -0
  22. data/2008/2008-07.parquet +3 -0
  23. data/2008/2008-08.parquet +3 -0
  24. data/2008/2008-09.parquet +3 -0
  25. data/2008/2008-10.parquet +3 -0
  26. data/2008/2008-11.parquet +3 -0
  27. data/2008/2008-12.parquet +3 -0
  28. data/2009/2009-01.parquet +3 -0
  29. data/2009/2009-02.parquet +3 -0
  30. data/2009/2009-03.parquet +3 -0
  31. data/2009/2009-04.parquet +3 -0
  32. data/2009/2009-05.parquet +3 -0
  33. data/2009/2009-06.parquet +3 -0
  34. data/2009/2009-07.parquet +3 -0
  35. data/2009/2009-08.parquet +3 -0
  36. data/2009/2009-09.parquet +3 -0
  37. data/2009/2009-10.parquet +3 -0
  38. data/2009/2009-11.parquet +3 -0
  39. data/2009/2009-12.parquet +3 -0
  40. data/2010/2010-01.parquet +3 -0
  41. data/2010/2010-02.parquet +3 -0
  42. data/2010/2010-03.parquet +3 -0
  43. data/2010/2010-04.parquet +3 -0
  44. data/2010/2010-05.parquet +3 -0
  45. data/2010/2010-06.parquet +3 -0
  46. data/2010/2010-07.parquet +3 -0
  47. data/2010/2010-08.parquet +3 -0
  48. data/2010/2010-09.parquet +3 -0
  49. data/2010/2010-10.parquet +3 -0
  50. data/2010/2010-11.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.avro filter=lfs diff=lfs merge=lfs -text
4
+ *.bin filter=lfs diff=lfs merge=lfs -text
5
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
6
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
7
+ *.ftz filter=lfs diff=lfs merge=lfs -text
8
+ *.gz filter=lfs diff=lfs merge=lfs -text
9
+ *.h5 filter=lfs diff=lfs merge=lfs -text
10
+ *.joblib filter=lfs diff=lfs merge=lfs -text
11
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
12
+ *.lz4 filter=lfs diff=lfs merge=lfs -text
13
+ *.mds filter=lfs diff=lfs merge=lfs -text
14
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
15
+ *.model filter=lfs diff=lfs merge=lfs -text
16
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
17
+ *.npy filter=lfs diff=lfs merge=lfs -text
18
+ *.npz filter=lfs diff=lfs merge=lfs -text
19
+ *.onnx filter=lfs diff=lfs merge=lfs -text
20
+ *.ot filter=lfs diff=lfs merge=lfs -text
21
+ *.parquet filter=lfs diff=lfs merge=lfs -text
22
+ *.pb filter=lfs diff=lfs merge=lfs -text
23
+ *.pickle filter=lfs diff=lfs merge=lfs -text
24
+ *.pkl filter=lfs diff=lfs merge=lfs -text
25
+ *.pt filter=lfs diff=lfs merge=lfs -text
26
+ *.pth filter=lfs diff=lfs merge=lfs -text
27
+ *.rar filter=lfs diff=lfs merge=lfs -text
28
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
29
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
31
+ *.tar filter=lfs diff=lfs merge=lfs -text
32
+ *.tflite filter=lfs diff=lfs merge=lfs -text
33
+ *.tgz filter=lfs diff=lfs merge=lfs -text
34
+ *.wasm filter=lfs diff=lfs merge=lfs -text
35
+ *.xz filter=lfs diff=lfs merge=lfs -text
36
+ *.zip filter=lfs diff=lfs merge=lfs -text
37
+ *.zst filter=lfs diff=lfs merge=lfs -text
38
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
39
+ # Audio files - uncompressed
40
+ *.pcm filter=lfs diff=lfs merge=lfs -text
41
+ *.sam filter=lfs diff=lfs merge=lfs -text
42
+ *.raw filter=lfs diff=lfs merge=lfs -text
43
+ # Audio files - compressed
44
+ *.aac filter=lfs diff=lfs merge=lfs -text
45
+ *.flac filter=lfs diff=lfs merge=lfs -text
46
+ *.mp3 filter=lfs diff=lfs merge=lfs -text
47
+ *.ogg filter=lfs diff=lfs merge=lfs -text
48
+ *.wav filter=lfs diff=lfs merge=lfs -text
49
+ # Image files - uncompressed
50
+ *.bmp filter=lfs diff=lfs merge=lfs -text
51
+ *.gif filter=lfs diff=lfs merge=lfs -text
52
+ *.png filter=lfs diff=lfs merge=lfs -text
53
+ *.tiff filter=lfs diff=lfs merge=lfs -text
54
+ # Image files - compressed
55
+ *.jpg filter=lfs diff=lfs merge=lfs -text
56
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
57
+ *.webp filter=lfs diff=lfs merge=lfs -text
58
+ # Video files - compressed
59
+ *.mp4 filter=lfs diff=lfs merge=lfs -text
60
+ *.webm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,561 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: odc-by
3
+ task_categories:
4
+ - text-generation
5
+ - feature-extraction
6
+ - text-classification
7
+ - question-answering
8
+ language:
9
+ - en
10
+ pretty_name: Hacker News - Complete Archive
11
+ size_categories:
12
+ - 10M<n<100M
13
+ tags:
14
+ - hacker-news
15
+ - forum
16
+ - text
17
+ - parquet
18
+ - community
19
+ - tech
20
+ - comments
21
+ - live-updated
22
+ configs:
23
+ - config_name: default
24
+ data_files:
25
+ - split: train
26
+ path: data/*/*.parquet
27
+ - config_name: today
28
+ data_files:
29
+ - split: train
30
+ path: today/**/*.parquet
31
+ ---
32
+
33
+ # Hacker News - Complete Archive
34
+
35
+ > Every Hacker News item since 2006, live-updated every 5 minutes
36
+
37
+ ## Table of Contents
38
+
39
+ - [What is it?](#what-is-it)
40
+ - [What is being released?](#what-is-being-released)
41
+ - [Breakdown by today](#breakdown-by-today)
42
+ - [Breakdown by year](#breakdown-by-year)
43
+ - [How to download and use this dataset](#how-to-download-and-use-this-dataset)
44
+ - [Dataset statistics](#dataset-statistics)
45
+ - [Content breakdown](#content-breakdown)
46
+ - [Community insights](#community-insights)
47
+ - [How it works](#how-it-works)
48
+ - [Dataset card](#dataset-card-for-hacker-news---complete-archive)
49
+ - [Dataset summary](#dataset-summary)
50
+ - [Dataset structure](#dataset-structure)
51
+ - [Dataset creation](#dataset-creation)
52
+ - [Considerations for using the data](#considerations-for-using-the-data)
53
+ - [Additional information](#additional-information)
54
+
55
+ ## What is it?
56
+
57
+ This dataset contains the complete [Hacker News](https://news.ycombinator.com) archive: every story, comment, Ask HN, Show HN, job posting, and poll ever submitted to the site. Hacker News is one of the longest-running and most influential technology communities on the internet, operated by [Y Combinator](https://www.ycombinator.com) since 2007. It has become the de facto gathering place for founders, engineers, researchers, and technologists to share and discuss what matters in technology.
58
+
59
+ The archive currently spans from **2006-10** to **2026-03-28 16:10 UTC**, with **47,484,304 items** committed. New items are fetched every 5 minutes and committed directly as individual Parquet files through an automated live pipeline, so the dataset stays current with the site itself.
60
+
61
+ We believe this is one of the most complete and regularly updated mirrors of Hacker News data available on Hugging Face. The data is stored as monthly Parquet files sorted by item ID, making it straightforward to query with DuckDB, load with the `datasets` library, or process with any tool that reads Parquet.
62
+
63
+ ## What is being released?
64
+
65
+ The dataset is organized as one Parquet file per calendar month, plus 5-minute live files for today's activity. Every 5 minutes, new items are fetched from the source and committed directly as a single Parquet block. At midnight UTC, the entire current month is refetched from the source as a single authoritative Parquet file, and today's individual 5-minute blocks are removed from the `today/` directory.
66
+
67
+ ```
68
+ data/
69
+ 2006/2006-10.parquet first month with HN data
70
+ 2006/2006-12.parquet
71
+ 2007/2007-01.parquet
72
+ ...
73
+ 2026/2026-02.parquet most recent complete month
74
+ 2026/2026-03.parquet current month, ongoing until 2026-03-27
75
+ today/
76
+ 2026/03/28/00/00.parquet 5-min live blocks (YYYY/MM/DD/HH/MM.parquet)
77
+ 2026/03/28/00/05.parquet
78
+ ...
79
+ 2026/03/28/16/10.parquet most recent committed block
80
+ stats.csv one row per committed month
81
+ stats_today.csv one row per committed 5-min block
82
+ ```
83
+
84
+ Along with the Parquet files, we include `stats.csv` which tracks every committed month with its item count, ID range, file size, fetch duration, and commit timestamp. This makes it easy to verify completeness and track the pipeline's progress.
85
+
86
+ ## Breakdown by today
87
+
88
+ The chart below shows items committed to this dataset by hour today (**2026-03-28**, **4,801 items** across **17 hours**, last updated **2026-03-28 16:15 UTC**).
89
+
90
+ ```
91
+ 00:00 ███████████████████░░░░░░░░░░░ 315
92
+ 01:00 ██████████████████░░░░░░░░░░░░ 309
93
+ 02:00 █████████████████░░░░░░░░░░░░░ 289
94
+ 03:00 ███████████████████░░░░░░░░░░░ 312
95
+ 04:00 █████████████░░░░░░░░░░░░░░░░░ 223
96
+ 05:00 ████████████░░░░░░░░░░░░░░░░░░ 202
97
+ 06:00 ████████████░░░░░░░░░░░░░░░░░░ 199
98
+ 07:00 ████████████░░░░░░░░░░░░░░░░░░ 202
99
+ 08:00 ███████████████░░░░░░░░░░░░░░░ 248
100
+ 09:00 ████████████████░░░░░░░░░░░░░░ 264
101
+ 10:00 ██████████████░░░░░░░░░░░░░░░░ 232
102
+ 11:00 █████████████████░░░░░░░░░░░░░ 282
103
+ 12:00 ████████████████████░░░░░░░░░░ 329
104
+ 13:00 █████████████████████████░░░░░ 409
105
+ 14:00 ███████████████████████░░░░░░░ 390
106
+ 15:00 ██████████████████████████████ 489
107
+ 16:00 ██████░░░░░░░░░░░░░░░░░░░░░░░░ 107
108
+ ```
109
+
110
+ ## Breakdown by year
111
+
112
+ The chart below shows items committed to this dataset by year.
113
+
114
+ ```
115
+ 2006 █░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 62
116
+ 2007 █░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 93.8K
117
+ 2008 ██░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 320.9K
118
+ 2009 ███░░░░░░░░░░░░░░░░░░░░░░░░░░░ 608.4K
119
+ 2010 ██████░░░░░░░░░░░░░░░░░░░░░░░░ 1.0M
120
+ 2011 ████████░░░░░░░░░░░░░░░░░░░░░░ 1.4M
121
+ 2012 ██████████░░░░░░░░░░░░░░░░░░░░ 1.6M
122
+ 2013 █████████████░░░░░░░░░░░░░░░░░ 2.0M
123
+ 2014 ███████████░░░░░░░░░░░░░░░░░░░ 1.8M
124
+ 2015 █████████████░░░░░░░░░░░░░░░░░ 2.0M
125
+ 2016 ████████████████░░░░░░░░░░░░░░ 2.5M
126
+ 2017 █████████████████░░░░░░░░░░░░░ 2.7M
127
+ 2018 ██████████████████░░░░░░░░░░░░ 2.8M
128
+ 2019 ████████████████████░░░░░░░░░░ 3.1M
129
+ 2020 ████████████████████████░░░░░░ 3.7M
130
+ 2021 ███████████████████████████░░░ 4.2M
131
+ 2022 █████████████████████████████░ 4.4M
132
+ 2023 ██████████████████████████████ 4.6M
133
+ 2024 ████████████████████████░░░░░░ 3.7M
134
+ 2025 █████████████████████████░░░░░ 3.9M
135
+ 2026 ███████░░░░░░░░░░░░░░░░░░░░░░░ 1.1M
136
+ ```
137
+
138
+ ## How to download and use this dataset
139
+
140
+ You can load the full dataset, a specific year, or even a single month. The dataset uses the standard Hugging Face Parquet layout, so it works out of the box with DuckDB, the `datasets` library, `pandas`, and `huggingface_hub`.
141
+
142
+ ### Using DuckDB
143
+
144
+ DuckDB can read Parquet files directly from Hugging Face without downloading anything first. This is the fastest way to explore the data:
145
+
146
+ The `type` column is stored as a small integer: `1` = story, `2` = comment, `3` = poll, `4` = pollopt, `5` = job. The `"by"` column (author username) must be quoted in DuckDB because `by` is a reserved keyword.
147
+
148
+ ```sql
149
+ -- Top 20 highest-scored stories of all time
150
+ SELECT id, title, "by", score, url, time
151
+ FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
152
+ WHERE type = 1 AND title != ''
153
+ ORDER BY score DESC
154
+ LIMIT 20;
155
+ ```
156
+
157
+ ```sql
158
+ -- Monthly submission volume for a specific year
159
+ SELECT
160
+ strftime(time, '%Y-%m') AS month,
161
+ count(*) AS items,
162
+ count(*) FILTER (WHERE type = 1) AS stories,
163
+ count(*) FILTER (WHERE type = 2) AS comments
164
+ FROM read_parquet('hf://datasets/open-index/hacker-news/data/2024/*.parquet')
165
+ GROUP BY month
166
+ ORDER BY month;
167
+ ```
168
+
169
+ ```sql
170
+ -- Most discussed stories by total comment count
171
+ SELECT id, title, "by", score, descendants AS comments, url
172
+ FROM read_parquet('hf://datasets/open-index/hacker-news/data/2025/*.parquet')
173
+ WHERE type = 1 AND descendants > 0
174
+ ORDER BY descendants DESC
175
+ LIMIT 20;
176
+ ```
177
+
178
+ ```sql
179
+ -- Who posts the most Ask HN questions?
180
+ SELECT "by", count(*) AS posts
181
+ FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
182
+ WHERE type = 1 AND title LIKE 'Ask HN:%'
183
+ GROUP BY "by"
184
+ ORDER BY posts DESC
185
+ LIMIT 20;
186
+ ```
187
+
188
+ ```sql
189
+ -- Track how often a topic appears on HN over time
190
+ SELECT
191
+ extract(year FROM time) AS year,
192
+ count(*) AS mentions
193
+ FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
194
+ WHERE type = 1 AND lower(title) LIKE '%rust%'
195
+ GROUP BY year
196
+ ORDER BY year;
197
+ ```
198
+
199
+ ```sql
200
+ -- Top linked domains, year over year
201
+ SELECT
202
+ extract(year FROM time) AS year,
203
+ regexp_extract(url, 'https?://([^/]+)', 1) AS domain,
204
+ count(*) AS stories
205
+ FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
206
+ WHERE type = 1 AND url != ''
207
+ GROUP BY year, domain
208
+ QUALIFY row_number() OVER (PARTITION BY year ORDER BY stories DESC) <= 5
209
+ ORDER BY year, stories DESC;
210
+ ```
211
+
212
+ ### Using `datasets`
213
+
214
+ ```python
215
+ from datasets import load_dataset
216
+
217
+ # Stream the full history without downloading everything first
218
+ ds = load_dataset("open-index/hacker-news", split="train", streaming=True)
219
+ for item in ds:
220
+ print(item["id"], item["type"], item["title"])
221
+
222
+ # Load a specific year into memory
223
+ ds = load_dataset(
224
+ "open-index/hacker-news",
225
+ data_files="data/2024/*.parquet",
226
+ split="train",
227
+ )
228
+ print(f"{len(ds):,} items in 2024")
229
+
230
+ # Load today's live blocks (updated every 5 minutes)
231
+ ds = load_dataset(
232
+ "open-index/hacker-news",
233
+ name="today",
234
+ split="train",
235
+ streaming=True,
236
+ )
237
+ ```
238
+
239
+ ### Using `huggingface_hub`
240
+
241
+ ```python
242
+ from huggingface_hub import snapshot_download
243
+
244
+ # Download only 2024 data (about 1.5 GB)
245
+ snapshot_download(
246
+ "open-index/hacker-news",
247
+ repo_type="dataset",
248
+ local_dir="./hn/",
249
+ allow_patterns="data/2024/*",
250
+ )
251
+ ```
252
+
253
+ For faster downloads, run `pip install huggingface_hub[hf_transfer]` and set `HF_HUB_ENABLE_HF_TRANSFER=1`.
254
+
255
+ ### Using the CLI
256
+
257
+ ```bash
258
+ # Download a single month
259
+ huggingface-cli download open-index/hacker-news \
260
+ data/2024/2024-01.parquet \
261
+ --repo-type dataset --local-dir ./hn/
262
+ ```
263
+
264
+ ### Using pandas + DuckDB
265
+
266
+ ```python
267
+ import duckdb
268
+
269
+ conn = duckdb.connect()
270
+
271
+ # Score distribution: what does a "typical" HN story look like?
272
+ # type=1 is story (stored as integer: 1=story, 2=comment, 3=poll, 4=pollopt, 5=job)
273
+ df = conn.sql("""
274
+ SELECT
275
+ percentile_disc(0.50) WITHIN GROUP (ORDER BY score) AS p50,
276
+ percentile_disc(0.90) WITHIN GROUP (ORDER BY score) AS p90,
277
+ percentile_disc(0.99) WITHIN GROUP (ORDER BY score) AS p99,
278
+ percentile_disc(0.999) WITHIN GROUP (ORDER BY score) AS p999
279
+ FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
280
+ WHERE type = 1
281
+ """).df()
282
+ print(df)
283
+ ```
284
+
285
+ ## Dataset statistics
286
+
287
+ You can query the per-month statistics directly from the `stats.csv` file included in the dataset:
288
+
289
+ ```sql
290
+ SELECT * FROM read_csv_auto('hf://datasets/open-index/hacker-news/stats.csv')
291
+ ORDER BY year, month;
292
+ ```
293
+
294
+ The `stats.csv` file tracks each committed month with the following columns:
295
+
296
+ | Column | Description |
297
+ |--------|-------------|
298
+ | `year`, `month` | Calendar month |
299
+ | `lowest_id`, `highest_id` | Item ID range covered by this file |
300
+ | `count` | Number of items in the file |
301
+ | `dur_fetch_s` | Seconds to fetch from the data source |
302
+ | `dur_commit_s` | Seconds to commit to Hugging Face |
303
+ | `size_bytes` | Parquet file size on disk |
304
+ | `committed_at` | ISO 8601 timestamp of when this month was committed |
305
+
306
+ ## Content breakdown
307
+
308
+ Hacker News has five item types. The vast majority of content is comments, followed by stories (which include Ask HN, Show HN, and regular link submissions). Jobs, polls, and poll options make up a small fraction.
309
+
310
+ | Type | Count | Share |
311
+ |------|------:|------:|
312
+ | comment | 41,346,149 | 87.2% |
313
+ | story | 6,044,061 | 12.7% |
314
+ | job | 18,072 | 0.0% |
315
+ | poll | 2,240 | 0.0% |
316
+ | pollopt | 15,449 | 0.0% |
317
+
318
+ Of all stories submitted to Hacker News, **84.8%** link to an external URL. The rest are text-only posts: Ask HN questions, Show HN launches, and other self-posts where the discussion itself is the content.
319
+
320
+ The average story generates **23.9 comments** in its discussion thread. The most-discussed story of all time received 9,275 comments, which gives a sense of how deep conversations can go on particularly controversial or interesting topics.
321
+
322
+ ### Story scores
323
+
324
+ Scores on Hacker News follow a steep power law. Most stories receive only a few points, but a small number break out and reach the front page with hundreds or thousands of upvotes.
325
+
326
+ | Metric | Value |
327
+ |--------|------:|
328
+ | Average score | 1.5 |
329
+ | Median score | 0 |
330
+ | Highest score ever | 6,015 |
331
+ | Stories with 100+ points | 175,906 |
332
+ | Stories with 1,000+ points | 2,169 |
333
+
334
+ The median score of 0 reflects the fact that many stories are submitted but never gain traction. However, the long tail is where things get interesting: over 6,044,061 stories have been submitted, and the top 0.03% (those with 1,000+ points) represent the content that defined conversations across the technology industry.
335
+
336
+ ### Most-shared domains
337
+
338
+ The domains most frequently linked from Hacker News stories tell a clear story about what the community values. GitHub dominates, reflecting HN's deep roots in open source and software development. Major publications like the New York Times and Ars Technica show the community's interest in journalism and long-form analysis.
339
+
340
+ | # | Domain | Stories |
341
+ |--:|--------|--------:|
342
+ | 1 | github.com | 197,959 |
343
+ | 2 | www.youtube.com | 134,903 |
344
+ | 3 | medium.com | 124,561 |
345
+ | 4 | www.nytimes.com | 77,719 |
346
+ | 5 | en.wikipedia.org | 54,439 |
347
+ | 6 | techcrunch.com | 54,190 |
348
+ | 7 | twitter.com | 50,590 |
349
+ | 8 | arstechnica.com | 47,090 |
350
+ | 9 | www.theguardian.com | 44,337 |
351
+ | 10 | www.bloomberg.com | 37,826 |
352
+
353
+ ### Most active story submitters
354
+
355
+ These are the users who have submitted the most stories over the lifetime of Hacker News. Many of them have been active for over a decade, consistently curating and sharing content with the community.
356
+
357
+ | # | User | Stories |
358
+ |--:|------|--------:|
359
+ | 1 | rbanffy | 36,795 |
360
+ | 2 | Tomte | 26,192 |
361
+ | 3 | tosh | 24,082 |
362
+ | 4 | bookofjoe | 20,608 |
363
+ | 5 | mooreds | 20,381 |
364
+ | 6 | pseudolus | 19,917 |
365
+ | 7 | PaulHoule | 19,042 |
366
+ | 8 | todsacerdoti | 18,880 |
367
+ | 9 | ingve | 17,059 |
368
+ | 10 | thunderbong | 15,989 |
369
+ | 11 | jonbaer | 14,169 |
370
+ | 12 | rntn | 13,410 |
371
+ | 13 | doener | 12,827 |
372
+ | 14 | Brajeshwar | 12,411 |
373
+ | 15 | LinuxBender | 11,058 |
374
+
375
+ ## How it works
376
+
377
+ The pipeline is built in Go and uses [DuckDB](https://duckdb.org) for Parquet conversion. Historical data is sourced from [ClickHouse](https://clickhouse.com); live data is fetched directly from the [HN Firebase API](https://hacker-news.firebaseio.com/v0).
378
+
379
+ **Historical backfill.** The pipeline iterates through every month from October 2006 to the most recent complete month. For each month, it queries the ClickHouse source with a time-bounded SQL query, exports the result as a Parquet file sorted by `id` using DuckDB with Zstandard compression at level 22, and commits it to this repository along with an updated `stats.csv` and `README.md`. Months already tracked in `stats.csv` are skipped, making the process fully resumable.
380
+
381
+ **Live polling.** Every 5 minutes, the pipeline calls the HN Firebase API to fetch new items by ID range. Items are grouped into their 5-minute time windows, written as individual Parquet files at `today/YYYY/MM/DD/HH/MM.parquet` using DuckDB, and committed to Hugging Face immediately. Using the HN API directly means live blocks reflect real-time data with no indexing lag.
382
+
383
+ **Day rollover.** At midnight UTC, the entire current month is refetched from the ClickHouse source in a single query and written as an authoritative Parquet file. Today's individual 5-minute blocks are deleted from the repository in the same atomic commit. Refetching instead of merging ensures the monthly file is always complete and deduplicated, regardless of any local state.
384
+
385
+ ## Thanks
386
+
387
+ The data in this dataset comes from the [ClickHouse Playground](https://sql.clickhouse.com), a free public SQL endpoint maintained by [ClickHouse, Inc.](https://clickhouse.com) that mirrors the official [Hacker News Firebase API](https://github.com/HackerNews/API). ClickHouse uses Hacker News as one of their canonical demo datasets. Without their public endpoint, building and maintaining a complete, regularly updated archive like this would not be practical.
388
+
389
+ The original content is created by the Hacker News community and is operated by [Y Combinator](https://www.ycombinator.com). This is an independent mirror and is not affiliated with or endorsed by Y Combinator or ClickHouse, Inc.
390
+
391
+ # Dataset card for Hacker News - Complete Archive
392
+
393
+ ## Dataset summary
394
+
395
+ This dataset is a complete mirror of the [Hacker News](https://news.ycombinator.com) archive, sourced from the [ClickHouse Playground](https://sql.clickhouse.com) which itself mirrors the official [HN Firebase API](https://github.com/HackerNews/API). The data covers every item ever posted to the site, from the earliest submissions in October 2006 through today.
396
+
397
+ The dataset is intended for research, analysis, and training. Common use cases include:
398
+
399
+ - **Language model pretraining and fine-tuning** on high-quality technical discussions
400
+ - **Sentiment and trend analysis** across two decades of technology discourse
401
+ - **Community dynamics research** on one of the internet's most influential forums
402
+ - **Information retrieval** benchmarks using real-world questions and answers
403
+ - **Content recommendation** and ranking model development
404
+
405
+ ## Dataset structure
406
+
407
+ ### Data instances
408
+
409
+ Here is an example item from the dataset. This is a story submission with a link to an external URL:
410
+
411
+ ```json
412
+ {
413
+ "id": 1,
414
+ "deleted": 0,
415
+ "type": 1,
416
+ "by": "pg",
417
+ "time": "2006-10-09T18:21:51+00:00",
418
+ "text": "",
419
+ "dead": 0,
420
+ "parent": 0,
421
+ "poll": 0,
422
+ "kids": [15, 234509, 487171],
423
+ "url": "http://ycombinator.com",
424
+ "score": 57,
425
+ "title": "Y Combinator",
426
+ "parts": [],
427
+ "descendants": 0,
428
+ "words": ["y", "combinator"]
429
+ }
430
+ ```
431
+
432
+ And here is a comment, showing how discussion threads are connected via the `parent` field:
433
+
434
+ ```json
435
+ {
436
+ "id": 15,
437
+ "deleted": 0,
438
+ "type": 2,
439
+ "by": "sama",
440
+ "time": "2006-10-09T19:51:01+00:00",
441
+ "text": "\"the way to get good software is to find ...",
442
+ "dead": 0,
443
+ "parent": 1,
444
+ "poll": 0,
445
+ "kids": [17],
446
+ "url": "",
447
+ "score": 0,
448
+ "title": "",
449
+ "parts": [],
450
+ "descendants": 0,
451
+ "words": []
452
+ }
453
+ ```
454
+
455
+ ### Data fields
456
+
457
+ Every Parquet file shares the same schema, matching the [HN API](https://github.com/HackerNews/API) item format:
458
+
459
+ | Column | Type | Description |
460
+ |--------|------|-------------|
461
+ | `id` | uint32 | Unique item ID, monotonically increasing across the entire site |
462
+ | `deleted` | uint8 | 1 if the item was soft-deleted by its author or by moderators, 0 otherwise |
463
+ | `type` | int8 | Item type as an integer: `1`=story, `2`=comment, `3`=poll, `4`=pollopt, `5`=job |
464
+ | `by` | string | Username of the author who created this item. Note: `by` is a reserved word in DuckDB and must be quoted as `"by"` |
465
+ | `time` | timestamp | When the item was created, in UTC |
466
+ | `text` | string | HTML body text. Used for comments, Ask HN posts, job listings, and polls |
467
+ | `dead` | uint8 | 1 if the item was flagged or killed by moderators, 0 otherwise |
468
+ | `parent` | uint32 | The ID of the parent item. For comments, this points to either a story or another comment |
469
+ | `poll` | uint32 | For poll options (`pollopt`), the ID of the associated poll |
470
+ | `kids` | list\<uint32\> | Ordered list of direct child item IDs (typically comments) |
471
+ | `url` | string | The external URL for link stories. Empty for text posts and comments |
472
+ | `score` | int32 | The item's score (upvotes minus downvotes) |
473
+ | `title` | string | Title text for stories, jobs, and polls. Empty for comments |
474
+ | `parts` | list\<uint32\> | For polls, the list of associated poll option item IDs |
475
+ | `descendants` | int32 | Total number of comments in the entire discussion tree below this item |
476
+ | `words` | list\<string\> | Tokenized words extracted from the title and text fields |
477
+
478
+ ### Data splits
479
+
480
+ The `default` configuration includes all historical monthly Parquet files. If you only need today's latest items, use the `today` configuration which includes only the 5-minute live blocks for the current day.
481
+
482
+ You can also load individual years or months by specifying `data_files`:
483
+
484
+ ```python
485
+ # Load just January 2024
486
+ ds = load_dataset("open-index/hacker-news", data_files="data/2024/2024-01.parquet", split="train")
487
+
488
+ # Load all of 2024
489
+ ds = load_dataset("open-index/hacker-news", data_files="data/2024/*.parquet", split="train")
490
+ ```
491
+
492
+ ## Dataset creation
493
+
494
+ ### Curation rationale
495
+
496
+ Hacker News is one of the richest sources of technical discussion on the internet, but accessing the full archive programmatically has historically required either scraping the Firebase API item-by-item or working with incomplete third-party dumps. This dataset provides the complete archive in a standard, efficient format that anyone can query without setting up infrastructure.
497
+
498
+ By publishing on Hugging Face with Parquet files, the data becomes immediately queryable with DuckDB (via `hf://` paths), streamable with the `datasets` library, and downloadable in bulk. The 5-minute live update pipeline means researchers always have access to near-real-time data.
499
+
500
+ ### Source data
501
+
502
+ All data is sourced from the [ClickHouse Playground](https://sql.clickhouse.com), a public SQL endpoint maintained by ClickHouse that mirrors the official Hacker News Firebase API. The ClickHouse mirror is widely used for analytics demonstrations and contains the complete dataset.
503
+
504
+ The pipeline queries the ClickHouse endpoint month-by-month, exports each month as a Parquet file using DuckDB with Zstandard compression at level 22, and commits it to this Hugging Face repository. Already-committed months are tracked in `stats.csv` and skipped on subsequent runs, making the process fully resumable.
505
+
506
+ ### Data processing steps
507
+
508
+ The pipeline runs in three modes:
509
+
510
+ 1. **Historical backfill.** Iterates through every month from October 2006 to the most recent complete month. For each month, it runs a SQL query against the ClickHouse source, writes the result as a Parquet file sorted by `id`, and commits it to Hugging Face along with an updated `stats.csv` and `README.md`.
511
+
512
+ 2. **Live polling.** After the historical backfill completes, the pipeline polls the [HN Firebase API](https://hacker-news.firebaseio.com/v0) every 5 minutes for new items. It fetches all items with IDs greater than the last committed watermark, groups them into 5-minute time windows by item timestamp, and writes each window as a `today/YYYY/MM/DD/HH/MM.parquet` file committed to Hugging Face immediately. The HN API provides real-time data with no indexing lag.
513
+
514
+ 3. **Day rollover.** At midnight UTC, the entire current month is refetched from the ClickHouse source in a single query and written as a fresh, authoritative Parquet file. Today's individual 5-minute blocks are deleted from the repository in the same atomic commit. This approach is more reliable than merging local blocks — the result is always complete and deduplicated, sourced directly from the origin.
515
+
516
+ All Parquet files use **Zstandard compression at level 22** and are sorted by `id` for efficient range scans. No filtering, deduplication, or transformation is applied to the data beyond what the source provides.
517
+
518
+ ### Personal and sensitive information
519
+
520
+ This dataset contains usernames (`by` field) and user-generated text content (`text`, `title` fields) as they appear on the public Hacker News website. No additional PII processing has been applied. The data reflects what is publicly visible on [news.ycombinator.com](https://news.ycombinator.com).
521
+
522
+ If you find content in this dataset that you believe should be removed, please open a discussion on the Community tab.
523
+
524
+ ## Considerations for using the data
525
+
526
+ ### Social impact
527
+
528
+ By providing the complete Hacker News archive in an accessible format, we hope to enable research into online community dynamics, technology trends, and the evolution of technical discourse. The dataset can serve as training data for language models that need to understand technical discussions, or as a benchmark for information retrieval and recommendation systems.
529
+
530
+ ### Discussion of biases
531
+
532
+ Hacker News has a well-documented set of community biases. The user base skews heavily toward software engineers, startup founders, and technology enthusiasts based in the United States. Topics related to Silicon Valley, programming languages, startups, and certain political viewpoints tend to receive disproportionate attention and engagement.
533
+
534
+ The moderation system (flagging, vouching, and moderator intervention) shapes what content survives and what gets killed. Stories and comments that violate community norms are flagged as `dead`, but this moderation reflects the values of the existing community rather than any objective standard.
535
+
536
+ We have not applied any additional filtering or quality scoring to the data. All items, including deleted and dead items, are preserved exactly as they appear in the source.
537
+
538
+ ### Known limitations
539
+
540
+ - **`type` is an integer.** The item type is stored as a TINYINT enum: `1`=story, `2`=comment, `3`=poll, `4`=pollopt, `5`=job. When writing DuckDB queries, use `WHERE type = 1` for stories rather than `WHERE type = 'story'`.
541
+ - **`by` is a reserved keyword in DuckDB.** Always quote it with double quotes: `"by"`.
542
+ - **`deleted` and `dead` are integers.** They are stored as 0/1 rather than booleans.
543
+ - **Comment text is HTML.** The `text` field contains raw HTML as stored by HN, not plain text. You may need to strip tags depending on your use case.
544
+ - **Deleted items have sparse fields.** When an item is deleted, most fields become empty, but the `id` and `deleted` flag are preserved.
545
+ - **Scores are point-in-time snapshots.** The score reflects the value at the time the ClickHouse mirror last synced, not necessarily the final score.
546
+ - **No user profiles.** This dataset contains items only, not user profiles (karma, bio, etc.).
547
+ - **Code content is HTML-escaped.** Code snippets in comments use HTML entities and `<code>` tags rather than Markdown formatting.
548
+
549
+ ## Additional information
550
+
551
+ ### Licensing
552
+
553
+ The dataset is released under the **Open Data Commons Attribution License (ODC-By) v1.0**. The original content is subject to the rights of its respective authors. Hacker News data is provided by [Y Combinator](https://www.ycombinator.com).
554
+
555
+ This is an independent community mirror. It is not affiliated with or endorsed by Y Combinator.
556
+
557
+ ### Contact
558
+
559
+ For questions, feedback, or issues, please open a discussion on the [Community tab](https://huggingface.co/datasets/open-index/hacker-news/discussions).
560
+
561
+ *Last updated: 2026-03-28 16:15 UTC*
data/2006/2006-10.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f075a057c886f70d1521ad9b4a57623155d019f29fe2ed779cc19086acca5aac
3
+ size 11994
data/2006/2006-12.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:febeea30ffd24936e1c46b1ece3dbf13ae78c5a110e2d39d18e66ac13005f342
3
+ size 6219
data/2007/2007-02.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6ae533795ea07490873881c25c37df4229f7f03470d1d473a88757252e718c63
3
+ size 308963
data/2007/2007-03.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d40cad7c65dc60f088b00cb1e102b2ee553616cb1f1b09cf0b21256f981be764
3
+ size 1253007
data/2007/2007-04.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:07009b16156e24d1975630e8adeb011f853a8673fa83004dea13780dd26fa41b
3
+ size 2137722
data/2007/2007-05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d45ee432f088d538a09f17e2b3839d0ee7530b5e29cf0b772955483d66ae399
3
+ size 1443362
data/2007/2007-06.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4508f484364534d7460b7cfd39a850c9921f5912adb0ea7dfaf77a744977dfd2
3
+ size 1229805
data/2007/2007-07.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b9f231b64dd84b06cf1538a530255ba9dfeddaf609d626fbc2039b976566c81e
3
+ size 1295922
data/2007/2007-08.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:26b64308ecfb825b612160a943443a986b4f34da33e32ceff5b16d82ee7fdb9b
3
+ size 2184462
data/2007/2007-09.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:098d4de6a97fdb9abdb462aee3502805cb7da8b3f1fe19a1862601367c6a191f
3
+ size 2587024
data/2007/2007-10.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:609c375301952fa2fc0615dfb83e872cbd510e764208869b928a559f5acf7f5f
3
+ size 2778304
data/2007/2007-11.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e7f046cbd14364bf73324fd3afe624da3c3bfbf0db674bd933c6894b459d506
3
+ size 2066776
data/2007/2007-12.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8cf9f0c82933efa2b86c9e5db01fac74065ac6418b460c41dd00fb2f74a9df8d
3
+ size 1837252
data/2008/2008-01.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e3d2ccca2df761ef1efd605f3ed9cb64fd9bab1d271878cada0de6bf44cbb1cd
3
+ size 2571038
data/2008/2008-02.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:96e5c6bf0ec13d349d273ce54e731aed56c353ab50198038d407f273eb8314b8
3
+ size 3860126
data/2008/2008-03.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:856a4e0ff92d53f54fc46f903cc71cedf943a125774be6da4d7e9e478ba51514
3
+ size 5142506
data/2008/2008-04.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bc9263b6983712f37ae64a84cd00eaa1f50fa019edb6c6f0a4ff99255e33e25
3
+ size 5381500
data/2008/2008-05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4bcc8d4aec473a1e48fdd50356f71cbaf2187a390262b5a53388849298b5adc5
3
+ size 5812797
data/2008/2008-06.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1da0f2ae765ecc749aaa505628f69cc75e7814cfb25c3ddfef829bcb50833c29
3
+ size 5616033
data/2008/2008-07.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0f094450e01185099b0d30643a60818539eb3c71df9b1e5d4042fd8f62942b74
3
+ size 6459558
data/2008/2008-08.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0d7d3739c388d1c8d9e844d080b9e7e7eb4a34cc26660b93a4386252d2ad6fd
3
+ size 5673721
data/2008/2008-09.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c104da44b06d3c8c143c1b852bb34b81a1bccee5cfc6428c9a00768c8fb0471
3
+ size 5854028
data/2008/2008-10.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38b39b63d2f003f992c8440492e1b43906ab59417efa51fc42c4427a7386c8c1
3
+ size 6066795
data/2008/2008-11.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b5fda23eb272a6db86d63c17d4e65ad7561301501534655e0dbfc48fd7cfe60d
3
+ size 6438154
data/2008/2008-12.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f6985d14288a638e558d0f24db195a30b91475ccb0d9b1bf097e136033d9992
3
+ size 7601080
data/2009/2009-01.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a369b7d937aa7d3398c53d258922386e49897991ee1263b99fa6fd7821740da6
3
+ size 9457556
data/2009/2009-02.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:df809084dd1f34899cddd8466686eeaff4188fc26b36cf357170efaff6ed38e6
3
+ size 8695512
data/2009/2009-03.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5b44d147b770a2952f5968f232600bd7942490d66f80e99b2327f4bb1bda36b9
3
+ size 9583360
data/2009/2009-04.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa3c552bf400a4b638eff6ff8c7244e1a93c3a1bde52d72cde3cecd3928d101f
3
+ size 10503066
data/2009/2009-05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5e05e02cda9515224b449c9bde66008836665018c58cc25f460a627db76cdf7d
3
+ size 10414819
data/2009/2009-06.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:be818f75a149bfbf2abb9d224cb76c6c95c40e174f70ce2f40c959ded0959ac3
3
+ size 10195957
data/2009/2009-07.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0cd5360b35c477b116b9fbdea7e14c838057bf9dc4538f5a5cac2d40bd1452df
3
+ size 11906472
data/2009/2009-08.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a29993bb5e34c9fcbc664ee5510502a3c920110c1c71072cf2d5d4ec6922867f
3
+ size 13507872
data/2009/2009-09.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7dd3b9bb41287536249bc465fa1267f12f16e01261e0de0ebdd8a780f1f60319
3
+ size 12550137
data/2009/2009-10.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:025c5cb5f516a7fdb417476eaf446b93617cc661f1c781f566c35522bb32b771
3
+ size 13199103
data/2009/2009-11.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7f5115183e26353afe2fb2210e1199c95cbdc2efb891f6bd9e74efa79bbae192
3
+ size 12147917
data/2009/2009-12.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:67417db8eef9ca2b75a09743a327ee331133d761b1edf4d8aa90d30e5df0449f
3
+ size 12271162
data/2010/2010-01.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:842a7c526feafdda5d864ba8c4e9acfecc2887e3bc1c16fd21b1dc7111a9b4f8
3
+ size 14626994
data/2010/2010-02.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac93f76c1c75eeb0b9c737f7a356350e63e60d0728a45dd738dd625ffe6d4870
3
+ size 14651835
data/2010/2010-03.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a95ef02c0408cfffd0f163662769d41c92a4f8aaacf50147c270e66eccab7fb
3
+ size 16555458
data/2010/2010-04.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:de3afc1fa86a28dd53476bc158d15cedcf2b80c9c389cc9b04e4ed7ae7f98d44
3
+ size 15974936
data/2010/2010-05.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ec5ab1ac68ba802d693d01fa1fa89a79d64b1735003341e98ac03936e9248b7a
3
+ size 17897021
data/2010/2010-06.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e87eee2a37dcf86f58dc4e9cb6e35fcbbb039753d2208507279e525818e8736a
3
+ size 18345933
data/2010/2010-07.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:542528f8a4345314e9ff96f002359305ae2093c623357d676209550a73d231c8
3
+ size 20068819
data/2010/2010-08.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93a6a9d28f508d6819a449d9241e9164eabf0976aec01a2827dbc94f89094e97
3
+ size 19938675
data/2010/2010-09.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af21f860848f159c5e7a8ce42b37c167ff088ea749c9e81eaee74760b41e00e8
3
+ size 21231521
data/2010/2010-10.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:220e34cde516fd7019f4522e04db6cd6bfa55963595215eff1ba81a158bd1a16
3
+ size 24842814
data/2010/2010-11.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c78cf4e8b3f91b15978fe72eca63cbfe87b2cce5980480de55026854ea0a01e0
3
+ size 22197036