AdversaLLC tamnd committed on
Commit bd82bff · 0 Parent(s)

Duplicate from open-index/hacker-news


Co-authored-by: Duc-Tam Nguyen <tamnd@users.noreply.huggingface.co>

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.

Files changed (50)
  1. .gitattributes +60 -0
  2. README.md +553 -0
  3. data/2006/2006-10.parquet +3 -0
  4. data/2006/2006-11.parquet +3 -0
  5. data/2006/2006-12.parquet +3 -0
  6. data/2007/2007-01.parquet +3 -0
  7. data/2007/2007-02.parquet +3 -0
  8. data/2007/2007-03.parquet +3 -0
  9. data/2007/2007-04.parquet +3 -0
  10. data/2007/2007-05.parquet +3 -0
  11. data/2007/2007-06.parquet +3 -0
  12. data/2007/2007-07.parquet +3 -0
  13. data/2007/2007-08.parquet +3 -0
  14. data/2007/2007-09.parquet +3 -0
  15. data/2007/2007-10.parquet +3 -0
  16. data/2007/2007-11.parquet +3 -0
  17. data/2007/2007-12.parquet +3 -0
  18. data/2008/2008-01.parquet +3 -0
  19. data/2008/2008-02.parquet +3 -0
  20. data/2008/2008-03.parquet +3 -0
  21. data/2008/2008-04.parquet +3 -0
  22. data/2008/2008-05.parquet +3 -0
  23. data/2008/2008-06.parquet +3 -0
  24. data/2008/2008-07.parquet +3 -0
  25. data/2008/2008-08.parquet +3 -0
  26. data/2008/2008-09.parquet +3 -0
  27. data/2008/2008-10.parquet +3 -0
  28. data/2008/2008-11.parquet +3 -0
  29. data/2008/2008-12.parquet +3 -0
  30. data/2009/2009-01.parquet +3 -0
  31. data/2009/2009-02.parquet +3 -0
  32. data/2009/2009-03.parquet +3 -0
  33. data/2009/2009-04.parquet +3 -0
  34. data/2009/2009-05.parquet +3 -0
  35. data/2009/2009-06.parquet +3 -0
  36. data/2009/2009-07.parquet +3 -0
  37. data/2009/2009-08.parquet +3 -0
  38. data/2009/2009-09.parquet +3 -0
  39. data/2009/2009-10.parquet +3 -0
  40. data/2009/2009-11.parquet +3 -0
  41. data/2009/2009-12.parquet +3 -0
  42. data/2010/2010-01.parquet +3 -0
  43. data/2010/2010-02.parquet +3 -0
  44. data/2010/2010-03.parquet +3 -0
  45. data/2010/2010-04.parquet +3 -0
  46. data/2010/2010-05.parquet +3 -0
  47. data/2010/2010-06.parquet +3 -0
  48. data/2010/2010-07.parquet +3 -0
  49. data/2010/2010-08.parquet +3 -0
  50. data/2010/2010-09.parquet +3 -0
.gitattributes ADDED
@@ -0,0 +1,60 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.avro filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.lz4 filter=lfs diff=lfs merge=lfs -text
*.mds filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tar filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
# Audio files - uncompressed
*.pcm filter=lfs diff=lfs merge=lfs -text
*.sam filter=lfs diff=lfs merge=lfs -text
*.raw filter=lfs diff=lfs merge=lfs -text
# Audio files - compressed
*.aac filter=lfs diff=lfs merge=lfs -text
*.flac filter=lfs diff=lfs merge=lfs -text
*.mp3 filter=lfs diff=lfs merge=lfs -text
*.ogg filter=lfs diff=lfs merge=lfs -text
*.wav filter=lfs diff=lfs merge=lfs -text
# Image files - uncompressed
*.bmp filter=lfs diff=lfs merge=lfs -text
*.gif filter=lfs diff=lfs merge=lfs -text
*.png filter=lfs diff=lfs merge=lfs -text
*.tiff filter=lfs diff=lfs merge=lfs -text
# Image files - compressed
*.jpg filter=lfs diff=lfs merge=lfs -text
*.jpeg filter=lfs diff=lfs merge=lfs -text
*.webp filter=lfs diff=lfs merge=lfs -text
# Video files - compressed
*.mp4 filter=lfs diff=lfs merge=lfs -text
*.webm filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,553 @@
---
license: odc-by
task_categories:
- text-generation
- feature-extraction
- text-classification
- question-answering
language:
- en
pretty_name: Hacker News - Complete Archive
size_categories:
- 10M<n<100M
tags:
- hacker-news
- forum
- text
- parquet
- community
- tech
- comments
- live-updated
configs:
- config_name: default
  data_files:
  - split: train
    path: data/*/*.parquet
- config_name: today
  data_files:
  - split: train
    path: today/**/*.parquet
---

# Hacker News - Complete Archive

> Every Hacker News item since 2006, live-updated every 5 minutes

## Table of Contents

- [What is it?](#what-is-it)
- [What is being released?](#what-is-being-released)
- [Breakdown by today](#breakdown-by-today)
- [Breakdown by year](#breakdown-by-year)
- [How to download and use this dataset](#how-to-download-and-use-this-dataset)
- [Dataset statistics](#dataset-statistics)
- [Content breakdown](#content-breakdown)
- [How it works](#how-it-works)
- [Thanks](#thanks)
- [Dataset card](#dataset-card-for-hacker-news---complete-archive)
  - [Dataset summary](#dataset-summary)
  - [Dataset structure](#dataset-structure)
  - [Dataset creation](#dataset-creation)
  - [Considerations for using the data](#considerations-for-using-the-data)
  - [Additional information](#additional-information)

## What is it?

This dataset contains the complete [Hacker News](https://news.ycombinator.com) archive: every story, comment, Ask HN, Show HN, job posting, and poll ever submitted to the site. Hacker News is one of the longest-running and most influential technology communities on the internet, operated by [Y Combinator](https://www.ycombinator.com) since 2007. It has become the de facto gathering place for founders, engineers, researchers, and technologists to share and discuss what matters in technology.

The archive currently spans from **2006-10** to **2026-04-14 08:50 UTC**, with **47,692,166 items** committed. New items are fetched every 5 minutes and committed directly as individual Parquet files through an automated live pipeline, so the dataset stays current with the site itself.

We believe this is one of the most complete and regularly updated mirrors of Hacker News data available on Hugging Face. The data is stored as monthly Parquet files sorted by item ID, making it straightforward to query with DuckDB, load with the `datasets` library, or process with any tool that reads Parquet.

## What is being released?

The dataset is organized as one Parquet file per calendar month, plus 5-minute live files for today's activity. Every 5 minutes, new items are fetched from the source and committed directly as a single Parquet block. At midnight UTC, the entire current month is refetched from the source as a single authoritative Parquet file, and today's individual 5-minute blocks are removed from the `today/` directory.

```
data/
  2006/2006-10.parquet        first month with HN data
  2006/2006-11.parquet
  2006/2006-12.parquet
  2007/2007-01.parquet
  ...
  2026/2026-03.parquet        most recent complete month
  2026/2026-04.parquet        current month, covering through 2026-04-13
today/
  2026/04/14/00/00.parquet    5-min live blocks (YYYY/MM/DD/HH/MM.parquet)
  2026/04/14/00/05.parquet
  ...
  2026/04/14/08/50.parquet    most recent committed block
stats.csv                     one row per committed month
stats_today.csv               one row per committed 5-min block
```

Along with the Parquet files, we include `stats.csv`, which tracks every committed month with its item count, ID range, file size, fetch duration, and commit timestamp. This makes it easy to verify completeness and track the pipeline's progress.

## Breakdown by today

The chart below shows items committed to this dataset by hour today (**2026-04-14**, **3,203 items** across **9 hours**, last updated **2026-04-14 08:55 UTC**).

```
00:00 ██████████████████████████████ 419
01:00 █████████████████████████████░ 417
02:00 ██████████████████████████░░░░ 365
03:00 ████████████████████████░░░░░░ 343
04:00 ████████████████████████░░░░░░ 349
05:00 ███████████████████████░░░░░░░ 329
06:00 █████████████████████░░░░░░░░░ 300
07:00 ███████████████████████████░░░ 391
08:00 ████████████████████░░░░░░░░░░ 290
```

## Breakdown by year

The chart below shows items committed to this dataset by year.

```
2006 █░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 62
2007 █░░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 93.8K
2008 ██░░░░░░░░░░░░░░░░░░░░░░░░░░░░ 320.9K
2009 ███░░░░░░░░░░░░░░░░░░░░░░░░░░░ 608.4K
2010 ██████░░░░░░░░░░░░░░░░░░░░░░░░ 1.0M
2011 ████████░░░░░░░░░░░░░░░░░░░░░░ 1.4M
2012 ██████████░░░░░░░░░░░░░░░░░░░░ 1.6M
2013 █████████████░░░░░░░░░░░░░░░░░ 2.0M
2014 ███████████░░░░░░░░░░░░░░░░░░░ 1.8M
2015 █████████████░░░░░░░░░░░░░░░░░ 2.0M
2016 ████████████████░░░░░░░░░░░░░░ 2.5M
2017 █████████████████░░░░░░░░░░░░░ 2.7M
2018 ██████████████████░░░░░░░░░░░░ 2.8M
2019 ████████████████████░░░░░░░░░░ 3.1M
2020 ████████████████████████░░░░░░ 3.7M
2021 ███████████████████████████░░░ 4.2M
2022 █████████████████████████████░ 4.4M
2023 ██████████████████████████████ 4.6M
2024 ████████████████████████░░░░░░ 3.7M
2025 █████████████████████████░░░░░ 3.9M
2026 ████████░░░░░░░░░░░░░░░░░░░░░░ 1.3M
```

## How to download and use this dataset

You can load the full dataset, a specific year, or even a single month. The dataset uses the standard Hugging Face Parquet layout, so it works out of the box with DuckDB, the `datasets` library, `pandas`, and `huggingface_hub`.

### Using DuckDB

DuckDB can read Parquet files directly from Hugging Face without downloading anything first, which makes it the fastest way to explore the data.

The `type` column is stored as a small integer: `1` = story, `2` = comment, `3` = poll, `4` = pollopt, `5` = job. The `"by"` column (author username) must be quoted in DuckDB because `by` is a reserved keyword.

```sql
-- Top 20 highest-scored stories of all time
SELECT id, title, "by", score, url, time
FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
WHERE type = 1 AND title != ''
ORDER BY score DESC
LIMIT 20;
```

```sql
-- Monthly submission volume for a specific year
SELECT
    strftime(time, '%Y-%m') AS month,
    count(*) AS items,
    count(*) FILTER (WHERE type = 1) AS stories,
    count(*) FILTER (WHERE type = 2) AS comments
FROM read_parquet('hf://datasets/open-index/hacker-news/data/2024/*.parquet')
GROUP BY month
ORDER BY month;
```

```sql
-- Most discussed stories by total comment count
SELECT id, title, "by", score, descendants AS comments, url
FROM read_parquet('hf://datasets/open-index/hacker-news/data/2025/*.parquet')
WHERE type = 1 AND descendants > 0
ORDER BY descendants DESC
LIMIT 20;
```

```sql
-- Who posts the most Ask HN questions?
SELECT "by", count(*) AS posts
FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
WHERE type = 1 AND title LIKE 'Ask HN:%'
GROUP BY "by"
ORDER BY posts DESC
LIMIT 20;
```

```sql
-- Track how often a topic appears on HN over time
SELECT
    extract(year FROM time) AS year,
    count(*) AS mentions
FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
WHERE type = 1 AND lower(title) LIKE '%rust%'
GROUP BY year
ORDER BY year;
```

```sql
-- Top linked domains, year over year
SELECT
    extract(year FROM time) AS year,
    regexp_extract(url, 'https?://([^/]+)', 1) AS domain,
    count(*) AS stories
FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
WHERE type = 1 AND url != ''
GROUP BY year, domain
QUALIFY row_number() OVER (PARTITION BY year ORDER BY stories DESC) <= 5
ORDER BY year, stories DESC;
```

### Using `datasets`

```python
from datasets import load_dataset

# Stream the full history without downloading everything first
ds = load_dataset("open-index/hacker-news", split="train", streaming=True)
for item in ds:
    print(item["id"], item["type"], item["title"])

# Load a specific year into memory
ds = load_dataset(
    "open-index/hacker-news",
    data_files="data/2024/*.parquet",
    split="train",
)
print(f"{len(ds):,} items in 2024")

# Load today's live blocks (updated every 5 minutes)
ds = load_dataset(
    "open-index/hacker-news",
    name="today",
    split="train",
    streaming=True,
)
```

### Using `huggingface_hub`

```python
from huggingface_hub import snapshot_download

# Download only 2024 data (about 1.5 GB)
snapshot_download(
    "open-index/hacker-news",
    repo_type="dataset",
    local_dir="./hn/",
    allow_patterns="data/2024/*",
)
```

For faster downloads, install the extra with `pip install "huggingface_hub[hf_transfer]"` and set `HF_HUB_ENABLE_HF_TRANSFER=1`.
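
The flag can also be set from Python, as long as that happens before `huggingface_hub` is imported (the setting is read when the library loads); a small sketch:

```python
import os

# Enable the Rust-based hf_transfer downloader before importing huggingface_hub.
os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"

from huggingface_hub import snapshot_download

snapshot_download("open-index/hacker-news", repo_type="dataset", local_dir="./hn/")
```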

### Using the CLI

```bash
# Download a single month
huggingface-cli download open-index/hacker-news \
    data/2024/2024-01.parquet \
    --repo-type dataset --local-dir ./hn/
```

### Using pandas + DuckDB

```python
import duckdb

conn = duckdb.connect()

# Score distribution: what does a "typical" HN story look like?
# type=1 is story (stored as integer: 1=story, 2=comment, 3=poll, 4=pollopt, 5=job)
df = conn.sql("""
    SELECT
        percentile_disc(0.50) WITHIN GROUP (ORDER BY score) AS p50,
        percentile_disc(0.90) WITHIN GROUP (ORDER BY score) AS p90,
        percentile_disc(0.99) WITHIN GROUP (ORDER BY score) AS p99,
        percentile_disc(0.999) WITHIN GROUP (ORDER BY score) AS p999
    FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
    WHERE type = 1
""").df()
print(df)
```

## Dataset statistics

You can query the per-month statistics directly from the `stats.csv` file included in the dataset:

```sql
SELECT * FROM read_csv_auto('hf://datasets/open-index/hacker-news/stats.csv')
ORDER BY year, month;
```

The `stats.csv` file tracks each committed month with the following columns:

| Column | Description |
|--------|-------------|
| `year`, `month` | Calendar month |
| `lowest_id`, `highest_id` | Item ID range covered by this file |
| `count` | Number of items in the file |
| `dur_fetch_s` | Seconds to fetch from the data source |
| `dur_commit_s` | Seconds to commit to Hugging Face |
| `size_bytes` | Parquet file size on disk |
| `committed_at` | ISO 8601 timestamp of when this month was committed |
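
To verify completeness programmatically, here is a hedged sketch (assuming `pandas` plus `huggingface_hub` are installed so `hf://` paths resolve through fsspec) that checks for missing months and suspicious ID-range gaps:

```python
import pandas as pd

stats = pd.read_csv("hf://datasets/open-index/hacker-news/stats.csv")
stats = stats.sort_values(["year", "month"]).reset_index(drop=True)

# Every calendar month between the first and last committed month should appear.
got = pd.PeriodIndex(
    stats["year"].astype(str) + "-" + stats["month"].astype(str).str.zfill(2),
    freq="M",
)
expected = pd.period_range(got.min(), got.max(), freq="M")
print("missing months:", list(expected.difference(got)))

# Item IDs increase monotonically site-wide, so consecutive monthly files
# should cover adjacent ID ranges (small gaps can appear where items are
# missing at the source).
prev_high = stats["highest_id"].shift(1)
suspicious = stats[(stats.index > 0) & (stats["lowest_id"] > prev_high + 1)]
print(suspicious[["year", "month", "lowest_id", "highest_id"]])
```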

## Content breakdown

Hacker News has five item types. The vast majority of content is comments, followed by stories (which include Ask HN, Show HN, and regular link submissions). Jobs, polls, and poll options make up a small fraction.

| Type | Count | Share |
|------|------:|------:|
| comment | 41,604,069 | 87.2% |
| story | 6,077,916 | 12.7% |
| job | 18,093 | <0.1% |
| poll | 2,241 | <0.1% |
| pollopt | 15,453 | <0.1% |
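
The breakdown above can be reproduced with a single aggregation; note this scans all monthly files (roughly 47M rows):

```python
import duckdb

# Count items per type and compute each type's share of the whole archive.
duckdb.sql("""
    SELECT
        CASE type
            WHEN 1 THEN 'story' WHEN 2 THEN 'comment' WHEN 3 THEN 'poll'
            WHEN 4 THEN 'pollopt' WHEN 5 THEN 'job'
        END AS item_type,
        count(*) AS n,
        round(100.0 * count(*) / sum(count(*)) OVER (), 1) AS share_pct
    FROM read_parquet('hf://datasets/open-index/hacker-news/data/*/*.parquet')
    GROUP BY type
    ORDER BY n DESC
""").show()
```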

Of all stories submitted to Hacker News, **84.7%** link to an external URL. The rest are text-only posts: Ask HN questions, Show HN launches, and other self-posts where the discussion itself is the content.

The average story generates **23.8 comments** in its discussion thread. The most-discussed story of all time received 9,275 comments, which gives a sense of how deep conversations can go on particularly controversial or interesting topics.

### Story scores

Scores on Hacker News follow a steep power law. Most stories receive only a few points, but a small number break out and reach the front page with hundreds or thousands of upvotes.

| Metric | Value |
|--------|------:|
| Average score | 1.5 |
| Median score | 0 |
| Highest score ever | 6,015 |
| Stories with 100+ points | 175,933 |
| Stories with 1,000+ points | 2,169 |

The median score of 0 reflects the fact that many stories are submitted but never gain traction. The long tail is where things get interesting: more than six million stories have been submitted, and the roughly 0.04% with 1,000+ points represent the content that defined conversations across the technology industry.

### Most-shared domains

The domains most frequently linked from Hacker News stories tell a clear story about what the community values. GitHub dominates, reflecting HN's deep roots in open source and software development. Major publications like the New York Times and Ars Technica show the community's interest in journalism and long-form analysis.

| # | Domain | Stories |
|--:|--------|--------:|
| 1 | github.com | 200,836 |
| 2 | www.youtube.com | 135,530 |
| 3 | medium.com | 124,697 |
| 4 | www.nytimes.com | 77,983 |
| 5 | en.wikipedia.org | 54,642 |
| 6 | techcrunch.com | 54,277 |
| 7 | twitter.com | 50,935 |
| 8 | arstechnica.com | 47,250 |
| 9 | www.theguardian.com | 44,543 |
| 10 | www.bloomberg.com | 37,951 |

### Most active story submitters

These are the users who have submitted the most stories over the lifetime of Hacker News. Many of them have been active for over a decade, consistently curating and sharing content with the community.

| # | User | Stories |
|--:|------|--------:|
| 1 | rbanffy | 36,896 |
| 2 | Tomte | 26,258 |
| 3 | tosh | 24,251 |
| 4 | bookofjoe | 20,778 |
| 5 | mooreds | 20,635 |
| 6 | pseudolus | 19,969 |
| 7 | PaulHoule | 19,197 |
| 8 | todsacerdoti | 18,887 |
| 9 | ingve | 17,115 |
| 10 | thunderbong | 16,125 |
| 11 | jonbaer | 14,194 |
| 12 | rntn | 13,410 |
| 13 | doener | 12,960 |
| 14 | Brajeshwar | 12,725 |
| 15 | LinuxBender | 11,058 |

## How it works

The pipeline is built in Go and uses [DuckDB](https://duckdb.org) for Parquet conversion. Historical data is sourced from [ClickHouse](https://clickhouse.com); live data is fetched directly from the [HN Firebase API](https://hacker-news.firebaseio.com/v2).

**Historical backfill.** The pipeline iterates through every month from October 2006 to the most recent complete month. For each month, it queries the ClickHouse source with a time-bounded SQL query, exports the result as a Parquet file sorted by `id` using DuckDB with Zstandard compression at level 22, and commits it to this repository along with an updated `stats.csv` and `README.md`. Months already tracked in `stats.csv` are skipped, making the process fully resumable.
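
The export step can be sketched in Python (the production pipeline is Go; the helper and its inputs below are illustrative, and it assumes a DuckDB recent enough to accept `COMPRESSION_LEVEL` in Parquet `COPY` options):

```python
import duckdb

def export_month(rows, out_path: str) -> None:
    """Hypothetical export of one calendar month. `rows` is assumed to be a
    pandas DataFrame already fetched from the ClickHouse source."""
    con = duckdb.connect()
    con.register("month_items", rows)
    # Sort by id and write Zstandard-compressed Parquet at level 22,
    # matching the settings described above.
    con.sql(f"""
        COPY (SELECT * FROM month_items ORDER BY id)
        TO '{out_path}'
        (FORMAT PARQUET, COMPRESSION ZSTD, COMPRESSION_LEVEL 22)
    """)
```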

**Live polling.** Every 5 minutes, the pipeline calls the HN Firebase API to fetch new items by ID range. Items are grouped into their 5-minute time windows, written as individual Parquet files at `today/YYYY/MM/DD/HH/MM.parquet` using DuckDB, and committed to Hugging Face immediately. Using the HN API directly means live blocks reflect real-time data with no indexing lag.
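
A rough Python sketch of the polling step, under the same caveat (the `maxitem.json` and `item/<id>.json` paths follow the public HN API conventions and are assumptions here; the base URL is the endpoint referenced above):

```python
import time
import requests

BASE = "https://hacker-news.firebaseio.com/v2"

def fetch_new_items(watermark: int) -> list[dict]:
    # Fetch every item above the last committed ID; with a 5-minute cadence
    # this range stays small in practice.
    max_id = requests.get(f"{BASE}/maxitem.json", timeout=10).json()
    items = []
    for item_id in range(watermark + 1, max_id + 1):
        item = requests.get(f"{BASE}/item/{item_id}.json", timeout=10).json()
        if item:
            items.append(item)
    return items

def window_key(item: dict) -> str:
    # Group items into 5-minute windows by creation timestamp, mirroring
    # the today/YYYY/MM/DD/HH/MM.parquet layout.
    t = time.gmtime(item["time"])
    return f"{t.tm_year}/{t.tm_mon:02d}/{t.tm_mday:02d}/{t.tm_hour:02d}/{(t.tm_min // 5) * 5:02d}"
```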

**Day rollover.** At midnight UTC, the entire current month is refetched from the ClickHouse source in a single query and written as an authoritative Parquet file. Today's individual 5-minute blocks are deleted from the repository in the same atomic commit. Refetching instead of merging ensures the monthly file is always complete and deduplicated, regardless of any local state.
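
The atomic swap maps naturally onto a single `create_commit` call in `huggingface_hub`; a hedged sketch with illustrative paths:

```python
from huggingface_hub import HfApi, CommitOperationAdd, CommitOperationDelete

# Illustrative paths; the real pipeline enumerates the day's committed blocks.
today_blocks = ["today/2026/04/13/00/00.parquet"]  # ...plus the rest of the day

api = HfApi()
operations = [
    CommitOperationAdd(
        path_in_repo="data/2026/2026-04.parquet",
        path_or_fileobj="/tmp/2026-04.parquet",  # refetched, authoritative month
    ),
] + [CommitOperationDelete(path_in_repo=p) for p in today_blocks]

# Adding the monthly file and deleting the live blocks lands in one commit.
api.create_commit(
    repo_id="open-index/hacker-news",
    repo_type="dataset",
    operations=operations,
    commit_message="Roll 2026-04-13 into the monthly file",
)
```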

## Thanks

The data in this dataset comes from the [ClickHouse Playground](https://sql.clickhouse.com), a free public SQL endpoint maintained by [ClickHouse, Inc.](https://clickhouse.com) that mirrors the official [Hacker News Firebase API](https://github.com/HackerNewsAPI/HN-API). ClickHouse uses Hacker News as one of its canonical demo datasets. Without their public endpoint, building and maintaining a complete, regularly updated archive like this would not be practical.

The original content is created by the Hacker News community on a site operated by [Y Combinator](https://www.ycombinator.com). This is an independent mirror and is not affiliated with or endorsed by Y Combinator or ClickHouse, Inc.

# Dataset card for Hacker News - Complete Archive

## Dataset summary

This dataset is a complete mirror of the [Hacker News](https://news.ycombinator.com) archive, sourced from the [ClickHouse Playground](https://sql.clickhouse.com), which itself mirrors the official [HN Firebase API](https://github.com/HackerNewsAPI/HN-API). The data covers every item ever posted to the site, from the earliest submissions in October 2006 through today.

The dataset is intended for research, analysis, and training. Common use cases include:

- **Language model pretraining and fine-tuning** on high-quality technical discussions
- **Sentiment and trend analysis** across two decades of technology discourse
- **Community dynamics research** on one of the internet's most influential forums
- **Information retrieval** benchmarks using real-world questions and answers
- **Content recommendation** and ranking model development

## Dataset structure

### Data instances

Here is an example item from the dataset. This is a story submission with a link to an external URL:

```json
{
  "id": 1,
  "deleted": 0,
  "type": 1,
  "by": "pg",
  "time": "2006-10-09T18:21:51+00:00",
  "text": "",
  "dead": 0,
  "parent": 0,
  "poll": 0,
  "kids": [15, 234509, 487171],
  "url": "http://ycombinator.com",
  "score": 57,
  "title": "Y Combinator",
  "parts": [],
  "descendants": 0,
  "words": ["y", "combinator"]
}
```

And here is a comment, showing how discussion threads are connected via the `parent` field:

```json
{
  "id": 15,
  "deleted": 0,
  "type": 2,
  "by": "sama",
  "time": "2006-10-09T19:51:01+00:00",
  "text": "\"the way to get good software is to find ...",
  "dead": 0,
  "parent": 1,
  "poll": 0,
  "kids": [17],
  "url": "",
  "score": 0,
  "title": "",
  "parts": [],
  "descendants": 0,
  "words": []
}
```

### Data fields

Every Parquet file shares the same schema, matching the [HN API](https://github.com/HackerNewsAPI/HN-API) item format:

| Column | Type | Description |
|--------|------|-------------|
| `id` | uint32 | Unique item ID, monotonically increasing across the entire site |
| `deleted` | uint8 | 1 if the item was soft-deleted by its author or by moderators, 0 otherwise |
| `type` | int8 | Item type as an integer: `1`=story, `2`=comment, `3`=poll, `4`=pollopt, `5`=job |
| `by` | string | Username of the author who created this item. Note: `by` is a reserved word in DuckDB and must be quoted as `"by"` |
| `time` | timestamp | When the item was created, in UTC |
| `text` | string | HTML body text. Used for comments, Ask HN posts, job listings, and polls |
| `dead` | uint8 | 1 if the item was flagged or killed by moderators, 0 otherwise |
| `parent` | uint32 | The ID of the parent item. For comments, this points to either a story or another comment |
| `poll` | uint32 | For poll options (`pollopt`), the ID of the associated poll |
| `kids` | list\<uint32\> | Ordered list of direct child item IDs (typically comments) |
| `url` | string | The external URL for link stories. Empty for text posts and comments |
| `score` | int32 | The item's score (upvotes minus downvotes) |
| `title` | string | Title text for stories, jobs, and polls. Empty for comments |
| `parts` | list\<uint32\> | For polls, the list of associated poll option item IDs |
| `descendants` | int32 | Total number of comments in the entire discussion tree below this item |
| `words` | list\<string\> | Tokenized words extracted from the title and text fields |
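
For downstream code that prefers labels over integer codes, a small decoding helper can be handy (a sketch; the mappings come from the table above):

```python
# Map the integer-coded columns to labels and booleans.
TYPE_NAMES = {1: "story", 2: "comment", 3: "poll", 4: "pollopt", 5: "job"}

def decode_item(item: dict) -> dict:
    out = dict(item)
    out["type"] = TYPE_NAMES.get(item["type"], "unknown")
    out["deleted"] = bool(item["deleted"])
    out["dead"] = bool(item["dead"])
    return out
```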

### Data splits

The `default` configuration includes all historical monthly Parquet files. If you only need today's latest items, use the `today` configuration, which includes only the 5-minute live blocks for the current day.

You can also load individual years or months by specifying `data_files`:

```python
from datasets import load_dataset

# Load just January 2024
ds = load_dataset("open-index/hacker-news", data_files="data/2024/2024-01.parquet", split="train")

# Load all of 2024
ds = load_dataset("open-index/hacker-news", data_files="data/2024/*.parquet", split="train")
```

## Dataset creation

### Curation rationale

Hacker News is one of the richest sources of technical discussion on the internet, but accessing the full archive programmatically has historically required either scraping the Firebase API item-by-item or working with incomplete third-party dumps. This dataset provides the complete archive in a standard, efficient format that anyone can query without setting up infrastructure.

By publishing on Hugging Face with Parquet files, the data becomes immediately queryable with DuckDB (via `hf://` paths), streamable with the `datasets` library, and downloadable in bulk. The 5-minute live update pipeline means researchers always have access to near-real-time data.

### Source data

All data is sourced from the [ClickHouse Playground](https://sql.clickhouse.com), a public SQL endpoint maintained by ClickHouse that mirrors the official Hacker News Firebase API. The ClickHouse mirror is widely used for analytics demonstrations and contains the complete dataset.

The pipeline queries the ClickHouse endpoint month-by-month, exports each month as a Parquet file using DuckDB with Zstandard compression at level 22, and commits it to this Hugging Face repository. Already-committed months are tracked in `stats.csv` and skipped on subsequent runs, making the process fully resumable.

### Data processing steps

The pipeline runs in three modes:

1. **Historical backfill.** Iterates through every month from October 2006 to the most recent complete month. For each month, it runs a SQL query against the ClickHouse source, writes the result as a Parquet file sorted by `id`, and commits it to Hugging Face along with an updated `stats.csv` and `README.md`.

2. **Live polling.** After the historical backfill completes, the pipeline polls the [HN Firebase API](https://hacker-news.firebaseio.com/v2) every 5 minutes for new items. It fetches all items with IDs greater than the last committed watermark, groups them into 5-minute time windows by item timestamp, and writes each window as a `today/YYYY/MM/DD/HH/MM.parquet` file committed to Hugging Face immediately. The HN API provides real-time data with no indexing lag.

3. **Day rollover.** At midnight UTC, the entire current month is refetched from the ClickHouse source in a single query and written as a fresh, authoritative Parquet file. Today's individual 5-minute blocks are deleted from the repository in the same atomic commit. This approach is more reliable than merging local blocks: the result is always complete and deduplicated, sourced directly from the origin.

All Parquet files use **Zstandard compression at level 22** and are sorted by `id` for efficient range scans. No filtering, deduplication, or transformation is applied to the data beyond what the source provides.
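
Because every file is sorted by `id`, ID-range predicates let DuckDB prune row groups using Parquet min/max statistics; a sketch with an illustrative ID range:

```python
import duckdb

# Fetch a narrow slice of items by ID; on id-sorted files DuckDB only reads
# the row groups whose min/max statistics overlap the range.
duckdb.sql("""
    SELECT id, type, "by", title
    FROM read_parquet('hf://datasets/open-index/hacker-news/data/2024/*.parquet')
    WHERE id BETWEEN 39000000 AND 39001000
""").show()
```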

### Personal and sensitive information

This dataset contains usernames (`by` field) and user-generated text content (`text`, `title` fields) as they appear on the public Hacker News website. No additional PII processing has been applied. The data reflects what is publicly visible on [news.ycombinator.com](https://news.ycombinator.com).

If you find content in this dataset that you believe should be removed, please open a discussion on the Community tab.

## Considerations for using the data

### Social impact

By providing the complete Hacker News archive in an accessible format, we hope to enable research into online community dynamics, technology trends, and the evolution of technical discourse. The dataset can serve as training data for language models that need to understand technical discussions, or as a benchmark for information retrieval and recommendation systems.

### Discussion of biases

Hacker News has a well-documented set of community biases. The user base skews heavily toward software engineers, startup founders, and technology enthusiasts based in the United States. Topics related to Silicon Valley, programming languages, startups, and certain political viewpoints tend to receive disproportionate attention and engagement.

The moderation system (flagging, vouching, and moderator intervention) shapes what content survives and what gets killed. Stories and comments that violate community norms are flagged as `dead`, but this moderation reflects the values of the existing community rather than any objective standard.

We have not applied any additional filtering or quality scoring to the data. All items, including deleted and dead items, are preserved exactly as they appear in the source.

### Known limitations

- **`type` is an integer.** The item type is stored as a TINYINT enum: `1`=story, `2`=comment, `3`=poll, `4`=pollopt, `5`=job. When writing DuckDB queries, use `WHERE type = 1` for stories rather than `WHERE type = 'story'`.
- **`by` is a reserved keyword in DuckDB.** Always quote it with double quotes: `"by"`.
- **`deleted` and `dead` are integers.** They are stored as 0/1 rather than booleans.
- **Comment text is HTML.** The `text` field contains raw HTML as stored by HN, not plain text. You may need to strip tags depending on your use case (see the sketch after this list).
- **Deleted items have sparse fields.** When an item is deleted, most fields become empty, but the `id` and `deleted` flag are preserved.
- **Scores are point-in-time snapshots.** The score reflects the value at the time the ClickHouse mirror last synced, not necessarily the final score.
- **No user profiles.** This dataset contains items only, not user profiles (karma, bio, etc.).
- **Code content is HTML-escaped.** Code snippets in comments use HTML entities and `<code>` tags rather than Markdown formatting.
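
A minimal standard-library sketch for converting the `text` field to plain text (HN uses `<p>` as a paragraph separator, and `HTMLParser` unescapes entities like `&amp;` on its own):

```python
from html.parser import HTMLParser

class TextExtractor(HTMLParser):
    """Collect text content and turn <p> tags into paragraph breaks."""

    def __init__(self):
        super().__init__()
        self.parts: list[str] = []

    def handle_starttag(self, tag, attrs):
        if tag == "p":
            self.parts.append("\n\n")

    def handle_data(self, data):
        self.parts.append(data)

def hn_text_to_plain(raw_html: str) -> str:
    parser = TextExtractor()
    parser.feed(raw_html)
    return "".join(parser.parts).strip()

print(hn_text_to_plain("one<p>two &amp; three"))  # one\n\ntwo & three
```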

## Additional information

### Licensing

The dataset is released under the **Open Data Commons Attribution License (ODC-By) v1.0**. The original content is subject to the rights of its respective authors. Hacker News data is provided by [Y Combinator](https://www.ycombinator.com).

This is an independent community mirror. It is not affiliated with or endorsed by Y Combinator.

### Contact

For questions, feedback, or issues, please open a discussion on the [Community tab](https://huggingface.co/datasets/open-index/hacker-news/discussions).

*Last updated: 2026-04-14 08:55 UTC*
data/2006/2006-10.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:f075a057c886f70d1521ad9b4a57623155d019f29fe2ed779cc19086acca5aac
size 11994
data/2006/2006-11.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5fb1756aba6069327cf119d57af3a30b6008c3e89da8105e470f383736be7ef8
size 554
data/2006/2006-12.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:febeea30ffd24936e1c46b1ece3dbf13ae78c5a110e2d39d18e66ac13005f342
size 6219
data/2007/2007-01.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5fb1756aba6069327cf119d57af3a30b6008c3e89da8105e470f383736be7ef8
size 554
data/2007/2007-02.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6ae533795ea07490873881c25c37df4229f7f03470d1d473a88757252e718c63
size 308963
data/2007/2007-03.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:d40cad7c65dc60f088b00cb1e102b2ee553616cb1f1b09cf0b21256f981be764
size 1253007
data/2007/2007-04.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:07009b16156e24d1975630e8adeb011f853a8673fa83004dea13780dd26fa41b
size 2137722
data/2007/2007-05.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2d45ee432f088d538a09f17e2b3839d0ee7530b5e29cf0b772955483d66ae399
size 1443362
data/2007/2007-06.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4508f484364534d7460b7cfd39a850c9921f5912adb0ea7dfaf77a744977dfd2
size 1229805
data/2007/2007-07.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b9f231b64dd84b06cf1538a530255ba9dfeddaf609d626fbc2039b976566c81e
size 1295922
data/2007/2007-08.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:26b64308ecfb825b612160a943443a986b4f34da33e32ceff5b16d82ee7fdb9b
size 2184462
data/2007/2007-09.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:098d4de6a97fdb9abdb462aee3502805cb7da8b3f1fe19a1862601367c6a191f
size 2587024
data/2007/2007-10.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:609c375301952fa2fc0615dfb83e872cbd510e764208869b928a559f5acf7f5f
size 2778304
data/2007/2007-11.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e7f046cbd14364bf73324fd3afe624da3c3bfbf0db674bd933c6894b459d506
size 2066776
data/2007/2007-12.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8cf9f0c82933efa2b86c9e5db01fac74065ac6418b460c41dd00fb2f74a9df8d
size 1837252
data/2008/2008-01.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e3d2ccca2df761ef1efd605f3ed9cb64fd9bab1d271878cada0de6bf44cbb1cd
size 2571038
data/2008/2008-02.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:96e5c6bf0ec13d349d273ce54e731aed56c353ab50198038d407f273eb8314b8
size 3860126
data/2008/2008-03.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:856a4e0ff92d53f54fc46f903cc71cedf943a125774be6da4d7e9e478ba51514
size 5142506
data/2008/2008-04.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4bc9263b6983712f37ae64a84cd00eaa1f50fa019edb6c6f0a4ff99255e33e25
size 5381500
data/2008/2008-05.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4bcc8d4aec473a1e48fdd50356f71cbaf2187a390262b5a53388849298b5adc5
size 5812797
data/2008/2008-06.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1da0f2ae765ecc749aaa505628f69cc75e7814cfb25c3ddfef829bcb50833c29
size 5616033
data/2008/2008-07.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0f094450e01185099b0d30643a60818539eb3c71df9b1e5d4042fd8f62942b74
size 6459558
data/2008/2008-08.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a0d7d3739c388d1c8d9e844d080b9e7e7eb4a34cc26660b93a4386252d2ad6fd
size 5673721
data/2008/2008-09.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5c104da44b06d3c8c143c1b852bb34b81a1bccee5cfc6428c9a00768c8fb0471
size 5854028
data/2008/2008-10.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:38b39b63d2f003f992c8440492e1b43906ab59417efa51fc42c4427a7386c8c1
size 6066795
data/2008/2008-11.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b5fda23eb272a6db86d63c17d4e65ad7561301501534655e0dbfc48fd7cfe60d
size 6438154
data/2008/2008-12.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:1f6985d14288a638e558d0f24db195a30b91475ccb0d9b1bf097e136033d9992
size 7601080
data/2009/2009-01.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a369b7d937aa7d3398c53d258922386e49897991ee1263b99fa6fd7821740da6
size 9457556
data/2009/2009-02.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:df809084dd1f34899cddd8466686eeaff4188fc26b36cf357170efaff6ed38e6
size 8695512
data/2009/2009-03.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5b44d147b770a2952f5968f232600bd7942490d66f80e99b2327f4bb1bda36b9
size 9583360
data/2009/2009-04.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fa3c552bf400a4b638eff6ff8c7244e1a93c3a1bde52d72cde3cecd3928d101f
size 10503066
data/2009/2009-05.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e05e02cda9515224b449c9bde66008836665018c58cc25f460a627db76cdf7d
size 10414819
data/2009/2009-06.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be818f75a149bfbf2abb9d224cb76c6c95c40e174f70ce2f40c959ded0959ac3
size 10195957
data/2009/2009-07.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0cd5360b35c477b116b9fbdea7e14c838057bf9dc4538f5a5cac2d40bd1452df
size 11906472
data/2009/2009-08.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a29993bb5e34c9fcbc664ee5510502a3c920110c1c71072cf2d5d4ec6922867f
size 13507872
data/2009/2009-09.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7dd3b9bb41287536249bc465fa1267f12f16e01261e0de0ebdd8a780f1f60319
size 12550137
data/2009/2009-10.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:025c5cb5f516a7fdb417476eaf446b93617cc661f1c781f566c35522bb32b771
size 13199103
data/2009/2009-11.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f5115183e26353afe2fb2210e1199c95cbdc2efb891f6bd9e74efa79bbae192
size 12147917
data/2009/2009-12.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:67417db8eef9ca2b75a09743a327ee331133d761b1edf4d8aa90d30e5df0449f
size 12271162
data/2010/2010-01.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:842a7c526feafdda5d864ba8c4e9acfecc2887e3bc1c16fd21b1dc7111a9b4f8
size 14626994
data/2010/2010-02.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ac93f76c1c75eeb0b9c737f7a356350e63e60d0728a45dd738dd625ffe6d4870
size 14651835
data/2010/2010-03.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0a95ef02c0408cfffd0f163662769d41c92a4f8aaacf50147c270e66eccab7fb
size 16555458
data/2010/2010-04.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:de3afc1fa86a28dd53476bc158d15cedcf2b80c9c389cc9b04e4ed7ae7f98d44
size 15974936
data/2010/2010-05.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec5ab1ac68ba802d693d01fa1fa89a79d64b1735003341e98ac03936e9248b7a
size 17897021
data/2010/2010-06.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e87eee2a37dcf86f58dc4e9cb6e35fcbbb039753d2208507279e525818e8736a
size 18345933
data/2010/2010-07.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:542528f8a4345314e9ff96f002359305ae2093c623357d676209550a73d231c8
size 20068819
data/2010/2010-08.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:93a6a9d28f508d6819a449d9241e9164eabf0976aec01a2827dbc94f89094e97
size 19938675
data/2010/2010-09.parquet ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:af21f860848f159c5e7a8ce42b37c167ff088ea749c9e81eaee74760b41e00e8
size 21231521