cfahlgren1 HF Staff committed on
Commit
1adcf0a
·
verified ·
1 Parent(s): 10c9b1e

Update hub-stats.py

Browse files
Files changed (1) hide show
  1. hub-stats.py +107 -48
hub-stats.py CHANGED
@@ -22,6 +22,8 @@ import requests.utils
22
  from dotenv import load_dotenv
23
  from huggingface_hub import HfApi
24
  from tenacity import retry, stop_after_attempt, wait_exponential
 
 
25
 
26
  load_dotenv()
27
 
@@ -175,14 +177,91 @@ async def fetch_data_page(session, url, params=None, headers=None):
175
  return await response.json(), response.headers.get("Link")
176
 
177
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
178
  async def create_parquet_files(skip_upload=False):
179
  start_time = time.time()
180
  endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]
181
  created_files = []
 
182
 
183
  async with aiohttp.ClientSession() as session:
184
  for endpoint in endpoints:
185
- print(f"Processing {endpoint}...")
186
 
187
  config = ENDPOINT_CONFIGS[endpoint]
188
  base_url = config.get("base_url", f"https://huggingface.co/api/{endpoint}")
@@ -190,15 +269,12 @@ async def create_parquet_files(skip_upload=False):
190
  params.update(config["params"])
191
 
192
  headers = {"Accept": "application/json"}
193
- all_data = []
194
  url = base_url
195
  page = 0
196
 
197
- jsonl_file = None
198
- if skip_upload:
199
- jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
200
- with open(jsonl_file, "w") as f:
201
- pass
202
 
203
  while url:
204
  if endpoint == "posts":
@@ -209,52 +285,48 @@ async def create_parquet_files(skip_upload=False):
209
  session, url, params, headers
210
  )
211
 
212
- if skip_upload and jsonl_file:
213
- with open(jsonl_file, "a") as f:
214
- f.write(json.dumps(data) + "\n")
215
 
216
  if endpoint == "posts":
217
- items = data["socialPosts"]
218
- total_items = data["numTotalItems"]
219
- all_data.extend(items)
220
-
221
- if (page + 1) * params["limit"] >= total_items:
222
  url = None
223
  else:
224
  url = base_url
225
  else:
226
- all_data.extend(data)
227
  url = parse_link_header(link_header)
228
  if url:
229
  params = {}
230
 
231
- if len(all_data) % 10000 == 0:
232
- print(f" {len(all_data):,} records processed")
233
-
234
  page += 1
235
 
236
  except Exception as e:
237
- print(f"Error on page {page}: {e}")
238
  await asyncio.sleep(2)
239
  if page > 0:
240
  url = None
241
  else:
242
  raise
243
 
244
- if skip_upload and jsonl_file and os.path.exists(jsonl_file):
245
- print(f" Raw data saved to {jsonl_file}")
246
 
247
- df = pd.DataFrame(all_data)
248
- df = process_dataframe(df, endpoint)
249
-
250
- output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
251
- save_parquet(df, output_file)
252
- created_files.append(output_file)
253
 
254
- print(f" {endpoint}: {len(df):,} rows -> {output_file}")
 
 
 
 
255
 
256
- if not skip_upload:
257
- upload_to_hub(output_file, REPO_ID)
258
 
259
  elapsed = time.time() - start_time
260
  return created_files, elapsed
@@ -270,23 +342,9 @@ def recreate_from_jsonl():
270
  continue
271
 
272
  print(f"Recreating {endpoint} from {jsonl_file}...")
273
-
274
- all_data = []
275
- with open(jsonl_file, "r") as f:
276
- for line in f:
277
- data = json.loads(line.strip())
278
- if endpoint == "posts":
279
- all_data.extend(data["socialPosts"])
280
- else:
281
- all_data.extend(data)
282
-
283
- df = pd.DataFrame(all_data)
284
- df = process_dataframe(df, endpoint)
285
-
286
  output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
287
- save_parquet(df, output_file)
288
-
289
- print(f"✓ {endpoint}: {len(df):,} rows -> {output_file}")
290
 
291
 
292
  def upload_to_hub(file_path, repo_id):
@@ -312,7 +370,8 @@ def main(skip_upload=False):
312
 
313
  for file in created_files:
314
  size = os.path.getsize(file)
315
- rows = len(pd.read_parquet(file))
 
316
  print(f" {os.path.basename(file)}: {rows:,} rows, {size:,} bytes")
317
 
318
  if skip_upload:
 
22
  from dotenv import load_dotenv
23
  from huggingface_hub import HfApi
24
  from tenacity import retry, stop_after_attempt, wait_exponential
25
+ import pyarrow as pa
26
+ import pyarrow.parquet as pq
27
 
28
  load_dotenv()
29
 
 
177
  return await response.json(), response.headers.get("Link")
178
 
179
 
180
def jsonl_to_parquet(endpoint, jsonl_file, output_file):
    """Convert a raw JSONL dump for *endpoint* into a single Parquet file.

    Each line of *jsonl_file* holds one API response page. For the
    "posts" endpoint the records live under the "socialPosts" key;
    every other endpoint stores the page as a plain JSON list.

    The file is read twice: a first pass collects the union of columns
    produced by ``process_dataframe`` (pages can differ in columns), and
    a second pass streams each page into a ``pyarrow.parquet.ParquetWriter``
    so the full dataset is never materialized in memory.

    Returns the total number of rows written (0 if the input file is
    missing or contains no records).
    """
    if not os.path.exists(jsonl_file):
        print(f"✗ {jsonl_file} not found")
        return 0

    def _iter_chunks():
        # Yield one processed, non-empty DataFrame per JSONL page.
        with open(jsonl_file, "r") as f:
            for line in f:
                line = line.strip()
                if not line:
                    continue
                data = json.loads(line)
                items = data.get("socialPosts", []) if endpoint == "posts" else data
                if not items:
                    continue
                df = pd.DataFrame(items)
                if df.empty:
                    continue
                yield process_dataframe(df, endpoint)

    # First pass: union of all columns across pages, preserving first-seen
    # order. We must inspect the *processed* frames because
    # process_dataframe may add or rename columns.
    all_columns = None
    for df in _iter_chunks():
        cols = list(df.columns)
        if all_columns is None:
            all_columns = cols
        else:
            all_columns.extend(c for c in cols if c not in all_columns)

    if all_columns is None:
        print(f"  No data found for {endpoint}")
        return 0

    # Second pass: stream pages into the Parquet file.
    if os.path.exists(output_file):
        os.remove(output_file)

    writer = None
    total_rows = 0
    try:
        for df in _iter_chunks():
            # reindex aligns every page to the full column set (missing
            # columns become NaN) so row groups are structurally uniform.
            df = df.reindex(columns=all_columns)
            table = pa.Table.from_pandas(df, preserve_index=False)
            if writer is None:
                writer = pq.ParquetWriter(output_file, table.schema)
            else:
                # Later pages can infer different Arrow dtypes (e.g. an
                # all-null column infers as null type); cast to the writer's
                # schema so write_table does not raise on schema mismatch.
                table = table.cast(writer.schema)
            writer.write_table(table)
            total_rows += len(df)
    finally:
        # Always close the writer so the file footer is written even when
        # a page fails mid-stream; otherwise the handle (and a corrupt
        # file) would leak.
        if writer is not None:
            writer.close()

    return total_rows
256
  async def create_parquet_files(skip_upload=False):
257
  start_time = time.time()
258
  endpoints = ["daily_papers", "models", "spaces", "datasets", "posts"]
259
  created_files = []
260
+ jsonl_files = {}
261
 
262
  async with aiohttp.ClientSession() as session:
263
  for endpoint in endpoints:
264
+ print(f"Fetching {endpoint}...")
265
 
266
  config = ENDPOINT_CONFIGS[endpoint]
267
  base_url = config.get("base_url", f"https://huggingface.co/api/{endpoint}")
 
269
  params.update(config["params"])
270
 
271
  headers = {"Accept": "application/json"}
 
272
  url = base_url
273
  page = 0
274
 
275
+ jsonl_file = os.path.join(CACHE_DIR, f"{endpoint}_raw.jsonl")
276
+ with open(jsonl_file, "w") as f:
277
+ pass # truncate
 
 
278
 
279
  while url:
280
  if endpoint == "posts":
 
285
  session, url, params, headers
286
  )
287
 
288
+ with open(jsonl_file, "a") as f:
289
+ f.write(json.dumps(data) + "\n")
 
290
 
291
  if endpoint == "posts":
292
+ total_items = data.get("numTotalItems", 0)
293
+ items_on_page = len(data.get("socialPosts", []))
294
+ if (page + 1) * params["limit"] >= total_items or items_on_page == 0:
 
 
295
  url = None
296
  else:
297
  url = base_url
298
  else:
 
299
  url = parse_link_header(link_header)
300
  if url:
301
  params = {}
302
 
 
 
 
303
  page += 1
304
 
305
  except Exception as e:
306
+ print(f"Error on page {page} for {endpoint}: {e}")
307
  await asyncio.sleep(2)
308
  if page > 0:
309
  url = None
310
  else:
311
  raise
312
 
313
+ print(f" Raw data for {endpoint} saved to {jsonl_file}")
314
+ jsonl_files[endpoint] = jsonl_file
315
 
316
+ # Convert JSONL -> Parquet with streaming writer
317
+ for endpoint in endpoints:
318
+ jsonl_file = jsonl_files.get(endpoint)
319
+ if not jsonl_file or not os.path.exists(jsonl_file):
320
+ continue
 
321
 
322
+ print(f"Processing {endpoint} from JSONL...")
323
+ output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
324
+ total_rows = jsonl_to_parquet(endpoint, jsonl_file, output_file)
325
+ print(f"✓ {endpoint}: {total_rows:,} rows -> {output_file}")
326
+ created_files.append(output_file)
327
 
328
+ if not skip_upload:
329
+ upload_to_hub(output_file, REPO_ID)
330
 
331
  elapsed = time.time() - start_time
332
  return created_files, elapsed
 
342
  continue
343
 
344
  print(f"Recreating {endpoint} from {jsonl_file}...")
 
 
 
 
 
 
 
 
 
 
 
 
 
345
  output_file = os.path.join(CACHE_DIR, f"{endpoint}.parquet")
346
+ total_rows = jsonl_to_parquet(endpoint, jsonl_file, output_file)
347
+ print(f"✓ {endpoint}: {total_rows:,} rows -> {output_file}")
 
348
 
349
 
350
  def upload_to_hub(file_path, repo_id):
 
370
 
371
  for file in created_files:
372
  size = os.path.getsize(file)
373
+ pf = pq.ParquetFile(file)
374
+ rows = pf.metadata.num_rows
375
  print(f" {os.path.basename(file)}: {rows:,} rows, {size:,} bytes")
376
 
377
  if skip_upload: