cfahlgren1 HF Staff committed on
Commit
312a0c6
·
verified ·
1 Parent(s): ee9fff1

fix: write parquet with 50k row groups instead of single giant row group

Browse files
Files changed (1) hide show
  1. hub-stats.py +8 -2
hub-stats.py CHANGED
@@ -215,8 +215,14 @@ def jsonl_to_parquet(endpoint, jsonl_file, output_file):
215
  combined_df = pd.concat(all_dfs, ignore_index=True)
216
  total_rows = len(combined_df)
217
 
218
- # Write to parquet
219
- combined_df.to_parquet(output_file, index=False, engine="pyarrow")
 
 
 
 
 
 
220
 
221
  return total_rows
222
 
 
215
  combined_df = pd.concat(all_dfs, ignore_index=True)
216
  total_rows = len(combined_df)
217
 
218
+ # Write to parquet with controlled row group sizes
219
+ row_group_size = 50_000
220
+ table = pa.Table.from_pandas(combined_df, preserve_index=False)
221
+ writer = pq.ParquetWriter(output_file, table.schema)
222
+ for i in range(0, total_rows, row_group_size):
223
+ chunk = table.slice(i, min(row_group_size, total_rows - i))
224
+ writer.write_table(chunk)
225
+ writer.close()
226
 
227
  return total_rows
228