fix: write parquet with 50k row groups instead of single giant row group
Browse files — hub-stats.py (+8 −2)

hub-stats.py
CHANGED
@@ -215,8 +215,14 @@ def jsonl_to_parquet(endpoint, jsonl_file, output_file):
|
|
| 215 |     combined_df = pd.concat(all_dfs, ignore_index=True)
| 216 |     total_rows = len(combined_df)
| 217 |
| 218 | -   # Write to parquet
| 219 | -   (removed line — original content lost in page extraction)
| 220 |
| 221 |     return total_rows
| 222 |
| 215 |     combined_df = pd.concat(all_dfs, ignore_index=True)
| 216 |     total_rows = len(combined_df)
| 217 |
| 218 | +   # Write to parquet with controlled row group sizes
| 219 | +   row_group_size = 50_000
| 220 | +   table = pa.Table.from_pandas(combined_df, preserve_index=False)
| 221 | +   writer = pq.ParquetWriter(output_file, table.schema)
| 222 | +   for i in range(0, total_rows, row_group_size):
| 223 | +       chunk = table.slice(i, min(row_group_size, total_rows - i))
| 224 | +       writer.write_table(chunk)
| 225 | +   writer.close()
| 226 |
| 227 |     return total_rows
| 228 |