Upload script/process_from_parquet.py with huggingface_hub
script/process_from_parquet.py
ADDED
@@ -0,0 +1,31 @@
+
+import dask.dataframe as dd
+from functools import partial
+
+
+def read_parquet_file(parquet_file_path, npartitions=50, top=None):
+
+    print(f"Processing parquet file from {parquet_file_path}")
+    file_name = parquet_file_path.split("/")[-1]
+    parquet_df = dd.read_parquet(parquet_file_path, engine="pyarrow")
+    parquet_df = parquet_df.repartition(npartitions=npartitions)  # Smaller partitions
+
+    if top:
+        parquet_df = parquet_df.head(top, compute=False)  # compute=False keeps it a lazy Dask DataFrame
+
+    return parquet_df, file_name
+
+
+def process_parquet_df(parquet_df, file_name, process_row_func, process_partition):
+
+    # Partially apply process_row_func so its parquet_file_name parameter is pre-bound.
+    process_row_with_params = partial(process_row_func, parquet_file_name=file_name)
+    result_df = parquet_df.map_partitions(process_partition, process_row_with_params)
+
+    return result_df
+
+
+def save_to_csv(df, final_path):
+    # Save the processed DataFrame to a single CSV file
+    df.to_csv(final_path, index=False, single_file=True)
+
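The script leaves process_row_func and process_partition to the caller: map_partitions hands process_partition each partition as a pandas DataFrame plus the pre-bound row function. A minimal sketch of how the three helpers might be wired together, with the functions above in scope; the process_row transform, the source_file column, and the file paths are illustrative assumptions, not part of the script:

    def process_row(row, parquet_file_name):
        # Hypothetical per-row transform: tag each record with its source file.
        row["source_file"] = parquet_file_name
        return row

    def process_partition(partition, row_func):
        # partition arrives as a pandas DataFrame; apply the pre-bound row function row by row.
        return partition.apply(row_func, axis=1)

    parquet_df, file_name = read_parquet_file("data/example.parquet", npartitions=50, top=1000)
    result_df = process_parquet_df(parquet_df, file_name, process_row, process_partition)
    save_to_csv(result_df, "output/example.csv")

Because the Dask graph is lazy, nothing is read or transformed until save_to_csv forces the computation through to_csv.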