# dataset-builder/data1/download_dataset.py
# Download jablonkagroup/chempile-code and export it as CSV chunks.
import os
import pandas as pd
from datasets import load_dataset
from tqdm import tqdm
# Create output directory
os.makedirs("./datasets", exist_ok=True)
# Configurations to download (hardcoded here; see the note below for enumerating all configs)
configs = ["codeparrot_github-code-chemistry-python-default", "starcoder-chemistry-default"]
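# A sketch for enumerating every available configuration instead of hardcoding
# the two above: the datasets library provides get_dataset_config_names, which
# queries the Hub (assumes network access; not exercised by this script):
#
#   from datasets import get_dataset_config_names
#   configs = get_dataset_config_names("jablonkagroup/chempile-code")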
all_dataframes = []
# Download and process each configuration
for config in tqdm(configs, desc="Processing configurations"):
    print(f"\nLoading configuration: {config}")

    # Load the dataset (all splits) for this configuration
    dataset = load_dataset("jablonkagroup/chempile-code", config, cache_dir="./datasets")

    # Process each split
    for split_name, split_data in dataset.items():
        # Convert to a DataFrame and tag each row with its config and split
        df = pd.DataFrame(split_data)
        df["config"] = config
        df["split"] = split_name
        all_dataframes.append(df)

        # Save the individual split as CSV
        df.to_csv(f"./datasets/{config}_{split_name}.csv", index=False)
# Merge all data into one large DataFrame
print("\nMerging all data...")
merged_df = pd.concat(all_dataframes, ignore_index=True)
# Save the merged dataset as a single CSV (merged_df stays in memory for chunking below)
merged_output_path = "./datasets/chempile_code_complete.csv"
merged_df.to_csv(merged_output_path, index=False)
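# A more compact alternative, if file size is a concern (an assumption, not
# part of the original workflow; requires pyarrow or fastparquet installed):
#
#   merged_df.to_parquet("./datasets/chempile_code_complete.parquet", index=False)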
all_data_path = "./datasets/all_chempile_code"
# Create output directories
os.makedirs(all_data_path, exist_ok=True)
# Split the merged data into CSV files of at most ~500MB each
print("\nSaving in 500MB chunks...")
MAX_SIZE_MB = 500
chunk_num = 1
rows_per_chunk = 50000  # Initial estimate; rescaled after each chunk from its measured size
start_idx = 0
while start_idx < len(merged_df):
    # Slice the next chunk of rows
    end_idx = min(start_idx + rows_per_chunk, len(merged_df))
    chunk_df = merged_df.iloc[start_idx:end_idx]

    # Save the chunk and measure its actual on-disk size
    output_path = f"{all_data_path}/chempile_code_complete_{chunk_num:03d}.csv"
    chunk_df.to_csv(output_path, index=False)
    size_mb = os.path.getsize(output_path) / (1024 * 1024)

    # Rescale rows_per_chunk toward the 500MB target with a 5% safety margin;
    # the max() guard keeps the loop advancing even if one chunk overshoots badly
    if size_mb > 0:
        rows_per_chunk = max(1, int(rows_per_chunk * (MAX_SIZE_MB / size_mb) * 0.95))

    print(f"Saved {output_path}: {size_mb:.1f}MB, {len(chunk_df):,} rows")
    start_idx = end_idx
    chunk_num += 1
print(f"\nTotal: {len(merged_df):,} rows in {chunk_num-1} files")