# parquet_row_group_eval / export_midi_files_test.py
# Author: Nintorac — initial commit (2cc6d4e)
#!/usr/bin/env python3
"""
Export MIDI files with different row group sizes for performance testing.
Only exports MIDI files whose hash key starts with 'a'.
"""
import duckdb
import numpy as np
from pathlib import Path
import os
# Paths: source DuckDB database and destination directory for parquet exports
db_path = "data/lakh_local.duckdb"
output_dir = Path("../parquet_row_group_eval")
# parents=True so a missing parent directory doesn't abort the run;
# exist_ok=True makes re-runs idempotent
output_dir.mkdir(parents=True, exist_ok=True)
# Row group sizes in base-2 logspace: 4 points from 2^11 (2048) to 2^14 (16384).
# NOTE(review): the original comment claimed "10 sizes from 1024 to 16384"
# (which would be np.logspace(10, 14, base=2, num=10)); the code generates
# 4 sizes starting at 2048. Code behavior is kept — confirm which was intended.
row_group_sizes = np.logspace(11, 14, base=2, num=4).astype(int)
print(f"Testing row group sizes: {row_group_sizes}")
# Read-only connection: this script only exports, never writes to the DB
conn = duckdb.connect(db_path, read_only=True)
# Query selecting MIDI files whose hash key starts with 'a'
query = """
SELECT
hm.midi_hk,
hm.midi_md5,
smf.file_content,
smf.file_size,
smf.load_date
FROM hub_midi_file hm
JOIN sat_midi_file smf ON hm.midi_hk = smf.midi_hk
WHERE hm.midi_hk LIKE 'a%'
ORDER BY hm.midi_hk
"""
print("Querying MIDI files...")
# Count only: the original fetchdf() materialized every file_content blob in
# memory just to report a row count. COUNT(*) gives the same number cheaply.
n_files = conn.execute(f"SELECT COUNT(*) FROM ({query})").fetchone()[0]
print(f"Found {n_files} MIDI files with hash keys starting with 'a'")
# Export the same result set once per row-group size for benchmarking.
# Total is derived from row_group_sizes — the original hardcoded "/10"
# while only 4 sizes were generated.
n_exports = len(row_group_sizes)
for i, row_group_size in enumerate(row_group_sizes, start=1):
    output_file = output_dir / f"midi_files_rowgroup_{row_group_size}.parquet"
    print(f"Exporting file {i}/{n_exports}: {output_file} (row_group_size={row_group_size})")
    # COPY re-runs the query inside DuckDB; ROW_GROUP_SIZE controls the
    # parquet row-group layout being benchmarked
    conn.execute(f"""
    COPY (
    {query}
    ) TO '{output_file}'
    (FORMAT PARQUET, ROW_GROUP_SIZE {row_group_size})
    """)
    # Report the on-disk size of the freshly written parquet file
    file_size = output_file.stat().st_size
    print(f" File size: {file_size:,} bytes")
print(f"\nAll files exported to {output_dir}/")
# Recap every parquet file now present in the output directory, sorted by name
print("\nSummary of exported files:")
for parquet_path in sorted(output_dir.glob("*.parquet")):
    n_bytes = parquet_path.stat().st_size
    print(f" {parquet_path.name}: {n_bytes:,} bytes")
conn.close()