Nintorac commited on
Commit
2cc6d4e
·
0 Parent(s):

initial commit

Browse files
.gitattributes ADDED
@@ -0,0 +1 @@
 
 
1
+ **/*.parquet filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Parquet Row Group Eval
2
+
3
+ A quick experiment to figure out the optimal row group sizing for storing MIDI data in Parquet and accessing it via Hugging Face.
4
+
5
+ ```
6
+ ==================================================
7
+ SUMMARY TABLE
8
+ ==================================================
9
+ filename total_rows num_row_groups avg_row_group_size min_row_group_size max_row_group_size file_size_mb
10
+ midi_files_rowgroup_2048.parquet 11262 6 1877 1022 2048 166.41
11
+ midi_files_rowgroup_4096.parquet 11262 3 3754 3070 4096 166.69
12
+ midi_files_rowgroup_8192.parquet 11262 2 5631 3070 8192 166.28
13
+ midi_files_rowgroup_16384.parquet 11262 1 11262 11262 11262 166.12
14
+ ```
check_parquet_metadata.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Check Parquet file metadata to display row group information.

Scans ``../parquet_row_group_eval`` for ``*.parquet`` files, prints per-file
row-group statistics read from the Parquet footer (no data pages are loaded),
and finishes with a summary table sorted by average row-group size.

Exits with status 1 if no parquet files are found.
"""

import pyarrow.parquet as pq
from pathlib import Path
import pandas as pd

# Directory containing the exported files
parquet_dir = Path("../parquet_row_group_eval")

print("Parquet File Metadata Analysis")
print("=" * 50)

# Find all parquet files, sorted for deterministic output order
parquet_files = sorted(parquet_dir.glob("*.parquet"))

if not parquet_files:
    print(f"No parquet files found in {parquet_dir}")
    # raise SystemExit rather than calling the site-provided exit() builtin,
    # which is not guaranteed to exist when run non-interactively
    raise SystemExit(1)

results = []

for file_path in parquet_files:
    print(f"\nFile: {file_path.name}")

    # Reading ParquetFile only parses the footer metadata, not the row data
    parquet_file = pq.ParquetFile(file_path)
    metadata = parquet_file.metadata

    # Extract information
    num_row_groups = metadata.num_row_groups
    total_rows = metadata.num_rows
    file_size = file_path.stat().st_size

    print(f" Total rows: {total_rows:,}")
    print(f" Number of row groups: {num_row_groups:,}")
    print(f" File size: {file_size:,} bytes ({file_size / 1024 / 1024:.2f} MB)")

    # Per-row-group row counts, straight from the footer
    row_group_sizes = [metadata.row_group(i).num_rows for i in range(num_row_groups)]

    avg_row_group_size = sum(row_group_sizes) / len(row_group_sizes) if row_group_sizes else 0
    min_rg_size = min(row_group_sizes) if row_group_sizes else 0
    max_rg_size = max(row_group_sizes) if row_group_sizes else 0

    print(" Row group sizes:")
    print(f" Average: {avg_row_group_size:.0f} rows")
    print(f" Min: {min_rg_size:,} rows")
    print(f" Max: {max_rg_size:,} rows")

    # Store results for summary table
    results.append({
        'filename': file_path.name,
        'total_rows': total_rows,
        'num_row_groups': num_row_groups,
        'avg_row_group_size': int(avg_row_group_size),
        'min_row_group_size': min_rg_size,
        'max_row_group_size': max_rg_size,
        'file_size_mb': file_size / 1024 / 1024,
    })

# Summary table
print("\n" + "=" * 50)
print("SUMMARY TABLE")
print("=" * 50)

df = pd.DataFrame(results)
df = df.sort_values('avg_row_group_size')

print(df.to_string(index=False, float_format='%.2f'))

print(f"\nAnalyzed {len(parquet_files)} files in {parquet_dir}")
export_midi_files_test.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
"""
Export MIDI files with different row group sizes for performance testing.
Only exports MIDI files whose hash key starts with 'a'.

Reads MIDI blobs from a local DuckDB database and writes one Parquet file per
candidate row-group size into ``../parquet_row_group_eval``.
"""

import duckdb
import numpy as np
from pathlib import Path
import os

# Database path
db_path = "data/lakh_local.duckdb"
output_dir = Path("../parquet_row_group_eval")

# Create output directory
output_dir.mkdir(exist_ok=True)

# 4 row group sizes in logspace: 2^11..2^14, i.e. 2048, 4096, 8192, 16384
# (the original comment claimed "10 sizes from 1024", which did not match the code)
row_group_sizes = np.logspace(11, 14, base=2, num=4).astype(int)
print(f"Testing row group sizes: {row_group_sizes}")

# Connect read-only so the export cannot mutate the source database
conn = duckdb.connect(db_path, read_only=True)

# Query to get MIDI files starting with 'a'
query = """
SELECT
hm.midi_hk,
hm.midi_md5,
smf.file_content,
smf.file_size,
smf.load_date
FROM hub_midi_file hm
JOIN sat_midi_file smf ON hm.midi_hk = smf.midi_hk
WHERE hm.midi_hk LIKE 'a%'
ORDER BY hm.midi_hk
"""

try:
    print("Querying MIDI files...")
    result = conn.execute(query).fetchdf()
    print(f"Found {len(result)} MIDI files with hash keys starting with 'a'")

    num_exports = len(row_group_sizes)

    # Export with different row group sizes
    for i, row_group_size in enumerate(row_group_sizes, start=1):
        output_file = output_dir / f"midi_files_rowgroup_{row_group_size}.parquet"

        # progress total was previously hard-coded to 10 despite only
        # len(row_group_sizes) files being exported
        print(f"Exporting file {i}/{num_exports}: {output_file} (row_group_size={row_group_size})")

        # NOTE(review): output_file and row_group_size are interpolated into the
        # SQL text; both are generated locally (not untrusted input), so the
        # f-string COPY statement is acceptable here.
        conn.execute(f"""
COPY (
{query}
) TO '{output_file}'
(FORMAT PARQUET, ROW_GROUP_SIZE {row_group_size})
""")

        # Check file size
        file_size = os.path.getsize(output_file)
        print(f" File size: {file_size:,} bytes")

    print(f"\nAll files exported to {output_dir}/")

    # Show summary
    print("\nSummary of exported files:")
    for file in sorted(output_dir.glob("*.parquet")):
        size = os.path.getsize(file)
        print(f" {file.name}: {size:,} bytes")
finally:
    # Always release the connection, even if the export fails midway
    conn.close()
midi_files_rowgroup_16384.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4a48d08941393d7fae622de4ea7ff8680d384d3fd1d275103f896573e690a98b
3
+ size 174187062
midi_files_rowgroup_2048.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1916da1277760b10c077252e1a60ca9ef963c87895de8f0d8650519b2d95eedf
3
+ size 174494378
midi_files_rowgroup_4096.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4ab8dff27d6b3eb49ac8740e6c478d5739897c139d2f5470c55acab16f056c02
3
+ size 174790095
midi_files_rowgroup_8192.parquet ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89927aa74f399beb1f1b9783030616af982bd38d996392f744ae87ad7a0c057b
3
+ size 174357122