File size: 7,303 Bytes
768b9af
 
 
 
 
 
 
 
 
 
 
ef6b863
768b9af
 
 
ef6b863
768b9af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ef6b863
 
768b9af
 
 
 
 
ef6b863
 
768b9af
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
#!/usr/bin/env python3
"""
Unpack Cloud4D dataset archives.

This script extracts all tar.gz archives to reconstruct the original
Cloud4D directory structure.

Usage:
    python unpack.py [--output /path/to/output] [--subset real_world|synthetic] [--jobs N]

Examples:
    # Extract everything to ./
    python unpack.py

    # Extract to a specific location
    python unpack.py --output /data/cloud4d

    # Extract only real_world data
    python unpack.py --subset real_world

    # Extract only synthetic data
    python unpack.py --subset synthetic

    # Extract a specific date-hour (real_world)
    python unpack.py --filter 20230705_10

    # Use parallel extraction
    python unpack.py --jobs 4
"""

import argparse
import shutil
import subprocess
import sys
import tarfile
from concurrent.futures import ProcessPoolExecutor, as_completed
from pathlib import Path


def extract_archive(archive_path, output_dir):
    """Extract a ``.tar.gz`` archive into *output_dir*.

    Tries three strategies in order and returns on the first success:

    1. ``pigz`` piped into ``tar`` -- parallel decompression, fastest on
       multi-core machines;
    2. the system ``tar -xzf``;
    3. Python's :mod:`tarfile` as a portable last resort.

    Args:
        archive_path: Path to the ``.tar.gz`` archive.
        output_dir: Existing directory to extract into.

    Returns:
        bool: True if any strategy extracted the archive, False otherwise.
    """
    archive_path = Path(archive_path)
    output_dir = Path(output_dir)

    # Strategy 1: pigz | tar. Built as an explicit Popen pipeline instead of
    # shell=True with interpolated paths, so filenames containing quotes,
    # spaces, or shell metacharacters cannot break (or inject into) the command.
    if shutil.which('pigz'):
        try:
            pigz = subprocess.Popen(
                ['pigz', '-dc', str(archive_path)], stdout=subprocess.PIPE)
            tar_proc = subprocess.run(
                ['tar', '-xf', '-', '-C', str(output_dir)], stdin=pigz.stdout)
            pigz.stdout.close()  # let pigz receive SIGPIPE if tar exits early
            if pigz.wait() == 0 and tar_proc.returncode == 0:
                return True
        except OSError:
            pass  # tar missing or unrunnable; fall through to next strategy

    # Strategy 2: plain system tar with built-in gzip support.
    # FileNotFoundError covers systems with no tar binary at all.
    try:
        subprocess.run(['tar', '-xzf', str(archive_path), '-C', str(output_dir)],
                       check=True)
        return True
    except (subprocess.CalledProcessError, FileNotFoundError):
        pass

    # Strategy 3: pure-Python fallback; slower but works everywhere.
    try:
        with tarfile.open(archive_path, 'r:gz') as tar:
            try:
                # filter='data' (Python 3.12+) rejects path-traversal and
                # other unsafe members (CVE-2007-4559 class of issues).
                tar.extractall(output_dir, filter='data')
            except TypeError:
                # Older Python without the `filter` parameter.
                tar.extractall(output_dir)
        return True
    except Exception as e:
        print(f"Error extracting {archive_path}: {e}")
        return False


def extract_single(args):
    """Worker for parallel extraction.

    Takes a single ``(archive_path, output_dir, name)`` tuple so it can be
    submitted directly to a ProcessPoolExecutor.

    Returns:
        tuple[str, str]: ``(name, status)`` where status is ``'extracted'``,
        ``'failed'``, or ``'error: <details>'`` if extraction raised.
    """
    archive_path, output_dir, name = args
    try:
        status = 'extracted' if extract_archive(archive_path, output_dir) else 'failed'
    except Exception as exc:
        status = f'error: {exc}'
    return (name, status)


def main():
    """CLI entry point.

    Discovers ``.tar.gz`` archives in ``real_world/`` and ``synthetic/``
    directories next to this script, optionally filters them, then either
    lists them (``--list``) or extracts them (serially or in parallel)
    into ``--output``, recreating the ``real_world``/``synthetic`` layout.
    """
    parser = argparse.ArgumentParser(
        description='Unpack Cloud4D dataset archives',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
    python unpack.py                           # Extract all to ./
    python unpack.py --output /data/cloud4D    # Extract to specific location
    python unpack.py --subset real_world       # Extract only real_world
    python unpack.py --filter 20230705         # Extract matching archives
    python unpack.py --jobs 4                  # Parallel extraction
        """
    )
    parser.add_argument('--output', '-o', type=Path, default=Path('./'),
                        help='Output directory (default: ./)')
    parser.add_argument('--subset', choices=['real_world', 'synthetic'],
                        help='Extract only a specific subset')
    parser.add_argument('--filter', type=str,
                        help='Filter archives by name (e.g., "20230705" for a specific date)')
    parser.add_argument('--jobs', '-j', type=int, default=1,
                        help='Number of parallel extraction jobs (default: 1)')
    parser.add_argument('--list', '-l', action='store_true',
                        help='List available archives without extracting')
    args = parser.parse_args()

    # Archives live next to this script, one directory per subset.
    script_dir = Path(__file__).parent.resolve()

    # Collect (subset, archive) pairs. Order is deterministic: real_world
    # first, then synthetic, each sorted by filename. A substring match on
    # the filename implements --filter.
    archives = []
    for subset in ('real_world', 'synthetic'):
        subset_dir = script_dir / subset
        if subset_dir.exists() and args.subset in (None, subset):
            for archive in sorted(subset_dir.glob('*.tar.gz')):
                if args.filter is None or args.filter in archive.name:
                    archives.append((subset, archive))

    if not archives:
        print("No archives found matching the criteria.")
        print(f"Searched in: {script_dir}")
        sys.exit(1)

    # --list: report names and sizes, grouped by subset, and exit.
    if args.list:
        print("Available archives:")
        print()
        current_subset = None
        for subset, archive in archives:
            if subset != current_subset:
                print(f"  {subset}/")
                current_subset = subset
            size_mb = archive.stat().st_size / 1024 / 1024
            print(f"    {archive.name} ({size_mb:.1f} MB)")
        print()
        total_size = sum(a.stat().st_size for _, a in archives) / 1024 / 1024 / 1024
        print(f"Total: {len(archives)} archives, {total_size:.2f} GB")
        return

    # Extract mode
    output_dir = args.output.resolve()

    print("=" * 70)
    print("Cloud4D Dataset Unpacker")
    print("=" * 70)
    print(f"Output directory: {output_dir}")
    print(f"Archives to extract: {len(archives)}")
    if args.subset:
        print(f"Subset: {args.subset}")
    if args.filter:
        print(f"Filter: {args.filter}")
    print()

    # Both subset directories are created regardless of what was requested.
    (output_dir / 'real_world').mkdir(parents=True, exist_ok=True)
    (output_dir / 'synthetic').mkdir(parents=True, exist_ok=True)

    # One (archive, target_dir, display_name) task per archive.
    # NOTE: .stem on "x.tar.gz" yields "x.tar" -- the display name keeps a
    # trailing ".tar"; this is cosmetic only.
    tasks = []
    for subset, archive in archives:
        target_dir = output_dir / subset
        name = f"{subset}/{archive.stem}"
        tasks.append((archive, target_dir, name))

    # Extract: process pool when --jobs > 1 (completion order), else serial.
    print("Extracting archives...")
    results = []

    if args.jobs > 1:
        with ProcessPoolExecutor(max_workers=args.jobs) as executor:
            futures = {executor.submit(extract_single, task): task[2] for task in tasks}
            for future in as_completed(futures):
                name, status = future.result()
                results.append((name, status))
                print(f"  [{status.upper()}] {name}")
    else:
        for task in tasks:
            name, status = extract_single(task)
            results.append((name, status))
            print(f"  [{status.upper()}] {name}")

    # Summary: anything other than 'extracted' counts as a failure.
    print()
    print("=" * 70)
    print("EXTRACTION COMPLETE")
    print("=" * 70)

    extracted = sum(1 for _, s in results if s == 'extracted')
    failed = sum(1 for _, s in results if s != 'extracted')

    print(f"Successfully extracted: {extracted}")
    if failed:
        print(f"Failed: {failed}")
    print()
    print(f"Dataset extracted to: {output_dir}")
    print()
    print("Directory structure:")
    print(f"  {output_dir}/")
    print(f"    real_world/")
    print(f"      20230705_10/")
    print(f"        perspective_1/")
    print(f"        perspective_2/")
    print(f"        perspective_3/")
    print(f"      ... (more date-hour folders)")
    print(f"    synthetic/")
    print(f"      terragen/")
    print(f"      large_eddy_simulations/")

# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()