File size: 4,405 Bytes
93dd1fe
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
from __future__ import print_function  # For Python 2.7 compatibility with print()

import errno
import glob
import os
import zlib

import h5py
import numpy as np

# Paths / configuration. SPLIT selects which dataset split ("training" here)
# is converted; input .npz files are read from INPUT_DIR and batched HDF5
# output is written to OUTPUT_DIR.
SPLIT = "training"
INPUT_DIR = "task_ABC_D/{}/".format(SPLIT)
OUTPUT_DIR = "h5_task_ABC_D/{}/".format(SPLIT)
PROGRESS_FILE = "train_progress.txt"  # checkpoint file enabling crash resume
TOTAL_FILES_PER_H5 = int(1e5)  # Ensure integer (1e5 is a float literal)

# Create output directory if it doesn't exist.
# Python 2.7-compatible equivalent of os.makedirs(..., exist_ok=True):
# only re-raise when the failure is NOT "directory already exists".
try:
    os.makedirs(OUTPUT_DIR)
except OSError:
    if not os.path.isdir(OUTPUT_DIR):
        raise

def get_start_index():
    """Return the file index to resume processing from.

    Reads the last checkpoint from PROGRESS_FILE. Returns 0 when the file
    does not exist, and also when its contents are not a valid integer
    (e.g. an empty or truncated file left behind by an interrupted write)
    -- restarting from 0 is safe because groups are overwritten
    idempotently, whereas the previous behavior crashed with ValueError.
    """
    if os.path.exists(PROGRESS_FILE):
        with open(PROGRESS_FILE, "r") as f:
            content = f.read().strip()
        try:
            return int(content)
        except ValueError:
            print("Warning: corrupt progress file '{}'; restarting from 0.".format(PROGRESS_FILE))
            return 0
    return 0

def save_progress(index):
    """Persist *index* to PROGRESS_FILE so an interrupted run can resume."""
    with open(PROGRESS_FILE, "w") as progress_file:
        progress_file.write("{}".format(index))

def delete_progress_file():
    """Delete the progress file after successful completion.

    Uses EAFP (attempt the removal, tolerate "no such file") instead of an
    exists()-then-remove() pair, which is race-prone: the file could vanish
    between the check and the removal. Any OS error other than a missing
    file is re-raised.
    """
    try:
        os.remove(PROGRESS_FILE)
    except OSError as err:
        # ENOENT: file was never created or is already gone -- nothing to do.
        if err.errno != errno.ENOENT:
            raise

def get_scene_no(filepath):
    """Return the scene number embedded in *filepath*.

    For a name like "calvin_scene_000.npz" this yields "000": the token
    after the last underscore, with everything from the first dot onward
    stripped off.
    """
    stem = os.path.basename(filepath)
    last_token = stem.rsplit('_', 1)[-1]
    return last_token.split('.', 1)[0]

def process_npz_files():
    """Convert every .npz file under INPUT_DIR into batched HDF5 files.

    Files are processed in sorted order, TOTAL_FILES_PER_H5 per output
    HDF5 file; each output file is named after the scene number of the
    first file in its batch. Progress is checkpointed after every file so
    an interrupted run resumes exactly where it left off.
    """
    sorted_files = sorted(glob.glob(os.path.join(INPUT_DIR, "*.npz")))
    start_idx = get_start_index()
    current_h5 = None

    # When resuming mid-batch, reopen the HDF5 file that batch writes to
    # ("a" mode appends to the existing file).
    if start_idx < len(sorted_files):
        # Index of the first file in the batch containing start_idx.
        batch_start = (start_idx // TOTAL_FILES_PER_H5) * TOTAL_FILES_PER_H5
        scene_no = get_scene_no(sorted_files[batch_start])
        current_h5_file = os.path.join(OUTPUT_DIR, "{}_{}.h5".format(SPLIT, scene_no))
        current_h5 = h5py.File(current_h5_file, "a")
        print("Resuming with HDF5 file: {}".format(current_h5_file))

    for i in range(start_idx, len(sorted_files)):
        filepath = sorted_files[i]
        scene_no = get_scene_no(filepath)

        # At each batch boundary, close the previous output file and start
        # a new one named after this batch's first scene.
        if i % TOTAL_FILES_PER_H5 == 0:
            if current_h5 is not None:
                current_h5.close()
            current_h5_file = os.path.join(OUTPUT_DIR, "{}_{}.h5".format(SPLIT, scene_no))
            current_h5 = h5py.File(current_h5_file, "a")
            print("Processing scene: {}".format(scene_no))

        # Convert this .npz into a group of the current HDF5 file.
        save_npz_to_h5(filepath, current_h5)

        # Checkpoint AFTER file i is fully written: the next run must resume
        # at i + 1. (Saving i, as before, re-processed the last completed
        # file on every resume.)
        save_progress(i + 1)

    # Close the last opened HDF5 file.
    if current_h5 is not None:
        current_h5.close()

    # Everything succeeded -- the checkpoint is no longer needed.
    delete_progress_file()
    print("Processing completed!")

def save_npz_to_h5(filepath, h5_file):
    """Load one .npz file and store its arrays in *h5_file*.

    Each .npz becomes one HDF5 group named after the file's basename
    (without extension); each array key becomes an lzf-compressed dataset.
    An existing group of the same name is replaced -- but only after the
    .npz has loaded successfully, so corrupt input never destroys data
    already present in the HDF5 file. Unreadable files are appended to
    "skipped_files.log" and skipped.
    """
    # Full filename (minus extension) keeps group names unique per file.
    group_name = os.path.basename(filepath).split('.')[0]

    # Fully materialize the arrays BEFORE touching the HDF5 file, so a
    # read/decompression failure leaves h5_file untouched. The context
    # manager guarantees the npz archive is closed even if loading raises
    # (the old code leaked the handle on any non-zlib exception, and
    # deleted the pre-existing group before knowing the load would work).
    data_dict = {}
    try:
        with np.load(filepath) as npz_data:
            for key in npz_data.files:
                data_dict[key] = npz_data[key]  # decompresses the array
    except (zlib.error, IOError, ValueError) as e:
        # zlib.error: corrupt compressed stream; IOError/ValueError: other
        # read/parse failures np.load raises on damaged files.
        print("Error decompressing data in file '{}': {}".format(filepath, e))
        # Log the skipped file for later inspection.
        with open("skipped_files.log", "a") as log_file:
            log_file.write("{}\n".format(filepath))
        return  # Skip this file entirely

    # All data loaded successfully -- now it is safe to replace the group.
    if group_name in h5_file:
        del h5_file[group_name]
        print("Overwriting existing group: {}".format(group_name))

    file_group = h5_file.create_group(group_name)
    for key, data in data_dict.items():
        file_group.create_dataset(key, data=data, compression="lzf")

# Script entry point: run the full conversion for the configured SPLIT
# when executed directly (no side effects on import).
if __name__ == "__main__":
    process_npz_files()