|
|
from __future__ import print_function |
|
|
import h5py |
|
|
import os |
|
|
import numpy as np |
|
|
import glob |
|
|
import zlib |
|
|
|
|
|
|
|
|
# ---- Configuration -------------------------------------------------------
# Which dataset split to convert; drives both the input and output layout.
SPLIT = "training"

# Directory containing the source .npz files for this split.
INPUT_DIR = "task_ABC_D/{}/".format(SPLIT)

# Directory the batched HDF5 files are written to.
OUTPUT_DIR = "h5_task_ABC_D/{}/".format(SPLIT)

# Checkpoint file recording the index of the last converted .npz file.
PROGRESS_FILE = "train_progress.txt"

# Number of .npz files bundled into a single HDF5 file.
TOTAL_FILES_PER_H5 = 100000


# Ensure the output directory exists (EAFP so it is safe under Python 2,
# which has no makedirs(exist_ok=True)).
try:
    os.makedirs(OUTPUT_DIR)
except OSError:
    if os.path.isdir(OUTPUT_DIR):
        pass  # Already present — nothing to do.
    else:
        raise  # Genuine failure (e.g. permissions, path is a file).
|
|
|
|
|
def get_start_index(progress_file=None):
    """Return the file index to resume processing from.

    Reads the integer saved in the progress file. Returns 0 when the file
    does not exist, cannot be read, or does not contain a valid integer —
    a corrupt checkpoint simply restarts the conversion from the beginning
    instead of crashing (the original raised ValueError on garbage input).

    Args:
        progress_file: Path of the progress file. Defaults to the
            module-level PROGRESS_FILE.

    Returns:
        int: The last saved index, or 0 when no valid progress exists.
    """
    if progress_file is None:
        progress_file = PROGRESS_FILE
    try:
        with open(progress_file, "r") as f:
            return int(f.read().strip())
    except (IOError, OSError, ValueError):
        # Missing, unreadable, or corrupt checkpoint: start from scratch.
        return 0
|
|
|
|
|
def save_progress(index, progress_file=None):
    """Persist the current file index so an interrupted run can resume.

    Args:
        index: Index of the most recently processed file.
        progress_file: Path of the progress file. Defaults to the
            module-level PROGRESS_FILE.
    """
    if progress_file is None:
        progress_file = PROGRESS_FILE
    with open(progress_file, "w") as f:
        f.write(str(index))
|
|
|
|
|
def delete_progress_file(progress_file=None):
    """Remove the progress checkpoint after a successful full run.

    A missing file is a no-op, so calling this twice is harmless.

    Args:
        progress_file: Path of the progress file. Defaults to the
            module-level PROGRESS_FILE.
    """
    if progress_file is None:
        progress_file = PROGRESS_FILE
    if os.path.exists(progress_file):
        os.remove(progress_file)
|
|
|
|
|
def get_scene_no(filepath):
    """Extract the scene identifier from a .npz file path.

    Filenames are expected to look like "<prefix>_<scene>.<ext>"; the part
    after the last underscore, with the extension stripped, is returned as a
    string (preserving any zero padding). Uses os.path.splitext so a scene
    identifier containing dots is kept intact — the previous
    ``split('.')[0]`` truncated it at the first dot.

    Args:
        filepath: Path of the .npz file.

    Returns:
        str: The scene identifier.
    """
    stem = os.path.splitext(os.path.basename(filepath))[0]
    return stem.split('_')[-1]
|
|
|
|
|
def process_npz_files():
    """Convert the .npz files under INPUT_DIR into batched HDF5 files.

    Files are processed in sorted order, TOTAL_FILES_PER_H5 per HDF5 file.
    Each output file is named "<SPLIT>_<scene_no>.h5" after the first file
    in its batch. Progress is checkpointed after every file so an
    interrupted run resumes from the last recorded index; the checkpoint is
    deleted only after the whole split has been converted.
    """
    sorted_files = sorted(glob.glob(os.path.join(INPUT_DIR, "*.npz")))
    start_idx = get_start_index()
    current_h5 = None

    try:
        if start_idx < len(sorted_files):
            # When resuming mid-batch, reopen the HDF5 file that the batch
            # containing start_idx belongs to. It is named after the first
            # file of the batch, matching how it was named originally.
            batch_start = (start_idx // TOTAL_FILES_PER_H5) * TOTAL_FILES_PER_H5
            scene_no = get_scene_no(sorted_files[batch_start])
            current_h5_file = os.path.join(OUTPUT_DIR, "{}_{}.h5".format(SPLIT, scene_no))
            current_h5 = h5py.File(current_h5_file, "a")
            print("Resuming with HDF5 file: {}".format(current_h5_file))

        for i in range(start_idx, len(sorted_files)):
            filepath = sorted_files[i]
            scene_no = get_scene_no(filepath)

            # Batch boundary: close the previous HDF5 file (if any) and
            # start a new one named after this batch's first scene.
            if i % TOTAL_FILES_PER_H5 == 0:
                if current_h5 is not None:
                    current_h5.close()
                current_h5_file = os.path.join(OUTPUT_DIR, "{}_{}.h5".format(SPLIT, scene_no))
                current_h5 = h5py.File(current_h5_file, "a")
                print("Processing scene: {}".format(scene_no))

            save_npz_to_h5(filepath, current_h5)

            # Checkpoint the index of the file just processed. On resume
            # this file is re-processed once; save_npz_to_h5 overwrites its
            # group, so the repeat is safe.
            save_progress(i)
    finally:
        # Always release the open HDF5 handle, even if conversion fails
        # midway (the original leaked it on any exception in the loop).
        if current_h5 is not None:
            current_h5.close()

    delete_progress_file()
    print("Processing completed!")
|
|
|
|
|
def save_npz_to_h5(filepath, h5_file):
    """Load one .npz archive and store its arrays as an HDF5 group.

    The group is named after the file's basename with the extension
    stripped (via os.path.splitext, so dots inside the stem are preserved);
    an existing group with that name is replaced, which makes reprocessing
    after a resume safe. Each array in the archive becomes an
    LZF-compressed dataset. Archives whose payload fails to decompress are
    skipped: the partial group is removed and the path is appended to
    "skipped_files.log".

    Args:
        filepath: Path of the .npz archive to convert.
        h5_file: An open, writable h5py.File to store the data in.
    """
    group_name = os.path.splitext(os.path.basename(filepath))[0]

    # Re-processing the same file (e.g. after resuming) replaces its group.
    if group_name in h5_file:
        del h5_file[group_name]
        print("Overwriting existing group: {}".format(group_name))

    file_group = h5_file.create_group(group_name)
    npz_data = np.load(filepath)
    try:
        try:
            # Force decompression of every array up front so corrupt
            # payloads are detected before any dataset is written.
            data_dict = {key: npz_data[key] for key in npz_data.files}
        except zlib.error as e:
            print("Error decompressing data in file '{}': {}".format(filepath, e))

            # Drop the just-created group and record the file for later
            # inspection instead of aborting the whole run.
            del h5_file[group_name]
            with open("skipped_files.log", "a") as log_file:
                log_file.write("{}\n".format(filepath))
            return

        for key, data in data_dict.items():
            file_group.create_dataset(key, data=data, compression="lzf")
    finally:
        # Close the archive on every path — the original leaked the handle
        # if create_dataset raised.
        npz_data.close()
|
|
|
|
|
# Script entry point: convert every .npz file for the configured SPLIT.
if __name__ == "__main__":
    process_npz_files()