tcm03 committed on
Commit ·
b231d57
1
Parent(s): 1098248
A batching data loader is more general: use batch_size=1 when memory is limited
Browse files- preprocessing/main.py +26 -20
preprocessing/main.py
CHANGED
|
@@ -1,14 +1,19 @@
|
|
| 1 |
import os
|
|
|
|
|
|
|
| 2 |
import argparse
|
| 3 |
from typing import List, Dict
|
| 4 |
from mm_datautils import process_video_frames
|
| 5 |
from preprocessor import CambrianConfig, CambrianEncoders
|
| 6 |
import torch
|
| 7 |
from safetensors.torch import save_file
|
| 8 |
-
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 9 |
from collections import defaultdict
|
| 10 |
import logging
|
| 11 |
from multiprocessing import cpu_count
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
# Configure logging
|
| 14 |
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
@@ -57,26 +62,27 @@ if __name__ == "__main__":
|
|
| 57 |
|
| 58 |
cambrianConfig = CambrianConfig.from_json_file(args.config_file)
|
| 59 |
processor = CambrianEncoders(cambrianConfig)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
folder_paths: List[str] = args.folders
|
| 62 |
-
data_tensor =
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
with ThreadPoolExecutor(max_workers=optimal_workers) as executor:
|
| 68 |
-
# Submit all tasks upfront
|
| 69 |
-
futures = []
|
| 70 |
-
for folder_path in folder_paths:
|
| 71 |
-
file_names = os.listdir(folder_path)
|
| 72 |
-
for file_name in file_names:
|
| 73 |
-
file_path = os.path.join(folder_path, file_name)
|
| 74 |
-
futures.append(executor.submit(extract_features, processor, file_path, file_name))
|
| 75 |
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
|
|
|
| 81 |
|
| 82 |
-
save_file(dict(data_tensor), args.output_file)
|
|
|
|
| 1 |
import os
|
| 2 |
+
os.environ["HF_HOME"] = "D:\\hf_cache"
|
| 3 |
+
|
| 4 |
import argparse
|
| 5 |
from typing import List, Dict
|
| 6 |
from mm_datautils import process_video_frames
|
| 7 |
from preprocessor import CambrianConfig, CambrianEncoders
|
| 8 |
import torch
|
| 9 |
from safetensors.torch import save_file
|
|
|
|
| 10 |
from collections import defaultdict
|
| 11 |
import logging
|
| 12 |
from multiprocessing import cpu_count
|
| 13 |
+
from entube_dataset import EnTubeDataset
|
| 14 |
+
from torch.utils.data import Dataset, DataLoader
|
| 15 |
+
from transformers import BaseImageProcessor
|
| 16 |
+
|
| 17 |
|
| 18 |
# Configure logging
|
| 19 |
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
|
|
| 62 |
|
| 63 |
cambrianConfig = CambrianConfig.from_json_file(args.config_file)
|
| 64 |
processor = CambrianEncoders(cambrianConfig)
|
| 65 |
+
image_processors = []
|
| 66 |
+
if not processor.vision_tower_aux_list[0].is_loaded:
|
| 67 |
+
processor.vision_tower_aux_list[0].load_model()
|
| 68 |
+
image_processors.append(processor.vision_tower_aux_list[0].image_processor)
|
| 69 |
+
# for vision_tower_aux in processor.vision_tower_aux_list:
|
| 70 |
+
# if not vision_tower_aux.is_loaded:
|
| 71 |
+
# vision_tower_aux.load_model()
|
| 72 |
+
# image_processors.append(vision_tower_aux.image_processor)
|
| 73 |
|
| 74 |
folder_paths: List[str] = args.folders
|
| 75 |
+
data_tensor = dict()
|
| 76 |
+
|
| 77 |
+
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 78 |
+
entube_dataset = EnTubeDataset(folder_paths, image_processors, device)
|
| 79 |
+
dataloader = DataLoader(entube_dataset, batch_size=1)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 80 |
|
| 81 |
+
for batch_idx, (videos, image_sizes) in enumerate(dataloader):
|
| 82 |
+
print(f"Processing batch {batch_idx + 1}/{len(dataloader)}")
|
| 83 |
+
print(type(videos))
|
| 84 |
+
print(type(image_sizes))
|
| 85 |
+
break
|
| 86 |
+
|
| 87 |
|
| 88 |
+
save_file(dict(data_tensor), args.output_file)
|