tcm03
committed on
Commit
·
cd45874
1
Parent(s):
a0c9d15
Update code
Browse files- .gitignore +2 -0
- preprocessing/dataset.py +30 -0
- preprocessing/main.py +21 -19
- preprocessing/mm_datautils.py +15 -5
- preprocessing/preprocessor.py +1 -1
- requirements.txt +3 -1
.gitignore
ADDED
|
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
|
|
|
| 1 |
+
preprocessing/__pycache__/
|
| 2 |
+
preprocessing/vision_encoders/__pycache__/
|
preprocessing/dataset.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from torch.utils.data import Dataset
|
| 2 |
+
from typing import List
|
| 3 |
+
import os
|
| 4 |
+
from mm_datautils import process_video_frames
|
| 5 |
+
from transformers import BaseImageProcessor
|
| 6 |
+
|
| 7 |
+
class EnTubeDataset(Dataset):
    """Dataset of eagerly preprocessed videos loaded from a list of folders.

    At construction time every file found in *folder_paths* is treated as a
    video and run through ``process_video_frames``; the resulting frame
    tensors and original image sizes are cached in memory, so indexing is
    cheap but __init__ is expensive and memory-heavy for large folders.
    """

    def __init__(
        self,
        folder_paths: List[str],
        image_processor: List["BaseImageProcessor"],
        device: str,
    ) -> None:
        """Walk each folder and preprocess every file it contains.

        Args:
            folder_paths: directories whose files are all assumed to be
                readable video files (no extension filtering is done —
                TODO confirm folders contain only videos).
            image_processor: per-vision-tower image processors forwarded
                to ``process_video_frames``.
            device: torch device string the processed tensors are placed on.
        """
        self.videos = []       # per-video list of processed frame tensors
        self.image_sizes = []  # per-video original (height, width)
        self.device = device
        for folder_path in folder_paths:
            file_names = os.listdir(folder_path)
            for file_name in file_names:
                file_path = os.path.join(folder_path, file_name)
                video, image_size = process_video_frames(file_path, image_processor, device)
                self.videos.append(video)
                self.image_sizes.append(image_size)

    def __len__(self) -> int:
        # Bug fix: the original was ``def __len__():`` (missing ``self``),
        # which makes ``len(dataset)`` raise TypeError.
        return len(self.image_sizes)

    def __getitem__(self, idx):
        # Bug fix: the original was ``def __getitem__(idx):`` (missing
        # ``self``), which breaks indexing and DataLoader iteration.
        return self.videos[idx], self.image_sizes[idx]
|
preprocessing/main.py
CHANGED
|
@@ -1,14 +1,19 @@
|
|
| 1 |
import os
|
|
|
|
|
|
|
| 2 |
import argparse
|
| 3 |
from typing import List, Dict
|
| 4 |
from mm_datautils import process_video_frames
|
| 5 |
from preprocessor import CambrianConfig, CambrianEncoders
|
| 6 |
import torch
|
| 7 |
from safetensors.torch import save_file
|
| 8 |
-
from concurrent.futures import ThreadPoolExecutor, as_completed
|
| 9 |
from collections import defaultdict
|
| 10 |
import logging
|
| 11 |
from multiprocessing import cpu_count
|
|
|
|
|
|
|
|
|
|
|
|
|
| 12 |
|
| 13 |
# Configure logging
|
| 14 |
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
@@ -57,26 +62,23 @@ if __name__ == "__main__":
|
|
| 57 |
|
| 58 |
cambrianConfig = CambrianConfig.from_json_file(args.config_file)
|
| 59 |
processor = CambrianEncoders(cambrianConfig)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
folder_paths: List[str] = args.folders
|
| 62 |
-
data_tensor =
|
| 63 |
-
|
| 64 |
-
|
| 65 |
-
|
| 66 |
-
|
| 67 |
-
with ThreadPoolExecutor(max_workers=optimal_workers) as executor:
|
| 68 |
-
# Submit all tasks upfront
|
| 69 |
-
futures = []
|
| 70 |
-
for folder_path in folder_paths:
|
| 71 |
-
file_names = os.listdir(folder_path)
|
| 72 |
-
for file_name in file_names:
|
| 73 |
-
file_path = os.path.join(folder_path, file_name)
|
| 74 |
-
futures.append(executor.submit(extract_features, processor, file_path, file_name))
|
| 75 |
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
|
| 82 |
save_file(dict(data_tensor), args.output_file)
|
|
|
|
| 1 |
import os
|
| 2 |
+
os.environ["HF_HOME"] = "D:\\hf_cache"
|
| 3 |
+
|
| 4 |
import argparse
|
| 5 |
from typing import List, Dict
|
| 6 |
from mm_datautils import process_video_frames
|
| 7 |
from preprocessor import CambrianConfig, CambrianEncoders
|
| 8 |
import torch
|
| 9 |
from safetensors.torch import save_file
|
|
|
|
| 10 |
from collections import defaultdict
|
| 11 |
import logging
|
| 12 |
from multiprocessing import cpu_count
|
| 13 |
+
from dataset import EnTubeDataset
|
| 14 |
+
from torch.utils.data import Dataset, DataLoader
|
| 15 |
+
from transformers import BaseImageProcessor
|
| 16 |
+
|
| 17 |
|
| 18 |
# Configure logging
|
| 19 |
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
|
|
|
|
| 62 |
|
| 63 |
cambrianConfig = CambrianConfig.from_json_file(args.config_file)
|
| 64 |
processor = CambrianEncoders(cambrianConfig)
|
| 65 |
+
image_processors = []
|
| 66 |
+
for vision_tower_aux in processor.vision_tower_aux_list:
|
| 67 |
+
if not vision_tower_aux.is_loaded:
|
| 68 |
+
vision_tower_aux.load_model()
|
| 69 |
+
image_processors.append(vision_tower_aux.image_processor)
|
| 70 |
|
| 71 |
folder_paths: List[str] = args.folders
|
| 72 |
+
data_tensor = dict()
|
| 73 |
+
|
| 74 |
+
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 75 |
+
entube_dataset = EnTubeDataset(folder_paths, image_processors, device)
|
| 76 |
+
dataloader = DataLoader(entube_dataset, batch_size=4)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 77 |
|
| 78 |
+
for batch_idx, (videos, image_sizes) in enumerate(dataloader):
|
| 79 |
+
print(f"Processing batch {batch_idx + 1}/{len(dataloader)}")
|
| 80 |
+
print(type(videos))
|
| 81 |
+
print(type(image_sizes))
|
| 82 |
+
|
| 83 |
|
| 84 |
save_file(dict(data_tensor), args.output_file)
|
preprocessing/mm_datautils.py
CHANGED
|
@@ -2,6 +2,8 @@ import numpy as np
|
|
| 2 |
from PIL import Image
|
| 3 |
import torch
|
| 4 |
from decord import cpu, VideoReader
|
|
|
|
|
|
|
| 5 |
|
| 6 |
def expand2square(pil_img, background_color):
|
| 7 |
width, height = pil_img.size
|
|
@@ -16,7 +18,11 @@ def expand2square(pil_img, background_color):
|
|
| 16 |
result.paste(pil_img, ((height - width) // 2, 0))
|
| 17 |
return result
|
| 18 |
|
| 19 |
-
def process_images(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
if isinstance(image_processor, list):
|
| 21 |
processor_aux_list = image_processor
|
| 22 |
new_images_aux_list = []
|
|
@@ -43,11 +49,11 @@ def process_images(images, image_processor, model_cfg):
|
|
| 43 |
list(batch_image_aux) for batch_image_aux in zip(*new_images_aux_list)
|
| 44 |
]
|
| 45 |
new_images_aux_list = [
|
| 46 |
-
torch.stack(image_aux).half().
|
| 47 |
]
|
| 48 |
return new_images_aux_list
|
| 49 |
else:
|
| 50 |
-
image_aspect_ratio =
|
| 51 |
new_images = []
|
| 52 |
if image_aspect_ratio == "pad":
|
| 53 |
for image in images:
|
|
@@ -64,7 +70,11 @@ def process_images(images, image_processor, model_cfg):
|
|
| 64 |
new_images = torch.stack(new_images, dim=0)
|
| 65 |
return new_images
|
| 66 |
|
| 67 |
-
def process_video_frames(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 68 |
vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
|
| 69 |
fps = float(vr.get_avg_fps())
|
| 70 |
frame_indices = np.array([i for i in range(0, len(vr), round(fps),)])
|
|
@@ -74,6 +84,6 @@ def process_video_frames(video_path: str):
|
|
| 74 |
video.append(img)
|
| 75 |
video = np.stack(video)
|
| 76 |
image_sizes = [video[0].shape[:2]]
|
| 77 |
-
video = process_images(video, image_processor,
|
| 78 |
video = [item.unsqueeze(0) for item in video]
|
| 79 |
return video, image_sizes
|
|
|
|
| 2 |
from PIL import Image
|
| 3 |
import torch
|
| 4 |
from decord import cpu, VideoReader
|
| 5 |
+
from transformers import BaseImageProcessor
|
| 6 |
+
from typing import List, Union, Tuple
|
| 7 |
|
| 8 |
def expand2square(pil_img, background_color):
|
| 9 |
width, height = pil_img.size
|
|
|
|
| 18 |
result.paste(pil_img, ((height - width) // 2, 0))
|
| 19 |
return result
|
| 20 |
|
| 21 |
+
def process_images(
|
| 22 |
+
images: torch.Tensor,
|
| 23 |
+
image_processor: List[BaseImageProcessor],
|
| 24 |
+
device: str
|
| 25 |
+
) -> Union[torch.Tensor, List[torch.Tensor]]:
|
| 26 |
if isinstance(image_processor, list):
|
| 27 |
processor_aux_list = image_processor
|
| 28 |
new_images_aux_list = []
|
|
|
|
| 49 |
list(batch_image_aux) for batch_image_aux in zip(*new_images_aux_list)
|
| 50 |
]
|
| 51 |
new_images_aux_list = [
|
| 52 |
+
torch.stack(image_aux).half().to(device) for image_aux in new_images_aux_list
|
| 53 |
]
|
| 54 |
return new_images_aux_list
|
| 55 |
else:
|
| 56 |
+
image_aspect_ratio = "pad"
|
| 57 |
new_images = []
|
| 58 |
if image_aspect_ratio == "pad":
|
| 59 |
for image in images:
|
|
|
|
| 70 |
new_images = torch.stack(new_images, dim=0)
|
| 71 |
return new_images
|
| 72 |
|
| 73 |
+
def process_video_frames(
|
| 74 |
+
video_path: str,
|
| 75 |
+
image_processor: List[BaseImageProcessor],
|
| 76 |
+
device: str
|
| 77 |
+
) -> Tuple[List[torch.Tensor], List[Tuple[int, int]]]:
|
| 78 |
vr = VideoReader(video_path, ctx=cpu(0), num_threads=1)
|
| 79 |
fps = float(vr.get_avg_fps())
|
| 80 |
frame_indices = np.array([i for i in range(0, len(vr), round(fps),)])
|
|
|
|
| 84 |
video.append(img)
|
| 85 |
video = np.stack(video)
|
| 86 |
image_sizes = [video[0].shape[:2]]
|
| 87 |
+
video = process_images(video, image_processor, device)
|
| 88 |
video = [item.unsqueeze(0) for item in video]
|
| 89 |
return video, image_sizes
|
preprocessing/preprocessor.py
CHANGED
|
@@ -1,9 +1,9 @@
|
|
| 1 |
from vision_encoders.builder import build_vision_tower_aux_list
|
| 2 |
from transformers import Qwen2Config
|
| 3 |
-
from mm_datautils import process_video_frames
|
| 4 |
from typing import Optional, List, Tuple
|
| 5 |
import torch
|
| 6 |
import json
|
|
|
|
| 7 |
|
| 8 |
class CambrianConfig(Qwen2Config):
|
| 9 |
model_type = "cambrian_qwen"
|
|
|
|
| 1 |
from vision_encoders.builder import build_vision_tower_aux_list
|
| 2 |
from transformers import Qwen2Config
|
|
|
|
| 3 |
from typing import Optional, List, Tuple
|
| 4 |
import torch
|
| 5 |
import json
|
| 6 |
+
from transformers import BaseImageProcessor
|
| 7 |
|
| 8 |
class CambrianConfig(Qwen2Config):
|
| 9 |
model_type = "cambrian_qwen"
|
requirements.txt
CHANGED
|
@@ -4,4 +4,6 @@ decord
|
|
| 4 |
torch==2.1.2
|
| 5 |
torchvision
|
| 6 |
transformers==4.42.4
|
| 7 |
-
safetensors
|
|
|
|
|
|
|
|
|
| 4 |
torch==2.1.2
|
| 5 |
torchvision
|
| 6 |
transformers==4.42.4
|
| 7 |
+
safetensors
|
| 8 |
+
pillow
|
| 9 |
+
huggingface_hub
|