tcm03
committed on
Commit
·
ddef400
1
Parent(s):
f834242
Debug dataloader
Browse files
- preprocessing/main.py +1 -1
- preprocessing/mm_datautils.py +7 -18
preprocessing/main.py
CHANGED
|
@@ -76,7 +76,7 @@ if __name__ == "__main__":
|
|
| 76 |
|
| 77 |
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 78 |
entube_dataset = EnTubeDataset(folder_paths, image_processors, device)
|
| 79 |
-
dataloader = DataLoader(entube_dataset, batch_size=
|
| 80 |
|
| 81 |
for batch_idx, (videos, image_sizes) in enumerate(dataloader):
|
| 82 |
print(f"Processing batch {batch_idx + 1}/{len(dataloader)}")
|
|
|
|
| 76 |
|
| 77 |
device = 'cuda' if torch.cuda.is_available() else 'cpu'
|
| 78 |
entube_dataset = EnTubeDataset(folder_paths, image_processors, device)
|
| 79 |
+
dataloader = DataLoader(entube_dataset, batch_size=2)
|
| 80 |
|
| 81 |
for batch_idx, (videos, image_sizes) in enumerate(dataloader):
|
| 82 |
print(f"Processing batch {batch_idx + 1}/{len(dataloader)}")
|
preprocessing/mm_datautils.py
CHANGED
|
@@ -33,7 +33,7 @@ def process_images(
|
|
| 33 |
image = Image.fromarray(image)
|
| 34 |
image_aux_list = []
|
| 35 |
for processor_aux in processor_aux_list:
|
| 36 |
-
image_aux = image
|
| 37 |
if hasattr(processor_aux, "image_mean"):
|
| 38 |
try:
|
| 39 |
target_resolution = processor_aux.crop_size["height"]
|
|
@@ -47,25 +47,14 @@ def process_images(
|
|
| 47 |
][0]
|
| 48 |
# image_aux.shape: torch.Size([3, 384, 384])
|
| 49 |
image_aux_list.append(image_aux)
|
| 50 |
-
new_images_aux_list.append(image_aux_list)
|
| 51 |
new_images_aux_list = [
|
| 52 |
list(batch_image_aux) for batch_image_aux in zip(*new_images_aux_list)
|
| 53 |
-
]
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
#
|
| 57 |
-
|
| 58 |
-
# ]
|
| 59 |
-
tmp_images_aux_list = []
|
| 60 |
-
for image_aux in new_images_aux_list:
|
| 61 |
-
if isinstance(image_aux, list):
|
| 62 |
-
print(f'@tcm: len(image_aux): {len(image_aux)}')
|
| 63 |
-
for i, img_aux in enumerate(image_aux):
|
| 64 |
-
# img_aux.shape: torch.Size([3, 384, 384])
|
| 65 |
-
pass
|
| 66 |
-
tmp_images_aux_list.append(torch.stack(image_aux).half().to(device))
|
| 67 |
-
new_images_aux_list = tmp_images_aux_list
|
| 68 |
-
return new_images_aux_list
|
| 69 |
else:
|
| 70 |
image_aspect_ratio = "pad"
|
| 71 |
new_images = []
|
|
|
|
| 33 |
image = Image.fromarray(image)
|
| 34 |
image_aux_list = []
|
| 35 |
for processor_aux in processor_aux_list:
|
| 36 |
+
image_aux = image # PIL.Image
|
| 37 |
if hasattr(processor_aux, "image_mean"):
|
| 38 |
try:
|
| 39 |
target_resolution = processor_aux.crop_size["height"]
|
|
|
|
| 47 |
][0]
|
| 48 |
# image_aux.shape: torch.Size([3, 384, 384])
|
| 49 |
image_aux_list.append(image_aux)
|
| 50 |
+
new_images_aux_list.append(image_aux_list) # torch.Tensor(C, H, W) new_images_aux_list[num_frames][num_processor]
|
| 51 |
new_images_aux_list = [
|
| 52 |
list(batch_image_aux) for batch_image_aux in zip(*new_images_aux_list)
|
| 53 |
+
] # torch.Tensor(C, H, W) new_images_aux_list[num_processor][num_frames]
|
| 54 |
+
new_images_aux_list = [
|
| 55 |
+
torch.stack(image_aux).half().to(device) for image_aux in new_images_aux_list
|
| 56 |
+
] # torch.Tensor(num_frames, C, H, W) new_images_aux_list[num_processor]
|
| 57 |
+
return new_images_aux_list
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 58 |
else:
|
| 59 |
image_aspect_ratio = "pad"
|
| 60 |
new_images = []
|