tcm03 committed on
Commit
040ba84
·
1 Parent(s): 597f824

Debugging learn process_images

Browse files
preprocessing/entube_dataset.py CHANGED
@@ -13,7 +13,6 @@ class EnTubeDataset(Dataset):
13
  image_processor: List[BaseImageProcessor],
14
  device: str
15
  ) -> None:
16
- print(f'@tcm: In EnTubeDataset.__init__(): start')
17
  self.videos = []
18
  self.image_sizes = []
19
  self.device = device
@@ -23,12 +22,9 @@ class EnTubeDataset(Dataset):
23
  for file_name in file_names:
24
  file_path = os.path.join(folder_path, file_name)
25
  print(f'@tcm: In EnTubeDataset.__init__(): file_path={file_path}')
26
- torch.cuda.memory._record_memory_history()
27
  video, image_size = process_video_frames(file_path, image_processor, device)
28
- torch.cuda.memory._dump_snapshot(f"{folder_path.strip('./')}-{file_name}.pickle")
29
  self.videos.append(video)
30
  self.image_sizes.append(image_size)
31
- print(f'@tcm: In EnTubeDataset.__init__(): done')
32
 
33
  def __len__(self):
34
  return len(self.image_sizes)
 
13
  image_processor: List[BaseImageProcessor],
14
  device: str
15
  ) -> None:
 
16
  self.videos = []
17
  self.image_sizes = []
18
  self.device = device
 
22
  for file_name in file_names:
23
  file_path = os.path.join(folder_path, file_name)
24
  print(f'@tcm: In EnTubeDataset.__init__(): file_path={file_path}')
 
25
  video, image_size = process_video_frames(file_path, image_processor, device)
 
26
  self.videos.append(video)
27
  self.image_sizes.append(image_size)
 
28
 
29
  def __len__(self):
30
  return len(self.image_sizes)
preprocessing/mm_datautils.py CHANGED
@@ -24,11 +24,13 @@ def process_images(
24
  device: str
25
  ) -> Union[torch.Tensor, List[torch.Tensor]]:
26
  # images.shape: (4294, 360, 640, 3)
 
27
  if isinstance(image_processor, list):
28
  processor_aux_list = image_processor
29
  new_images_aux_list = []
30
- for image in images:
31
  # image.shape: (height, width, channels)
 
32
  if isinstance(image, np.ndarray):
33
  image = Image.fromarray(image)
34
  image_aux_list = []
@@ -42,12 +44,15 @@ def process_images(
42
  image_aux = expand2square(
43
  image_aux, tuple(int(x * 255) for x in processor_aux.image_mean)
44
  ).resize((target_resolution, target_resolution))
 
45
  image_aux = processor_aux.preprocess(image_aux, return_tensors="pt")[
46
  "pixel_values"
47
  ][0]
 
48
  # image_aux.shape: torch.Size([3, 384, 384])
49
  image_aux_list.append(image_aux)
50
  new_images_aux_list.append(image_aux_list) # torch.Tensor(C, H, W) new_images_aux_list[num_frames][num_processor]
 
51
  new_images_aux_list = [
52
  list(batch_image_aux) for batch_image_aux in zip(*new_images_aux_list)
53
  ] # torch.Tensor(C, H, W) new_images_aux_list[num_processor][num_frames]
 
24
  device: str
25
  ) -> Union[torch.Tensor, List[torch.Tensor]]:
26
  # images.shape: (4294, 360, 640, 3)
27
+ print(f'@tcm: In process_images(): images.shape={images.shape}')
28
  if isinstance(image_processor, list):
29
  processor_aux_list = image_processor
30
  new_images_aux_list = []
31
+ for i, image in enumerate(images):
32
  # image.shape: (height, width, channels)
33
+ print(f'@tcm: In process_images(): frame {i}')
34
  if isinstance(image, np.ndarray):
35
  image = Image.fromarray(image)
36
  image_aux_list = []
 
44
  image_aux = expand2square(
45
  image_aux, tuple(int(x * 255) for x in processor_aux.image_mean)
46
  ).resize((target_resolution, target_resolution))
47
+ print(f'@tcm: In process_images(): begin processor_aux.preprocess()')
48
  image_aux = processor_aux.preprocess(image_aux, return_tensors="pt")[
49
  "pixel_values"
50
  ][0]
51
+ print(f'@tcm: In process_images(): end processor_aux.preprocess()')
52
  # image_aux.shape: torch.Size([3, 384, 384])
53
  image_aux_list.append(image_aux)
54
  new_images_aux_list.append(image_aux_list) # torch.Tensor(C, H, W) new_images_aux_list[num_frames][num_processor]
55
+ print()
56
  new_images_aux_list = [
57
  list(batch_image_aux) for batch_image_aux in zip(*new_images_aux_list)
58
  ] # torch.Tensor(C, H, W) new_images_aux_list[num_processor][num_frames]