|
|
""" |
|
|
data_utils.py |
|
|
|
|
|
General utilities and classes for facilitating data loading and collation. |
|
|
""" |
|
|
|
|
|
from dataclasses import dataclass |
|
|
from typing import Callable, Dict, Sequence, Tuple |
|
|
import numpy as np |
|
|
|
|
|
import torch |
|
|
from torch.nn.utils.rnn import pad_sequence |
|
|
|
|
|
|
|
|
IGNORE_INDEX = -100 |
|
|
|
|
|
|
|
|
def tree_map(fn: Callable, tree: dict) -> dict:
    """Recursively apply `fn` to every non-dict leaf of a nested dictionary.

    Returns a new dictionary with the same key structure; sub-dictionaries
    are traversed, all other values are replaced by `fn(value)`.
    """
    mapped = {}
    for key, value in tree.items():
        if isinstance(value, dict):
            mapped[key] = tree_map(fn, value)
        else:
            mapped[key] = fn(value)
    return mapped
|
|
|
|
|
|
|
|
def tree_map_with_key(fn: Callable, tree: dict, keys: Sequence = ()) -> dict:
    """Recursively apply `fn` to every non-dict leaf of a nested dictionary.

    Unlike `tree_map`, `fn` receives the full key path: each leaf is replaced
    by `fn((*keys, k), value)`, where `keys` accumulates the ancestor keys
    down from the root.
    """
    mapped = {}
    for key, value in tree.items():
        path = (*keys, key)
        if isinstance(value, dict):
            mapped[key] = tree_map_with_key(fn, value, path)
        else:
            mapped[key] = fn(path, value)
    return mapped
|
|
|
|
|
|
|
|
@dataclass
class PaddedCollatorForLanguageModeling:
    """Collates variable-length language-modeling examples into padded batches.

    Pads `input_ids` with `pad_token_id` and `labels` with `IGNORE_INDEX`,
    truncates both to `model_max_length`, and derives the attention mask from
    the non-pad positions. Examples whose `pixel_values` is None (text-only)
    receive a dummy all-zero image so `pixel_values` can be stacked batch-wise;
    the indices of the true multimodal examples are returned separately.
    """

    model_max_length: int
    pad_token_id: int
    default_image_resolution: Tuple[int, int, int]
    padding_side: str = "right"
    pixel_values_dtype: torch.dtype = torch.float32

    def __post_init__(self) -> None:
        # Placeholder image substituted for unimodal (text-only) examples.
        self.dummy_pixel_values = torch.zeros(self.default_image_resolution, dtype=self.pixel_values_dtype)

    def __call__(self, instances: Sequence[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels"))
        pixel_values = [instance["pixel_values"] for instance in instances]

        # Pad to the longest sequence in the batch, then truncate to the model limit.
        input_ids = pad_sequence(input_ids, batch_first=True, padding_value=self.pad_token_id)
        labels = pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)
        input_ids, labels = input_ids[:, : self.model_max_length], labels[:, : self.model_max_length]

        attention_mask = input_ids.ne(self.pad_token_id)

        # Indices of examples that actually carry an image. A Python set gives
        # O(1) membership tests below (the original scanned a tensor per index).
        multimodal_idx_set = {idx for idx in range(len(pixel_values)) if pixel_values[idx] is not None}
        multimodal_indices = torch.tensor(sorted(multimodal_idx_set), dtype=torch.long)

        # Stack pixel values, substituting the dummy image for text-only examples.
        if len(multimodal_indices) == 0:
            pixel_values = torch.stack([self.dummy_pixel_values for _ in range(len(input_ids))])
        elif isinstance(pv_example := pixel_values[multimodal_indices[0]], torch.Tensor):
            pixel_values = torch.stack(
                [
                    pixel_values[idx] if idx in multimodal_idx_set else self.dummy_pixel_values
                    for idx in range(len(input_ids))
                ]
            )
        elif isinstance(pv_example, dict):
            # Multi-backbone case: one stacked tensor per image-transform key.
            pixel_values = {
                k: torch.stack(
                    [
                        pixel_values[idx][k] if idx in multimodal_idx_set else self.dummy_pixel_values
                        for idx in range(len(input_ids))
                    ]
                )
                for k in pv_example
            }
        else:
            # BUGFIX: report the element's type — `pixel_values` here is always
            # the raw list, so the original message was uninformative.
            raise ValueError(f"Unsupported `pixel_values` type = {type(pv_example)}")

        return dict(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels,
            multimodal_indices=multimodal_indices,
        )
|
|
|
|
|
|
|
|
@dataclass
class PaddedCollatorForActionPrediction:
    """Collates VLA action-prediction examples into padded batches.

    Pads `input_ids`/`labels`, truncates to `model_max_length`, derives the
    attention mask, and stacks `pixel_values` (every example must carry one).
    An optional per-instance `dataset_name` is passed through as a list.
    """

    model_max_length: int
    pad_token_id: int
    padding_side: str = "right"
    pixel_values_dtype: torch.dtype = torch.float32

    def __call__(self, instances: Sequence[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        input_ids = [instance["input_ids"] for instance in instances]
        labels = [instance["labels"] for instance in instances]
        pixel_values = [instance["pixel_values"] for instance in instances]
        dataset_names = (
            [instance["dataset_name"] for instance in instances] if "dataset_name" in instances[0] else None
        )

        # Only right padding is compatible with the masking logic below.
        assert self.padding_side == "right", f"Invalid Tokenizer `{self.padding_side = }`"
        input_ids = pad_sequence(input_ids, batch_first=True, padding_value=self.pad_token_id)
        labels = pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)

        # Truncate to the model's maximum sequence length.
        input_ids = input_ids[:, : self.model_max_length]
        labels = labels[:, : self.model_max_length]
        attention_mask = input_ids.ne(self.pad_token_id)

        # VLA training always has an image for every example.
        assert all([pv is not None for pv in pixel_values]), "Invalid VLA Example with `pixel_values = None`!"

        if isinstance(pixel_values[0], torch.Tensor):
            pixel_values = torch.stack(pixel_values)
        elif isinstance(pixel_values[0], dict):
            # Multi-backbone case: one stacked tensor per image-transform key.
            pixel_values = {
                k: torch.stack([pixel_values[idx][k] for idx in range(len(input_ids))]) for k in pixel_values[0]
            }
        else:
            raise ValueError(f"Unsupported `pixel_values` type = {type(pixel_values)}")

        output = dict(
            pixel_values=pixel_values,
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels,
        )
        if dataset_names is not None:
            output["dataset_names"] = dataset_names
        return output
|
|
|
|
|
|
|
|
@dataclass
class PaddedCollatorForActionPredictionV2:
    """Collates action-prediction examples that carry pre-shaped image stacks.

    Pads/truncates `input_ids` and `labels`, builds the attention mask, and
    stacks `all_pixel_values` (and, when present, `all_wrist_values`) along
    the batch dimension.
    """

    model_max_length: int
    pad_token_id: int
    padding_side: str = "right"
    pixel_values_dtype: torch.dtype = torch.float32

    def __call__(self, instances: Sequence[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        input_ids = [instance["input_ids"] for instance in instances]
        labels = [instance["labels"] for instance in instances]
        dataset_names = (
            [instance["dataset_name"] for instance in instances] if "dataset_name" in instances[0] else None
        )

        # Only right padding is compatible with the masking logic below.
        assert self.padding_side == "right", f"Invalid Tokenizer `{self.padding_side = }`"
        input_ids = pad_sequence(input_ids, batch_first=True, padding_value=self.pad_token_id)
        labels = pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)

        # Truncate to the model's maximum sequence length.
        input_ids = input_ids[:, : self.model_max_length]
        labels = labels[:, : self.model_max_length]
        attention_mask = input_ids.ne(self.pad_token_id)

        # Third-person frames are mandatory; wrist frames may be None.
        all_pixel_values = torch.stack([instance["all_pixel_values"] for instance in instances])
        if instances[0]["all_wrist_values"] is not None:
            all_wrist_values = torch.stack([instance["all_wrist_values"] for instance in instances])
        else:
            all_wrist_values = None

        output = dict(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels,
            all_pixel_values=all_pixel_values,
            all_wrist_values=all_wrist_values,
        )
        if dataset_names is not None:
            output["dataset_names"] = dataset_names
        return output
|
|
|
|
|
|
|
|
@dataclass
class PaddedCollatorForActionPredictionV3:
    """Collates action-prediction examples with windowed (multi-step) labels.

    Each instance's `labels` is a stacked tensor of per-window label
    sequences; every window is padded/truncated independently and restacked
    on dim=1. Camera streams (`all_pixel_values` plus optional wrist / depth /
    segmentation streams) are stacked batch-wise; auxiliary fields (reasoning
    overlays, language-noun ids, task strings) are passed through as lists.
    """

    model_max_length: int
    pad_token_id: int
    padding_side: str = "right"
    pixel_values_dtype: torch.dtype = torch.float32

    def __call__(self, instances: Sequence[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        input_ids = [instance["input_ids"] for instance in instances]
        windowed_labels = torch.stack([instance["labels"] for instance in instances])
        dataset_names = (
            [instance["dataset_name"] for instance in instances] if "dataset_name" in instances[0] else None
        )

        # Only right padding is compatible with the masking logic below.
        assert self.padding_side == "right", f"Invalid Tokenizer `{self.padding_side = }`"
        input_ids = pad_sequence(input_ids, batch_first=True, padding_value=self.pad_token_id)
        input_ids = input_ids[:, : self.model_max_length]

        # Pad/truncate each label window independently, then restack on dim=1.
        padded_windows = []
        for window_idx in range(windowed_labels.shape[1]):
            window = pad_sequence(windowed_labels[:, window_idx], batch_first=True, padding_value=IGNORE_INDEX)
            padded_windows.append(window[:, : self.model_max_length])
        windowed_labels = torch.stack(padded_windows, dim=1)

        attention_mask = input_ids.ne(self.pad_token_id)

        def _stack_optional(key: str):
            # Streams whose key always exists but may hold None.
            if instances[0][key] is None:
                return None
            return torch.stack([instance[key] for instance in instances])

        def _gather_optional(key: str):
            # Auxiliary fields that may be absent entirely; returned as lists.
            if key in instances[0]:
                return [instance[key] for instance in instances]
            return None

        # Third-person frames are mandatory; the remaining streams may be None.
        all_pixel_values = torch.stack([instance["all_pixel_values"] for instance in instances])
        all_wrist_values = _stack_optional("all_wrist_values")
        all_pixel_depth_values = _stack_optional("all_pixel_depth_values")
        all_wrist_depth_values = _stack_optional("all_wrist_depth_values")
        all_pixel_seg_values = _stack_optional("all_pixel_seg_values")
        all_wrist_seg_values = _stack_optional("all_wrist_seg_values")

        reasoning_on_image = _gather_optional("reasoning_on_image")
        reasoning_on_wrist_image = _gather_optional("reasoning_on_wrist_image")
        lang_nouns_ids = _gather_optional("lang_nouns_ids")
        tasks = _gather_optional("task")

        output = dict(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=windowed_labels,
            all_pixel_values=all_pixel_values,
            all_wrist_values=all_wrist_values,
            all_pixel_depth_values=all_pixel_depth_values,
            all_wrist_depth_values=all_wrist_depth_values,
            all_pixel_seg_values=all_pixel_seg_values,
            all_wrist_seg_values=all_wrist_seg_values,
            reasoning_on_image=reasoning_on_image,
            reasoning_on_wrist_image=reasoning_on_wrist_image,
            lang_nouns_ids=lang_nouns_ids,
            tasks=tasks,
        )
        if dataset_names is not None:
            output["dataset_names"] = dataset_names
        return output
|
|
|
|
|
|
|
|
@dataclass
class PaddedCollatorForActionPredictionV3T:
    """Collates examples whose `input_ids`/`labels` are *lists* of sequences.

    Each instance contributes several sequences; these are flattened across
    the batch before padding, so the leading dimension of the padded tensors
    is the total sequence count, not the instance count. Camera streams and
    optional auxiliary fields are handled per instance as in
    `PaddedCollatorForActionPredictionV3`.
    """

    model_max_length: int
    pad_token_id: int
    padding_side: str = "right"
    pixel_values_dtype: torch.dtype = torch.float32

    def __call__(self, instances: Sequence[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
        full_input_ids = [instance["input_ids"] for instance in instances]
        full_labels = [instance["labels"] for instance in instances]
        dataset_names = (
            [instance["dataset_name"] for instance in instances] if "dataset_name" in instances[0] else None
        )

        # Only right padding is compatible with the masking logic below.
        assert self.padding_side == "right", f"Invalid Tokenizer `{self.padding_side = }`"

        # Flatten the per-instance sequence lists before padding.
        input_ids = [seq for datum in full_input_ids for seq in datum]
        input_ids = pad_sequence(input_ids, batch_first=True, padding_value=self.pad_token_id)
        labels = [seq for datum in full_labels for seq in datum]
        labels = pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX)

        # Truncate to the model's maximum sequence length.
        input_ids = input_ids[:, : self.model_max_length]
        labels = labels[:, : self.model_max_length]
        attention_mask = input_ids.ne(self.pad_token_id)

        def _stack_optional(key: str):
            # Streams whose key always exists but may hold None.
            if instances[0][key] is None:
                return None
            return torch.stack([instance[key] for instance in instances])

        def _gather_optional(key: str):
            # Auxiliary fields that may be absent entirely; returned as lists.
            if key in instances[0]:
                return [instance[key] for instance in instances]
            return None

        # Third-person frames are mandatory; the remaining streams may be None.
        all_pixel_values = torch.stack([instance["all_pixel_values"] for instance in instances])
        all_wrist_values = _stack_optional("all_wrist_values")
        all_pixel_depth_values = _stack_optional("all_pixel_depth_values")
        all_wrist_depth_values = _stack_optional("all_wrist_depth_values")

        reasoning_on_image = _gather_optional("reasoning_on_image")
        reasoning_on_wrist_image = _gather_optional("reasoning_on_wrist_image")
        lang_nouns_ids = _gather_optional("lang_nouns_ids")
        tasks = _gather_optional("task")

        output = dict(
            input_ids=input_ids,
            attention_mask=attention_mask,
            labels=labels,
            all_pixel_values=all_pixel_values,
            all_wrist_values=all_wrist_values,
            all_pixel_depth_values=all_pixel_depth_values,
            all_wrist_depth_values=all_wrist_depth_values,
            reasoning_on_image=reasoning_on_image,
            reasoning_on_wrist_image=reasoning_on_wrist_image,
            lang_nouns_ids=lang_nouns_ids,
            tasks=tasks,
        )
        if dataset_names is not None:
            output["dataset_names"] = dataset_names
        return output
|
|
|
|
|
|
|
|
|
|
|
def preprocess_reasoning_bboxes(reasoning_bboxes, device=None):
    """Convert batched (label, bbox) ROI pairs into per-example bbox tensors.

    Args:
        reasoning_bboxes: Batch of ROI lists; each ROI is indexable with the
            bbox coordinates at position 1 (position 0 presumably holds the
            label — confirm against caller).
        device: Optional device the stacked tensors are moved to.

    Returns:
        List (len = batch size) of tensors of shape (num_rois, bbox_dims).
    """
    out = []
    for rois in reasoning_bboxes:
        stacked = torch.stack([torch.tensor(roi[1]) for roi in rois], dim=0)
        out.append(stacked.to(device))
    return out
|
|
|
|
|
def preprocess_reasoning_bboxes_v2(reasoning_bboxes, device=None):
    """Densify per-frame (label, bbox) pairs into per-label trajectory tensors.

    For each batch element, collects every label that appears in any frame of
    the horizon, then builds one (horizon, 5) tensor per label: frames where
    the label is present hold `[*bbox, 1.0]` (presence flag appended), and
    missing frames hold zeros.

    Args:
        reasoning_bboxes: Nested [batch][horizon] structure where each frame
            is an iterable of (label, bbox) pairs (bbox concatenable by numpy).
        device: Optional device the stacked tensors are moved to.

    Returns:
        List (len = batch size) of dicts mapping label -> (horizon, 5) tensor.
    """
    batch_size = len(reasoning_bboxes)
    # Horizon is taken from the first batch element and assumed uniform.
    horizon = len(reasoning_bboxes[0])

    revised = []
    for i in range(batch_size):
        # First pass: register every label seen anywhere in the horizon
        # (dict preserves first-seen order).
        per_label = {}
        for j in range(horizon):
            for roi in reasoning_bboxes[i][j]:
                per_label.setdefault(roi[0], [])

        # Second pass: one entry per frame for every label, zeros when absent.
        for j in range(horizon):
            frame = dict(reasoning_bboxes[i][j])
            for label in per_label:
                if label in frame:
                    entry = torch.tensor(np.concatenate([frame[label], [1.0]])).float()
                else:
                    entry = torch.tensor(np.zeros(5)).float()
                per_label[label].append(entry)

        revised.append({label: torch.stack(entries, dim=0).to(device) for label, entries in per_label.items()})

    return revised
|
|
|
|
|
|
|
|
def preprocess_reasoning_bboxes_v3(reasoning_bboxes, lang_nouns, device=None):
    """Densify per-frame reasoning bboxes into per-label trajectory tensors,
    with both a presence flag and an "active object" flag appended.

    For each batch element, collects every label seen across the horizon and
    builds one (horizon, 6) tensor per label: frames containing the label
    hold `[*bbox, 1.0, active]`, where `active` is 1.0 if the label is in
    this element's `lang_nouns` and 0.0 otherwise; missing frames hold zeros.

    Args:
        reasoning_bboxes: [batch][horizon] frames. NOTE(review): unlike
            `preprocess_reasoning_bboxes_v2` (which keys on `roi[0]`), this
            version uses `roi` itself as the label key, which is consistent
            with the later `dict(reasoning_bboxes[i][j])` lookup only if each
            frame is a dict (iteration yields keys) — confirm against caller.
        lang_nouns: Per-batch-element iterables of "active" labels.
        device: Optional device the stacked tensors are moved to.

    Returns:
        List (len = batch size) of dicts mapping label -> (horizon, 6) tensor.
    """
    bz = len(reasoning_bboxes)
    # Horizon is taken from the first batch element and assumed uniform.
    horizon = len(reasoning_bboxes[0])

    revised_reasoning_bboxes = []
    for i in range(bz):
        labeled_reasoning_bboxes = {}
        # First pass: register every label that appears anywhere in the horizon.
        for j in range(horizon):
            for roi in reasoning_bboxes[i][j]:
                if roi not in labeled_reasoning_bboxes:
                    labeled_reasoning_bboxes[roi] = []

        batch_reasoning_keys = labeled_reasoning_bboxes.keys()
        active_objs = list(lang_nouns[i])

        # Second pass: append one 6-vector per frame for every registered label.
        for j in range(horizon):
            reasoning_dict_ij = dict(reasoning_bboxes[i][j])
            for key in batch_reasoning_keys:
                if key in reasoning_dict_ij:
                    if key in active_objs:
                        # Present and task-relevant: [*bbox, 1.0, 1.0].
                        labeled_reasoning_bboxes[key].append(
                            torch.tensor(np.concatenate([reasoning_dict_ij[key], [1.0, 1.0]])).float()
                        )
                    else:
                        # Present but not task-relevant: [*bbox, 1.0, 0.0].
                        labeled_reasoning_bboxes[key].append(
                            torch.tensor(np.concatenate([reasoning_dict_ij[key], [1.0, 0.0]])).float()
                        )
                else:
                    # Absent in this frame: all-zero placeholder.
                    labeled_reasoning_bboxes[key].append(
                        torch.tensor(np.zeros(6)).float()
                    )

        # Stack the per-frame vectors into (horizon, 6) tensors on the target device.
        for key in batch_reasoning_keys:
            labeled_reasoning_bboxes[key] = torch.stack(labeled_reasoning_bboxes[key], dim=0).to(device)
        revised_reasoning_bboxes.append(labeled_reasoning_bboxes)

    return revised_reasoning_bboxes
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def split_gripper_object_data(data):
    """Split a batched tensor along dim 1 into gripper and object parts.

    Returns a `(gripper, objects)` pair: column 0 (kept 2-D via slicing) and
    all remaining columns.
    """
    gripper = data[:, :1]
    objects = data[:, 1:]
    return gripper, objects
|
|
|
|
|
import os |
|
|
import cv2 |
|
|
from torch.nn import functional as F |
|
|
def crop_bboxes_with_score(
    image_tensor,
    bboxes,
    patch_resize: tuple = (40, 40),
    save_debug: bool = False,
    debug_dir: str = None,
    is_depth: bool = False
) -> torch.Tensor:
    """
    Crops and resizes bounding boxes from a batch of images.

    Args:
        image_tensor (torch.Tensor):
            A batch of horizon of images of shape (B, T, C, H, W).
        bboxes (List[Dict[str, torch.Tensor]]):
            A list of dictionaries of len B; each dict maps label -> (T, 5+)
            tensor of bboxes in (cx, cy, w, h, score) format, with
            coordinates normalized to [0, 1].
        patch_resize (tuple):
            The (height, width) to resize each crop.
        save_debug (bool):
            If True, saves each cropped patch for debugging via cv2.
        debug_dir (str):
            Directory (or prefix) to save debug images.
        is_depth (bool):
            If True, keeps only the first channel of each resized patch.

    Returns:
        List[Dict[str, torch.Tensor]]: Per-label (T, C', ph, pw) patch stacks
        aligned with `bboxes`.
    """
    B, T, C, H, W = image_tensor.shape

    out_patches = []

    if save_debug and debug_dir is not None:
        os.makedirs(debug_dir, exist_ok=True)

    for b_idx in range(B):
        batched_patches = {}
        for key, value in bboxes[b_idx].items():
            cropped_patches = []
            for t_idx in range(T):
                cx, cy, w, h, obj = value[t_idx][:5]

                # Convert normalized center/size to pixel coordinates.
                cx_pix = cx * W
                cy_pix = cy * H
                w_pix = w * W
                h_pix = h * H

                # Corner coordinates of the crop.
                x1 = int(cx_pix - 0.5 * w_pix)
                y1 = int(cy_pix - 0.5 * h_pix)
                x2 = int(cx_pix + 0.5 * w_pix)
                y2 = int(cy_pix + 0.5 * h_pix)

                # Clamp to the image bounds.
                x1 = max(0, min(x1, W))
                x2 = max(0, min(x2, W))
                y1 = max(0, min(y1, H))
                y2 = max(0, min(y2, H))

                # Absent box (score flag 0) or degenerate crop: emit a -1
                # sentinel patch. BUGFIX: the original guard used `and`, so a
                # crop collapsed in only ONE dimension reached F.interpolate
                # with a zero-size input and crashed.
                if obj == 0 or y2 - y1 == 0 or x2 - x1 == 0:
                    cropped_patches.append(
                        -torch.ones((1, patch_resize[0], patch_resize[1])).to(image_tensor.device).float()
                    )
                    continue

                # Crop and resize to the target patch size.
                patch = image_tensor[b_idx, t_idx, :, y1:y2, x1:x2].unsqueeze(0)
                patch_resized = F.interpolate(
                    patch,
                    size=patch_resize,
                    mode='bilinear',
                    align_corners=False
                ).squeeze(0)
                if is_depth:
                    # Depth: keep only the first channel.
                    patch_resized = patch_resized[:1, :, :].float()

                cropped_patches.append(patch_resized)

                if save_debug and debug_dir is not None:
                    # NOTE(review): assumes pixel values in [0, 1] — confirm.
                    patch_np = patch_resized.permute(1, 2, 0).cpu().numpy()
                    patch_np = (patch_np * 255).astype("uint8")
                    out_path = os.path.join(
                        debug_dir, f"patch_b{b_idx}_{key}_t{t_idx}.png"
                    )
                    cv2.imwrite(out_path, patch_np)

            # NOTE(review): sentinel patches are single-channel while RGB
            # crops keep C channels; mixing both in one stack would fail —
            # presumably absent boxes only occur alongside is_depth usage.
            batched_patches[key] = torch.stack(cropped_patches, dim=0)
        out_patches.append(batched_patches)

    return out_patches
|
|
|
|
|
def filter_by_active_labels(reasoning_data, lang_nouns):
    """Keep only the reasoning entries whose label is an active language noun.

    Args:
        reasoning_data: Per-batch-element dicts mapping label -> data.
        lang_nouns: Per-batch-element iterables of active labels.

    Returns:
        New list of dicts restricted to labels present in the matching
        `lang_nouns` entry (input dicts are not modified).
    """
    filtered = []
    for nouns, data in zip(lang_nouns, reasoning_data):
        active = list(nouns)
        filtered.append({label: value for label, value in data.items() if label in active})
    return filtered
|
|
|
|
|
def _merge_bboxes_util(bbox1, bbox2): |
|
|
|
|
|
cx1, cy1, w1, h1, score = bbox1[:,0], bbox1[:,1], bbox1[:,2], bbox1[:,3], bbox1[:,4] |
|
|
cx2, cy2, w2, h2, score = bbox2[:,0], bbox2[:,1], bbox2[:,2], bbox2[:,3], bbox2[:,4] |
|
|
|
|
|
|
|
|
x_min1 = cx1 - w1 / 2 |
|
|
y_min1 = cy1 - h1 / 2 |
|
|
x_max1 = cx1 + w1 / 2 |
|
|
y_max1 = cy1 + h1 / 2 |
|
|
|
|
|
|
|
|
x_min2 = cx2 - w2 / 2 |
|
|
y_min2 = cy2 - h2 / 2 |
|
|
x_max2 = cx2 + w2 / 2 |
|
|
y_max2 = cy2 + h2 / 2 |
|
|
|
|
|
|
|
|
x_min_merged = torch.min(x_min1, x_min2) |
|
|
y_min_merged = torch.min(y_min1, y_min2) |
|
|
x_max_merged = torch.max(x_max1, x_max2) |
|
|
y_max_merged = torch.max(y_max1, y_max2) |
|
|
|
|
|
|
|
|
cx_merged = (x_min_merged + x_max_merged) / 2 |
|
|
cy_merged = (y_min_merged + y_max_merged) / 2 |
|
|
w_merged = x_max_merged - x_min_merged |
|
|
h_merged = y_max_merged - y_min_merged |
|
|
|
|
|
if bbox1.shape[1] > 5: |
|
|
interactable = bbox1[:,5] |
|
|
res = torch.stack([cx_merged, cy_merged, w_merged, h_merged, score, interactable], dim=-1) |
|
|
else: |
|
|
res = torch.stack([cx_merged, cy_merged, w_merged, h_merged, score], dim=-1) |
|
|
return res |
|
|
|
|
|
def merge_bboxes_to_interaction(reasoning_data):
    """Replace each label's boxes with the union of that label and 'robot'.

    Every label in every batch element (including 'robot' itself) is merged
    with the same element's 'robot' boxes via `_merge_bboxes_util`. Each dict
    must contain a 'robot' entry; a KeyError is raised otherwise.
    """
    merged_batch = []
    for data in reasoning_data:
        robot_boxes = data['robot']
        merged_batch.append({label: _merge_bboxes_util(boxes, robot_boxes) for label, boxes in data.items()})
    return merged_batch
|
|
|
|
|
|
|
|
def merge_bboxes(reasoning_data):
    """Collapse all labels of each batch element into one 'interaction' entry.

    Folds every label's boxes together with `_merge_bboxes_util`, producing a
    single 'interaction' entry per batch element (the running union of all
    labels). An element with no labels yields an empty dict.
    """
    collapsed = []
    for data in reasoning_data:
        entry = {}
        for boxes in data.values():
            if 'interaction' in entry:
                entry['interaction'] = _merge_bboxes_util(entry['interaction'], boxes)
            else:
                entry['interaction'] = boxes
        collapsed.append(entry)
    return collapsed
|
|
|