|
|
import json |
|
|
import webdataset as wds |
|
|
import io |
|
|
import decord |
|
|
import numpy as np |
|
|
import torch |
|
|
import matplotlib.pyplot as plt |
|
|
import glob |
|
|
import cv2 |
|
|
from pathlib import Path |
|
|
import concurrent.futures |
|
|
import os |
|
|
import argparse |
|
|
import sys |
|
|
from huggingface_hub import HfFileSystem, get_token, hf_hub_url |
|
|
|
|
|
# Thread pool used to write annotated JPEGs without blocking the main
# dataset-iteration loop. max_workers=None lets the executor pick a
# default derived from the CPU count.
executor = concurrent.futures.ThreadPoolExecutor(
    max_workers=None,
    thread_name_prefix="JPG_Saver"
)

# Resolve every frame shard of the grounding-YT dataset on the Hugging Face
# Hub, then build a single WebDataset "pipe:" URL that streams all shards
# through curl using the caller's cached HF auth token.
fs = HfFileSystem()
files = [fs.resolve_path(path) for path in fs.glob("hf://datasets/CVML-TueAI/grounding-YT-dataset/frames/*.tar")]
urls = [hf_hub_url(file.repo_id, file.path_in_repo, repo_type="dataset") for file in files]
urls = f"pipe: curl -s -L -H 'Authorization:Bearer {get_token()}' {'::'.join(urls)}"

# NOTE(review): PRED_FILE appears unused — main() takes the predictions path
# from --predictions instead; confirm before removing.
PRED_FILE = 'random_preds.json'

# Root directory for all annotated output frames; created eagerly at import.
OUTPUT_DIR = Path('./output_annotations')
OUTPUT_DIR.mkdir(parents=True, exist_ok=True)
|
|
|
|
|
def save_annotated_frame(image_array_rgb, bbox, point, gt_action, pred_action, output_path):
    """Draw GT/prediction labels, the GT bounding box and the predicted
    point on a frame, then save it as a JPEG.

    Args:
        image_array_rgb: HxWx3 uint8 RGB image.
        bbox: ground-truth box (x_min, y_min, x_max, y_max); values may be
            floats (e.g. from JSON) and are truncated to ints.
        point: predicted (x, y) click point; values may be floats.
        gt_action: ground-truth action label, drawn in green.
        pred_action: predicted action label; drawn green when it matches
            the ground truth, red otherwise.
        output_path: destination JPEG path (str or pathlib.Path).
    """
    COLOR_GT = (0, 150, 0)      # BGR green
    COLOR_PRED = (0, 0, 255)    # BGR red (wrong prediction)
    COLOR_BOX = (255, 0, 0)     # BGR blue
    COLOR_POINT = (0, 0, 255)   # BGR red

    # A correct prediction is rendered in the same green as the ground truth.
    if gt_action == pred_action:
        COLOR_PRED = (0, 150, 0)

    TOP_PADDING = 70    # white banner above the frame for the text labels
    TEXT_OFFSET_X = 10

    # OpenCV draws and writes in BGR channel order.
    image_bgr = cv2.cvtColor(image_array_rgb, cv2.COLOR_RGB2BGR)
    h, w = image_bgr.shape[:2]

    # White canvas with banner headroom; paste the frame below the banner.
    final_image = np.full((h + TOP_PADDING, w, 3), 255, dtype=np.uint8)
    final_image[TOP_PADDING : h + TOP_PADDING, 0:w] = image_bgr

    cv2.putText(
        final_image,
        f"Ground Truth: {gt_action}",
        (TEXT_OFFSET_X, 30),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.8,
        COLOR_GT,
        2
    )
    cv2.putText(
        final_image,
        f"Prediction: {str(pred_action)}",
        (TEXT_OFFSET_X, 60),
        cv2.FONT_HERSHEY_SIMPLEX,
        0.8,
        COLOR_PRED,
        2
    )

    # Box coordinates may be floats; OpenCV drawing calls need ints.
    x_min, y_min, x_max, y_max = [int(coord) for coord in bbox]

    # Shift everything drawn on the frame down by the banner height.
    pt1 = (x_min, y_min + TOP_PADDING)
    pt2 = (x_max, y_max + TOP_PADDING)
    cv2.rectangle(
        final_image,
        pt1,
        pt2,
        COLOR_BOX,
        thickness=2
    )

    # BUGFIX: the point coordinates were previously used unconverted;
    # JSON-decoded predictions are typically floats, which cv2.circle
    # rejects as a center argument. Truncate to ints like bbox above.
    a, b = point
    pt_center = (int(a), int(b) + TOP_PADDING)

    # Filled dot at the predicted point ...
    cv2.circle(
        final_image,
        pt_center,
        radius=3,
        color=COLOR_POINT,
        thickness=-1
    )
    # ... ringed in white so it stays visible on dark frames.
    cv2.circle(
        final_image,
        pt_center,
        radius=10,
        color=(255, 255, 255),
        thickness=2
    )

    # BUGFIX: cv2.imwrite expects a string filename, but the caller passes
    # a pathlib.Path — convert explicitly for compatibility.
    cv2.imwrite(str(output_path), final_image, [int(cv2.IMWRITE_JPEG_QUALITY), 95])
    print(f"Saved annotated image to {output_path}")
|
|
|
|
|
|
|
|
def main():
    """Stream the dataset shards, annotate every frame that has a
    prediction, and save the annotated JPEGs under OUTPUT_DIR using the
    shared thread-pool executor.
    """
    # BUGFIX(ordering): parse CLI arguments before constructing the
    # streaming dataset, so bad arguments fail fast instead of after the
    # network pipeline has been opened.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--predictions", type=str, required=True, help="Path to json file with predictions for each clip"
    )
    args = parser.parse_args()

    # preds maps sample key -> {frame_no (str) -> {'point': ..., 'action': ...}}
    # (schema inferred from the lookups below — confirm against the producer).
    with open(args.predictions, 'r', encoding='utf-8') as f:
        preds = json.load(f)

    # 'torchrgb' decodes JPEGs to float CHW tensors in [0, 1].
    dataset = (
        wds.WebDataset(urls, shardshuffle=False)
        .decode('torchrgb')
        .to_tuple("__key__", "jpg", "json")
    )

    for key, image_tensor, meta in dataset:
        frame_no = meta['frame']
        video_name = meta['video']

        # Skip clips with no predictions at all.
        if preds.get(key) is None:
            continue

        # BUGFIX: a frame may be missing from this clip's predictions;
        # the original chained .get(...).get(...) raised AttributeError
        # on None in that case. Look the frame up once and skip if absent.
        frame_preds = preds[key].get(str(frame_no))
        if frame_preds is None:
            continue

        # Convert CHW float [0, 1] tensor to HWC uint8 for OpenCV.
        image_hwc = image_tensor.permute(1, 2, 0)
        image_numpy_uint8 = (image_hwc * 255.0).numpy().astype(np.uint8)

        pred_point = frame_preds.get('point')
        pred_action = frame_preds.get('action')

        output_dir = OUTPUT_DIR / 'frames' / video_name
        output_dir.mkdir(parents=True, exist_ok=True)
        output_img = output_dir / f'{key}.jpg'

        # Off-load the (slow) annotation + JPEG write to the thread pool.
        executor.submit(
            save_annotated_frame,
            image_array_rgb=image_numpy_uint8,
            bbox=meta['box'],
            point=pred_point,
            gt_action=meta['step_name'],
            pred_action=pred_action,
            output_path=output_img
        )

    print("Main loop finished. Waiting for file saving to complete...")
    # Block until every queued save has finished before exiting.
    executor.shutdown(wait=True)
    print("All files saved.")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Run only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    main()
|
|
|
|
|
|
|
|
|