|
|
"""Utils for evaluating the OpenVLA policy.""" |
|
|
|
|
|
import json |
|
|
import os |
|
|
import time |
|
|
|
|
|
import numpy as np |
|
|
import tensorflow as tf |
|
|
import torch |
|
|
from PIL import Image |
|
|
from transformers import AutoConfig, AutoImageProcessor, AutoModelForVision2Seq, AutoProcessor |
|
|
|
|
|
from prismatic.extern.hf.configuration_prismatic import OpenVLAConfig |
|
|
from prismatic.extern.hf.modeling_prismatic import OpenVLAForActionPrediction |
|
|
from prismatic.extern.hf.processing_prismatic import PrismaticImageProcessor, PrismaticProcessor |
|
|
|
|
|
|
|
|
# Dimensionality of a single predicted action vector.
# NOTE(review): presumably 6-DoF end-effector delta + gripper — confirm against the policy head.
ACTION_DIM = 7

# Timestamps captured once at import time (e.g. for naming logs/output dirs — verify at call sites).
DATE = time.strftime("%Y_%m_%d")
DATE_TIME = time.strftime("%Y_%m_%d-%H_%M_%S")

# Run on the first CUDA device when available, otherwise fall back to CPU.
DEVICE = torch.device("cuda:0") if torch.cuda.is_available() else torch.device("cpu")

# Print numpy floats with 3 decimal places (mutates numpy's global print state for the process).
np.set_printoptions(formatter={"float": lambda x: "{0:0.3f}".format(x)})

# Chat-style system prompt prepended when querying OpenVLA v0.1 checkpoints
# (see the "openvla-v01" branches in the get_vla_action* helpers below).
OPENVLA_V01_SYSTEM_PROMPT = (
    "A chat between a curious user and an artificial intelligence assistant. "
    "The assistant gives helpful, detailed, and polite answers to the user's questions."
)
|
|
|
|
|
from peft import PeftModel |
|
|
from safetensors.torch import load_file |
|
|
|
|
|
|
|
|
def crop_and_resize(image, crop_scale, batch_size):
    """
    Center-crop an image to `crop_scale` * (original area), then resize back to 224x224.

    Mirrors the logic of the `dlimp` RLDS datasets wrapper so that test-time inputs match
    the training-time augmentation distribution.

    Args:
        image: tf.float32 Tensor of shape (batch_size, H, W, C) or (H, W, C), values in [0, 1].
        crop_scale: Area of the center crop relative to the original image.
        batch_size: Batch size.

    Returns:
        Cropped-and-resized image(s): (batch_size, 224, 224, C), or (224, 224, C) if the
        input was unbatched.
    """
    assert image.shape.ndims in (3, 4)

    # Promote a single image to a batch of one so the batched crop op applies uniformly.
    was_unbatched = image.shape.ndims == 3
    if was_unbatched:
        image = tf.expand_dims(image, axis=0)

    # Side length of a square crop with the requested area, in normalized [0, 1] units
    # (same value for height and width since the crop is square).
    side = tf.reshape(tf.clip_by_value(tf.sqrt(crop_scale), 0, 1), shape=(batch_size,))
    # Offset that centers the crop window within the unit square.
    offset = (1 - side) / 2

    # Boxes are (y1, x1, y2, x2) in normalized coordinates, one row per batch element.
    boxes = tf.stack(
        [
            offset,
            offset,
            offset + side,
            offset + side,
        ],
        axis=1,
    )

    image = tf.image.crop_and_resize(image, boxes, tf.range(batch_size), (224, 224))

    # Restore the caller's original rank.
    if was_unbatched:
        image = image[0]

    return image
|
|
|
|
|
|
|
|
def get_vla_action(vla, processor, base_vla_name, obs, task_label, unnorm_key, center_crop=False):
    """Generates an action with the VLA policy.

    Single-view variant: consumes obs["full_image"], builds the checkpoint-appropriate
    prompt, and returns the (de-normalized) action predicted by `vla`.
    """
    img = Image.fromarray(obs["full_image"]).convert("RGB")

    if center_crop:
        # Apply a fixed 0.9-area center crop (matching the dlimp RLDS training-time
        # augmentation) in float32, then restore the original integer dtype with saturation.
        tensor = tf.convert_to_tensor(np.array(img))
        source_dtype = tensor.dtype
        tensor = tf.image.convert_image_dtype(tensor, tf.float32)
        tensor = crop_and_resize(tensor, 0.9, 1)
        tensor = tf.clip_by_value(tensor, 0, 1)
        tensor = tf.image.convert_image_dtype(tensor, source_dtype, saturate=True)
        img = Image.fromarray(tensor.numpy()).convert("RGB")

    # v0.1 checkpoints expect a chat-style prompt; later checkpoints use the In/Out
    # format with underscore placeholders for the action tokens.
    if "openvla-v01" in base_vla_name:
        prompt = (
            f"{OPENVLA_V01_SYSTEM_PROMPT} USER: What action should the robot take to {task_label.lower()}? ASSISTANT:"
        )
    else:
        prompt = f"In: What action should the robot take to {task_label.lower()}?\nOut: _ _ _ _ _ _ _ _</s>"

    model_inputs = processor(prompt, img).to(DEVICE, dtype=torch.float16)
    # In-place overwrite of the token 9 positions from the end with id 29871.
    # NOTE(review): presumably the Llama tokenizer's empty/space token, pinning the slot
    # right before the 7 action tokens + </s> — confirm against training-time tokenization.
    model_inputs['input_ids'][0][-9] = 29871

    return vla.predict_action(**model_inputs, unnorm_key=unnorm_key, do_sample=False)
|
|
|
|
|
|
|
|
def get_vla_action_v2(vla, processor, base_vla_name, obs, task_label, unnorm_key, center_crop=False):
    """Generates an action with the VLA policy.

    Two-view variant: processes the third-person image and the wrist image separately
    through the processor, then stacks their pixel values into one batch for the model.
    The text inputs (input_ids / attention_mask) are identical across views, so the
    last view's copies are the ones forwarded.
    """
    images = [obs["full_image"], obs["wrist_image"]]
    input_data = []  # one processor output per camera view
    for image in images:
        image = Image.fromarray(image)
        image = image.convert("RGB")

        if center_crop:
            # Fixed 0.9-area center crop to match the dlimp RLDS training-time
            # augmentation; done in float32 and converted back with saturation.
            batch_size = 1
            crop_scale = 0.9

            image = tf.convert_to_tensor(np.array(image))
            orig_dtype = image.dtype

            image = tf.image.convert_image_dtype(image, tf.float32)

            image = crop_and_resize(image, crop_scale, batch_size)

            image = tf.clip_by_value(image, 0, 1)
            image = tf.image.convert_image_dtype(image, orig_dtype, saturate=True)

            image = Image.fromarray(image.numpy())
            image = image.convert("RGB")

        # v0.1 checkpoints use a chat-style prompt; later checkpoints use the In/Out
        # format with underscore placeholders for the action tokens.
        if "openvla-v01" in base_vla_name:
            prompt = (
                f"{OPENVLA_V01_SYSTEM_PROMPT} USER: What action should the robot take to {task_label.lower()}? ASSISTANT:"
            )
        else:
            prompt = f"In: What action should the robot take to {task_label.lower()}?\nOut: _ _ _ _ _ _ _ _</s>"

        inputs = processor(prompt, image).to(DEVICE, dtype=torch.float16)
        # In-place overwrite of the token 9 positions from the end with id 29871.
        # NOTE(review): presumably the Llama tokenizer's empty/space token, pinning the
        # slot before the action tokens — confirm against training-time tokenization.
        inputs['input_ids'][0][-9] = 29871
        input_data.append(inputs)

    # Merge the per-view processor outputs into one input dict. input_ids and
    # attention_mask are overwritten each iteration (same prompt for every view, so
    # only the last copy matters); pixel values are collected and stacked.
    inputs = {}
    inputs['pixel_values'] = []
    for datum in input_data:
        inputs['input_ids'] = datum['input_ids']
        inputs['attention_mask'] = datum['attention_mask']
        bz, cc, h, w = datum['pixel_values'].shape
        # Insert a singleton dim so stacking below yields (bz, num_views, 1, cc, h, w).
        # NOTE(review): confirm this 6-D layout is what vla.predict_action expects.
        datum['pixel_values'] = datum['pixel_values'].reshape(bz, 1, cc, h, w)
        inputs['pixel_values'].append(datum['pixel_values'])
    inputs['pixel_values'] = torch.stack(inputs['pixel_values'], dim=1)
    action = vla.predict_action(**inputs, unnorm_key=unnorm_key, do_sample=False)
    return action
|
|
|
|
|
|
|
|
def get_vla_action_v3(vla, processor, base_vla_name, obs, task_label, unnorm_key, center_crop=False):
    """Generates an action with the VLA policy.

    Four-view variant: processes the third-person and wrist RGB images plus their depth
    counterparts separately, then stacks all four views' pixel values into one batch.
    The text inputs are identical across views, so the last view's copies are forwarded.

    NOTE(review): unlike v1/v2, this variant casts inputs to bfloat16, omits the
    trailing "_ _ ... </s>" placeholders in the prompt, and does not overwrite
    input_ids[-9] with 29871 — verify these differences are intentional for the
    checkpoint this function targets.
    """
    images = [obs["full_image"], obs["wrist_image"], obs["depth_full_image"], obs["depth_wrist_image"]]
    input_data = []  # one processor output per camera view
    for image in images:
        image = Image.fromarray(image)
        image = image.convert("RGB")

        if center_crop:
            # Fixed 0.9-area center crop to match the dlimp RLDS training-time
            # augmentation; done in float32 and converted back with saturation.
            batch_size = 1
            crop_scale = 0.9

            image = tf.convert_to_tensor(np.array(image))
            orig_dtype = image.dtype

            image = tf.image.convert_image_dtype(image, tf.float32)

            image = crop_and_resize(image, crop_scale, batch_size)

            image = tf.clip_by_value(image, 0, 1)
            image = tf.image.convert_image_dtype(image, orig_dtype, saturate=True)

            image = Image.fromarray(image.numpy())
            image = image.convert("RGB")

        # v0.1 checkpoints use a chat-style prompt; later checkpoints use the In/Out
        # format (no action-token placeholders in this variant).
        if "openvla-v01" in base_vla_name:
            prompt = (
                f"{OPENVLA_V01_SYSTEM_PROMPT} USER: What action should the robot take to {task_label.lower()}? ASSISTANT:"
            )
        else:
            prompt = f"In: What action should the robot take to {task_label.lower()}?\nOut:"

        inputs = processor(prompt, image).to(DEVICE, dtype=torch.bfloat16)
        input_data.append(inputs)

    # Merge the per-view processor outputs into one input dict. input_ids and
    # attention_mask are overwritten each iteration (same prompt for every view, so
    # only the last copy matters); pixel values are collected and stacked.
    inputs = {}
    inputs['pixel_values'] = []
    for datum in input_data:
        inputs['input_ids'] = datum['input_ids']
        inputs['attention_mask'] = datum['attention_mask']
        bz, cc, h, w = datum['pixel_values'].shape
        # Insert a singleton dim so stacking below yields (bz, num_views, 1, cc, h, w).
        # NOTE(review): confirm this 6-D layout is what vla.predict_action expects.
        datum['pixel_values'] = datum['pixel_values'].reshape(bz, 1, cc, h, w)
        inputs['pixel_values'].append(datum['pixel_values'])
    inputs['pixel_values'] = torch.stack(inputs['pixel_values'], dim=1)
    action = vla.predict_action(**inputs, unnorm_key=unnorm_key, do_sample=False)
    return action
|
|
|