# aitw/prepare_trajectory_grounding.py
# Build context-aware grounding trajectories from Android-in-the-Wild (AitW) episodes.
import json
import multiprocessing
import re
import sys
from pathlib import Path

import numpy as np
from PIL import Image
from tqdm import tqdm

# The AitW helpers live in a local checkout of the google-research repo.
sys.path.append('./google-research')
from android_in_the_wild.action_type import ActionType
from android_in_the_wild.visualization_utils import is_tap_action

def is_tap(step):
    """Return True for a DUAL_POINT action whose touch and lift points are
    close enough to count as a tap (AitW's is_tap_action heuristic)."""
    return step["results/action_type"][0] == ActionType.DUAL_POINT and is_tap_action(
        np.array(step["results/yx_touch"]), np.array(step["results/yx_lift"])
    )

def encode_action(action_json):
    """
    Encode different types of actions into human-readable descriptions.

    Args:
        action_json (dict): A dictionary containing action details

    Returns:
        str: A human-readable description of the action
    """
    action_type = action_json["results/action_type"][0]
    if is_tap(action_json):
        return "tap on the screen"
    elif action_type == ActionType.DUAL_POINT:
        # AitW stores gesture points as (y, x): index 0 is y, index 1 is x.
        start_y, end_y = action_json["results/yx_touch"][0], action_json["results/yx_lift"][0]
        start_x, end_x = action_json["results/yx_touch"][1], action_json["results/yx_lift"][1]
        # First decide whether the swipe is predominantly vertical or horizontal
        if abs(start_y - end_y) > abs(start_x - end_x):
            if start_y < end_y:
                return "scroll down"
            else:
                return "scroll up"
        else:
            if start_x < end_x:
                return "scroll right"
            else:
                return "scroll left"
    elif action_type == ActionType.TYPE:
        text_to_type = action_json["results/type_action"][0]
        return f'TYPE "{text_to_type}"'
    elif action_type == ActionType.PRESS_BACK:
        return "go to the previous screen"
    elif action_type == ActionType.PRESS_HOME:
        return "go to the home screen"
    elif action_type == ActionType.PRESS_ENTER:
        return "press the enter key"
    elif action_type == ActionType.STATUS_TASK_COMPLETE:
        return "task completed"
    elif action_type == ActionType.STATUS_TASK_IMPOSSIBLE:
        return "task impossible"
    else:
        raise ValueError(f"Unknown action type: {action_type}")
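
# Illustrative call (hypothetical step dict; assumes AitW's array-valued
# fields and normalized (y, x) coordinates):
#
#   step = {
#       "results/action_type": [ActionType.DUAL_POINT],
#       "results/yx_touch": [0.2, 0.5],
#       "results/yx_lift": [0.8, 0.5],
#   }
#   encode_action(step)  # -> "scroll down" (the touch point moves toward larger y)
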
def resize_image(image, scale=0.75):
    """
    Resize an image by a uniform scale factor, maintaining its aspect ratio.

    Args:
        image: PIL Image object
        scale (float): Factor applied to both width and height

    Returns:
        Resized PIL Image
    """
    width, height = image.size
    new_width = int(width * scale)
    new_height = int(height * scale)
    return image.resize((new_width, new_height), Image.LANCZOS)
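
# Worked example: a 1080x1920 screenshot with scale=0.75 becomes 810x1440
# (both edges are scaled by the same factor, so the aspect ratio is kept).
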
def merge_convs(conversations):
    """
    Merge all successive 'human' conversations comprehensively.

    Args:
        conversations (list): List of conversation dictionaries

    Returns:
        list: Processed conversations with all successive human messages merged

    Raises:
        ValueError: If input is not a list or contains invalid conversation dictionaries
    """
    # Validate input
    if not isinstance(conversations, list):
        raise ValueError("Input must be a list of conversation dictionaries")
    # Validate each conversation dictionary structure
    for conv in conversations:
        if not isinstance(conv, dict):
            raise ValueError("Each conversation must be a dictionary")
        if 'from' not in conv or 'value' not in conv:
            raise ValueError("Each conversation must have 'from' and 'value' keys")
    processed_conversations = []
    i = 0
    while i < len(conversations):
        current_conv = conversations[i]
        # If the current conversation is 'human', merge the whole run
        if current_conv['from'] == 'human':
            # Collect all successive human conversations
            merged_value = current_conv['value']
            j = i + 1
            while j < len(conversations) and conversations[j]['from'] == 'human':
                merged_value += '\n\n' + conversations[j]['value']
                j += 1
            # Update the current conversation with the merged value
            current_conv['value'] = merged_value
            # Skip past the merged run of human messages
            i = j
        else:
            # For non-human conversations, just advance
            i += 1
        processed_conversations.append(current_conv)
    return processed_conversations
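
# Illustrative merge (hypothetical messages): successive human turns collapse
# into one, separated by blank lines, while gpt turns are kept as-is:
#
#   merge_convs([
#       {"from": "human", "value": "task"},
#       {"from": "human", "value": "history"},
#       {"from": "gpt", "value": "answer"},
#   ])
#   # -> [{"from": "human", "value": "task\n\nhistory"},
#   #     {"from": "gpt", "value": "answer"}]
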
def parse_reasoning(input_string):
    """Split a grounding-reasoning string into (caption, instruction, reasoning)."""
    input_string = input_string.strip()
    if not input_string.endswith("```"):
        input_string += "```"
    # Regex pattern to match the fenced blocks tagged A, B, and C
    pattern = r'```([ABC])\n(.*?)```'
    matches = re.findall(pattern, input_string, re.DOTALL)
    # Collect the matched block bodies in order
    parsed_texts = []
    for _, text in matches:
        parsed_texts.append(text.strip())
    if len(parsed_texts) != 3:
        return None, None, None
    caption, reasoning, instruction = parsed_texts
    return caption, instruction.replace("Task: ", ""), reasoning
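
# parse_reasoning expects three fenced blocks tagged A (caption), B (reasoning),
# and C (instruction), e.g. (hypothetical reasoning string):
#
#   ```A
#   A settings page with a search bar at the top.
#   ```
#   ```B
#   The task requires opening the search bar first.
#   ```
#   ```C
#   Task: tap the search bar
#   ```
#
# which yields the caption, "tap the search bar" (the "Task: " prefix is
# stripped), and the reasoning, in that order.
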
grounding_step_prompt = "<|img|>Step {step_idx}. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: {instruction}"
grounding_step_ans = "```\n{point_str}\n```"
act_step_prompt = "<|img|>Step {step_idx}. Instruction: {prev_instruction}"
act_step_ans = "The agent's action: {prev_action}"
user_start_prompt = "The agent is performing the ultimate task: {ultimate_task}."
user_history_instr_prompt = "History of the agent's steps:\n{history_list}."

# Screenshots in the context window are downscaled by these factors.
resize_ratios_per_window_size = {
    1: 0.5,
    2: 0.5,
    3: 0.5,
}
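
# A grounding turn rendered from the templates above (hypothetical values)
# looks like:
#
#   <|img|>Step 3. Given a GUI image, what are the relative (0-1000) pixel
#   point coordinates for the element corresponding to the following
#   instruction or description: tap the search bar
#
# answered with:
#
#   ```
#   (500, 120)
#   ```
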
def process_android_episodes(data, window_size=2):
    """
    Process Android episodes into grounding training samples.

    Args:
        data (list): List of episode dictionaries
        window_size (int, optional): Number of previous image-included steps to
            keep as context. Defaults to 2 (two previous screenshots plus the
            current one).

    Returns:
        list: Training samples, each holding an "image" path list and the
            merged "conversations"
    """
    instructions = []
    for episode in data:
        episode_id = episode["episode_id"]
        for i, step in enumerate(episode["steps"]):
            is_grounding = step["is_grounding"]
            if not is_grounding:
                continue
            if window_size > 0 and i == 0:  # skip the first step if window_size > 0
                continue
            convs = [
                {
                    "from": "human",
                    "value": user_start_prompt.format(
                        ultimate_task=episode["goal_info"][0]
                    ),
                },
            ]
            cur_img_list = [Path(step["image_path"]).resolve()]
            if window_size > 0:
                window_steps = episode["steps"][i - window_size:i] if i >= window_size else episode["steps"][:i]
                if i > window_size:  # there are earlier steps beyond the context window
                    convs.append(
                        {
                            "from": "human",
                            "value": user_history_instr_prompt.format(
                                history_list="\n".join(
                                    [
                                        f"\t{j+1}. " + prev_step["step_instruction"]
                                        for j, prev_step in enumerate(episode["steps"][:i - window_size])
                                    ]
                                )
                            ),
                        },
                    )
                convs.append(
                    {
                        "from": "human",
                        "value": "The recent steps with the GUI images are as follows:\n",
                    }
                )
                for j, win_step_i in enumerate(window_steps):
                    if win_step_i["is_grounding"]:
                        convs.append(
                            {
                                "from": "human",
                                "value": grounding_step_prompt.format(
                                    instruction=win_step_i["step_instruction"],
                                    step_idx=i + 1 - (len(window_steps) - j),
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "gpt",
                                "value": grounding_step_ans.format(
                                    point_str=f"({win_step_i['coord_norm'][0]}, {win_step_i['coord_norm'][1]})"
                                ),
                            }
                        )
                    else:
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_prompt.format(
                                    prev_instruction=encode_action(win_step_i),
                                    step_idx=i + 1 - (len(window_steps) - j),
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_ans.format(
                                    prev_action=encode_action(win_step_i)
                                ),
                            }
                        )
                win_img_list = [
                    Path(win_step["image_path"]).resolve() for win_step in window_steps
                ]
                # Validate the original (pre-resize) images before resizing.
                src_img_list = win_img_list + cur_img_list
                if not all(img_path.exists() for img_path in src_img_list):
                    print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
                    continue
                has_img_broken = False
                for img_path in src_img_list:
                    try:
                        Image.open(str(img_path))
                    except Exception as e:
                        print(f"Error opening image {img_path}: {e}")
                        has_img_broken = True
                        break
                if has_img_broken:
                    print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
                    continue
                resize_scale = resize_ratios_per_window_size[window_size]
                win_img_list_resized = []
                try:
                    for img_path in win_img_list:
                        new_save_name = img_path.stem + f"_{resize_scale}x" + img_path.suffix
                        new_save_dir = img_path.parent.parent / "images_resized"
                        new_save_dir.mkdir(parents=True, exist_ok=True)
                        new_save_path = new_save_dir / new_save_name
                        if new_save_path.exists():
                            win_img_list_resized.append(new_save_path)
                            continue
                        win_img = Image.open(str(img_path))
                        win_img = resize_image(win_img, scale=resize_scale)
                        win_img.save(str(new_save_path))
                        win_img_list_resized.append(new_save_path)
                except Exception as e:
                    print(f"Error resizing image: {e}: {win_img_list}")
                    continue
            else:
                convs.append(
                    {
                        "from": "human",
                        "value": user_history_instr_prompt.format(
                            history_list="\n".join(
                                [
                                    f"\t{j+1}. " + prev_step["step_instruction"]
                                    for j, prev_step in enumerate(episode["steps"][:i - window_size])
                                ]
                            )
                        ),
                    },
                )
            if window_size > 0:
                img_list = win_img_list_resized + cur_img_list
            else:
                img_list = cur_img_list
            # Re-validate the final image list (resized context images plus the
            # current screenshot).
            has_img_broken = False
            for img_path in img_list:
                try:
                    Image.open(str(img_path))
                except Exception as e:
                    print(f"Error opening image {img_path}: {e}")
                    has_img_broken = True
                    break
            if has_img_broken:
                print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
                continue
            # Current step details
            convs.append(
                {
                    "from": "human",
                    "value": grounding_step_prompt.format(
                        instruction=step["step_instruction"], step_idx=i + 1
                    ),
                }
            )
            convs.append(
                {
                    "from": "gpt",
                    "value": grounding_step_ans.format(
                        point_str=f"({step['coord_norm'][0]}, {step['coord_norm'][1]})"
                    ),
                }
            )
            convs = merge_convs(convs)
            instructions.append(
                {
                    "image": [str(img_path) for img_path in img_list],
                    "conversations": convs,
                }
            )
    return instructions
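
# Each returned sample pairs the window's resized screenshots (plus the current
# one) with the merged conversation, roughly (hypothetical paths and values):
#
#   {
#       "image": [".../images_resized/step_1_0.5x.png", ".../step_2.png"],
#       "conversations": [
#           {"from": "human", "value": "The agent is performing the ultimate task: ..."},
#           {"from": "gpt", "value": "```\n(500, 120)\n```"},
#       ],
#   }
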
# Example usage
if __name__ == "__main__":
    dataset_directories = {
        'general': './general_episodes_with_grounding_reasoning',
        'google_apps': './google_apps_episodes_with_grounding_reasoning',
        'install': './install_episodes_with_grounding_reasoning_valid',
        'web_shopping': './web_shopping_episodes_with_grounding_reasoning',
    }
    episode_files = []
    for directory_path in dataset_directories.values():
        episode_files.extend(Path(directory_path).glob("*.json"))
    episode_data_list = [json.load(open(str(file_path), "r", encoding="utf-8")) for file_path in episode_files]
    episode_data_list_new = []
    for episode_data in tqdm(episode_data_list, desc="Parsing fields..."):
        for step in episode_data:
            step["image_path"] = step["image_path"][0] if isinstance(step["image_path"], list) else step["image_path"]
            if "grounding_reasoning" not in step or not step["grounding_reasoning"]:
                step["step_instruction"] = encode_action(step)
                step["is_grounding"] = False
                continue
            caption, instruction, reasoning = parse_reasoning(step["grounding_reasoning"])
            step["step_instruction"] = instruction if instruction else encode_action(step)
            step["caption"] = caption
            step["reasoning"] = reasoning
            step["is_grounding"] = bool(instruction)
            # (y, x) in [0, 1] -> (x, y) in [0, 1000], matching the prompt format.
            step["coord_norm"] = (int(step["results/yx_touch"][1] * 1000), int(step["results/yx_touch"][0] * 1000))
        episode_data = {
            "episode_id": episode_data[0]["episode_id"],
            "goal_info": episode_data[0]["goal_info"],
            "steps": episode_data,
        }
        episode_data_list_new.append(episode_data)

    window_size_list = [0, 1, 2, 3]

    def process_episode(args):
        # Note: defining the worker inside the __main__ guard assumes the
        # "fork" start method (the Linux default); under "spawn" the child
        # processes would not see this function.
        episode, window_size = args
        return process_android_episodes([episode], window_size)

    instructions = []
    for window_size in window_size_list:
        tasks = [(episode, window_size) for episode in episode_data_list_new]
        with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
            results = list(tqdm(pool.imap(process_episode, tasks), total=len(tasks), desc=f"Window Size {window_size}"))
        for result in results:
            instructions.extend(result)
    print(f"Number of context-aware train instructions: {len(instructions)}")
    with open(f"aitw_window_{'-'.join([str(e) for e in window_size_list])}_{len(instructions)//1000}k.json", "w", encoding="utf-8") as file:
        json.dump(instructions, file, ensure_ascii=False, indent=4)