# Aria-UI_Context-aware_Data / android_control / prepare_trajectory_grounding.py
import json
import os
from pathlib import Path
from PIL import Image
from tqdm import tqdm
import multiprocessing
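
# Prepares context-aware grounding training data from android_control episode
# traces: each output sample pairs GUI screenshots with step instructions and
# asks for the relative (0-1000) click coordinates of the current step's
# target element.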
def resize_image(image, scale=0.75):
    """
    Downscale an image by a constant factor while maintaining aspect ratio.

    Args:
        image: PIL Image object
        scale (float): Factor applied to both dimensions. Defaults to 0.75.

    Returns:
        Resized PIL Image
    """
    # Get current dimensions
    width, height = image.size
    # Calculate new dimensions
    new_width = int(width * scale)
    new_height = int(height * scale)
    # Resize with a high-quality downsampling filter
    resized_image = image.resize((new_width, new_height), Image.LANCZOS)
    return resized_image
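
# Example (illustrative sizes): a 1080x2400 screenshot resized with scale=0.5
# becomes 540x1200; both edges shrink by the same factor, so the aspect ratio
# is preserved up to integer truncation.
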
def merge_convs(conversations):
"""
    Merge each run of consecutive 'human' messages into a single message.
Args:
conversations (list): List of conversation dictionaries
Returns:
list: Processed conversations with all successive human messages merged
Raises:
ValueError: If input is not a list or contains invalid conversation dictionaries
"""
# Validate input
if not isinstance(conversations, list):
raise ValueError("Input must be a list of conversation dictionaries")
# Validate each conversation dictionary structure
for conv in conversations:
if not isinstance(conv, dict):
raise ValueError("Each conversation must be a dictionary")
if 'from' not in conv or 'value' not in conv:
raise ValueError("Each conversation must have 'from' and 'value' keys")
processed_conversations = []
i = 0
while i < len(conversations):
current_conv = conversations[i]
# If current conversation is 'human', start merging
if current_conv['from'] == 'human':
# Collect all successive human conversations
merged_value = current_conv['value']
j = i + 1
while j < len(conversations) and conversations[j]['from'] == 'human':
merged_value += '\n\n' + conversations[j]['value']
j += 1
# Update current conversation with merged value
current_conv['value'] = merged_value
            # Advance the index past all of the human messages just merged
            i = j
else:
# For non-human conversations, just add to processed list
i += 1
processed_conversations.append(current_conv)
return processed_conversations
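
# Example: merge_convs([{"from": "human", "value": "A"},
#                       {"from": "human", "value": "B"},
#                       {"from": "gpt", "value": "C"}])
# returns  [{"from": "human", "value": "A\n\nB"}, {"from": "gpt", "value": "C"}]
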
grounding_step_prompt = "<|img|>Step {step_idx}. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: {instruction}"
grounding_step_ans = "```\n{point_str}\n```"
act_step_prompt = "<|img|>Step {step_idx}. Instruction: {prev_instruction}"
act_step_ans = "The agent's action: {prev_action}"
user_start_prompt = "The agent is performing the ultimate task: {ultimate_task}."
user_history_instr_prompt = "History of the agent's steps:\n{history_list}."
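
# Example of a filled grounding turn (step index, instruction, and coordinates
# are illustrative):
#   grounding_step_prompt.format(step_idx=3, instruction="Tap the Settings icon")
#   -> "<|img|>Step 3. Given a GUI image, what are the relative (0-1000) pixel
#      point coordinates for the element corresponding to the following
#      instruction or description: Tap the Settings icon"
#   grounding_step_ans.format(point_str="(512, 847)")
#   -> "```\n(512, 847)\n```"
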
def process_android_episodes(data, window_size=2):
"""
Process Android episodes and extract steps with click or long_press actions.
Args:
data (list): List of episode dictionaries
        window_size (int, optional): Number of recent image-included steps to
            include as context. Defaults to 2 (current image + up to 2 previous
            image-included steps).
Returns:
dict: Dictionary with episode_id as key and list of filtered steps as value
"""
instructions = []
for episode in data:
episode_id = episode["episode_id"]
for i, step in enumerate(episode["steps"]):
action = step.get("action", {})
action_type = action.get("action_type")
is_grounding = action_type in ["click", "long_press"] and step["bbox_norm"] is not None
if not is_grounding:
continue
            if window_size > 0 and i == 0:  # the first step has no prior steps to fill the window
continue
convs = [
{
"from": "human",
"value": user_start_prompt.format(
ultimate_task=episode["goal"]
),
},
]
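            # One training sample per grounding step: the conversation opens
            # with the ultimate task, optionally adds textual history and a
            # window of recent image-grounded steps, and ends with the current
            # grounding question/answer pair.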
cur_img_list = [Path("./images") / Path(step["img_path"]).name]
if window_size > 0:
window_steps = episode["steps"][i-window_size:i] if i >= window_size else episode["steps"][:i]
                if i > window_size:  # there are earlier history steps beyond the context window
convs.append(
{
"from": "human",
"value": user_history_instr_prompt.format(
history_list="\n".join(
[
f"\t{j+1}. " + prev_step["step_instruction"]
for j, prev_step in enumerate(episode["steps"][:i-window_size])
]
)
),
},
)
convs.append(
{
"from": "human",
"value": "The recent steps with the GUI images are as follows:\n",
}
)
for j, win_step_i in enumerate(window_steps):
if win_step_i["action"]["action_type"] in ["click", "long_press"]:
convs.append(
{
"from": "human",
"value": grounding_step_prompt.format(
instruction=win_step_i["step_instruction"], step_idx=i+1-(len(window_steps)-j)
),
}
)
convs.append(
{
"from": "gpt",
"value": grounding_step_ans.format(point_str=f"({win_step_i['coord_norm'][0]}, {win_step_i['coord_norm'][1]})"),
}
)
else:
convs.append(
{
"from": "human",
"value": act_step_prompt.format(
prev_instruction=win_step_i["step_instruction"], step_idx=i+1-(len(window_steps)-j)
),
}
)
convs.append(
{
"from": "human",
"value": act_step_ans.format(
prev_action=win_step_i["action"]
),
}
)
win_img_list = [
Path("./images") / Path(win_step["img_path"]).name for win_step in window_steps
]
                if not all(img_path.exists() for img_path in win_img_list + cur_img_list):
print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
continue
resize_scale = 0.5
win_img_list_resized = []
for img_path in win_img_list:
new_save_name = img_path.stem + f"_{resize_scale}x" + img_path.suffix
                    new_save_dir = img_path.parent.parent / "images_resized"
new_save_dir.mkdir(parents=True, exist_ok=True)
new_save_path = new_save_dir / new_save_name
                    if new_save_path.exists():
                        # Reuse a previously resized image if it is readable;
                        # otherwise delete the broken file and regenerate below.
                        try:
                            with Image.open(str(new_save_path)) as cached_img:
                                cached_img.verify()  # catches truncated/corrupt files
                        except Exception as e:
                            print(f"Error opening image {new_save_path}: {e}")
                            os.remove(new_save_path)
                        else:
                            win_img_list_resized.append(new_save_path)
                            continue
                    win_img = Image.open(str(img_path))
                    win_img = resize_image(win_img, scale=resize_scale)
                    win_img.save(str(new_save_path))
                    win_img_list_resized.append(new_save_path)
else: # window_size == 0
convs.append(
{
"from": "human",
"value": user_history_instr_prompt.format(
history_list="\n".join(
[
f"\t{j+1}. " + prev_step["step_instruction"]
for j, prev_step in enumerate(episode["steps"][:i])
]
)
),
},
)
if window_size > 0:
img_list = win_img_list_resized + cur_img_list
else:
img_list = cur_img_list
            if not all(img_path.exists() for img_path in img_list):
print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
continue
has_img_broken = False
for img_path in img_list:
                try:
                    with Image.open(str(img_path)) as check_img:
                        check_img.verify()  # catches truncated/corrupt files
except Exception as e:
print(f"Error opening image {img_path}: {e}")
has_img_broken = True
break
if has_img_broken:
print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
continue
# Current step details
convs.append(
{
"from": "human",
"value": grounding_step_prompt.format(instruction=step["step_instruction"], step_idx=i+1),
}
)
convs.append(
{
"from": "gpt",
"value": grounding_step_ans.format(point_str=f"({step['coord_norm'][0]}, {step['coord_norm'][1]})"),
}
)
convs = merge_convs(convs)
instructions.append(
{
"image": [str(img_path) for img_path in img_list],
"conversations": convs,
"bbox_norm": step["bbox_norm"],
}
)
return instructions
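
# Assumed structure of the input episodes JSON, inferred from the field
# accesses above (illustrative, not an authoritative schema):
# [
#   {
#     "episode_id": ...,
#     "goal": "...",                      # the ultimate task
#     "steps": [
#       {
#         "step_instruction": "...",
#         "img_path": "path/to/screenshot.png",
#         "action": {"action_type": "click", ...},
#         "coord_norm": [x, y],           # relative (0-1000) point coordinates
#         "bbox_norm": [...] or None
#       },
#       ...
#     ]
#   },
#   ...
# ]
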
def process_episode(args):
    """Process a single (episode, window_size) pair.

    Defined at module level so it can be pickled by multiprocessing on
    spawn-based platforms (Windows/macOS)."""
    episode, window_size = args
    return process_android_episodes([episode], window_size)

# Example usage
if __name__ == "__main__":
    # Sample data loading (replace with your actual file path)
    with open("ac_train_eposides_13603.json", "r") as file:
        data = json.load(file)
    # Process the episodes once per context window size
    window_size_list = [0, 1, 2, 3]
instructions = []
for window_size in window_size_list:
tasks = [(episode, window_size) for episode in data]
with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
results = list(tqdm(pool.imap(process_episode, tasks), total=len(tasks), desc=f"Window Size {window_size}"))
for result in results:
instructions.extend(result)
print(f"Number of context aware train instructions: {len(instructions)}")
with open(f"ac_train_window_{'-'.join([str(e) for e in window_size_list])}_{len(instructions)//1000}k.json", "w", encoding="utf-8") as file:
json.dump(instructions, file, ensure_ascii=False, indent=4)
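
# Run (assuming the input JSON sits in the working directory and the
# screenshots are under ./images):
#   python prepare_trajectory_grounding.py
# The output file name encodes the window sizes and the approximate number of
# training instructions, e.g. ac_train_window_0-1-2-3_123k.json (count is
# illustrative).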