import ast
import json
from multiprocessing import Pool
from pathlib import Path

from PIL import Image
|
|
def merge_convs(conversations):
    """
    Merge each run of consecutive 'human' turns into a single conversation entry.

    Args:
        conversations (list): List of conversation dictionaries.

    Returns:
        list: Processed conversations with consecutive human messages merged
            (joined with blank lines); the input list is left unmodified.

    Raises:
        ValueError: If the input is not a list or contains invalid conversation
            dictionaries.
    """
    if not isinstance(conversations, list):
        raise ValueError("Input must be a list of conversation dictionaries")

    for conv in conversations:
        if not isinstance(conv, dict):
            raise ValueError("Each conversation must be a dictionary")
        if 'from' not in conv or 'value' not in conv:
            raise ValueError("Each conversation must have 'from' and 'value' keys")

    processed_conversations = []
    i = 0
    while i < len(conversations):
        # Copy the turn so the caller's dictionaries are not mutated in place.
        current_conv = dict(conversations[i])

        if current_conv['from'] == 'human':
            # Absorb every directly following human turn into this one.
            merged_value = current_conv['value']
            j = i + 1
            while j < len(conversations) and conversations[j]['from'] == 'human':
                merged_value += '\n\n' + conversations[j]['value']
                j += 1
            current_conv['value'] = merged_value
            i = j
        else:
            i += 1

        processed_conversations.append(current_conv)

    return processed_conversations
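
# A minimal usage sketch for merge_convs; the turns below are illustrative,
# not taken from the dataset:
#
#   >>> merge_convs([
#   ...     {"from": "human", "value": "Task: open settings."},
#   ...     {"from": "human", "value": "<|img|>Step 2. ..."},
#   ...     {"from": "gpt", "value": "```\n(512, 340)\n```"},
#   ... ])
#   [{'from': 'human', 'value': 'Task: open settings.\n\n<|img|>Step 2. ...'},
#    {'from': 'gpt', 'value': '```\n(512, 340)\n```'}]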
|
|
def transform_bbox(bbox, image_x, image_y):
    """Convert a (y, x, height, width) pixel bbox into a normalized
    [x1, y1, x2, y2] box on a 0-1000 scale."""
    y, x, height, width = bbox
    x1 = int(1000 * x / image_x)
    y1 = int(1000 * y / image_y)
    x2 = int(1000 * (x + width) / image_x)
    y2 = int(1000 * (y + height) / image_y)
    bbox_norm = [x1, y1, x2, y2]

    return bbox_norm
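
# Worked example with hypothetical numbers: on a 1080x2400 screenshot, the
# (y, x, height, width) bbox (240, 540, 120, 270) gives
#   transform_bbox((240, 540, 120, 270), 1080, 2400) == [500, 100, 750, 150]
# since x1 = 1000*540/1080 = 500, y1 = 1000*240/2400 = 100,
# x2 = 1000*810/1080 = 750, and y2 = 1000*360/2400 = 150.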
|
|
grounding_step_prompt = "<|img|>Step {step_idx}. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: {instruction}"
grounding_step_ans = "```\n{point_str}\n```"
act_step_prompt = "<|img|>Step {step_idx}. Instruction: {prev_instruction}"
act_step_ans = "The agent's action: {prev_action}"
user_start_prompt = "The agent is performing the ultimate task: {ultimate_task}."
user_history_instr_prompt = "History of the agent's steps:\n{history_list}."
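
# For reference, a rendered grounding turn (instruction text is illustrative):
#   grounding_step_prompt.format(step_idx=3, instruction="the Wi-Fi toggle")
# produces "<|img|>Step 3. Given a GUI image, what are the relative (0-1000)
# pixel point coordinates for the element corresponding to the following
# instruction or description: the Wi-Fi toggle".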
|
|
def process_android_episodes(data, window_size=2):
    """
    Process Android episodes and extract steps with click or long-press actions.

    Args:
        data (list): List of episodes, each a list of step dictionaries.
        window_size (int, optional): Number of recent image-included steps to
            include as context before the current step. Defaults to 2 (the
            current image plus up to 2 previous image-included steps).

    Returns:
        list: Training instructions, each a dict with an "image" list and a
            "conversations" list.
    """
    instructions = []
    for episode in data:
        for i, step in enumerate(episode):
            res_touch_yx = ast.literal_eval(step["result_touch_yx"])
            res_touch_yx = [round(res_touch_yx[0], 3), round(res_touch_yx[1], 3)]
            res_lift_yx = ast.literal_eval(step["result_lift_yx"])
            res_lift_yx = [round(res_lift_yx[0], 3), round(res_lift_yx[1], 3)]

            # A tap (or long press) lifts where it touched; a touch coordinate
            # of -1 marks a non-touch action.
            is_tap = int(res_touch_yx[0]) != -1 and (res_touch_yx[0] == res_lift_yx[0] and res_touch_yx[1] == res_lift_yx[1])

            step["is_tap"] = is_tap

            # Stop processing the episode once a step lacks its action description.
            if "coat_action_desc" not in step or step["coat_action_desc"] is None:
                break

            if not is_tap:
                continue

            # With a history window, the very first step has no context to show.
            if window_size > 0 and i == 0:
                continue

            convs = [
                {
                    "from": "human",
                    "value": user_start_prompt.format(ultimate_task=step["instruction"]),
                },
            ]

            cur_img_list = [str(Path("./") / Path(step["image_path"]))]

            if window_size > 0:
                window_steps = episode[i-window_size:i] if i >= window_size else episode[:i]

                # Steps older than the window are summarized as a text-only history.
                if i > window_size:
                    convs.append(
                        {
                            "from": "human",
                            "value": user_history_instr_prompt.format(
                                history_list="\n".join(
                                    f"\t{j+1}. " + prev_step["coat_action_desc"]
                                    for j, prev_step in enumerate(episode[:i-window_size])
                                )
                            ),
                        },
                    )

                convs.append(
                    {
                        "from": "human",
                        "value": "The recent steps with the GUI images are as follows:\n",
                    }
                )

                # Each windowed step contributes one <|img|> turn: tap steps are
                # replayed as grounding Q/A pairs, other steps as plain actions.
                for j, win_step_i in enumerate(window_steps):
                    step_idx = i + 1 - (len(window_steps) - j)
                    if win_step_i["is_tap"]:
                        convs.append(
                            {
                                "from": "human",
                                "value": grounding_step_prompt.format(
                                    instruction=win_step_i["coat_action_desc"], step_idx=step_idx
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "gpt",
                                "value": grounding_step_ans.format(point_str=f"({win_step_i['coord_norm'][0]}, {win_step_i['coord_norm'][1]})"),
                            }
                        )
                    else:
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_prompt.format(
                                    prev_instruction=win_step_i["coat_action_desc"], step_idx=step_idx
                                ),
                            }
                        )
                        if win_step_i["result_action_text"]:
                            convs.append(
                                {
                                    "from": "human",
                                    "value": act_step_ans.format(
                                        prev_action=f"Type: {win_step_i['result_action_text']}"
                                    ),
                                }
                            )
                        else:
                            convs.append(
                                {
                                    "from": "human",
                                    "value": act_step_ans.format(
                                        prev_action=win_step_i["coat_action_desc"]
                                    ),
                                }
                            )

                win_img_list = [
                    str(Path("./") / Path(win_step["image_path"])) for win_step in window_steps
                ]

            else:
                # No image window: the whole history is summarized as text.
                convs.append(
                    {
                        "from": "human",
                        "value": user_history_instr_prompt.format(
                            history_list="\n".join(
                                f"\t{j+1}. " + prev_step["coat_action_desc"]
                                for j, prev_step in enumerate(episode[:i-window_size])
                            )
                        ),
                    },
                )
|
            # Keep the image list aligned with the order of the <|img|> tokens in
            # the conversation: window screenshots first, then the current one.
            img_list = win_img_list + cur_img_list if window_size > 0 else cur_img_list
|
            # Skip the sample if any referenced screenshot cannot be opened.
            has_img_broken = False
            for img_path in img_list:
                try:
                    with Image.open(str(img_path)) as img:
                        img.verify()
                except Exception as e:
                    print(f"Error opening image {img_path}: {e}")
                    has_img_broken = True
                    break
            if has_img_broken:
                print(f"Broken image at step {i+1}. Skipping...")
                continue

            convs.append(
                {
                    "from": "human",
                    "value": grounding_step_prompt.format(instruction=step["coat_action_desc"], step_idx=i+1),
                }
            )
            convs.append(
                {
                    "from": "gpt",
                    "value": grounding_step_ans.format(point_str=f"({step['coord_norm'][0]}, {step['coord_norm'][1]})"),
                }
            )

            convs = merge_convs(convs)

            instructions.append(
                {
                    "image": [str(img_path) for img_path in img_list],
                    "conversations": convs,
                }
            )

    return instructions
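
# The loop above assumes each step dict carries at least the following keys
# (inferred from the accesses in this file; the example values are illustrative):
#   {
#       "instruction": "turn on wifi",              # episode-level task
#       "coat_action_desc": "tap the Wi-Fi toggle", # per-step description
#       "image_path": "episode_x/step_1.png",       # screenshot path
#       "result_touch_yx": "[0.42, 0.87]",          # stringified (y, x) touch point
#       "result_lift_yx": "[0.42, 0.87]",           # stringified (y, x) lift point
#       "result_action_text": "",                   # typed text, if any
#       "ui_positions": "[[240, 540, 120, 270]]",   # stringified (y, x, h, w) bboxes
#   }
# plus "coord_norm", which preprocess_coord_norm below attaches to touch steps.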
|
|
if __name__ == "__main__":
    data = []

    # Each episode is stored as a JSON file two directory levels down.
    episode_files = list(Path("./").rglob("*/*/*.json"))
    for episode_file in episode_files:
        with open(episode_file, "r", encoding="utf-8") as file:
            episode_data = json.load(file)
        data.append(episode_data)

    img_parent_path = Path("./")

    def preprocess_coord_norm(episode):
        """Attach 'coord_norm', a 0-1000 scale [x, y] point, to every touch
        step, snapped to the center of the smallest UI element containing
        the tap."""
        for step in episode:
            if int(ast.literal_eval(step["result_touch_yx"])[0]) != -1:
                if not Path(img_parent_path / step["image_path"]).exists():
                    continue
                with Image.open(img_parent_path / step["image_path"]) as img:
                    image_x, image_y = img.size
                elem_bboxes = ast.literal_eval(step["ui_positions"])
                elem_bboxes = [transform_bbox(bbox, image_x, image_y) for bbox in elem_bboxes]
                # Touch coordinates are (y, x) fractions, so scaling by 1000
                # puts the click point on the same 0-1000 grid as the bboxes.
                click_point_yx = ast.literal_eval(step["result_touch_yx"])
                click_point = [1000*click_point_yx[1], 1000*click_point_yx[0]]

                # Pick the smallest element bbox containing the click point.
                bbox = None
                for elem_bbox in elem_bboxes:
                    if elem_bbox[0] <= click_point[0] <= elem_bbox[2] and elem_bbox[1] <= click_point[1] <= elem_bbox[3]:
                        if bbox is None:
                            bbox = elem_bbox
                        else:
                            area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
                            new_area = (elem_bbox[2] - elem_bbox[0]) * (elem_bbox[3] - elem_bbox[1])
                            if new_area < area:
                                bbox = elem_bbox

                if bbox is None:
                    # No element contains the click; fall back to the raw point.
                    coord_norm = [int(click_point[0]), int(click_point[1])]
                else:
                    coord_norm = [(bbox[0] + bbox[2]) // 2, (bbox[1] + bbox[3]) // 2]

                step["coord_norm"] = coord_norm
        return episode
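
    # Snapping sketch with hypothetical numbers: a touch at fractions
    # (y=0.5, x=0.25) gives click_point = [250, 500]; if the normalized element
    # boxes [200, 450, 400, 600] and [240, 480, 300, 520] both contain it, the
    # smaller second box wins and coord_norm becomes its center:
    # [(240 + 300) // 2, (480 + 520) // 2] == [270, 500].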
|
|
    # preprocess_coord_norm is defined under the __main__ guard, so this relies
    # on the 'fork' start method (the default on Linux); with 'spawn' the
    # worker processes would not see the function.
    with Pool() as pool:
        data = pool.map(preprocess_coord_norm, data)

    # Build samples at several history-window sizes and pool them together.
    window_size_list = [0, 1, 2, 3]

    instructions = []
    for window_size in window_size_list:
        instructions.extend(process_android_episodes(data, window_size=window_size))

    print(f"Number of context-aware train instructions: {len(instructions)}")

    with open(f"aitz_train_window_{'-'.join(str(e) for e in window_size_list)}_{len(instructions)//1000}k.json", "w", encoding="utf-8") as file:
        json.dump(instructions, file, ensure_ascii=False, indent=4)