| import json |
| from copy import deepcopy |
| import os |
| from pathlib import Path |
| import re |
| from PIL import Image |
| from tqdm import tqdm |
| import multiprocessing |
|
|
def resize_image(image, scale=0.75):
    """
    Downscale an image by a uniform factor, maintaining aspect ratio.

    Args:
        image: PIL Image object.
        scale (float): Multiplier applied to both width and height.
            Defaults to 0.75.

    Returns:
        Resized PIL Image (LANCZOS resampling).
    """
    width, height = image.size

    # Clamp to at least 1 pixel so Image.resize never receives a zero
    # dimension (int truncation could yield 0 for tiny inputs).
    new_width = max(1, int(width * scale))
    new_height = max(1, int(height * scale))

    resized_image = image.resize((new_width, new_height), Image.LANCZOS)
    return resized_image
|
|
def merge_convs(conversations):
    """
    Merge all successive 'human' conversations into single turns.

    Consecutive entries whose 'from' field is 'human' are concatenated
    (joined with a blank line) into one conversation dict. Other entries
    are passed through unchanged. The input list and its dicts are NOT
    mutated; merged turns are emitted as new dicts.

    Args:
        conversations (list): List of conversation dictionaries, each with
            'from' and 'value' keys.

    Returns:
        list: Processed conversations with all successive human messages merged.

    Raises:
        ValueError: If input is not a list or contains invalid conversation dictionaries
    """
    if not isinstance(conversations, list):
        raise ValueError("Input must be a list of conversation dictionaries")

    for conv in conversations:
        if not isinstance(conv, dict):
            raise ValueError("Each conversation must be a dictionary")
        if 'from' not in conv or 'value' not in conv:
            raise ValueError("Each conversation must have 'from' and 'value' keys")

    processed_conversations = []
    i = 0
    while i < len(conversations):
        current_conv = conversations[i]

        if current_conv['from'] == 'human':
            # Collect the run of consecutive human messages starting at i.
            merged_value = current_conv['value']
            j = i + 1
            while j < len(conversations) and conversations[j]['from'] == 'human':
                merged_value += '\n\n' + conversations[j]['value']
                j += 1

            # Emit a copy instead of mutating the caller's dict in place
            # (the previous version overwrote current_conv['value']).
            processed_conversations.append({**current_conv, 'value': merged_value})

            # Jump past every message that was merged into this turn.
            i = j
        else:
            processed_conversations.append(current_conv)
            i += 1

    return processed_conversations
|
|
|
|
def parse_reasoning(input_string):
    """
    Split a reasoning string into its caption, instruction, and reasoning parts.

    The input is expected to contain three fenced blocks labelled A, B and C
    (```A ... ```, ```B ... ```, ```C ... ```). A missing closing fence on
    the final block is tolerated. The "Task: " prefix is stripped from the
    instruction block.

    Returns:
        tuple: (caption, instruction, reasoning), or (None, None, None) when
        exactly three labelled blocks could not be found.
    """
    normalized = input_string.strip()

    # Tolerate a truncated final block by appending the closing fence.
    if not normalized.endswith("```"):
        normalized += "```"

    block_pattern = r'```([ABC])\n(.*?)```'
    sections = [
        body.strip()
        for _, body in re.findall(block_pattern, normalized, re.DOTALL)
    ]

    # Anything other than exactly three blocks means the string is malformed.
    if len(sections) != 3:
        return None, None, None

    caption, instruction, reasoning = sections
    return caption, instruction.replace("Task: ", ""), reasoning
|
|
def encode_action(action_json):
    """
    Encode different types of actions into human-readable descriptions.

    Args:
        action_json (dict): A dictionary containing action details
            ('action' plus an action-specific 'info' payload).

    Returns:
        str: A human-readable description of the action. Unrecognized
        actions (and scrolls with no vertical movement) fall back to
        "Perform <ACTION> action".
    """
    action_type = action_json.get("action", "")

    if action_type == "SCROLL":
        points = action_json.get("info", [])
        # Need at least a start and an end point to infer direction.
        if len(points) >= 2:
            y_start, y_end = points[0][1], points[1][1]
            if y_start > y_end:
                return "SCROLL UP"
            if y_start < y_end:
                return "SCROLL DOWN"

    elif action_type == "TEXT":
        typed = action_json.get("info", "")
        return f'TYPE "{typed}"'

    elif action_type == "CLICK":
        # Compare with == (not a dict lookup) since 'info' may also be an
        # unhashable coordinate payload for ordinary clicks.
        info = action_json.get("info")
        for key_code, description in (
            ("KEY_HOME", "go to the home screen"),
            ("KEY_BACK", "go to the previous screen"),
            ("KEY_RECENT", "go to the previous App"),
        ):
            if info == key_code:
                return description

    # Generic fallback for anything not handled above.
    return f"Perform {action_type} action"
|
|
# Prompt/answer templates used to assemble the multi-turn training
# conversations. "<|img|>" marks where a screenshot is attached and
# {step_idx} is a 1-based step number.
grounding_step_prompt = "<|img|>Step {step_idx}. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: {instruction}"
# Expected model answer for a grounding step: the target point in a fenced block.
grounding_step_ans = "```\n{point_str}\n```"
# Templates for non-grounding (plain action) steps shown as history context.
act_step_prompt = "<|img|>Step {step_idx}. Instruction: {prev_instruction}"
act_step_ans = "The agent's action: {prev_action}"
# Conversation opener stating the episode-level task.
user_start_prompt = "The agent is performing the ultimate task: {ultimate_task}."
# Text-only summary of steps that fall outside the image-included window.
user_history_instr_prompt = "History of the agent's steps:\n{history_list}."


# Downscale factor applied to context-window screenshots, keyed by window size.
# NOTE(review): no entry for window_size=0 — lookups only happen when
# window_size > 0, so that is fine today; confirm before adding new sizes.
resize_ratios_per_window_size = {
    1: 0.25,
    2: 0.25,
    3: 0.25,
}
|
|
def process_android_episodes(data, window_size=2):
    """
    Build grounding training samples from Android GUI episodes.

    Only steps flagged ``is_grounding`` produce a sample. Each sample is a
    multi-turn conversation: the episode-level task, an optional textual
    summary of older steps, the ``window_size`` most recent steps with their
    (downscaled) screenshots, and finally the current grounding question with
    its answer point. Resized screenshots are cached on disk under an
    ``images_resized`` sibling directory.

    Args:
        data (list): List of episode dictionaries
        window_size (int, optional): Number of recent image-included steps to
            include before the current one. Defaults to 2.

    Returns:
        list: Instruction dicts with "image" (list of screenshot path strings,
        window images first, current image last) and "conversations" keys.
    """
    instructions = []
    for episode in data:
        episode_id = episode["episode_id"]

        for i, step in enumerate(episode["steps"]):
            is_grounding = step["is_grounding"]
            # Non-grounding steps only serve as history/context, never targets.
            if not is_grounding:
                continue

            # Conversation opener: the ultimate task for the whole episode.
            convs = [
                {
                    "from": "human",
                    "value": user_start_prompt.format(
                        ultimate_task=episode["task_info"]["task"] + " " + episode["task_info"]["instruction"]
                    ),
                },
            ]

            # Screenshot for the current (target) step.
            cur_img_list = [Path("GUI-Odyssey/screenshots") / Path(step["screenshot"]).name]

            if window_size > 0:
                # The last `window_size` steps before the current one.
                window_steps = episode["steps"][i-window_size:i] if i >= window_size else episode["steps"][:i]

                # Steps older than the window are summarized as plain text.
                if i > window_size:
                    convs.append(
                        {
                            "from": "human",
                            "value": user_history_instr_prompt.format(
                                history_list="\n".join(
                                    [
                                        f"\t{j+1}. " + prev_step["step_instruction"]
                                        for j, prev_step in enumerate(episode["steps"][:i-window_size])
                                    ]
                                )
                            ),
                        },
                    )

                convs.append(
                    {
                        "from": "human",
                        "value": "The recent steps with the GUI images are as follows:\n",
                    }
                )

                # Render each in-window step either as a grounding Q/A pair or
                # as an instruction/action pair.
                for j, win_step_i in enumerate(window_steps):
                    if win_step_i["is_grounding"]:
                        convs.append(
                            {
                                "from": "human",
                                "value": grounding_step_prompt.format(
                                    instruction=win_step_i["step_instruction"], step_idx=i+1-(len(window_steps)-j)
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "gpt",
                                "value": grounding_step_ans.format(point_str=f"({win_step_i['coord_norm'][0]}, {win_step_i['coord_norm'][1]})"),
                            }
                        )
                    else:
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_prompt.format(
                                    prev_instruction=encode_action(win_step_i), step_idx=i+1-(len(window_steps)-j)
                                ),
                            }
                        )
                        # NOTE(review): this answer turn is tagged 'human', not
                        # 'gpt', so merge_convs will fold it into the next human
                        # turn — confirm this is intentional.
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_ans.format(
                                    prev_action=encode_action(win_step_i)
                                ),
                            }
                        )

                win_img_list = [
                    (Path("GUI-Odyssey/screenshots") / Path(win_step["screenshot"]).name) for win_step in window_steps
                ]
                img_list = win_img_list + cur_img_list

                # Skip the sample if any source screenshot is missing...
                if not all([img_path.exists() for img_path in img_list]):
                    print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
                    continue

                # ...or unreadable.
                has_img_broken = False
                for img_path in img_list:
                    try:
                        Image.open(str(img_path))
                    except Exception as e:
                        print(f"Error opening image {img_path}: {e}")
                        has_img_broken = True
                        break
                if has_img_broken:
                    print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
                    continue

                # Downscale window screenshots, reusing files already cached on
                # disk from a previous run.
                resize_scale = resize_ratios_per_window_size[window_size]
                win_img_list_resized = []
                for img_path in win_img_list:
                    new_save_name = img_path.stem + f"_{resize_scale}x" + img_path.suffix
                    new_save_dir = img_path.parent.parent / f"images_resized"
                    new_save_dir.mkdir(parents=True, exist_ok=True)
                    new_save_path = new_save_dir / new_save_name
                    if new_save_path.exists():
                        win_img_list_resized.append(new_save_path)
                        continue
                    win_img = Image.open(str(img_path))
                    win_img = resize_image(win_img, scale=resize_scale)
                    win_img.save(str(new_save_path))
                    win_img_list_resized.append(new_save_path)

            else:
                # No image window: the entire prior history becomes text.
                convs.append(
                    {
                        "from": "human",
                        "value": user_history_instr_prompt.format(
                            history_list="\n".join(
                                [
                                    f"\t{j+1}. " + prev_step["step_instruction"]
                                    for j, prev_step in enumerate(episode["steps"][:i])
                                ]
                            )
                        ),
                    },
                )

            # Downscale the current screenshot (fixed 0.5 scale). A cached
            # resized file is validated and regenerated if it is corrupt.
            cur_img_list_resized = []
            for img_path in cur_img_list:
                new_save_name = img_path.stem + f"_{0.5}x" + img_path.suffix
                new_save_dir = img_path.parent.parent / f"images_resized"
                new_save_dir.mkdir(parents=True, exist_ok=True)
                new_save_path = new_save_dir / new_save_name
                if new_save_path.exists():
                    try:
                        Image.open(str(new_save_path))
                    except Exception as e:
                        # Broken cache entry: delete and fall through to regenerate.
                        print(f"Error opening image {new_save_path}: {e}")
                        os.remove(new_save_path)
                    else:
                        cur_img_list_resized.append(new_save_path)
                        continue
                cur_img = Image.open(str(img_path))
                cur_img = resize_image(cur_img, scale=0.5)
                cur_img.save(str(new_save_path))
                cur_img_list_resized.append(new_save_path)

            # Final ordered image list: window screenshots first, current last.
            if window_size > 0:
                img_list = win_img_list_resized + cur_img_list_resized
            else:
                img_list = cur_img_list_resized

            if not all([img_path.exists() for img_path in img_list]):
                print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
                continue

            # Re-validate the resized images before emitting the sample.
            has_img_broken = False
            for img_path in img_list:
                try:
                    Image.open(str(img_path))
                except Exception as e:
                    print(f"Error opening image {img_path}: {e}")
                    has_img_broken = True
                    break
            if has_img_broken:
                print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
                continue

            # Current step's grounding question and gold answer point.
            convs.append(
                {
                    "from": "human",
                    "value": grounding_step_prompt.format(instruction=step["step_instruction"], step_idx=i+1),
                }
            )
            convs.append(
                {
                    "from": "gpt",
                    "value": grounding_step_ans.format(point_str=f"({step['coord_norm'][0]}, {step['coord_norm'][1]})"),
                }
            )

            # Collapse consecutive human turns into single messages.
            convs = merge_convs(convs)

            instructions.append(
                {
                    "image": [str(img_path) for img_path in img_list],
                    "conversations": convs,
                }
            )

    return instructions
|
|
| |
if __name__ == "__main__":
    # Train/test split definition shipped with the GUI-Odyssey dataset.
    splits_info = json.load(open("GUI-Odyssey/splits/random_split.json", "r", encoding="utf-8"))

    # Load only the episode files belonging to the training split.
    episode_data_list = list(Path("episodes_grounding_inner_reasoning_v3").glob("*.json"))
    episode_data_list = [p for p in episode_data_list if p.name in splits_info["train"]]
    episode_data_list = [json.load(open(str(p), "r", encoding="utf-8")) for p in episode_data_list]

    # Derive per-step fields: instruction text, grounding flag, target point.
    for episode_data in tqdm(episode_data_list, desc="Parsing fields..."):
        for step in episode_data["steps"]:
            if "grounding_reasoning" not in step:
                # Pure action step: describe it textually; no grounding target.
                step["step_instruction"] = encode_action(step)
                step["is_grounding"] = False
                continue

            caption, instruction, reasoning = parse_reasoning(step["grounding_reasoning"])
            step["step_instruction"] = instruction
            step["caption"] = caption
            step["reasoning"] = reasoning
            # Grounding step only when the reasoning parsed into a usable instruction.
            step["is_grounding"] = not(not instruction)
            step["coord_norm"] = step["info"][0]

    # Context-window sizes to generate samples for (0 = text-only history).
    window_size_list = [0]

    def process_episode(args):
        # Worker wrapper: unpack (episode, window_size) and process one episode.
        # NOTE(review): defined inside the __main__ guard — fine with the
        # 'fork' start method (Linux default) but not picklable under 'spawn'
        # (Windows/macOS); confirm target platform.
        episode, window_size = args
        return process_android_episodes([episode], window_size)

    # Fan episodes out across all CPU cores, one window size at a time.
    instructions = []
    for window_size in window_size_list:
        tasks = [(episode, window_size) for episode in episode_data_list]
        with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
            results = list(tqdm(pool.imap(process_episode, tasks), total=len(tasks), desc=f"Window Size {window_size}"))
        for result in results:
            instructions.extend(result)

    print(f"Number of context aware train instructions: {len(instructions)}")

    # Output filename encodes the window sizes and the rough sample count.
    with open(f"go_train_window_{'-'.join([str(e) for e in window_size_list])}_{len(instructions)//1000}k.json", "w", encoding="utf-8") as file:
        json.dump(instructions, file, ensure_ascii=False, indent=4)