Aria-UI committed on
Commit 3a0cbad · verified · 1 Parent(s): d07e6f6

Upload android_control/prepare_trajectory_grounding.py with huggingface_hub

android_control/prepare_trajectory_grounding.py ADDED
@@ -0,0 +1,303 @@
import json
from copy import deepcopy
import os
from pathlib import Path
from PIL import Image
from tqdm import tqdm
import multiprocessing

def resize_image(image, scale=0.75):
    """
    Resize an image by a uniform scale factor while maintaining aspect ratio.

    Args:
        image: PIL Image object
        scale (float): Multiplier applied to both dimensions (default 0.75)

    Returns:
        Resized PIL Image
    """
    # Get current dimensions
    width, height = image.size

    # Calculate new dimensions
    new_width = int(width * scale)
    new_height = int(height * scale)

    # Resize with a high-quality downsampling filter
    resized_image = image.resize((new_width, new_height), Image.LANCZOS)
    return resized_image
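
# Usage sketch (illustrative numbers, not from the dataset): a 1080x2400
# screenshot resized with scale=0.75 becomes 810x1800; LANCZOS keeps small
# GUI text legible when downsampling.
# >>> img = Image.new("RGB", (1080, 2400))
# >>> resize_image(img, scale=0.75).size
# (810, 1800)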

def merge_convs(conversations):
    """
    Merge all successive 'human' conversations comprehensively.

    Args:
        conversations (list): List of conversation dictionaries

    Returns:
        list: Processed conversations with all successive human messages merged

    Raises:
        ValueError: If input is not a list or contains invalid conversation dictionaries
    """
    # Validate input
    if not isinstance(conversations, list):
        raise ValueError("Input must be a list of conversation dictionaries")

    # Validate each conversation dictionary structure
    for conv in conversations:
        if not isinstance(conv, dict):
            raise ValueError("Each conversation must be a dictionary")
        if 'from' not in conv or 'value' not in conv:
            raise ValueError("Each conversation must have 'from' and 'value' keys")

    processed_conversations = []
    i = 0
    while i < len(conversations):
        # Copy so the caller's list is not mutated in place
        current_conv = deepcopy(conversations[i])

        # If current conversation is 'human', start merging
        if current_conv['from'] == 'human':
            # Collect all successive human conversations
            merged_value = current_conv['value']
            j = i + 1
            while j < len(conversations) and conversations[j]['from'] == 'human':
                merged_value += '\n\n' + conversations[j]['value']
                j += 1

            # Update current conversation with merged value
            current_conv['value'] = merged_value

            # Jump past the merged run, to the first non-human conversation
            i = j
        else:
            # For non-human conversations, just add to processed list
            i += 1

        processed_conversations.append(current_conv)

    return processed_conversations
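
# Behavior sketch (hypothetical turns): successive "human" messages collapse
# into one turn joined by blank lines; other roles pass through unchanged.
# >>> merge_convs([
# ...     {"from": "human", "value": "Task A."},
# ...     {"from": "human", "value": "Step 1."},
# ...     {"from": "gpt", "value": "(512, 88)"},
# ... ])
# [{'from': 'human', 'value': 'Task A.\n\nStep 1.'},
#  {'from': 'gpt', 'value': '(512, 88)'}]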

grounding_step_prompt = "<|img|>Step {step_idx}. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: {instruction}"
grounding_step_ans = "```\n{point_str}\n```"
act_step_prompt = "<|img|>Step {step_idx}. Instruction: {prev_instruction}"
act_step_ans = "The agent's action: {prev_action}"
user_start_prompt = "The agent is performing the ultimate task: {ultimate_task}."
user_history_instr_prompt = "History of the agent's steps:\n{history_list}."
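
# Rendered example (hypothetical instruction):
# grounding_step_prompt.format(step_idx=3, instruction="Tap the search icon")
# -> "<|img|>Step 3. Given a GUI image, what are the relative (0-1000) pixel
#     point coordinates for the element corresponding to the following
#     instruction or description: Tap the search icon"
# The matching target, grounding_step_ans.format(point_str="(512, 88)"),
# is a fenced block containing "(512, 88)".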

def process_android_episodes(data, window_size=2):
    """
    Process Android episodes and extract steps with click or long_press actions.

    Args:
        data (list): List of episode dictionaries
        window_size (int, optional): Number of recent image-included steps to
            include before the current one. Defaults to 2 (the current image
            plus 2 previous image-included steps).

    Returns:
        list: Instruction dictionaries, one per grounding step
    """
    instructions = []
    for episode in data:
        episode_id = episode["episode_id"]

        for i, step in enumerate(episode["steps"]):
            action = step.get("action", {})
            action_type = action.get("action_type")
            is_grounding = action_type in ["click", "long_press"] and step["bbox_norm"] is not None

            if not is_grounding:
                continue

            if window_size > 0 and i == 0:  # skip the first step if window_size > 0
                continue

            convs = [
                {
                    "from": "human",
                    "value": user_start_prompt.format(
                        ultimate_task=episode["goal"]
                    ),
                },
            ]

            cur_img_list = [Path("./images") / Path(step["img_path"]).name]

            if window_size > 0:
                window_steps = episode["steps"][i-window_size:i] if i >= window_size else episode["steps"][:i]

                if i > window_size:  # there are history steps beyond the window
                    convs.append(
                        {
                            "from": "human",
                            "value": user_history_instr_prompt.format(
                                history_list="\n".join(
                                    [
                                        f"\t{j+1}. " + prev_step["step_instruction"]
                                        for j, prev_step in enumerate(episode["steps"][:i-window_size])
                                    ]
                                )
                            ),
                        },
                    )
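
                # Example layout for window_size=2 at i=4 (indices illustrative):
                # steps[0..1] go into the plain-text history list above,
                # steps[2..3] become the image-included "recent steps" below, and
                # step 4 is the current grounding query. In the loop that follows,
                # step_idx = i+1-(len(window_steps)-j) recovers each window step's
                # 1-based absolute index (3 and 4 in this example).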
                convs.append(
                    {
                        "from": "human",
                        "value": "The recent steps with the GUI images are as follows:\n",
                    }
                )

                for j, win_step_i in enumerate(window_steps):
                    if win_step_i["action"]["action_type"] in ["click", "long_press"]:
                        convs.append(
                            {
                                "from": "human",
                                "value": grounding_step_prompt.format(
                                    instruction=win_step_i["step_instruction"], step_idx=i+1-(len(window_steps)-j)
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "gpt",
                                "value": grounding_step_ans.format(point_str=f"({win_step_i['coord_norm'][0]}, {win_step_i['coord_norm'][1]})"),
                            }
                        )
                    else:
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_prompt.format(
                                    prev_instruction=win_step_i["step_instruction"], step_idx=i+1-(len(window_steps)-j)
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_ans.format(
                                    prev_action=win_step_i["action"]
                                ),
                            }
                        )

                win_img_list = [
                    Path("./images") / Path(win_step["img_path"]).name for win_step in window_steps
                ]

                if not all([img_path.exists() for img_path in win_img_list+cur_img_list]):
                    print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
                    continue

                resize_scale = 0.5
                win_img_list_resized = []
                for img_path in win_img_list:
                    new_save_name = img_path.stem + f"_{resize_scale}x" + img_path.suffix
                    new_save_dir = img_path.parent.parent / "images_resized"
                    new_save_dir.mkdir(parents=True, exist_ok=True)
                    new_save_path = new_save_dir / new_save_name
                    if new_save_path.exists():
                        try:
                            # Verify that the cached resized image is readable
                            Image.open(str(new_save_path))
                        except Exception as e:
                            print(f"Error opening image {new_save_path}: {e}")
                            # Broken cache entry: delete it and fall through to re-create
                            os.remove(new_save_path)
                        else:
                            win_img_list_resized.append(new_save_path)
                            continue
                    win_img = Image.open(str(img_path))
                    win_img = resize_image(win_img, scale=resize_scale)
                    win_img.save(str(new_save_path))
                    win_img_list_resized.append(new_save_path)

            else:  # window_size == 0
                convs.append(
                    {
                        "from": "human",
                        "value": user_history_instr_prompt.format(
                            history_list="\n".join(
                                [
                                    f"\t{j+1}. " + prev_step["step_instruction"]
                                    for j, prev_step in enumerate(episode["steps"][:i])
                                ]
                            )
                        ),
                    },
                )

            if window_size > 0:
                img_list = win_img_list_resized + cur_img_list
            else:
                img_list = cur_img_list

            if not all([img_path.exists() for img_path in img_list]):
                print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
                continue

            has_img_broken = False
            for img_path in img_list:
                try:
                    Image.open(str(img_path))
                except Exception as e:
                    print(f"Error opening image {img_path}: {e}")
                    has_img_broken = True
                    break
            if has_img_broken:
                print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
                continue

            # Current step details
            convs.append(
                {
                    "from": "human",
                    "value": grounding_step_prompt.format(instruction=step["step_instruction"], step_idx=i+1),
                }
            )
            convs.append(
                {
                    "from": "gpt",
                    "value": grounding_step_ans.format(point_str=f"({step['coord_norm'][0]}, {step['coord_norm'][1]})"),
                }
            )

            convs = merge_convs(convs)

            instructions.append(
                {
                    "image": [str(img_path) for img_path in img_list],
                    "conversations": convs,
                    "bbox_norm": step["bbox_norm"],
                }
            )

    return instructions
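
# Shape of one emitted record (field values are illustrative only):
# {
#     "image": ["images_resized/step_0004_0.5x.png", "images/step_0005.png"],
#     "conversations": [
#         {"from": "human", "value": "The agent is performing the ultimate task: ... <|img|>Step 5. ..."},
#         {"from": "gpt", "value": "```\n(512, 88)\n```"},
#     ],
#     "bbox_norm": [...],
# }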

def process_episode(args):
    """Unpack an (episode, window_size) pair and process that single episode.

    Defined at module level (rather than inside the __main__ guard) so it can
    be pickled by multiprocessing when the start method is "spawn".
    """
    episode, window_size = args
    return process_android_episodes([episode], window_size)


# Example usage
if __name__ == "__main__":
    # Sample data loading (replace with your actual file path)
    with open("ac_train_eposides_13603.json", "r") as file:
        data = json.load(file)
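
    # Expected input layout, inferred from the field accesses in
    # process_android_episodes (concrete values are illustrative only):
    # [
    #   {
    #     "episode_id": ...,
    #     "goal": "...",
    #     "steps": [
    #       {"step_instruction": "...", "img_path": "...",
    #        "action": {"action_type": "click", ...},
    #        "bbox_norm": [...], "coord_norm": [x, y]},
    #       ...
    #     ]
    #   },
    #   ...
    # ]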

    # Process the episodes once per window size in window_size_list
    # window_size_list = [1, 2, 3]
    window_size_list = [0, 1, 2, 3]

    instructions = []
    for window_size in window_size_list:
        tasks = [(episode, window_size) for episode in data]
        with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
            results = list(tqdm(pool.imap(process_episode, tasks), total=len(tasks), desc=f"Window Size {window_size}"))
        for result in results:
            instructions.extend(result)

    print(f"Number of context aware train instructions: {len(instructions)}")

    with open(f"ac_train_window_{'-'.join([str(e) for e in window_size_list])}_{len(instructions)//1000}k.json", "w", encoding="utf-8") as file:
        json.dump(instructions, file, ensure_ascii=False, indent=4)