Aria-UI committed
Commit dc05889 · verified · 1 Parent(s): e5badb6

Upload aitz/prepare_trajectory_grounding.py with huggingface_hub

Files changed (1)
  1. aitz/prepare_trajectory_grounding.py +309 -0
aitz/prepare_trajectory_grounding.py ADDED
@@ -0,0 +1,309 @@
import ast
import json
from multiprocessing import Pool
from pathlib import Path

from PIL import Image

def merge_convs(conversations):
    """
    Merge every run of successive 'human' conversations into a single turn.

    Args:
        conversations (list): List of conversation dictionaries.

    Returns:
        list: Processed conversations with all successive human messages merged.

    Raises:
        ValueError: If input is not a list or contains invalid conversation dictionaries.
    """
    # Validate input
    if not isinstance(conversations, list):
        raise ValueError("Input must be a list of conversation dictionaries")

    # Validate each conversation dictionary structure
    for conv in conversations:
        if not isinstance(conv, dict):
            raise ValueError("Each conversation must be a dictionary")
        if 'from' not in conv or 'value' not in conv:
            raise ValueError("Each conversation must have 'from' and 'value' keys")

    processed_conversations = []
    i = 0
    while i < len(conversations):
        current_conv = conversations[i]

        # If the current conversation is 'human', merge the whole run
        if current_conv['from'] == 'human':
            # Collect all successive human conversations
            merged_value = current_conv['value']
            j = i + 1
            while j < len(conversations) and conversations[j]['from'] == 'human':
                merged_value += '\n\n' + conversations[j]['value']
                j += 1

            # Update the current conversation with the merged value
            current_conv['value'] = merged_value

            # Advance past the merged human run
            i = j
        else:
            # Non-human conversations are kept as-is
            i += 1

        processed_conversations.append(current_conv)

    return processed_conversations

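# Illustration of merge_convs on hypothetical turns: adjacent human
# messages collapse into one turn, separated by blank lines.
#   >>> merge_convs([
#   ...     {"from": "human", "value": "A"},
#   ...     {"from": "human", "value": "B"},
#   ...     {"from": "gpt", "value": "C"},
#   ... ])
#   [{'from': 'human', 'value': 'A\n\nB'}, {'from': 'gpt', 'value': 'C'}]
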
def transform_bbox(bbox, image_x, image_y):
    # Transform a (y, x, height, width) pixel box into a [x1, y1, x2, y2]
    # box normalized to a 0-1000 coordinate scale
    y, x, height, width = bbox
    x1 = int(1000 * x / image_x)
    y1 = int(1000 * y / image_y)
    x2 = int(1000 * (x + width) / image_x)
    y2 = int(1000 * (y + height) / image_y)
    bbox_norm = [x1, y1, x2, y2]

    return bbox_norm

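# Quick check with made-up numbers: a 216x96 px element at (x=540, y=960)
# on a 1080x1920 screenshot.
#   >>> transform_bbox([960, 540, 96, 216], image_x=1080, image_y=1920)
#   [500, 500, 700, 550]
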
grounding_step_prompt = "<|img|>Step {step_idx}. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: {instruction}"
grounding_step_ans = "```\n{point_str}\n```"
act_step_prompt = "<|img|>Step {step_idx}. Instruction: {prev_instruction}"
act_step_ans = "The agent's action: {prev_action}"
user_start_prompt = "The agent is performing the ultimate task: {ultimate_task}."
user_history_instr_prompt = "History of the agent's steps:\n{history_list}."

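# Rendered with hypothetical values, a grounding turn and its target look
# like this (the <|img|> token marks where a screenshot is spliced in):
#   >>> grounding_step_prompt.format(step_idx=3, instruction="tap the search bar")
#   '<|img|>Step 3. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: tap the search bar'
#   >>> grounding_step_ans.format(point_str="(500, 120)")
#   '```\n(500, 120)\n```'
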
def process_android_episodes(data, window_size=2):
    """
    Process Android episodes and extract steps with click or long_press actions.

    Args:
        data (list): List of episode dictionaries
        window_size (int, optional): Number of recent image-included steps to
            include before the current one. Defaults to 2 (current image plus
            2 previous image-included steps).

    Returns:
        list: Training samples, each with an image list and its conversations
    """
    instructions = []
    for episode in data:
        for i, step in enumerate(episode):
            # A step counts as a tap when the touch and lift points coincide
            res_touch_yx = ast.literal_eval(step["result_touch_yx"])
            res_touch_yx = [round(res_touch_yx[0], 3), round(res_touch_yx[1], 3)]
            res_lift_yx = ast.literal_eval(step["result_lift_yx"])
            res_lift_yx = [round(res_lift_yx[0], 3), round(res_lift_yx[1], 3)]

            is_tap = int(res_touch_yx[0]) != -1 and (res_touch_yx[0] == res_lift_yx[0] and res_touch_yx[1] == res_lift_yx[1])

            step["is_tap"] = is_tap

            if "coat_action_desc" not in step or step["coat_action_desc"] is None:
                break

            if not is_tap:
                continue

            if window_size > 0 and i == 0:  # skip the first step if window_size > 0
                continue

            convs = [
                {
                    "from": "human",
                    "value": user_start_prompt.format(
                        ultimate_task=step["instruction"]
                    ),
                },
            ]

            cur_img_list = [Path("./") / Path(step["image_path"])]

            if window_size > 0:
                window_steps = episode[i - window_size:i] if i >= window_size else episode[:i]

                if i > window_size:  # steps older than the window become text-only history
                    convs.append(
                        {
                            "from": "human",
                            "value": user_history_instr_prompt.format(
                                history_list="\n".join(
                                    [
                                        f"\t{j + 1}. " + prev_step["coat_action_desc"]
                                        for j, prev_step in enumerate(episode[:i - window_size])
                                    ]
                                )
                            ),
                        },
                    )

                convs.append(
                    {
                        "from": "human",
                        "value": "The recent steps with the GUI images are as follows:\n",
                    }
                )

                # Replay each in-window step: taps become grounding Q/A pairs,
                # everything else becomes an instruction/action pair
                for j, win_step_i in enumerate(window_steps):
                    if win_step_i["is_tap"]:
                        convs.append(
                            {
                                "from": "human",
                                "value": grounding_step_prompt.format(
                                    instruction=win_step_i["coat_action_desc"], step_idx=i + 1 - (len(window_steps) - j)
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "gpt",
                                "value": grounding_step_ans.format(point_str=f"({win_step_i['coord_norm'][0]}, {win_step_i['coord_norm'][1]})"),
                            }
                        )
                    else:
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_prompt.format(
                                    prev_instruction=win_step_i["coat_action_desc"], step_idx=i + 1 - (len(window_steps) - j)
                                ),
                            }
                        )
                        if win_step_i["result_action_text"]:
                            convs.append(
                                {
                                    "from": "human",
                                    "value": act_step_ans.format(
                                        prev_action=f"Type: {win_step_i['result_action_text']}"
                                    ),
                                }
                            )
                        else:
                            convs.append(
                                {
                                    "from": "human",
                                    "value": act_step_ans.format(
                                        prev_action=win_step_i["coat_action_desc"]
                                    ),
                                }
                            )

                win_img_list = [
                    str(Path("./") / Path(win_step["image_path"])) for win_step in window_steps
                ]

            else:
                # With no image window, the whole prefix becomes text-only history
                convs.append(
                    {
                        "from": "human",
                        "value": user_history_instr_prompt.format(
                            history_list="\n".join(
                                [
                                    f"\t{j + 1}. " + prev_step["coat_action_desc"]
                                    for j, prev_step in enumerate(episode[:i - window_size])
                                ]
                            )
                        ),
                    },
                )

            # Current screenshot first, then the window screenshots
            img_list = cur_img_list + win_img_list if window_size > 0 else cur_img_list

            # Drop the sample if any referenced screenshot cannot be opened
            has_img_broken = False
            for img_path in img_list:
                try:
                    Image.open(str(img_path))
                except Exception as e:
                    print(f"Error opening image {img_path}: {e}")
                    has_img_broken = True
                    break
            if has_img_broken:
                print(f"Image broken for episode {step.get('episode_id', '?')}, step {i + 1}. Skipping...")
                continue

            # Current step: a grounding question and its target point
            convs.append(
                {
                    "from": "human",
                    "value": grounding_step_prompt.format(instruction=step["coat_action_desc"], step_idx=i + 1),
                }
            )
            convs.append(
                {
                    "from": "gpt",
                    "value": grounding_step_ans.format(point_str=f"({step['coord_norm'][0]}, {step['coord_norm'][1]})"),
                }
            )

            convs = merge_convs(convs)

            instructions.append(
                {
                    "image": [str(img_path) for img_path in img_list],
                    "conversations": convs,
                }
            )

    return instructions

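# Minimal usage sketch with a synthetic one-step episode (all values below
# are hypothetical; the keys are exactly the ones this script reads). A real
# screenshot file is created so the image-integrity check passes:
#   >>> from tempfile import mkdtemp
#   >>> shot = Path(mkdtemp()) / "step_0.png"
#   >>> Image.new("RGB", (1080, 1920)).save(shot)
#   >>> episode = [{
#   ...     "episode_id": "demo", "instruction": "open the settings app",
#   ...     "coat_action_desc": "tap the Settings icon",
#   ...     "image_path": str(shot),
#   ...     "result_touch_yx": "[0.5, 0.5]", "result_lift_yx": "[0.5, 0.5]",
#   ...     "result_action_text": "", "coord_norm": [500, 500],
#   ... }]
#   >>> samples = process_android_episodes([episode], window_size=0)
#   >>> samples[0]["conversations"][-1]["value"]
#   '```\n(500, 500)\n```'
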
# Example usage
if __name__ == "__main__":
    # Sample data loading (replace with your actual file path)

    data = []

    episode_files = list(Path("./").rglob("*/*/*.json"))
    for episode_file in episode_files:
        with open(episode_file, "r", encoding="utf-8") as file:
            episode_data = json.load(file)
            data.append(episode_data)

    img_parent_path = Path("./")

    def preprocess_coord_norm(episode):
        # Snap each tap to the center of the smallest UI element that
        # contains it; taps outside every element keep their raw point
        for step in episode:
            if int(ast.literal_eval(step["result_touch_yx"])[0]) != -1:
                if not Path(img_parent_path / step["image_path"]).exists():
                    continue
                image_x, image_y = Image.open(img_parent_path / step["image_path"]).size
                elem_bboxes = ast.literal_eval(step["ui_positions"])  # (y, x, height, width)
                elem_bboxes = [transform_bbox(bbox, image_x, image_y) for bbox in elem_bboxes]
                click_point_yx = ast.literal_eval(step["result_touch_yx"])
                click_point = [1000 * click_point_yx[1], 1000 * click_point_yx[0]]

                # Check which element contains the click point
                bbox = None
                for elem_bbox in elem_bboxes:
                    if elem_bbox[0] <= click_point[0] <= elem_bbox[2] and elem_bbox[1] <= click_point[1] <= elem_bbox[3]:
                        if bbox is None:
                            bbox = elem_bbox
                        else:
                            # Compare areas and keep the smaller element
                            area = (bbox[2] - bbox[0]) * (bbox[3] - bbox[1])
                            new_area = (elem_bbox[2] - elem_bbox[0]) * (elem_bbox[3] - elem_bbox[1])
                            if new_area < area:
                                bbox = elem_bbox
                if bbox is None:
                    coord_norm = [int(click_point[0]), int(click_point[1])]
                else:
                    coord_norm = [(bbox[0] + bbox[2]) // 2, (bbox[1] + bbox[3]) // 2]

                step["coord_norm"] = coord_norm
        return episode

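    # Worked example with made-up values: on a 1080x1920 screenshot with
    # ui_positions "[(900, 480, 120, 240)]" (y, x, height, width in pixels),
    # transform_bbox yields [444, 468, 666, 531]. A touch at result_touch_yx
    # "[0.5, 0.5]" maps to click_point [500, 500], which falls inside that
    # box, so coord_norm snaps to its center:
    # [(444 + 666) // 2, (468 + 531) // 2] = [555, 499].
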
    # Preprocess the episodes in parallel. Note that Pool.map needs
    # preprocess_coord_norm to be picklable in the workers, which holds
    # under the default fork start method on Linux.
    with Pool() as pool:
        data = pool.map(preprocess_coord_norm, data)

    # Build training samples for several context window sizes
    window_size_list = [0, 1, 2, 3]

    instructions = []
    for window_size in window_size_list:
        instructions.extend(process_android_episodes(data, window_size=window_size))

    print(f"Number of context-aware train instructions: {len(instructions)}")

    with open(f"aitz_train_window_{'-'.join([str(e) for e in window_size_list])}_{len(instructions)//1000}k.json", "w", encoding="utf-8") as file:
        json.dump(instructions, file, ensure_ascii=False, indent=4)
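    # The output filename encodes the window sizes and sample count: with
    # window sizes [0, 1, 2, 3] and 74,000 samples (a hypothetical count),
    # the write above produces aitz_train_window_0-1-2-3_74k.json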