Aria-UI committed on
Commit
6b98f7b
·
verified ·
1 Parent(s): f2e7ea6

Upload AMEX/prepare_trajectory_grounding.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. AMEX/prepare_trajectory_grounding.py +412 -0
AMEX/prepare_trajectory_grounding.py ADDED
@@ -0,0 +1,412 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from copy import deepcopy
3
+ import os
4
+ from pathlib import Path
5
+ import re
6
+ from multiprocessing import Pool
7
+ from PIL import Image
8
+ from tqdm import tqdm
9
+ import multiprocessing
10
+
11
def resize_image(image, scale=0.75, resample=None):
    """
    Downscale an image by a constant factor, preserving aspect ratio.

    Args:
        image: PIL Image object (anything exposing ``.size`` and ``.resize``).
        scale (float, optional): Multiplier applied to both dimensions.
            Defaults to 0.75.
        resample: Optional resampling filter passed to ``Image.resize``;
            defaults to ``Image.LANCZOS`` (the original hard-coded filter).

    Returns:
        Resized PIL Image.
    """
    # Get current dimensions
    width, height = image.size

    # Calculate new dimensions; clamp to at least 1 pixel so that very small
    # scales cannot yield a zero-sized dimension, which PIL rejects.
    new_width = max(1, int(width * scale))
    new_height = max(1, int(height * scale))

    if resample is None:
        resample = Image.LANCZOS

    # Resize image
    return image.resize((new_width, new_height), resample)
31
+
32
def merge_convs(conversations):
    """
    Merge every run of consecutive 'human' turns into a single turn.

    Successive messages with ``from == 'human'`` are joined with a blank line
    (``"\\n\\n"``) between their values; all other turns are kept as-is and the
    relative order of turns is preserved.

    The input list and its dictionaries are NOT modified: merged turns are
    deep copies (the original implementation mutated the caller's dicts).

    Args:
        conversations (list): List of conversation dictionaries, each with
            'from' and 'value' keys.

    Returns:
        list: New list of conversation dicts with human runs merged.

    Raises:
        ValueError: If input is not a list or contains invalid conversation
            dictionaries.
    """
    # Validate input
    if not isinstance(conversations, list):
        raise ValueError("Input must be a list of conversation dictionaries")

    # Validate each conversation dictionary structure
    for conv in conversations:
        if not isinstance(conv, dict):
            raise ValueError("Each conversation must be a dictionary")
        if 'from' not in conv or 'value' not in conv:
            raise ValueError("Each conversation must have 'from' and 'value' keys")

    processed_conversations = []
    i = 0
    while i < len(conversations):
        # Copy so the caller's dictionaries are never mutated.
        current_conv = deepcopy(conversations[i])

        # If current conversation is 'human', absorb all following human turns.
        if current_conv['from'] == 'human':
            merged_value = current_conv['value']
            j = i + 1
            while j < len(conversations) and conversations[j]['from'] == 'human':
                merged_value += '\n\n' + conversations[j]['value']
                j += 1

            current_conv['value'] = merged_value
            # Jump past the merged run.
            i = j
        else:
            # Non-human turns pass through unchanged.
            i += 1

        processed_conversations.append(current_conv)

    return processed_conversations
82
+
83
def parse_reasoning(input_string):
    """
    Split a fenced reasoning blob into (caption, instruction, reasoning).

    Expects three fenced segments labelled A, B and C, e.g.
    ```A\\n<caption>```, ```B\\nTask: <instruction>```, ```C\\n<reasoning>```.
    A missing trailing fence is tolerated. Returns (None, None, None) when
    exactly three segments cannot be extracted; the "Task: " prefix is
    stripped from the instruction.
    """
    text = input_string.strip()
    # Tolerate a truncated final fence.
    if not text.endswith("```"):
        text += "```"

    # Fenced segments labelled A, B or C; DOTALL lets bodies span lines.
    fence_re = re.compile(r'```([ABC])\n(.*?)```', re.DOTALL)
    segments = [body.strip() for _, body in fence_re.findall(text)]

    if len(segments) != 3:
        return None, None, None

    caption, instruction, reasoning = segments
    return caption, instruction.replace("Task: ", ""), reasoning
107
+
108
def encode_action(action_json):
    """
    Render an action record as a human-readable description.

    Args:
        action_json (dict): Action record. Uses the "action" key plus, per
            type: SWIPE -> "info", "touch_coord", "lift_coord";
            TYPE -> "type_text"; CLICK -> "info".

    Returns:
        str: A readable description; anything unrecognized falls back to
        "Perform <action> action".
    """
    kind = action_json.get("action", "")

    if kind == "SWIPE":
        # Direction is only derived when the swipe metadata has >= 2 entries.
        # NOTE(review): the length is checked on "info" while the direction
        # reads touch/lift coords — presumably intentional; confirm upstream.
        if len(action_json.get("info", [])) >= 2:
            start_y = action_json["touch_coord"][1]
            end_y = action_json["lift_coord"][1]
            if start_y > end_y:
                return "SCROLL UP"
            if start_y < end_y:
                return "SCROLL DOWN"
        # Equal y-coordinates (or short info) fall through to the default.
    elif kind == "TYPE":
        return f'TYPE "{action_json.get("type_text", "")}"'
    elif kind == "CLICK":
        # Hardware-key clicks map to navigation phrases.
        info = action_json.get("info")
        if info == "KEY_HOME":
            return "go to the home screen"
        if info == "KEY_BACK":
            return "go to the previous screen"
        if info == "KEY_RECENT":
            return "go to the previous App"

    # Default case for unrecognized actions
    return f"Perform {kind} action"
147
+
148
# Prompt/answer templates for assembling conversation turns.
# "<|img|>" marks where a GUI screenshot is attached to the turn.
grounding_step_prompt = "<|img|>Step {step_idx}. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: {instruction}"
# Expected grounding answer: a "(x, y)" point wrapped in a code fence.
grounding_step_ans = "```\n{point_str}\n```"
# Templates for a non-grounding (action-only) history step and its "answer".
act_step_prompt = "<|img|>Step {step_idx}. Instruction: {prev_instruction}"
act_step_ans = "The agent's action: {prev_action}"
# Opening turn stating the episode-level task.
user_start_prompt = "The agent is performing the ultimate task: {ultimate_task}."
# Plain-text numbered list of older steps that fall outside the image window.
user_history_instr_prompt = "History of the agent's steps:\n{history_list}."

# Downscale factor applied to history-window screenshots, keyed by window size.
# NOTE(review): no entry for window_size=0 — presumably the lookup only
# happens when window_size > 0; confirm against process_android_episodes.
resize_ratios_per_window_size = {
    1: 0.25,
    2: 0.25,
    3: 0.25,
}
160
+
161
def process_android_episodes(data, window_size=2, img_dir="./AMEX/screenshot"):
    """
    Build grounding training samples from Android episodes.

    Only steps flagged ``is_grounding`` become samples. Each sample contains:
    the episode-level task, older steps as a plain-text numbered history,
    a window of recent steps with their (downscaled) screenshots, and finally
    the current grounding question with its "(x, y)" answer.

    Args:
        data (list): List of episode dictionaries. Each episode has
            "episode_id", "instruction" and "steps"; each step has
            "is_grounding", "step_instruction", "image_path" and, for
            grounding steps, "coord_norm" (0-1000 normalized point).
        window_size (int, optional): Number of recent image-included steps to
            include before the current one. Defaults to 2; 0 disables the
            image window entirely.
        img_dir (str, optional): Directory containing the screenshots.

    Returns:
        list: Samples, each a dict with "image" (list of image path strings)
        and "conversations" (merged human/gpt turns).
    """
    instructions = []
    for episode in data:
        episode_id = episode["episode_id"]

        for i, step in enumerate(episode["steps"]):
            is_grounding = step["is_grounding"]

            # Only grounding (click-target) steps are emitted as samples.
            if not is_grounding:
                continue

            if window_size > 0 and i == 0:  # skip the first step if window_size > 0
                continue

            # Opening turn stating the episode-level task.
            convs = [
                {
                    "from": "human",
                    "value": user_start_prompt.format(
                        ultimate_task=episode["instruction"]
                    ),
                },
            ]

            # Screenshot of the current step (resolved inside img_dir by name).
            cur_img_list = [Path(img_dir) / Path(step["image_path"]).name]

            if window_size > 0:
                # The most recent steps whose screenshots are attached.
                window_steps = episode["steps"][i-window_size:i] if i >= window_size else episode["steps"][:i]

                if i > window_size:  # has more history steps larger than window_size
                    # Older steps are summarized as a text-only numbered list.
                    convs.append(
                        {
                            "from": "human",
                            "value": user_history_instr_prompt.format(
                                history_list="\n".join(
                                    [
                                        f"\t{j+1}. " + prev_step["step_instruction"]
                                        for j, prev_step in enumerate(episode["steps"][:i-window_size])
                                    ]
                                )
                            ),
                        },
                    )

                convs.append(
                    {
                        "from": "human",
                        "value": "The recent steps with the GUI images are as follows:\n",
                    }
                )

                # One (prompt, answer) pair per windowed step; step_idx is the
                # 1-based absolute index of that step within the episode.
                for j, win_step_i in enumerate(window_steps):
                    if win_step_i["is_grounding"]:
                        convs.append(
                            {
                                "from": "human",
                                "value": grounding_step_prompt.format(
                                    instruction=win_step_i["step_instruction"], step_idx=i+1-(len(window_steps)-j)
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "gpt",
                                "value": grounding_step_ans.format(point_str=f"({win_step_i['coord_norm'][0]}, {win_step_i['coord_norm'][1]})"),
                            }
                        )
                    else:
                        # NOTE(review): the action description is used as the
                        # "instruction" of non-grounding steps — presumably
                        # intentional (step_instruction was set from
                        # encode_action upstream); confirm.
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_prompt.format(
                                    prev_instruction=encode_action(win_step_i), step_idx=i+1-(len(window_steps)-j)
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_ans.format(
                                    prev_action=encode_action(win_step_i)
                                ),
                            }
                        )

                win_img_list = [
                    (Path(img_dir) / Path(win_step["image_path"]).name) for win_step in window_steps
                ]

                # Skip the sample if any required screenshot is missing.
                if not all([img_path.exists() for img_path in win_img_list+cur_img_list]):
                    print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
                    continue

                # Skip the sample if any screenshot fails to open (corrupt file).
                has_img_broken = False
                for img_path in win_img_list+cur_img_list:
                    try:
                        Image.open(str(img_path))
                    except Exception as e:
                        print(f"Error opening image {img_path}: {e}")
                        has_img_broken = True
                        break
                if has_img_broken:
                    print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
                    continue

                # Downscale window screenshots, caching results next to the
                # originals under <parent>/images_resized.
                # NOTE(review): resize_ratios_per_window_size has keys 1-3 only,
                # so this lookup assumes window_size in {1, 2, 3} here.
                resize_scale = resize_ratios_per_window_size[window_size]
                win_img_list_resized = []
                for img_path in win_img_list:
                    new_save_name = img_path.stem + f"_{resize_scale}x" + img_path.suffix
                    new_save_dir = img_path.parent.parent / f"images_resized"
                    new_save_dir.mkdir(parents=True, exist_ok=True)
                    new_save_path = new_save_dir / new_save_name
                    if new_save_path.exists():
                        # Reuse the cached resized image if it opens cleanly;
                        # delete it and regenerate below otherwise.
                        try:
                            Image.open(str(new_save_path))
                        except Exception as e:
                            print(f"Error opening image {new_save_path}: {e}")
                            os.remove(new_save_path)
                        else:
                            win_img_list_resized.append(new_save_path)
                            continue
                    win_img = Image.open(str(img_path))
                    win_img = resize_image(win_img, scale=resize_scale)
                    win_img.save(str(new_save_path))
                    win_img_list_resized.append(new_save_path)
            else:
                # No image window: every prior step goes into the text history.
                # NOTE(review): with window_size == 0 the slice below is
                # episode["steps"][:i], i.e. all prior steps.
                convs.append(
                    {
                        "from": "human",
                        "value": user_history_instr_prompt.format(
                            history_list="\n".join(
                                [
                                    f"\t{j+1}. " + prev_step["step_instruction"]
                                    for j, prev_step in enumerate(episode["steps"][:i-window_size])
                                ]
                            )
                        ),
                    },
                )

            # Resize the current screenshot at a fixed 0.5 scale, with the same
            # cache-and-validate scheme as the window images.
            # NOTE(review): when window_size == 0 the existence/corruption
            # checks above were skipped, so Image.open here may raise for a
            # missing current image — confirm whether that is acceptable.
            cur_img_list_resized = []
            for img_path in cur_img_list:
                new_save_name = img_path.stem + f"_{0.5}x" + img_path.suffix
                new_save_dir = img_path.parent.parent / f"images_resized"
                new_save_dir.mkdir(parents=True, exist_ok=True)
                new_save_path = new_save_dir / new_save_name
                if new_save_path.exists():
                    try:
                        Image.open(str(new_save_path))
                    except Exception as e:
                        print(f"Error opening image {new_save_path}: {e}")
                        os.remove(new_save_path)
                    else:
                        cur_img_list_resized.append(new_save_path)
                        continue
                cur_img = Image.open(str(img_path))
                cur_img = resize_image(cur_img, scale=0.5)
                cur_img.save(str(new_save_path))
                cur_img_list_resized.append(new_save_path)

            if window_size > 0:
                img_list = win_img_list_resized + cur_img_list_resized
            else:
                img_list = cur_img_list_resized

            # Current grounding question and its ground-truth point answer.
            convs.append(
                {
                    "from": "human",
                    "value": grounding_step_prompt.format(instruction=step["step_instruction"], step_idx=i+1),
                }
            )
            convs.append(
                {
                    "from": "gpt",
                    "value": grounding_step_ans.format(point_str=f"({step['coord_norm'][0]}, {step['coord_norm'][1]})"),
                }
            )

            # Collapse consecutive human turns into single turns.
            convs = merge_convs(convs)

            instructions.append(
                {
                    "image": [str(img_path) for img_path in img_list],
                    "conversations": convs,
                }
            )

    return instructions
359
+
360
# Example usage
if __name__ == "__main__":
    # Sample data loading (replace with your actual file path)
    data = []

    # Load per-episode JSON files produced by the reasoning-annotation stage.
    episode_files = list(Path("./episodes_grounding_inner_reasoning_v3").glob("*.json"))
    for episode_file in episode_files:
        with open(episode_file, "r", encoding="utf-8") as file:
            episode_data = json.load(file)
        # Derive each step's instruction text and whether it is a grounding
        # (click-target) sample.
        for i, step_i in enumerate(episode_data["steps"]):
            if not step_i.get("grounding_reasoning"):
                # No reasoning text: fall back to a textual action description.
                step_i["step_instruction"] = encode_action(step_i)
                step_i["is_grounding"] = False
                continue
            _, step_instruction, _ = parse_reasoning(step_i["grounding_reasoning"])
            step_i["step_instruction"] = step_instruction
            # Grounding requires a parsed instruction AND a TAP action.
            step_i["is_grounding"] = bool(step_instruction) and step_i["action"] == "TAP"
        data.append(episode_data)

    def preprocess_coord_norm(episode):
        # Convert absolute TAP pixel coordinates to 0-1000 normalized points.
        for step in episode["steps"]:
            image_path = Path("./AMEX/screenshot") / step["image_path"]
            if not image_path.exists():
                print(f"Image not found: {image_path}")
            if step["action"] == "TAP":
                # NOTE(review): Image.open still runs even when the existence
                # check above just failed — presumably missing screenshots are
                # rare; confirm this cannot crash the pool worker.
                image = Image.open(image_path)
                image_width, image_height = image.size
                step["coord_norm"] = [int(step["touch_coord"][0]/image_width*1000), int(step["touch_coord"][1]/image_height*1000)]
        return episode

    # Normalize coordinates in parallel across all episodes.
    with Pool() as pool:
        data = pool.map(preprocess_coord_norm, data)

    # Build samples for several history-window sizes (0 = text-only history).
    # window_size_list = [1, 2, 3]
    window_size_list = [0,1,2,3]

    def process_episode(args):
        # Unpack (episode, window_size) so the worker fits pool.imap's
        # single-argument signature.
        episode, window_size = args
        return process_android_episodes([episode], window_size)

    instructions = []
    for window_size in window_size_list:
        tasks = [(episode, window_size) for episode in data]
        with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
            results = list(tqdm(pool.imap(process_episode, tasks), total=len(tasks), desc=f"Window Size {window_size}"))
        for result in results:
            instructions.extend(result)

    print(f"Number of context aware train instructions: {len(instructions)}")

    # Output filename encodes the window sizes used and the sample count in
    # thousands, e.g. amex_train_window_0-1-2-3_42k.json.
    with open(f"amex_train_window_{'-'.join([str(e) for e in window_size_list])}_{len(instructions)//1000}k.json", "w", encoding="utf-8") as file:
        json.dump(instructions, file, ensure_ascii=False, indent=4)