Aria-UI committed on
Commit
0a54d46
·
verified ·
1 Parent(s): 10a4ff3

Upload GUI-Odyssey/prepare_trajectory_grounding.py with huggingface_hub

Browse files
GUI-Odyssey/prepare_trajectory_grounding.py ADDED
@@ -0,0 +1,416 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ from copy import deepcopy
3
+ import os
4
+ from pathlib import Path
5
+ import re
6
+ from PIL import Image
7
+ from tqdm import tqdm
8
+ import multiprocessing
9
+
10
def resize_image(image, scale=0.75):
    """
    Uniformly downscale an image by a multiplicative factor, preserving
    aspect ratio.

    (The previous docstring claimed the shorter edge was resized to 720
    pixels; the implementation has always scaled both dimensions by
    ``scale``.)

    Args:
        image: PIL Image object.
        scale (float): Factor applied to both width and height
            (default 0.75).

    Returns:
        Resized PIL Image, resampled with LANCZOS.
    """
    # Get current dimensions
    width, height = image.size

    # int() truncates; clamp to 1 so degenerate inputs/scales can never
    # produce a zero-pixel dimension (PIL rejects 0-sized images).
    new_width = max(1, int(width * scale))
    new_height = max(1, int(height * scale))

    # Resize image with a high-quality downsampling filter.
    resized_image = image.resize((new_width, new_height), Image.LANCZOS)
    return resized_image
30
+
31
def merge_convs(conversations):
    """
    Merge every run of consecutive 'human' turns into a single turn.

    Successive messages whose 'from' field is 'human' are concatenated
    with a blank line ('\\n\\n') between them; all other turns pass
    through unchanged. Unlike the previous implementation, the input
    list and its dictionaries are NOT mutated — merged turns are copies.

    Args:
        conversations (list): List of conversation dictionaries, each
            with 'from' and 'value' keys.

    Returns:
        list: Processed conversations with all successive human
        messages merged.

    Raises:
        ValueError: If input is not a list or contains invalid
            conversation dictionaries.
    """
    # Validate input
    if not isinstance(conversations, list):
        raise ValueError("Input must be a list of conversation dictionaries")

    # Validate each conversation dictionary structure
    for conv in conversations:
        if not isinstance(conv, dict):
            raise ValueError("Each conversation must be a dictionary")
        if 'from' not in conv or 'value' not in conv:
            raise ValueError("Each conversation must have 'from' and 'value' keys")

    processed_conversations = []
    n = len(conversations)
    i = 0
    while i < n:
        current_conv = conversations[i]

        if current_conv['from'] == 'human':
            # Collect the whole run of consecutive human messages.
            parts = [current_conv['value']]
            j = i + 1
            while j < n and conversations[j]['from'] == 'human':
                parts.append(conversations[j]['value'])
                j += 1

            # Copy before rewriting 'value' so the caller's dicts stay
            # untouched; join once instead of repeated += concatenation.
            merged = dict(current_conv)
            merged['value'] = '\n\n'.join(parts)
            processed_conversations.append(merged)

            # Jump past the merged run.
            i = j
        else:
            # Non-human turns are passed through as-is.
            processed_conversations.append(current_conv)
            i += 1

    return processed_conversations
81
+
82
+
83
def parse_reasoning(input_string):
    """
    Split a reasoning string into (caption, instruction, reasoning).

    The input is expected to contain three fenced sections labelled
    ```A```, ```B``` and ```C```. A missing final closing fence is
    tolerated (one is appended). Returns (None, None, None) unless
    exactly three sections are found. A leading "Task: " prefix is
    stripped from the instruction.
    """
    text = input_string.strip()

    # Tolerate a truncated final fence.
    if not text.endswith("```"):
        text = text + "```"

    # Capture the body of each ```A/B/C ... ``` fenced section.
    sections = [
        body.strip()
        for _, body in re.findall(r'```([ABC])\n(.*?)```', text, re.DOTALL)
    ]

    # Anything other than exactly three sections is treated as a parse failure.
    if len(sections) != 3:
        return None, None, None

    caption, instruction, reasoning = sections
    return caption, instruction.replace("Task: ", ""), reasoning
107
+
108
def encode_action(action_json):
    """
    Encode different types of actions into human-readable descriptions.

    Args:
        action_json (dict): A dictionary containing action details
            ("action" plus an action-specific "info" payload).

    Returns:
        str: A human-readable description of the action, e.g.
        "SCROLL UP", 'TYPE "hi"', "go to the home screen", or the
        generic "Perform <ACTION> action" fallback.
    """
    kind = action_json.get("action", "")

    if kind == "SCROLL":
        points = action_json.get("info", [])
        if len(points) >= 2:
            # Direction follows the sign of the y displacement between
            # the first and second touch points.
            delta_y = points[1][1] - points[0][1]
            if delta_y < 0:
                return "SCROLL UP"
            if delta_y > 0:
                return "SCROLL DOWN"
    elif kind == "TEXT":
        return f'TYPE "{action_json.get("info", "")}"'
    elif kind == "CLICK":
        # Hardware-key clicks carry a sentinel string in "info".
        key_descriptions = {
            "KEY_HOME": "go to the home screen",
            "KEY_BACK": "go to the previous screen",
            "KEY_RECENT": "go to the previous App",
        }
        info = action_json.get("info")
        if isinstance(info, str) and info in key_descriptions:
            return key_descriptions[info]

    # Fallback for anything not matched above (including zero-delta scrolls).
    return f"Perform {kind} action"
147
+
148
# Prompt templates for assembling multi-turn training conversations.
# "<|img|>" marks where the corresponding screenshot is spliced in by the
# downstream training pipeline.
grounding_step_prompt = "<|img|>Step {step_idx}. Given a GUI image, what are the relative (0-1000) pixel point coordinates for the element corresponding to the following instruction or description: {instruction}"
# Expected grounding answer: a fenced "(x, y)" point string.
grounding_step_ans = "```\n{point_str}\n```"
# Prompt/answer pair replaying a non-grounding (action) history step.
act_step_prompt = "<|img|>Step {step_idx}. Instruction: {prev_instruction}"
act_step_ans = "The agent's action: {prev_action}"
# Conversation opener describing the episode-level task.
user_start_prompt = "The agent is performing the ultimate task: {ultimate_task}."
# Text-only summary of steps that fall outside the image window.
user_history_instr_prompt = "History of the agent's steps:\n{history_list}."

# Downscale factor applied to history (window) screenshots, keyed by
# window size — keeps multi-image prompts within a manageable pixel budget.
# NOTE(review): all sizes currently map to 0.25; presumably tuned per
# window size originally — confirm before changing.
resize_ratios_per_window_size = {
    1: 0.25,
    2: 0.25,
    3: 0.25,
}
160
+
161
def process_android_episodes(data, window_size=2):
    """
    Build grounding training samples from GUI-Odyssey episodes.

    For each step flagged ``is_grounding``, assembles one multi-turn
    conversation: the ultimate task, a textual list of steps older than
    the image window, the most recent ``window_size`` steps interleaved
    with their (resized, disk-cached) screenshots, and finally the
    current grounding question/answer pair.

    Args:
        data (list): List of episode dictionaries.
        window_size (int, optional): Number of recent image-included
            steps to inline before the current step. Defaults to 2.
            0 produces text-only history.

    Returns:
        list: Sample dicts of the form
        ``{"image": [paths...], "conversations": [...]}``.
    """
    instructions = []
    for episode in data:
        episode_id = episode["episode_id"]

        for i, step in enumerate(episode["steps"]):
            is_grounding = step["is_grounding"]

            # Only grounding steps become training samples.
            if not is_grounding:
                continue

            # Conversation opener: the episode-level task description.
            convs = [
                {
                    "from": "human",
                    "value": user_start_prompt.format(
                        ultimate_task=episode["task_info"]["task"] + " " + episode["task_info"]["instruction"]
                    ),
                },
            ]

            # Screenshot for the current (grounding) step.
            cur_img_list = [Path("GUI-Odyssey/screenshots") / Path(step["screenshot"]).name]

            if window_size > 0:
                # The last `window_size` steps preceding the current one.
                window_steps = episode["steps"][i-window_size:i] if i >= window_size else episode["steps"][:i]

                if i > window_size:  # has more history steps larger than window_size
                    # Steps older than the window are summarized as text only.
                    convs.append(
                        {
                            "from": "human",
                            "value": user_history_instr_prompt.format(
                                history_list="\n".join(
                                    [
                                        f"\t{j+1}. " + prev_step["step_instruction"]
                                        for j, prev_step in enumerate(episode["steps"][:i-window_size])
                                    ]
                                )
                            ),
                        },
                    )

                convs.append(
                    {
                        "from": "human",
                        "value": "The recent steps with the GUI images are as follows:\n",
                    }
                )

                # Replay each windowed step as prompt(/answer) turns.
                # step_idx is the 1-based absolute index of the windowed step.
                for j, win_step_i in enumerate(window_steps):
                    if win_step_i["is_grounding"]:
                        # Grounding step: question plus gpt point answer.
                        convs.append(
                            {
                                "from": "human",
                                "value": grounding_step_prompt.format(
                                    instruction=win_step_i["step_instruction"], step_idx=i+1-(len(window_steps)-j)
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "gpt",
                                "value": grounding_step_ans.format(point_str=f"({win_step_i['coord_norm'][0]}, {win_step_i['coord_norm'][1]})"),
                            }
                        )
                    else:
                        # Non-grounding step: instruction and performed action,
                        # both as human turns (merged later by merge_convs).
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_prompt.format(
                                    prev_instruction=encode_action(win_step_i), step_idx=i+1-(len(window_steps)-j)
                                ),
                            }
                        )
                        convs.append(
                            {
                                "from": "human",
                                "value": act_step_ans.format(
                                    prev_action=encode_action(win_step_i)
                                ),
                            }
                        )

                win_img_list = [
                    (Path("GUI-Odyssey/screenshots") / Path(win_step["screenshot"]).name) for win_step in window_steps
                ]
                img_list = win_img_list + cur_img_list

                # Skip this sample if any original screenshot is missing.
                if not all([img_path.exists() for img_path in img_list]):
                    print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
                    continue

                # Skip this sample if any original screenshot fails to open.
                has_img_broken = False
                for img_path in img_list:
                    try:
                        Image.open(str(img_path))
                    except Exception as e:
                        print(f"Error opening image {img_path}: {e}")
                        has_img_broken = True
                        break
                if has_img_broken:
                    print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
                    continue

                # Downscale windowed screenshots, caching results under
                # images_resized/ (sibling of the screenshots directory).
                resize_scale = resize_ratios_per_window_size[window_size]
                win_img_list_resized = []
                for img_path in win_img_list:
                    new_save_name = img_path.stem + f"_{resize_scale}x" + img_path.suffix
                    new_save_dir = img_path.parent.parent / f"images_resized"
                    new_save_dir.mkdir(parents=True, exist_ok=True)
                    new_save_path = new_save_dir / new_save_name
                    if new_save_path.exists():
                        # Reuse a previously cached resize (not re-validated here,
                        # unlike the current-image cache below).
                        win_img_list_resized.append(new_save_path)
                        continue
                    win_img = Image.open(str(img_path))
                    win_img = resize_image(win_img, scale=resize_scale)
                    win_img.save(str(new_save_path))
                    win_img_list_resized.append(new_save_path)

            else:  # window_size == 0
                # No image window: the entire preceding history becomes text.
                convs.append(
                    {
                        "from": "human",
                        "value": user_history_instr_prompt.format(
                            history_list="\n".join(
                                [
                                    f"\t{j+1}. " + prev_step["step_instruction"]
                                    for j, prev_step in enumerate(episode["steps"][:i])
                                ]
                            )
                        ),
                    },
                )

            # Resize the current screenshot at a fixed 0.5 scale, validating
            # any cached copy and regenerating it if corrupt.
            cur_img_list_resized = []
            for img_path in cur_img_list:
                new_save_name = img_path.stem + f"_{0.5}x" + img_path.suffix
                new_save_dir = img_path.parent.parent / f"images_resized"
                new_save_dir.mkdir(parents=True, exist_ok=True)
                new_save_path = new_save_dir / new_save_name
                if new_save_path.exists():
                    try:
                        Image.open(str(new_save_path))
                    except Exception as e:
                        # Cached file is corrupt: delete it and fall through
                        # to regenerate below.
                        print(f"Error opening image {new_save_path}: {e}")
                        os.remove(new_save_path)
                    else:
                        cur_img_list_resized.append(new_save_path)
                        continue
                cur_img = Image.open(str(img_path))
                cur_img = resize_image(cur_img, scale=0.5)
                cur_img.save(str(new_save_path))
                cur_img_list_resized.append(new_save_path)

            # Final image set for this sample: windowed (if any) + current.
            if window_size > 0:
                img_list = win_img_list_resized + cur_img_list_resized
            else:
                img_list = cur_img_list_resized

            # Re-validate the resized set before emitting the sample.
            if not all([img_path.exists() for img_path in img_list]):
                print(f"Image not found for episode {episode_id}, step {i+1}. Skipping...")
                continue

            has_img_broken = False
            for img_path in img_list:
                try:
                    Image.open(str(img_path))
                except Exception as e:
                    print(f"Error opening image {img_path}: {e}")
                    has_img_broken = True
                    break
            if has_img_broken:
                print(f"Image broken for episode {episode_id}, step {i+1}. Skipping...")
                continue

            # Current step details
            convs.append(
                {
                    "from": "human",
                    "value": grounding_step_prompt.format(instruction=step["step_instruction"], step_idx=i+1),
                }
            )
            convs.append(
                {
                    "from": "gpt",
                    "value": grounding_step_ans.format(point_str=f"({step['coord_norm'][0]}, {step['coord_norm'][1]})"),
                }
            )

            # Collapse consecutive human turns into single messages.
            convs = merge_convs(convs)

            instructions.append(
                {
                    "image": [str(img_path) for img_path in img_list],
                    "conversations": convs,
                }
            )

    return instructions
369
+
370
# Example usage
if __name__ == "__main__":
    # Load the official split file mapping split names to episode filenames.
    # Sample data loading (replace with your actual file path)
    splits_info = json.load(open("GUI-Odyssey/splits/random_split.json", "r", encoding="utf-8"))

    episode_data_list = list(Path("episodes_grounding_inner_reasoning_v3").glob("*.json"))
    # Keep only episodes belonging to the training split, then load them.
    episode_data_list = [p for p in episode_data_list if p.name in splits_info["train"]]
    episode_data_list = [json.load(open(str(p), "r", encoding="utf-8")) for p in episode_data_list]

    # Annotate each step in place: derive step_instruction / caption /
    # reasoning from the "grounding_reasoning" text when present, otherwise
    # fall back to the encoded raw action (non-grounding step).
    for episode_data in tqdm(episode_data_list, desc="Parsing fields..."):
        for step in episode_data["steps"]:
            if "grounding_reasoning" not in step:
                step["step_instruction"] = encode_action(step)
                step["is_grounding"] = False
                continue

            caption, instruction, reasoning = parse_reasoning(step["grounding_reasoning"])
            step["step_instruction"] = instruction
            step["caption"] = caption
            step["reasoning"] = reasoning
            # Grounding step only when an instruction was successfully parsed.
            step["is_grounding"] = not(not instruction)
            # First "info" entry holds the click point; presumably already
            # normalized to 0-1000 — TODO confirm against the data source.
            step["coord_norm"] = step["info"][0]

    # Process the episodes with default window_size=3
    # window_size_list = [1, 2, 3]
    window_size_list = [0]

    # instructions = []
    # for window_size in window_size_list:
    #     instructions.extend(process_android_episodes(episode_data_list, window_size=window_size))

    def process_episode(args):
        # Pool worker: unpack (episode, window_size) and process one episode.
        episode, window_size = args
        return process_android_episodes([episode], window_size)

    # Fan episodes out across all cores, one task per episode.
    # NOTE(review): a function defined inside __main__ is only picklable
    # under the fork start method — confirm this runs on Linux.
    instructions = []
    for window_size in window_size_list:
        tasks = [(episode, window_size) for episode in episode_data_list]
        with multiprocessing.Pool(processes=multiprocessing.cpu_count()) as pool:
            results = list(tqdm(pool.imap(process_episode, tasks), total=len(tasks), desc=f"Window Size {window_size}"))
        for result in results:
            instructions.extend(result)

    print(f"Number of context aware train instructions: {len(instructions)}")

    # Output filename encodes the window sizes and the rounded-down sample
    # count in thousands, e.g. go_train_window_0_12k.json.
    with open(f"go_train_window_{'-'.join([str(e) for e in window_size_list])}_{len(instructions)//1000}k.json", "w", encoding="utf-8") as file:
        json.dump(instructions, file, ensure_ascii=False, indent=4)