File size: 13,282 Bytes
483f96c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
import os
import json
import numpy as np
from PIL import Image
from datetime import datetime
from eval import Eval
from env.thor_env import ThorEnv

class EvalTask(Eval):
    '''
    evaluate overall task performance
    '''

    @classmethod
    def run(cls, model, resnet, task_queue, args, lock, successes, failures, results):
        '''
        Evaluation loop: pop tasks off the shared queue until it is empty,
        evaluating each one inside a single THOR instance.
        '''
        # start THOR
        env = ThorEnv()

        while True:
            if task_queue.qsize() == 0:
                break

            task = task_queue.get()

            try:
                traj = model.load_task_json(task)
                r_idx = task['repeat_idx']
                print("Evaluating: %s" % (traj['root']))
                print("No. of trajectories left: %d" % (task_queue.qsize()))
                cls.evaluate(env, model, r_idx, resnet, traj, args, lock, successes, failures, results)
            except Exception as e:
                # keep the worker alive on a per-task failure: log and move on
                import traceback
                traceback.print_exc()
                print("Error: " + repr(e))

        # stop THOR
        env.stop()


    @classmethod
    def evaluate(cls, env, model, r_idx, resnet, traj_data, args, lock, successes, failures, results):
        '''
        Replay the ground-truth low-level action plan for one trajectory and
        record success / goal-condition / SPL metrics into the shared lists.

        NOTE(review): the model forward pass is intentionally bypassed here --
        this variant executes the plan loaded from the json_2.1.0_GT split
        instead of model predictions; `resnet` is unused as a result.
        '''
        # reset model state between episodes
        model.reset()

        # setup scene
        reward_type = 'dense'
        cls.setup_scene(env, traj_data, r_idx, args, reward_type=reward_type)

        # load the PDDL trajectory holding the GT low-level actions; use a
        # context manager so the file handle is not leaked (the original
        # json.load(open(...)) never closed the file)
        pddl_path = traj_data['root'].replace('json_feat_2.1.0', 'json_2.1.0_GT/' + traj_data['split']) + '/traj_data.json'
        with open(pddl_path, 'r') as f:
            traj_data_pddl = json.load(f)
        gt_actions = traj_data_pddl['plan']['low_actions']

        # aug28 turk_annotation template switch: rewrite the 'template' entry
        # into the standard 'turk_annotations' layout expected downstream
        # (last high_desc is dropped -- presumably the <<stop>> instruction;
        # TODO confirm against the data-generation side)
        new_traj = dict()
        for key in list(traj_data.keys()):
            if key != 'template':
                new_traj[key] = traj_data[key]
            else:
                new_traj['turk_annotations'] = {'anns': [{'high_descs': traj_data[key]['high_descs'][:-1], 'task_desc': traj_data[key]['task_desc']}]}
        traj_data = new_traj

        # goal instr
        goal_instr = traj_data['turk_annotations']['anns'][r_idx]['task_desc']

        done, success = False, False
        fails = 0
        t = 0
        reward = 0
        while not done:
            # break if max_steps reached or the GT plan is exhausted
            if t >= args.max_steps or t >= len(gt_actions):
                break

            # take the next GT action; interaction actions also need a mask
            action = gt_actions[t]['discrete_action']['action']
            mask = None
            if model.has_interaction(action):
                from gen.utils.image_util import decompress_mask
                mask = decompress_mask(gt_actions[t]['discrete_action']['args']['mask'])
                # debug visualization: overlay the interaction mask on the frame
                import matplotlib.pyplot as plt
                rgb = np.array(env.last_event.frame, dtype=np.float32) / 255
                plt.imsave('../tmp/{:05d}.png'.format(t), rgb / 2 + mask[:, :, np.newaxis] / 2)

            # print action
            if args.debug:
                print(action)

            # use the GT action and mask (if available) to interact with the env
            t_success, _, _, err, _ = env.va_interact(action, interact_mask=mask, smooth_nav=args.smooth_nav, debug=args.debug)
            if not t_success:
                fails += 1
                print("Interact API failed %d times" % fails + "; latest error '%s'" % err)
                if fails >= args.max_fails:
                    break

            # next time-step
            t_reward, t_done = env.get_transition_reward()
            reward += t_reward
            t += 1

        # check if goal was satisfied
        goal_satisfied = env.get_goal_satisfied()
        if goal_satisfied:
            print("Goal Reached")
            success = True

        # goal_conditions: fraction of goal conditions met
        pcs = env.get_goal_conditions_met()
        goal_condition_success_rate = pcs[0] / float(pcs[1])

        # SPL; guard t == 0 (no steps executed) to avoid ZeroDivisionError
        path_len_weight = len(traj_data['plan']['low_actions'])
        if t > 0:
            s_spl = (1 if goal_satisfied else 0) * min(1., path_len_weight / float(t))
            pc_spl = goal_condition_success_rate * min(1., path_len_weight / float(t))
        else:
            s_spl, pc_spl = 0., 0.

        # path length weighted SPL
        plw_s_spl = s_spl * path_len_weight
        plw_pc_spl = pc_spl * path_len_weight

        # log success/fails under the shared lock; try/finally guarantees the
        # lock is released even if metric aggregation raises, so other workers
        # cannot deadlock
        lock.acquire()
        try:
            log_entry = {'trial': traj_data['task_id'],
                         'type': traj_data['task_type'],
                         'repeat_idx': int(r_idx),
                         'goal_instr': goal_instr,
                         'completed_goal_conditions': int(pcs[0]),
                         'total_goal_conditions': int(pcs[1]),
                         'goal_condition_success': float(goal_condition_success_rate),
                         'success_spl': float(s_spl),
                         'path_len_weighted_success_spl': float(plw_s_spl),
                         'goal_condition_spl': float(pc_spl),
                         'path_len_weighted_goal_condition_spl': float(plw_pc_spl),
                         'path_len_weight': int(path_len_weight),
                         'reward': float(reward)}
            if success:
                successes.append(log_entry)
            else:
                failures.append(log_entry)

            # overall results
            results['all'] = cls.get_metrics(successes, failures)

            print("-------------")
            print("SR: %d/%d = %.3f" % (results['all']['success']['num_successes'],
                                        results['all']['success']['num_evals'],
                                        results['all']['success']['success_rate']))
            print("GC: %d/%d = %.3f" % (results['all']['goal_condition_success']['completed_goal_conditions'],
                                        results['all']['goal_condition_success']['total_goal_conditions'],
                                        results['all']['goal_condition_success']['goal_condition_success_rate']))
            print("PLW SR: %.3f" % (results['all']['path_length_weighted_success_rate']))
            print("PLW GC: %.3f" % (results['all']['path_length_weighted_goal_condition_success_rate']))
            print("-------------")

            # task type specific results
            task_types = ['pick_and_place_simple', 'pick_clean_then_place_in_recep', 'pick_heat_then_place_in_recep',
                          'pick_cool_then_place_in_recep', 'pick_two_obj_and_place', 'look_at_obj_in_light',
                          'pick_and_place_with_movable_recep']
            for task_type in task_types:
                task_successes = [s for s in list(successes) if s['type'] == task_type]
                task_failures = [f for f in list(failures) if f['type'] == task_type]
                if len(task_successes) > 0 or len(task_failures) > 0:
                    results[task_type] = cls.get_metrics(task_successes, task_failures)
                else:
                    results[task_type] = {}
        finally:
            lock.release()

    @classmethod
    def get_metrics(cls, successes, failures):
        '''
        compute overall success and goal_condition success rates along with
        path-length-weighted versions of both metrics
        '''
        # stats
        num_successes, num_failures = len(successes), len(failures)
        num_evals = len(successes) + len(failures)
        total_path_len_weight = sum([entry['path_len_weight'] for entry in successes]) + \
                                sum([entry['path_len_weight'] for entry in failures])
        completed_goal_conditions = sum([entry['completed_goal_conditions'] for entry in successes]) + \
                                   sum([entry['completed_goal_conditions'] for entry in failures])
        total_goal_conditions = sum([entry['total_goal_conditions'] for entry in successes]) + \
                               sum([entry['total_goal_conditions'] for entry in failures])

        # metrics
        sr = float(num_successes) / num_evals
        pc = completed_goal_conditions / float(total_goal_conditions)
        plw_sr = (float(sum([entry['path_len_weighted_success_spl'] for entry in successes]) +
                        sum([entry['path_len_weighted_success_spl'] for entry in failures])) /
                  total_path_len_weight)
        plw_pc = (float(sum([entry['path_len_weighted_goal_condition_spl'] for entry in successes]) +
                        sum([entry['path_len_weighted_goal_condition_spl'] for entry in failures])) /
                  total_path_len_weight)

        # result table
        res = dict()
        res['success'] = {'num_successes': num_successes,
                          'num_evals': num_evals,
                          'success_rate': sr}
        res['goal_condition_success'] = {'completed_goal_conditions': completed_goal_conditions,
                                        'total_goal_conditions': total_goal_conditions,
                                        'goal_condition_success_rate': pc}
        res['path_length_weighted_success_rate'] = plw_sr
        res['path_length_weighted_goal_condition_success_rate'] = plw_pc

        return res

    def create_stats(self):
        '''
        storage for success, failure, and results info
        '''
        # manager-backed containers so worker processes can share them
        self.successes, self.failures = self.manager.list(), self.manager.list()
        self.results = self.manager.dict()

    def save_results(self):
        '''
        dump accumulated successes, failures, and aggregate results to a
        timestamped json next to the model checkpoint
        '''
        results = {'successes': list(self.successes),
                   'failures': list(self.failures),
                   'results': dict(self.results)}

        save_path = os.path.dirname(self.args.model_path)
        save_path = os.path.join(save_path, 'task_results_' + self.args.eval_split + '_' + datetime.now().strftime("%Y%m%d_%H%M%S_%f") + '.json')
        with open(save_path, 'w') as r:
            json.dump(results, r, indent=4, sort_keys=True)