import os
import copy
import json
import random
from random import choice

import numpy as np
import torch
import torch.utils.data as data
from PIL import Image
from textblob import TextBlob

from args import get_parser
from bert.tokenization_bert import BertTokenizer
from refer.refer import REFER

# Dataset configuration initialization
parser = get_parser()
args = parser.parse_args()
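# NOTE: these module-level args are parsed once at import time; the dataset
# itself uses the `args` object passed explicitly to __init__, which shadows
# this module-level name inside the class.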


class ReferDataset_HP_Filter(data.Dataset):

    def __init__(self,
                 args,
                 image_transforms=None,
                 target_transforms=None,
                 split='train',
                 eval_mode=False):

        self.classes = []
        self.image_transforms = image_transforms
        self.target_transform = target_transforms
        self.split = split
        self.refer = REFER(args.refer_data_root, args.dataset, args.splitBy)
        self.dataset_type = args.dataset
        self.max_tokens = 20
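        # every query is padded/truncated to this fixed BERT sequence length,
        # counting the [CLS]/[SEP] special tokens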
        ref_ids = self.refer.getRefIds(split=self.split)
        self.img_ids = self.refer.getImgIds()

        all_imgs = self.refer.Imgs
        self.imgs = [all_imgs[i] for i in self.img_ids]
        self.ref_ids = ref_ids

        self.input_ids = []
        self.input_ids_masked = []
        self.attention_masks = []
        self.tokenizer = BertTokenizer.from_pretrained(args.bert_tokenizer)

        # for metric learning #####################
        # self.ROOT = '/data2/dataset/RefCOCO/VRIS'
        self.ROOT = '/data2/projects/seunghoon/VerbRIS/VerbCentric_CY/datasets/VRIS'
        self.all_hp_root = "/data2/dataset/RefCOCO/refcocog/SBERT_gref_umd"
        self.metric_learning = args.metric_learning
        self.exclude_multiobj = args.exclude_multiobj
        self.metric_mode = args.metric_mode
        self.exclude_position = False
        self.hp_selection = args.hp_selection
        self.get_all_verbs = args.get_all_verbs

        if self.metric_learning and not eval_mode:
            self.hardneg_prob = args.hn_prob
            self.multi_obj_ref_ids = self._load_multi_obj_ref_ids()
            self.hardpos_meta = self._load_metadata()
        else:
            self.hardneg_prob = 0.0
            self.multi_obj_ref_ids = None
            self.hardpos_meta = None
        #############################################

        self.eval_mode = eval_mode

        for r in ref_ids:
            ref = self.refer.Refs[r]

            sentences_for_ref = []
            sentences_for_ref_masked = []
            attentions_for_ref = []

            for i, (el, sent_id) in enumerate(zip(ref['sentences'], ref['sent_ids'])):
                sentence_raw = el['raw']
                attention_mask = [0] * self.max_tokens
                padded_input_ids = [0] * self.max_tokens
                padded_input_ids_masked = [0] * self.max_tokens

                blob = TextBlob(sentence_raw.lower())
                chara_list = blob.tags
                mask_ops = []
                mask_ops1 = []
                for word_i, (word_now, chara) in enumerate(chara_list):
                    # collect positions of nouns (NN/NNS) that still fit
                    # within the max_tokens window
                    if chara in ('NN', 'NNS') and word_i < self.max_tokens - 1:
                        mask_ops.append(word_i)
                        mask_ops1.append(word_now)
                mask_ops2 = self.get_adjacent_word(mask_ops)
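                # one of these contiguous noun spans is picked at random below
                # and its tokens are zeroed out in the masked copy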


                input_ids = self.tokenizer.encode(text=sentence_raw, add_special_tokens=True)

                # truncation of tokens
                input_ids = input_ids[:self.max_tokens]

                padded_input_ids[:len(input_ids)] = input_ids
                attention_mask[:len(input_ids)] = [1]*len(input_ids)
                if len(mask_ops) == 0:
                    input_ids_masked = input_ids
                else:
                    could_mask = choice(mask_ops2)
                    input_ids_masked = copy.deepcopy(input_ids)
                    for mask_idx in could_mask:
                        # +1 skips the leading [CLS] token; this assumes each
                        # word maps to a single WordPiece token, which BERT
                        # does not guarantee, so guard against overruns
                        if mask_idx + 1 < len(input_ids_masked):
                            input_ids_masked[mask_idx + 1] = 0
                padded_input_ids_masked[:len(input_ids_masked)] = input_ids_masked


                sentences_for_ref.append(torch.tensor(padded_input_ids).unsqueeze(0))
                sentences_for_ref_masked.append(torch.tensor(padded_input_ids_masked).unsqueeze(0))
                attentions_for_ref.append(torch.tensor(attention_mask).unsqueeze(0))

            self.input_ids.append(sentences_for_ref)
            self.input_ids_masked.append(sentences_for_ref_masked)
            self.attention_masks.append(attentions_for_ref)


    def get_classes(self):
        return self.classes

    def __len__(self):
        return len(self.ref_ids)

    def get_adjacent_word(self, mask_list):
        """Group consecutive indices into runs, e.g. [1, 2, 5] -> [[1, 2], [5]]."""
        output_mask_list = []
        length = len(mask_list)
        i = 0
        while i < length:
            begin_pos = i
            while i + 1 < length and mask_list[i + 1] == mask_list[i] + 1:
                i += 1
            end_pos = i + 1
            output_mask_list.append(mask_list[begin_pos:end_pos])
            i = end_pos

        return output_mask_list
    

    # for metric learning #####################
    ###########################################
    def _tokenize(self, sentence):
        attention_mask = [0] * self.max_tokens
        padded_input_ids = [0] * self.max_tokens

        input_ids = self.tokenizer.encode(text=sentence, add_special_tokens=True)
        # truncation of tokens
        input_ids = input_ids[:self.max_tokens]
        padded_input_ids[:len(input_ids)] = input_ids
        attention_mask[:len(input_ids)] = [1]*len(input_ids)

        # match shape as (1, max_tokens)
        return torch.tensor(padded_input_ids).unsqueeze(0), torch.tensor(attention_mask).unsqueeze(0)
    

    def _load_multi_obj_ref_ids(self):
        # Load multi-object reference IDs based on configuration
        if not self.exclude_multiobj and not self.exclude_position:
            return None
        elif self.exclude_position:
            multiobj_path = os.path.join(self.ROOT, 'multiobj_ov2_nopos.txt')
        elif self.exclude_multiobj:
            multiobj_path = os.path.join(self.ROOT, 'multiobj_ov3.txt')
        with open(multiobj_path, 'r') as f:
            return [int(line.strip()) for line in f]

    def _load_metadata(self):
        # Load metadata for hard-positive verb phrases / hard-negative queries
        if 'refined' in self.metric_mode or 'hardneg' in self.metric_mode:
            hardpos_path = '/data2/projects/seunghoon/VerbRIS/CrossVLT/hardpos_verdict_gref_v4.json'
        else:
            hardpos_path = os.path.join(self.ROOT, 'hardpos_verbphrase_0906upd.json')
        with open(hardpos_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    

    def _get_hardpos_verb(self, ref, seg_id, sent_idx):
        sbert_emb_size = 384
        if seg_id in self.multi_obj_ref_ids:
            return '', torch.zeros(sbert_emb_size, dtype=torch.float32)

        # Extract metadata for hard positives if present. Only the 'strict'
        # hp_selection mode is implemented; any other mode falls through to
        # the empty defaults below.
        hardpos_dict = self.hardpos_meta.get(str(seg_id), {})
        cur_sent_id = None
        cur_hardpos = []
        if self.hp_selection == 'strict':
            sent_id_list = list(hardpos_dict.keys())
            cur_sent_id = sent_id_list[sent_idx]
            cur_hardpos = hardpos_dict.get(cur_sent_id, {}).get('phrases', [])

        if cur_hardpos:
            rand_index = random.randint(0, len(cur_hardpos) - 1)
            raw_verb = cur_hardpos[rand_index]
            verb_embed = torch.from_numpy(self._get_hardpos_embed(seg_id, cur_sent_id, rand_index))
            # get_all_verbs returns the whole candidate list; otherwise return
            # the single sampled phrase, so _tokenize downstream always gets a
            # string rather than a list
            if self.get_all_verbs:
                return cur_hardpos, verb_embed
            else:
                return raw_verb, verb_embed

        return '', torch.zeros(sbert_emb_size, dtype=torch.float32)
    
    def _get_hardpos_embed(self, seg_id, sent_id, rand_index):
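        # Pre-computed SBERT embeddings live one file per phrase under
        # {all_hp_root}/{seg_id}/ as "hp_{sent_id}_*.npy"; the sorted file
        # order is assumed to match the 'phrases' list in the metadata.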
        emb_folder = os.path.join(self.all_hp_root, str(seg_id))
        emb_files = sorted([f for f in os.listdir(emb_folder) if f.startswith(f"hp_{sent_id}_") and f.endswith(".npy")])
        selected_emb_file = os.path.join(emb_folder, emb_files[rand_index])

        return np.load(selected_emb_file)
    
    # def _get_hardpos_verb_singlephrase(self, ref, seg_id, sent_idx) :
    #     if seg_id in self.multi_obj_ref_ids:
    #         return ''
        
    #     # Extract metadata for hard positives if present            
    #     hardpos_dict = self.hardpos_meta.get(str(seg_id), {})
    #     if self.hp_selection == 'strict' :
    #         sent_id_list = list(hardpos_dict.keys())
    #         cur_hardpos = hardpos_dict.get(sent_id_list[sent_idx], {}).get('phrases', [])
    #     else : 
    #         cur_hardpos = list(itertools.chain.from_iterable(hardpos_dict[sid]['phrases'] for sid in hardpos_dict))

    #     if cur_hardpos:
    #         # Assign a hard positive verb phrase if available
    #         raw_verb = random.choice(cur_hardpos)
    #         return raw_verb
        
    #     return ''
    ###########################################
    ###########################################
    

    def __getitem__(self, index):
        this_ref_id = self.ref_ids[index]
        this_img_id = self.refer.getImgIds(this_ref_id)
        this_img = self.refer.Imgs[this_img_id[0]]

        img = Image.open(os.path.join('/data2/dataset/COCO2014/trainval2014/', this_img['file_name'])).convert("RGB")
        ref = self.refer.loadRefs(this_ref_id)
                
        if self.dataset_type == 'ref_zom':
            source_type = ref[0]['source']
        else:
            source_type = 'one'

        ref_mask = np.array(self.refer.getMask(ref[0])['mask'])

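        # binarize the mask and wrap it as a palette-mode PIL image so the
        # joint image/target transforms can operate on it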
        annot = np.zeros(ref_mask.shape)
        annot[ref_mask == 1] = 1
        annot = Image.fromarray(annot.astype(np.uint8), mode="P")

        if self.image_transforms is not None:
            img, target = self.image_transforms(img, annot)
        else:
            # keep `target` defined even without transforms by falling back
            # to the raw PIL mask
            target = annot
            
        if self.eval_mode:
            embedding = []
            embedding_masked = []
            att = []
            for s in range(len(self.input_ids[index])):
                e = self.input_ids[index][s]
                # use the masked ids here so tensor_embeddings_masked actually
                # differs from tensor_embeddings
                em = self.input_ids_masked[index][s]
                a = self.attention_masks[index][s]
                embedding.append(e.unsqueeze(-1))
                embedding_masked.append(em.unsqueeze(-1))
                att.append(a.unsqueeze(-1))

            tensor_embeddings = torch.cat(embedding, dim=-1)
            tensor_embeddings_masked = torch.cat(embedding_masked, dim=-1)
            attention_mask = torch.cat(att, dim=-1)
            return img, target, source_type, tensor_embeddings, tensor_embeddings_masked, attention_mask
        
        else:
            # train phase
            choice_sent = np.random.choice(len(self.input_ids[index]))
            tensor_embeddings = self.input_ids[index][choice_sent]
            tensor_embeddings_masked = self.input_ids_masked[index][choice_sent]
            attention_mask = self.attention_masks[index][choice_sent]
            
            if self.metric_learning:
                pos_sent = torch.zeros_like(tensor_embeddings)
                pos_attn_mask = torch.zeros_like(attention_mask)

                ## Only the case with 'hardpos_' in metric_mode is handled here
                if 'hardpos_' in self.metric_mode or self.hardneg_prob == 0.0:
                    pos_type = 'zero'
                    # default to an empty pick and a zero SBERT embedding so
                    # both names are defined when 'refined' is not set
                    pos_sent_picked = ''
                    hardpos_embed = torch.zeros(384, dtype=torch.float32)
                    # 'refined' mode is the default configuration
                    if 'refined' in self.metric_mode:
                        pos_sent_picked, hardpos_embed = self._get_hardpos_verb(ref, this_ref_id, choice_sent)
                    if pos_sent_picked:
                        pos_type = 'hardpos'
                        pos_sent, pos_attn_mask = self._tokenize(pos_sent_picked)
                    return img, target, source_type, tensor_embeddings, tensor_embeddings_masked, attention_mask, pos_sent, pos_attn_mask, pos_type, hardpos_embed

            return img, target, source_type, tensor_embeddings, tensor_embeddings_masked, attention_mask
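

# --- minimal usage sketch -----------------------------------------------
# Illustrative only: a smoke test assuming the dataset roots hard-coded
# above exist on disk. Samples are 6-tuples, or 10-tuples when
# metric_learning is enabled (see the train branch of __getitem__).
if __name__ == '__main__':
    demo_ds = ReferDataset_HP_Filter(args, image_transforms=None,
                                     split='train', eval_mode=False)
    print(f'{len(demo_ds)} refs loaded for split "{demo_ds.split}"')
    sample = demo_ds[0]
    print(f'sample tuple has {len(sample)} elements')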