# seg/lisa_data/refcoco.py
# Uploaded by Lillyr ("upload dataset file to repo", commit 402ae9f verified).
import json
from refer import REFER
import random
from pycocotools import mask as maskUtils
from tqdm import tqdm
def annToMask(mask_ann, h=None, w=None):
    """Convert a COCO-style segmentation annotation to a binary mask.

    Accepts any of the three COCO segmentation formats: a list of
    polygons, an uncompressed RLE (dict whose 'counts' is a list), or a
    compressed RLE dict. `h`/`w` are the image height/width, required by
    pycocotools when the input is a polygon or uncompressed RLE.
    Returns the decoded numpy binary mask.
    """
    if isinstance(mask_ann, list):
        # Polygon(s): encode each one, then merge into a single RLE.
        rle = maskUtils.merge(maskUtils.frPyObjects(mask_ann, h, w))
    elif isinstance(mask_ann['counts'], list):
        # Uncompressed RLE: compress it to a standard RLE first.
        rle = maskUtils.frPyObjects(mask_ann, h, w)
    else:
        # Already a compressed RLE — use it directly.
        rle = mask_ann
    return maskUtils.decode(rle)
# Load the RefCOCOg dataset with the UMD split convention.
refer_api = REFER('/mnt/workspace/workgroup/yuanyq/code/LISA/dataset/refer_seg', 'refcocog', 'umd')
# NOTE(review): the original variables were named *_train but the "val"
# split is loaded here — confirm that is intentional.
val_ref_ids = refer_api.getRefIds(split="val")
val_image_ids = refer_api.getImgIds(ref_ids=val_ref_ids)
val_refs = refer_api.loadRefs(ref_ids=val_ref_ids)
annotation = refer_api.Anns

# Group the loaded references by the image they belong to.
img2refs = {}
for ref in val_refs:
    img2refs.setdefault(ref["image_id"], []).append(ref)
# Build one record per image: its file path, size, every referring
# sentence ('cat'), and a segmentation per sentence ('masks'), with
# cat[i] paired to masks[i].
final_data = []
for image_id in tqdm(img2refs):
    refs = img2refs[image_id]
    image_info = refer_api.loadImgs(image_ids=refs[0]['image_id'])[0]
    entry = {}
    entry['image'] = 'refer_seg/images/mscoco/images/train2014/' + image_info['file_name']
    entry['height'] = image_info['height']
    entry['width'] = image_info['width']
    sentences = []
    segmentations = []
    for ref in refs:
        seg = annotation[ref['ann_id']]['segmentation']
        # Deduplicate the sentences attached to this reference.
        unique_sents = list({s['sent'] for s in ref['sentences']})
        sentences += unique_sents
        # Repeat the segmentation once per sentence to keep the two
        # lists index-aligned.
        segmentations += [seg] * len(unique_sents)
    entry['cat'] = sentences
    entry['masks'] = segmentations
    final_data.append(entry)
# Report the number of converted records and serialize them to disk.
print(len(final_data))
with open('/mnt/workspace/workgroup/yuanyq/code/seg-llava/val/refcocog_val.json', 'w') as f:
    json.dump(final_data, f)
# conversations.append({'from': 'human', 'value': random.choice(SHORT_QUESTION).format(class_name=text.lower())})
# conversations.append({'from': 'gpt', 'value': random.choice(ANSWER_LIST)})