Lillyr committed on
Commit
f0d42f6
·
verified ·
1 Parent(s): 83fdbb0

upload dataset file to repo

Browse files
Files changed (1) hide show
  1. lisa_data/reasonseg.py +147 -0
lisa_data/reasonseg.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import json
2
+ import glob
3
+ import random
4
+ import cv2
5
+ from tqdm import tqdm
6
+ import numpy as np
7
+ from pycocotools import mask as maskUtils
8
+
9
def singleMask2rle(mask):
    """Encode a binary H×W mask as a COCO RLE dict with a JSON-safe `counts` string."""
    # pycocotools expects a Fortran-ordered H×W×1 uint8 array; encode returns one
    # RLE dict per channel, so take the single element.
    fortran_mask = np.array(mask[:, :, None], order='F', dtype="uint8")
    encoded = maskUtils.encode(fortran_mask)[0]
    # The raw `counts` is bytes; decode so the dict can be dumped to JSON.
    encoded["counts"] = encoded["counts"].decode("utf-8")
    return encoded
13
+
14
+
15
def get_mask_from_json(inform, height, width):
    """Rasterize ReasonSeg polygon annotations into one H×W uint8 mask.

    Polygons are drawn largest-area first so that smaller shapes overwrite
    larger ones. Labels containing "ignore" are written as 255 (ignored at
    evaluation); every other label is written as 1 (target). Annotations
    whose label is exactly "flag" are deprecated and skipped.
    """
    areas = []
    kept_polys = []
    for shape in inform:
        if shape["label"].lower() == "flag":  # meaningless deprecated annotations
            continue
        # Rasterize this polygon alone to measure its filled area.
        scratch = np.zeros((height, width), dtype=np.uint8)
        pts = np.array([shape["points"]], dtype=np.int32)
        cv2.polylines(scratch, pts, True, 1, 1)
        cv2.fillPoly(scratch, pts, 1)
        areas.append(scratch.sum())
        kept_polys.append(shape)

    # Largest-first draw order. np.argsort is kept deliberately so tie
    # ordering matches the original behavior exactly.
    draw_order = list(np.argsort(areas)[::-1].astype(np.int32))

    mask = np.zeros((height, width), dtype=np.uint8)
    for poly_idx in draw_order:
        shape = kept_polys[poly_idx]
        # 255 = region ignored during evaluation; 1 = segmentation target.
        label_value = 255 if "ignore" in shape["label"].lower() else 1
        pts = np.array([shape["points"]], dtype=np.int32)
        cv2.polylines(mask, pts, True, label_value, 1)
        cv2.fillPoly(mask, pts, label_value)

    return mask
55
+
56
# Question templates for class-name queries; filled via .format(class_name=...).
SHORT_QUESTION_LIST = [
    "Can you segment the {class_name} in this image?",
    "Please segment the {class_name} in this image.",
    "What is {class_name} in this image? Please respond with segmentation mask.",
    "What is {class_name} in this image? Please output segmentation mask.",
    "Could you identify and segment the {class_name} in this image?",
    "Would you be able to segment the {class_name} in this image?",
    "Can you provide a segmentation mask for the {class_name} in this image?",
    "Please provide a segmentation mask for the {class_name} in this image.",
    "Could you please segment the {class_name} in this image for me?",
    "What {class_name} is present in this image? Kindly respond with a segmentation mask.",
    "Which part of this image contains {class_name}? Please output with segmentation mask.",
    "Is there a {class_name} in this image? If so, please provide the segmentation mask.",
    "Can you segment out the {class_name} visible in this image?",
    "Would you identify and provide a segmentation mask for the {class_name} in this image?",
]

# Question templates for free-form sentence queries; filled via .format(sent=...).
LONG_QUESTION_LIST = [
    "{sent} Please respond with segmentation mask.",
    "{sent} Please output segmentation mask.",
]

# Suffixes asking the model to also justify the mask.
# NOTE(review): defined but never referenced in the visible script —
# explanatory samples below reuse the raw 'query' field instead.
EXPLANATORY_QUESTION_LIST = [
    "Please output segmentation mask and explain why.",
    "Please output segmentation mask and explain the reason.",
    "Please output segmentation mask and give some explanation.",
]

# Answer templates; [SEG] is the placeholder token for the segmentation output.
ANSWER_LIST = [
    "It is [SEG].",
    "Sure, [SEG].",
    "Sure, it is [SEG].",
    "Sure, the segmentation result is [SEG].",
    "[SEG].",
]
91
+
92
+
93
# Build referring-segmentation training samples from per-image ReasonSeg JSONs.
json_data = glob.glob('/mnt/workspace/workgroup/yuanyq/code/LISA/dataset/reason_seg/ReasonSeg/train/*.json')
final_data = []
idx = 0
for d in tqdm(json_data):
    # Close the annotation file deterministically instead of leaking the handle.
    with open(d) as f:
        data = json.load(f)
    texts = data['text']
    img_path = d.replace('.json', '.jpg')
    image = cv2.imread(img_path)
    if image is None:
        # cv2.imread returns None on failure; fail loudly here rather than
        # with a confusing AttributeError on `.shape` below.
        raise FileNotFoundError(f'image not found or unreadable: {img_path}')
    h, w = image.shape[:2]
    # The ground-truth mask is identical for every text of this image —
    # compute and encode it once instead of once per text.
    msk = get_mask_from_json(data['shapes'], h, w)
    rle = singleMask2rle(msk)
    for text in texts:
        dic = {}
        dic['id'] = f'reasonseg_{idx}'
        idx += 1
        dic['image'] = img_path.replace('/mnt/workspace/workgroup/yuanyq/code/LISA/dataset/', '')
        if data['is_sentence']:
            # Free-form sentence query.
            question = random.choice(LONG_QUESTION_LIST).format(sent=text)
        else:
            # Plain class-name query; class names are lowercased.
            question = random.choice(SHORT_QUESTION_LIST).format(class_name=text.lower())
        answer = random.choice(ANSWER_LIST)
        dic['conversations'] = []
        dic['conversations'].append({'from': 'human', 'value': '<image>\n' + question})
        dic['conversations'].append({'from': 'gpt', 'value': answer})
        dic['masks'] = [rle]
        final_data.append(dic)
125
+
126
# Append explanatory samples: the answer additionally contains a textual
# explanation ('outputs') after the [SEG] template.
with open('/mnt/workspace/workgroup/yuanyq/code/LISA/dataset/reason_seg/ReasonSeg/explanatory/train.json') as f:
    explanatory = json.load(f)
for data in tqdm(explanatory):
    dic = {}
    dic['id'] = f'reasonseg_{idx}'
    idx += 1
    dic['image'] = 'reason_seg/ReasonSeg/train/' + data['image']
    dic['conversations'] = []
    dic['conversations'].append({'from': 'human', 'value': '<image>\n' + data['query']})
    dic['conversations'].append({'from': 'gpt', 'value': random.choice(ANSWER_LIST) + ' ' + data['outputs']})
    # BUG FIX: read the image and its size BEFORE rasterizing the mask.
    # Previously h/w were stale values carried over from the preceding loop
    # iteration (or the earlier glob loop), producing masks at the wrong
    # resolution for any image whose size differs from the previous one.
    image = cv2.imread('/mnt/workspace/workgroup/yuanyq/code/LISA/dataset/reason_seg/ReasonSeg/train/' + data['image'])
    if image is None:
        raise FileNotFoundError(f"image not found or unreadable: {data['image']}")
    h, w = image.shape[:2]
    # Use a local name so we don't shadow the module-level `json_data` glob
    # list, and close the file handle deterministically.
    with open(f'/mnt/workspace/workgroup/yuanyq/code/LISA/dataset/reason_seg/ReasonSeg/train/{data["json"]}') as f:
        shape_data = json.load(f)
    msk = get_mask_from_json(shape_data['shapes'], h, w)
    dic['masks'] = [singleMask2rle(msk)]
    dic['height'] = h
    dic['width'] = w
    final_data.append(dic)
print(len(final_data))
with open('reason_seg.json', 'w') as f:
    f.write(json.dumps(final_data, indent=4))