Files changed (2) hide show
  1. Voc--yolo.py +182 -0
  2. Voc.py +150 -0
Voc--yolo.py ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import xml.etree.ElementTree as ET
2
+ import pickle
3
+ import os
4
+ from os import listdir, getcwd
5
+ from os.path import join
6
+ import random
7
+ from shutil import copyfile
8
+
9
+
10
classes = ["pig"]  # class names present in the annotations; list index = YOLO class id
# Set the ratio of training data and validation data, with the remainder being used as test data.
TRAIN_RATIO = 80  # percent of images assigned to the training split
VAL_RATIO = 10    # percent assigned to the validation split; the remaining 10% become test data
14
+
15
+
16
def clear_hidden_files(path):
    """Recursively delete macOS resource-fork junk files ("._*") under *path*.

    Regular files whose name starts with "._" are removed; directories are
    descended into and cleaned the same way.
    """
    for entry in os.listdir(path):
        full_path = os.path.join(os.path.abspath(path), entry)
        if not os.path.isfile(full_path):
            clear_hidden_files(full_path)
        elif entry.startswith("._"):
            os.remove(full_path)
25
+
26
+
27
def convert(size, box):
    """Normalize a VOC box to YOLO format.

    *size* is the image (width, height); *box* is (xmin, xmax, ymin, ymax)
    in pixels. Returns (x_center, y_center, width, height), each expressed
    as a fraction of the image dimensions.
    """
    inv_w = 1. / size[0]
    inv_h = 1. / size[1]
    # Center point of the box, then scaled to [0, 1].
    x_center = ((box[0] + box[1]) / 2.0) * inv_w
    y_center = ((box[2] + box[3]) / 2.0) * inv_h
    # Box extents, scaled to [0, 1].
    box_w = (box[1] - box[0]) * inv_w
    box_h = (box[3] - box[2]) * inv_h
    return (x_center, y_center, box_w, box_h)
39
+
40
+
41
def convert_annotation(image_id):
    """Convert one VOC XML annotation file to a YOLO-format label file.

    Reads VOCdevkit/VOC2007/Annotations/<image_id>.xml and writes one
    "cls_id x y w h" line (normalized coordinates from `convert`) per kept
    object to VOCdevkit/VOC2007/YOLOLabels/<image_id>.txt. Objects whose
    class is not in `classes`, or that are flagged difficult, are skipped.

    Fix: both files are now opened with context managers so they are closed
    even if XML parsing raises (the originals leaked on error).
    """
    with open('VOCdevkit/VOC2007/Annotations/%s.xml' % image_id) as in_file, \
         open('VOCdevkit/VOC2007/YOLOLabels/%s.txt' % image_id, 'w') as out_file:
        tree = ET.parse(in_file)
        root = tree.getroot()
        size = root.find('size')
        w = int(size.find('width').text)
        h = int(size.find('height').text)

        for obj in root.iter('object'):
            difficult = obj.find('difficult').text
            cls = obj.find('name').text
            if cls not in classes or int(difficult) == 1:
                continue
            cls_id = classes.index(cls)
            xmlbox = obj.find('bndbox')
            # VOC order is (xmin, xmax, ymin, ymax) as expected by convert().
            b = (float(xmlbox.find('xmin').text), float(xmlbox.find('xmax').text),
                 float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text))
            bb = convert((w, h), b)
            out_file.write(str(cls_id) + " " + " ".join([str(a) for a in bb]) + '\n')
63
+
64
+
65
wd = os.getcwd()


def _prepare_dir(path):
    """Create *path* if it does not exist and purge "._*" junk files from it."""
    if not os.path.isdir(path):
        os.mkdir(path)
    clear_hidden_files(path)


# --- Source VOC tree: VOCdevkit/VOC2007/{Annotations, JPEGImages, YOLOLabels} ---
data_base_dir = os.path.join(wd, "VOCdevkit/")
if not os.path.isdir(data_base_dir):
    os.mkdir(data_base_dir)

work_space_dir = os.path.join(data_base_dir, "VOC2007/")  # fix: was "work_sapce_dir"
if not os.path.isdir(work_space_dir):
    os.mkdir(work_space_dir)

annotation_dir = os.path.join(work_space_dir, "Annotations/")
_prepare_dir(annotation_dir)

image_dir = os.path.join(work_space_dir, "JPEGImages/")
_prepare_dir(image_dir)

yolo_labels_dir = os.path.join(work_space_dir, "YOLOLabels/")
_prepare_dir(yolo_labels_dir)

# --- Destination YOLOv5 tree: VOCdevkit/{images, YOLOLabels}/{train,val,test}2007 ---
yolov5_images_dir = os.path.join(data_base_dir, "images/")
_prepare_dir(yolov5_images_dir)

yolov5_labels_dir = os.path.join(data_base_dir, "YOLOLabels/")
_prepare_dir(yolov5_labels_dir)

yolov5_images_train_dir = os.path.join(yolov5_images_dir, "train2007/")
_prepare_dir(yolov5_images_train_dir)

yolov5_images_val_dir = os.path.join(yolov5_images_dir, "val2007/")
_prepare_dir(yolov5_images_val_dir)

# BUG FIX: the test image dir previously reused "val2007/", silently merging
# the test split into the validation split.
yolov5_images_test_dir = os.path.join(yolov5_images_dir, "test2007/")
_prepare_dir(yolov5_images_test_dir)

yolov5_labels_train_dir = os.path.join(yolov5_labels_dir, "train2007/")
_prepare_dir(yolov5_labels_train_dir)

# BUG FIX: val labels were joined onto the *images* dir; they belong under
# the labels dir.
yolov5_labels_val_dir = os.path.join(yolov5_labels_dir, "val2007/")
_prepare_dir(yolov5_labels_val_dir)

# BUG FIX: test labels previously reused "val2007/" as well.
yolov5_labels_test_dir = os.path.join(yolov5_labels_dir, "test2007/")
_prepare_dir(yolov5_labels_test_dir)

# Randomly assign every annotated image to train / val / test according to
# TRAIN_RATIO and VAL_RATIO, writing the image path into the matching list
# file and copying image + converted label into the matching split dirs.
list_imgs = os.listdir(image_dir)  # list image files
with open(os.path.join(wd, "train.txt"), 'w') as train_file, \
     open(os.path.join(wd, "val.txt"), 'w') as val_file, \
     open(os.path.join(wd, "test.txt"), 'w') as test_file:
    for img_name in list_imgs:
        path = os.path.join(image_dir, img_name)
        if not os.path.isfile(path):
            continue
        image_path = image_dir + img_name
        (name_without_ext, _ext) = os.path.splitext(os.path.basename(image_path))
        annotation_path = os.path.join(annotation_dir, name_without_ext + '.xml')
        label_name = name_without_ext + '.txt'
        label_path = os.path.join(yolo_labels_dir, label_name)
        # Images without an annotation are skipped entirely.
        if not os.path.exists(annotation_path):
            continue

        prob = random.randint(1, 100)
        print("Probability: %d" % prob)
        if prob < TRAIN_RATIO:  # train dataset
            list_file = train_file
            img_dst, lbl_dst = yolov5_images_train_dir, yolov5_labels_train_dir
        elif prob < TRAIN_RATIO + VAL_RATIO:  # val dataset
            # BUG FIX: this branch previously copied into the *test* dirs.
            list_file = val_file
            img_dst, lbl_dst = yolov5_images_val_dir, yolov5_labels_val_dir
        else:  # test dataset
            list_file = test_file
            img_dst, lbl_dst = yolov5_images_test_dir, yolov5_labels_test_dir

        list_file.write(image_path + '\n')
        convert_annotation(name_without_ext)  # convert label
        copyfile(image_path, img_dst + img_name)
        copyfile(label_path, lbl_dst + label_name)
Voc.py ADDED
@@ -0,0 +1,150 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+ import xml.etree.ElementTree as ET
4
+
5
+ import numpy as np
6
+
7
+ from utils.utils import get_classes
8
+
9
# annotation_mode is used to specify what is computed during the runtime of this file. (default: 0)
# annotation_mode 0 represents the entire label processing process, including obtaining the txt files inside VOCdevkit/VOC2007/ImageSets
# and the training files 2007_train.txt and 2007_val.txt.
# annotation_mode 1 represents obtaining the txt files inside VOCdevkit/VOC2007/ImageSets.
# annotation_mode 2 represents obtaining the training files 2007_train.txt and 2007_val.txt.
annotation_mode = 0

# Object class names; the list index is the class id written to the label files.
# BUG FIX: this was the bare string 'pig', which made `cls not in classes` a
# substring test and sized `nums` by character count (3) instead of class count.
classes = ['pig']

# trainval_percent is used to specify the ratio of (training set + validation set) to the test set.
# train_percent is used to specify the ratio of the training set to the validation set within (training set + validation set).
trainval_percent = 0.8
train_percent = 0.75

# Points to the folder containing the VOC dataset.
# It defaults to the VOC dataset in the root directory.
VOCdevkit_path = 'VOCdevkit'
VOCdevkit_sets = [('2007', 'train'), ('2007', 'val')]

# Per-split photo counts and per-class object counts, filled in at runtime.
photo_nums = np.zeros(len(VOCdevkit_sets))
nums = np.zeros(len(classes))
33
+
34
+
35
def convert_annotation(year, image_id, list_file):
    """Append YOLO-format boxes for one image to the open *list_file*.

    Parses VOCdevkit/VOC<year>/Annotations/<image_id>.xml and writes
    " xmin,ymin,xmax,ymax,cls_id" for every non-difficult object whose class
    is in `classes`; each kept object also increments the module-level
    per-class counter `nums`.

    Fixes: the XML file is now closed via a context manager (it was leaked),
    and the None check uses `is not None` instead of `!= None`.
    """
    xml_path = os.path.join(VOCdevkit_path, 'VOC%s/Annotations/%s.xml' % (year, image_id))
    with open(xml_path, encoding='utf-8') as in_file:
        tree = ET.parse(in_file)
    root = tree.getroot()

    for obj in root.iter('object'):
        difficult = 0  # VOC <difficult> tag is optional; default to "easy"
        if obj.find('difficult') is not None:
            difficult = obj.find('difficult').text
        cls = obj.find('name').text
        if cls not in classes or int(difficult) == 1:
            continue
        cls_id = classes.index(cls)
        xmlbox = obj.find('bndbox')
        # float() first tolerates coordinates written as "12.0" in the XML.
        b = (int(float(xmlbox.find('xmin').text)), int(float(xmlbox.find('ymin').text)),
             int(float(xmlbox.find('xmax').text)), int(float(xmlbox.find('ymax').text)))
        list_file.write(" " + ",".join([str(a) for a in b]) + ',' + str(cls_id))

        nums[classes.index(cls)] = nums[classes.index(cls)] + 1
54
+
55
+
56
+ if __name__ == "__main__":
57
+ random.seed(0)
58
+ if " " in os.path.abspath(VOCdevkit_path):
59
+ raise ValueError(
60
+ "There should be no spaces in the folder path where the dataset is stored and in the image names, as it can affect the model training. Please make sure to make the necessary corrections.")
61
+
62
+ if annotation_mode == 0 or annotation_mode == 1:
63
+ print("Generate txt in ImageSets.")
64
+ xmlfilepath = os.path.join(VOCdevkit_path, 'VOC2007/Annotations')
65
+ saveBasePath = os.path.join(VOCdevkit_path, 'VOC2007/ImageSets/Main')
66
+ temp_xml = os.listdir(xmlfilepath)
67
+ total_xml = []
68
+ for xml in temp_xml:
69
+ if xml.endswith(".xml"):
70
+ total_xml.append(xml)
71
+
72
+ num = len(total_xml)
73
+ list = range(num)
74
+ tv = int(num * trainval_percent)
75
+ tr = int(tv * train_percent)
76
+ trainval = random.sample(list, tv)
77
+ train = random.sample(trainval, tr)
78
+
79
+ print("train and val size", tv)
80
+ print("train size", tr)
81
+ ftrainval = open(os.path.join(saveBasePath, 'trainval.txt'), 'w')
82
+ ftest = open(os.path.join(saveBasePath, 'test.txt'), 'w')
83
+ ftrain = open(os.path.join(saveBasePath, 'train.txt'), 'w')
84
+ fval = open(os.path.join(saveBasePath, 'val.txt'), 'w')
85
+
86
+ for i in list:
87
+ name = total_xml[i][:-4] + '\n'
88
+ if i in trainval:
89
+ ftrainval.write(name)
90
+ if i in train:
91
+ ftrain.write(name)
92
+ else:
93
+ fval.write(name)
94
+ else:
95
+ ftest.write(name)
96
+
97
+ ftrainval.close()
98
+ ftrain.close()
99
+ fval.close()
100
+ ftest.close()
101
+ print("Generate txt in ImageSets done.")
102
+
103
+ if annotation_mode == 0 or annotation_mode == 2:
104
+ print("Generate 2007_train.txt and 2007_val.txt for train.")
105
+ type_index = 0
106
+ for year, image_set in VOCdevkit_sets:
107
+ image_ids = open(os.path.join(VOCdevkit_path, 'VOC%s/ImageSets/Main/%s.txt' % (year, image_set)),
108
+ encoding='utf-8').read().strip().split()
109
+ list_file = open('%s_%s.txt' % (year, image_set), 'w', encoding='utf-8')
110
+ for image_id in image_ids:
111
+ list_file.write('%s/VOC%s/JPEGImages/%s.jpg' % (os.path.abspath(VOCdevkit_path), year, image_id))
112
+
113
+ convert_annotation(year, image_id, list_file)
114
+ list_file.write('\n')
115
+ photo_nums[type_index] = len(image_ids)
116
+ type_index += 1
117
+ list_file.close()
118
+ print("Generate 2007_train.txt and 2007_val.txt for train done.")
119
+
120
+
121
+ def printTable(List1, List2):
122
+ for i in range(len(List1[0])):
123
+ print("|", end=' ')
124
+ for j in range(len(List1)):
125
+ print(List1[j][i].rjust(int(List2[j])), end=' ')
126
+ print("|", end=' ')
127
+ print()
128
+
129
+
130
+ str_nums = [str(int(x)) for x in nums]
131
+ tableData = [
132
+ classes, str_nums
133
+ ]
134
+ colWidths = [0] * len(tableData)
135
+ len1 = 0
136
+ for i in range(len(tableData)):
137
+ for j in range(len(tableData[i])):
138
+ if len(tableData[i][j]) > colWidths[i]:
139
+ colWidths[i] = len(tableData[i][j])
140
+ printTable(tableData, colWidths)
141
+
142
+ if photo_nums[0] <= 500:
143
+ print("The number of training samples is less than 500, which is a small dataset. Please consider setting a larger number of training epochs (Epochs) to ensure an adequate number of gradient descent steps.")
144
+
145
+ if np.sum(nums) == 0:
146
+ print("No objects have been detected in the dataset. Please make sure to update the 'classes_path' to match your dataset and ensure that the class labels are correct; otherwise, the training will be ineffective!")
147
+ print("No objects have been detected in the dataset. Please make sure to update the 'classes_path' to match your dataset and ensure that the class labels are correct; otherwise, the training will be ineffective!")
148
+ print("No objects have been detected in the dataset. Please make sure to update the 'classes_path' to match your dataset and ensure that the class labels are correct; otherwise, the training will be ineffective!")
149
+ print("(Important information repeated three times).")
150
+