DataNerd2021 committed on
Commit
6086490
·
1 Parent(s): 494c74f

upload .pth and python script

Browse files
mexico_5_column_weights.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b6e09ef4426541bc2d42b583f15e1e32c7a458d228dbe292d4ff12b9dc602f5e
3
+ size 351208539
mexico_census_5column_segment_josh.py ADDED
@@ -0,0 +1,287 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Need to set up out dir and also how it is being split. Bounding boxes?
2
+
3
+ # Some basic setup
4
+ # Setup detectron2 logger
5
+ from sys import argv
6
+ import detectron2
7
+ from detectron2.utils.logger import setup_logger
8
+ setup_logger()
9
+
10
+ # import some common libraries
11
+ import matplotlib.pyplot as plt
12
+ import numpy as np
13
+ #from google.cloud import storage
14
+ from io import BytesIO
15
+ import cv2
16
+ from glob import glob
17
+ import subprocess
18
+ from shlex import quote
19
+ import csv
20
+ from tqdm import tqdm
21
+
22
+
23
+
24
+ # import some common detectron2 utilities
25
+ from detectron2 import model_zoo
26
+ from detectron2.engine import DefaultPredictor
27
+ from detectron2.config import get_cfg
28
+ from detectron2.utils.visualizer import Visualizer
29
+ from detectron2.data import MetadataCatalog, DatasetCatalog
30
+ from detectron2.structures import BoxMode #I added this
31
+ from detectron2.evaluation import COCOEvaluator, inference_on_dataset
32
+ from detectron2.data import build_detection_test_loader
33
+ import statistics
34
+
35
+ import random
36
+ from detectron2.engine import DefaultTrainer
37
+ from detectron2.config import get_cfg
38
+ import os
39
+ import traceback
40
+
41
# Command-line arguments select the output subfolder: <numdir>/<album>.
numdir = argv[1]
album = argv[2]


# Set Up Models
# `cfg` configures the 5-column segmentation model: start from the model-zoo
# Mask R-CNN (R50-FPN, 3x) defaults, then override weights, device, class
# count, and confidence threshold.
cfg = get_cfg()
cfg.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")) # define model
cfg.MODEL.WEIGHTS = r"C:\Users\Chase\OneDrive\Documents\service-project\mexico_5_column_weights.pth" # SET UP WEIGHTS HERE
cfg.MODEL.DEVICE = 'cpu'  # CPU inference; no GPU assumed
cfg.MODEL.ROI_HEADS.NUM_CLASSES = 5 # 5 classes (5 columns in this instance, but you may have more depending on what you are doing)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8 # set the testing threshold for this model (confidence threshold)
predictor = DefaultPredictor(cfg)

# Second model: a single-class detector used downstream to find the printed
# column header so it can be cropped off before row segmentation.
# NOTE(review): it points at the same 5-class weights file as `cfg` while
# declaring NUM_CLASSES = 1 -- confirm this is the intended checkpoint.
cfg2 = get_cfg()
cfg2.merge_from_file(model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"))
cfg2.MODEL.WEIGHTS = r"C:\Users\Chase\OneDrive\Documents\service-project\mexico_5_column_weights.pth" # SET UP WEIGHTS HERE
cfg2.MODEL.DEVICE = 'cpu'
cfg2.MODEL.ROI_HEADS.NUM_CLASSES = 1 # 1 class (Cause of Death in this instance, but you may have more depending on what you are doing)
cfg2.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.8 # set the testing threshold for this model
predictor2 = DefaultPredictor(cfg2)
63
+
64
+ #FUNCTIONS
65
# This function returns a list of vertical lines found within the image passed to the function.
def get_vertical_lines(img, width=385, line_height=2000, circle=155):
    """Detect near-vertical ruled lines in ``img`` with a Hough transform.

    Parameters
    ----------
    img : BGR image (H x W x 3 uint8 array).
    width : x position at which each line's second sample point is taken.
    line_height : accumulator vote threshold passed to ``cv2.HoughLines``.
    circle : ``blockSize`` for the adaptive threshold (must be odd).

    Returns
    -------
    list of floats -- for each near-vertical line, its intercept at x = 0
    followed by its value at x = ``width`` (two entries per line).

    Raises
    ------
    ValueError
        If ``cv2.HoughLines`` detects no lines at all.  (The original code
        crashed here with an opaque ``TypeError`` on ``len(None)``; callers
        catch broadly, so raising a clearer exception keeps control flow.)
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Adaptive Gaussian threshold (max 255, blockSize=circle, constant 2),
    # inverted so that ink becomes the white foreground.
    edges = ~cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, circle, 2)
    # Erode with a 3x3 kernel to drop speckle noise, then dilate with a 1x7
    # kernel to bridge small horizontal gaps in the strokes.
    th2 = cv2.erode(edges, np.ones((3, 3), np.uint8), iterations=1)
    th3 = cv2.dilate(th2, np.ones((1, 7), np.uint8), iterations=1)
    # Standard Hough transform: rho resolution 1 px, theta resolution 1 deg.
    lines = cv2.HoughLines(th3, 1, np.pi / 180, line_height)
    if lines is None:
        raise ValueError("cv2.HoughLines found no lines in the image")
    # Keep only lines within 0.1 rad of vertical (theta near 0).
    keepers = [ln for ln in lines if -.1 < ln[0][1] < .1]
    ys = []
    for keeper in keepers:
        rho, theta = keeper[0]
        b = np.sin(theta)
        a = np.cos(theta)
        x0 = a * rho
        y0 = b * rho
        # Two points 30 px apart along the line, used to fit y = slope*x + c.
        x1 = int(x0 + 30 * (-b))
        y1 = int(y0 + 30 * (a))
        x2 = int(x0 - 30 * (-b))
        y2 = int(y0 - 30 * (a))
        # NOTE(review): a perfectly vertical line (theta == 0) makes x1 == x2
        # and this divides by zero -- preserved from the original behavior.
        slope = (y2 - y1) / (x2 - x1)
        intercept = y1 - (slope * x1)
        side = slope * width + intercept
        ys.append(intercept)
        ys.append(side)
    return ys
112
+
113
# This function returns a list of horizontal lines found in the image passed into the function.
def get_horizontal_lines(img, width=385, line_width=150, circle=155):
    """Detect near-horizontal ruled lines in ``img`` with a Hough transform.

    Parameters
    ----------
    img : BGR image (H x W x 3 uint8 array).
    width : x position at which each line's second sample point is taken.
    line_width : accumulator vote threshold passed to ``cv2.HoughLines``.
    circle : ``blockSize`` for the adaptive threshold (must be odd).

    Returns
    -------
    list of floats -- for each near-horizontal line, its y-intercept at
    x = 0 followed by its y value at x = ``width`` (two entries per line).

    Raises
    ------
    ValueError
        If ``cv2.HoughLines`` detects no lines at all.  (Previously this
        crashed with ``TypeError`` on ``len(None)``; every caller wraps the
        call in a broad except, so control flow is unchanged.)
    """
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # converts image to grayscale
    # Adaptive Gaussian threshold, inverted so ink is the white foreground.
    edges = ~cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                   cv2.THRESH_BINARY, circle, 2)
    # Erode to remove speckle noise, then dilate with a 7x1 (vertical) kernel
    # to bridge small vertical gaps in horizontal strokes.
    th2 = cv2.erode(edges, np.ones((3, 3), np.uint8), iterations=1)
    th3 = cv2.dilate(th2, np.ones((7, 1), np.uint8), iterations=1)
    lines = cv2.HoughLines(th3, 1, np.pi / 180, line_width)
    if lines is None:
        raise ValueError("cv2.HoughLines found no lines in the image")
    # Keep only lines with theta between 1.45 and 1.7 rad (near pi/2,
    # i.e. close to horizontal).
    keepers = [ln for ln in lines if 1.45 < ln[0][1] < 1.7]
    ys = []
    for keeper in keepers:
        rho, theta = keeper[0]
        b = np.sin(theta)
        a = np.cos(theta)
        x0 = a * rho
        y0 = b * rho
        # Two points 30 px apart along the line, used to fit y = slope*x + c.
        x1 = int(x0 + 30 * (-b))
        y1 = int(y0 + 30 * (a))
        x2 = int(x0 - 30 * (-b))
        y2 = int(y0 - 30 * (a))
        slope = (y2 - y1) / (x2 - x1)
        intercept = y1 - (slope * x1)
        side = slope * width + intercept
        ys.append(intercept)
        ys.append(side)
    return ys
145
+
146
+
147
def crop_bot(img, width=385, line_width_crop=300):
    """Crop ``img`` at the ruled line nearest its bottom edge, best-effort.

    Looks for horizontal lines in the bottom 50 rows of the image and crops
    just below their mean position.  If no line can be found (or line
    detection fails for any reason), the image is returned unchanged.

    Parameters
    ----------
    img : BGR image array.
    width : right edge (in px) of the region considered and returned.
    line_width_crop : Hough vote threshold forwarded to get_horizontal_lines.
    """
    strip = img[-50:, 0:width]  # only the bottom 50 rows are searched
    try:
        ys = get_horizontal_lines(strip, line_width=line_width_crop)
        # Mean line position within the strip, converted to an absolute row.
        return img[:img.shape[0] - 50 + int(np.mean(ys)), 0:width]
    except Exception:
        # Best effort by design -- but use `except Exception` rather than a
        # bare except so KeyboardInterrupt/SystemExit still propagate.
        return img
154
+
155
+
156
def make_snippets(img, ys, rows=50, pixels_per_row=60, pixels_on_either_side=15, file_path="", column="lit", add_to_end=0):
    """Slice ``img`` into ``rows`` horizontal snippets and write each as JPEG.

    Row boundaries start on a fixed ``pixels_per_row`` grid.  Whenever one or
    more detected line positions in ``ys`` fall within
    ``pixels_on_either_side`` of a grid boundary, that boundary snaps to the
    median of those nearby lines.  The last row extends to the image bottom.
    Files are written as ``<file_path>_<column>_row_<k>.jpg`` for k = 1..rows.
    """
    top = 0
    for row_idx in range(rows):
        bottom = top + pixels_per_row
        # Detected lines close enough to either boundary to snap to.
        near_top = [line for line in ys
                    if top - pixels_on_either_side < line < top + pixels_on_either_side]
        near_bottom = [line for line in ys
                       if bottom - pixels_on_either_side < line < bottom + pixels_on_either_side]
        if near_top:
            top = round(statistics.median(near_top))
        if near_bottom:
            bottom = round(statistics.median(near_bottom))
        if row_idx == rows - 1:
            snippet = img[top:]       # final row runs to the image bottom
        else:
            snippet = img[top:bottom]
        cv2.imwrite(file_path + "_" + column + "_row_" + str(row_idx + 1) + ".jpg", snippet)
        top = bottom  # next row starts where this one ended
176
+
177
# CODE THAT DOES THE SEGMENTATION
#
# For each page image: run the 5-column detector, deskew each detected
# column mask into a rectangular strip, find the printed header with the
# second detector and crop below it, trim the footer, normalize the strip
# size, and slice it into 50 per-row snippet images on disk.

# Per-class processing parameters keyed on the predicted class id:
# (column label, strip width px, crop_bot width, crop_bot line threshold,
#  Hough line threshold for row detection).
# NOTE(review): classes 3 and 4 call crop_bot with its default width (385)
# instead of their strip width -- looks accidental, preserved as-is.
_COLUMN_PARAMS = {
    0: ('lit1', 60, 60, 45, 45),
    1: ('lit2', 60, 60, 45, 45),
    2: ('lang1', 60, 60, 45, 45),
    3: ('lang2', 350, 385, 265, 265),
    4: ('rel', 225, 385, 300, 150),
}

bad = []  # file names that failed anywhere in the pipeline
files = os.listdir(r'C:/Users/Chase/OneDrive/Documents/34/d32/')[:24]
#files = random.sample(os.listdir(), 4)
for d in tqdm(files):
    if d[-4:] == ".jpg":
        try:
            out_dir = "C:/Users/Chase/OneDrive/Documents/service-project/{}".format(numdir + "/" + album)
            # NOTE(review): d is a bare file name; imread resolves it against
            # the current working directory, not the listdir directory above.
            im = cv2.imread(d)
            outputs = predictor(im)
            objects = outputs["instances"].pred_classes
            boxes = outputs["instances"].pred_boxes
            masks = outputs["instances"].pred_masks
            boxes_np = boxes.tensor.cpu().numpy()
            obj_np = objects.cpu().numpy()
            masks_np = masks.cpu().numpy()
            # (The original kept a separate counter `m` that always equaled
            # `box`; it has been removed as redundant.)
            for box in range(len(boxes_np)):
                left = int(boxes_np[box][0])
                top = int(boxes_np[box][1])
                right = int(boxes_np[box][2])
                bottom = int(boxes_np[box][3])
                cropped_array = im[top:bottom, left:right]
                mask = masks_np[box][top:bottom, left:right]
                h, w = mask.shape
                # Column edge x-positions sampled 200 px from the top and
                # bottom of the mask give the per-row skew ("change").
                tl = int(np.argwhere(mask[200] == True)[0])
                bl = int(np.argwhere(mask[h - 200] == True)[0])
                white1 = np.zeros([h, w, 3], dtype=np.uint8)
                white1.fill(255)
                white2 = np.zeros([h, w, 3], dtype=np.uint8)
                white2.fill(255)
                change = (tl - bl) / h
                # Paint everything outside the mask white, then shift each
                # row left by the interpolated edge to deskew the column.
                white3 = (cropped_array * mask[..., None]) + (white1 * ~mask[..., None])
                for i in range(h):
                    start = int(tl - i * change)
                    if len(np.argwhere(mask[i] == True)) > 0:
                        last = int(np.argwhere(mask[i] == True)[-1])
                    else:
                        last = w - start
                    white2[i][0:last - start] = white3[i][start:last]
                cls = int(obj_np[box])
                if cls in _COLUMN_PARAMS:
                    column, col_w, crop_w, crop_lw, hough_lw = _COLUMN_PARAMS[cls]
                    white3 = white2[:, 0:col_w]
                    # The second model locates the printed column header;
                    # everything above its box bottom is discarded.
                    outputs2 = predictor2(white3)
                    boxes_np2 = outputs2["instances"].pred_boxes.tensor.cpu().numpy()
                    bottom2 = int(boxes_np2[0][3])
                    no_top = white3[bottom2:, :]
                    no_bot_or_top = crop_bot(no_top, width=crop_w, line_width_crop=crop_lw)
                    no_bot_or_top = cv2.resize(no_bot_or_top, (col_w, 3000))
                    ys = get_horizontal_lines(no_bot_or_top, width=col_w, line_width=hough_lw)
                    make_snippets(no_bot_or_top, ys, rows=50, pixels_per_row=60,
                                  pixels_on_either_side=15,
                                  file_path=out_dir + "/" + d[:-4], column=column)
        except KeyboardInterrupt:
            raise SystemExit(1)
        except Exception:
            # Best effort: record the failure and keep processing pages.
            bad.append(d)
            traceback.print_exc()
            print("image failed: " + d)

# Guard against an empty input directory (avoids ZeroDivisionError).
print("Percent Error: " + str(len(bad) / len(files) if files else 0.0))
print(bad)
# newline='' per the csv module docs so rows are not double-spaced on Windows.
with open(f'C:/Users/Chase/OneDrive/Documents/service-project/{numdir}.csv', 'a', newline='') as output:
    # /home/jmorri33/fsl_groups/fslg_census/compute/projects/Mexico_Census/error_img/mexico_error_62.csv
    # ../../../../error_img
    writer = csv.writer(output, delimiter=',')
    writer.writerow(bad)