Harpreet1313 committed on
Commit
bf0e92a
·
1 Parent(s): c0356b8

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +133 -132
app.py CHANGED
@@ -6,139 +6,140 @@ import gradio as gr
6
 
7
 
8
  ########################################################################
9
- webCamFeed = True
10
- pathImage = "5.jpg"
11
- cap = cv2.VideoCapture(1)
12
- cap.set(10,160)
13
- heightImg = 700
14
- widthImg = 700
15
- questions=5
16
- choices=5
17
- ans= [1,2,0,2,4]
18
- ########################################################################
19
-
20
-
21
- count=0
22
-
23
- while True:
24
-
25
- if webCamFeed:success, img = cap.read()
26
- else:img = cv2.imread(pathImage)
27
- img = cv2.resize(img, (widthImg, heightImg)) # RESIZE IMAGE
28
- imgFinal = img.copy()
29
- imgBlank = np.zeros((heightImg,widthImg, 3), np.uint8) # CREATE A BLANK IMAGE FOR TESTING DEBUGGING IF REQUIRED
30
- imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # CONVERT IMAGE TO GRAY SCALE
31
- imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1) # ADD GAUSSIAN BLUR
32
- imgCanny = cv2.Canny(imgBlur,10,70) # APPLY CANNY
33
-
34
- try:
35
- ## FIND ALL COUNTOURS
36
- imgContours = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
37
- imgBigContour = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
38
- contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # FIND ALL CONTOURS
39
- cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10) # DRAW ALL DETECTED CONTOURS
40
- rectCon = utlis.rectContour(contours) # FILTER FOR RECTANGLE CONTOURS
41
- biggestPoints= utlis.getCornerPoints(rectCon[0]) # GET CORNER POINTS OF THE BIGGEST RECTANGLE
42
- gradePoints = utlis.getCornerPoints(rectCon[1]) # GET CORNER POINTS OF THE SECOND BIGGEST RECTANGLE
43
-
44
-
45
-
46
- if biggestPoints.size != 0 and gradePoints.size != 0:
47
-
48
- # BIGGEST RECTANGLE WARPING
49
- biggestPoints=utlis.reorder(biggestPoints) # REORDER FOR WARPING
50
- cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20) # DRAW THE BIGGEST CONTOUR
51
- pts1 = np.float32(biggestPoints) # PREPARE POINTS FOR WARP
52
- pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP
53
- matrix = cv2.getPerspectiveTransform(pts1, pts2) # GET TRANSFORMATION MATRIX
54
- imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg)) # APPLY WARP PERSPECTIVE
55
-
56
- # SECOND BIGGEST RECTANGLE WARPING
57
- cv2.drawContours(imgBigContour, gradePoints, -1, (255, 0, 0), 20) # DRAW THE BIGGEST CONTOUR
58
- gradePoints = utlis.reorder(gradePoints) # REORDER FOR WARPING
59
- ptsG1 = np.float32(gradePoints) # PREPARE POINTS FOR WARP
60
- ptsG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]]) # PREPARE POINTS FOR WARP
61
- matrixG = cv2.getPerspectiveTransform(ptsG1, ptsG2)# GET TRANSFORMATION MATRIX
62
- imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150)) # APPLY WARP PERSPECTIVE
63
-
64
- # APPLY THRESHOLD
65
- imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY) # CONVERT TO GRAYSCALE
66
- imgThresh = cv2.threshold(imgWarpGray, 170, 255,cv2.THRESH_BINARY_INV )[1] # APPLY THRESHOLD AND INVERSE
67
-
68
- boxes = utlis.splitBoxes(imgThresh) # GET INDIVIDUAL BOXES
69
- cv2.imshow("Split Test ", boxes[3])
70
- countR=0
71
- countC=0
72
- myPixelVal = np.zeros((questions,choices)) # TO STORE THE NON ZERO VALUES OF EACH BOX
73
- for image in boxes:
74
- #cv2.imshow(str(countR)+str(countC),image)
75
- totalPixels = cv2.countNonZero(image)
76
- myPixelVal[countR][countC]= totalPixels
77
- countC += 1
78
- if (countC==choices):countC=0;countR +=1
79
-
80
- # FIND THE USER ANSWERS AND PUT THEM IN A LIST
81
- myIndex=[]
82
- for x in range (0,questions):
83
- arr = myPixelVal[x]
84
- myIndexVal = np.where(arr == np.amax(arr))
85
- myIndex.append(myIndexVal[0][0])
86
- #print("USER ANSWERS",myIndex)
87
-
88
- # COMPARE THE VALUES TO FIND THE CORRECT ANSWERS
89
- grading=[]
90
- for x in range(0,questions):
91
- if ans[x] == myIndex[x]:
92
- grading.append(1)
93
- else:grading.append(0)
94
- #print("GRADING",grading)
95
- score = (sum(grading)/questions)*100 # FINAL GRADE
96
- #print("SCORE",score)
97
-
98
- # DISPLAYING ANSWERS
99
- utlis.showAnswers(imgWarpColored,myIndex,grading,ans) # DRAW DETECTED ANSWERS
100
- utlis.drawGrid(imgWarpColored) # DRAW GRID
101
- imgRawDrawings = np.zeros_like(imgWarpColored) # NEW BLANK IMAGE WITH WARP IMAGE SIZE
102
- utlis.showAnswers(imgRawDrawings, myIndex, grading, ans) # DRAW ON NEW IMAGE
103
- invMatrix = cv2.getPerspectiveTransform(pts2, pts1) # INVERSE TRANSFORMATION MATRIX
104
- imgInvWarp = cv2.warpPerspective(imgRawDrawings, invMatrix, (widthImg, heightImg)) # INV IMAGE WARP
105
-
106
- # DISPLAY GRADE
107
- imgRawGrade = np.zeros_like(imgGradeDisplay,np.uint8) # NEW BLANK IMAGE WITH GRADE AREA SIZE
108
- cv2.putText(imgRawGrade,str(int(score))+"%",(70,100)
109
- ,cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3) # ADD THE GRADE TO NEW IMAGE
110
- invMatrixG = cv2.getPerspectiveTransform(ptsG2, ptsG1) # INVERSE TRANSFORMATION MATRIX
111
- imgInvGradeDisplay = cv2.warpPerspective(imgRawGrade, invMatrixG, (widthImg, heightImg)) # INV IMAGE WARP
112
-
113
- # SHOW ANSWERS AND GRADE ON FINAL IMAGE
114
- imgFinal = cv2.addWeighted(imgFinal, 1, imgInvWarp, 1,0)
115
- imgFinal = cv2.addWeighted(imgFinal, 1, imgInvGradeDisplay, 1,0)
116
-
117
- # IMAGE ARRAY FOR DISPLAY
 
 
 
 
 
118
  imageArray = ([img,imgGray,imgCanny,imgContours],
119
- [imgBigContour,imgThresh,imgWarpColored,imgFinal])
120
- cv2.imshow("Final Result", imgFinal)
121
- except:
122
- imageArray = ([img,imgGray,imgCanny,imgContours],
123
- [imgBlank, imgBlank, imgBlank, imgBlank])
124
-
125
- # LABELS FOR DISPLAY
126
- lables = [["Original","Gray","Edges","Contours"],
127
- ["Biggest Contour","Threshold","Warpped","Final"]]
128
-
129
- stackedImage = utlis.stackImages(imageArray,0.5,lables)
130
- cv2.imshow("Result",stackedImage)
131
-
132
- # SAVE IMAGE WHEN 's' key is pressed
133
- if cv2.waitKey(1) & 0xFF == ord('s'):
134
- cv2.imwrite("Scanned/myImage"+str(count)+".jpg",imgFinal)
135
- cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),
136
- (1100, 350), (0, 255, 0), cv2.FILLED)
137
- cv2.putText(stackedImage, "Scan Saved", (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),
138
- cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
139
- cv2.imshow('Result', stackedImage)
140
- cv2.waitKey(300)
141
- count += 1
142
 
143
 
144
  app=gr.Interface(fn=process_video,
 
6
 
7
 
8
  ########################################################################
9
+ def process_video(image_path):
10
+ webCamFeed = True
11
+ pathImage = "5.jpg"
12
+ cap = cv2.VideoCapture(1)
13
+ cap.set(10,160)
14
+ heightImg = 700
15
+ widthImg = 700
16
+ questions=5
17
+ choices=5
18
+ ans= [1,2,0,2,4]
19
+ ########################################################################
20
+
21
+
22
+ count=0
23
+
24
+ while True:
25
+
26
+ if webCamFeed:success, img = cap.read()
27
+ else:img = cv2.imread(pathImage)
28
+ img = cv2.resize(img, (widthImg, heightImg)) # RESIZE IMAGE
29
+ imgFinal = img.copy()
30
+ imgBlank = np.zeros((heightImg,widthImg, 3), np.uint8) # CREATE A BLANK IMAGE FOR TESTING DEBUGGING IF REQUIRED
31
+ imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) # CONVERT IMAGE TO GRAY SCALE
32
+ imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1) # ADD GAUSSIAN BLUR
33
+ imgCanny = cv2.Canny(imgBlur,10,70) # APPLY CANNY
34
+
35
+ try:
36
+ ## FIND ALL COUNTOURS
37
+ imgContours = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
38
+ imgBigContour = img.copy() # COPY IMAGE FOR DISPLAY PURPOSES
39
+ contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) # FIND ALL CONTOURS
40
+ cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10) # DRAW ALL DETECTED CONTOURS
41
+ rectCon = utlis.rectContour(contours) # FILTER FOR RECTANGLE CONTOURS
42
+ biggestPoints= utlis.getCornerPoints(rectCon[0]) # GET CORNER POINTS OF THE BIGGEST RECTANGLE
43
+ gradePoints = utlis.getCornerPoints(rectCon[1]) # GET CORNER POINTS OF THE SECOND BIGGEST RECTANGLE
44
+
45
+
46
+
47
+ if biggestPoints.size != 0 and gradePoints.size != 0:
48
+
49
+ # BIGGEST RECTANGLE WARPING
50
+ biggestPoints=utlis.reorder(biggestPoints) # REORDER FOR WARPING
51
+ cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20) # DRAW THE BIGGEST CONTOUR
52
+ pts1 = np.float32(biggestPoints) # PREPARE POINTS FOR WARP
53
+ pts2 = np.float32([[0, 0],[widthImg, 0], [0, heightImg],[widthImg, heightImg]]) # PREPARE POINTS FOR WARP
54
+ matrix = cv2.getPerspectiveTransform(pts1, pts2) # GET TRANSFORMATION MATRIX
55
+ imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg)) # APPLY WARP PERSPECTIVE
56
+
57
+ # SECOND BIGGEST RECTANGLE WARPING
58
+ cv2.drawContours(imgBigContour, gradePoints, -1, (255, 0, 0), 20) # DRAW THE BIGGEST CONTOUR
59
+ gradePoints = utlis.reorder(gradePoints) # REORDER FOR WARPING
60
+ ptsG1 = np.float32(gradePoints) # PREPARE POINTS FOR WARP
61
+ ptsG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]]) # PREPARE POINTS FOR WARP
62
+ matrixG = cv2.getPerspectiveTransform(ptsG1, ptsG2)# GET TRANSFORMATION MATRIX
63
+ imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150)) # APPLY WARP PERSPECTIVE
64
+
65
+ # APPLY THRESHOLD
66
+ imgWarpGray = cv2.cvtColor(imgWarpColored,cv2.COLOR_BGR2GRAY) # CONVERT TO GRAYSCALE
67
+ imgThresh = cv2.threshold(imgWarpGray, 170, 255,cv2.THRESH_BINARY_INV )[1] # APPLY THRESHOLD AND INVERSE
68
+
69
+ boxes = utlis.splitBoxes(imgThresh) # GET INDIVIDUAL BOXES
70
+ cv2.imshow("Split Test ", boxes[3])
71
+ countR=0
72
+ countC=0
73
+ myPixelVal = np.zeros((questions,choices)) # TO STORE THE NON ZERO VALUES OF EACH BOX
74
+ for image in boxes:
75
+ #cv2.imshow(str(countR)+str(countC),image)
76
+ totalPixels = cv2.countNonZero(image)
77
+ myPixelVal[countR][countC]= totalPixels
78
+ countC += 1
79
+ if (countC==choices):countC=0;countR +=1
80
+
81
+ # FIND THE USER ANSWERS AND PUT THEM IN A LIST
82
+ myIndex=[]
83
+ for x in range (0,questions):
84
+ arr = myPixelVal[x]
85
+ myIndexVal = np.where(arr == np.amax(arr))
86
+ myIndex.append(myIndexVal[0][0])
87
+ #print("USER ANSWERS",myIndex)
88
+
89
+ # COMPARE THE VALUES TO FIND THE CORRECT ANSWERS
90
+ grading=[]
91
+ for x in range(0,questions):
92
+ if ans[x] == myIndex[x]:
93
+ grading.append(1)
94
+ else:grading.append(0)
95
+ #print("GRADING",grading)
96
+ score = (sum(grading)/questions)*100 # FINAL GRADE
97
+ #print("SCORE",score)
98
+
99
+ # DISPLAYING ANSWERS
100
+ utlis.showAnswers(imgWarpColored,myIndex,grading,ans) # DRAW DETECTED ANSWERS
101
+ utlis.drawGrid(imgWarpColored) # DRAW GRID
102
+ imgRawDrawings = np.zeros_like(imgWarpColored) # NEW BLANK IMAGE WITH WARP IMAGE SIZE
103
+ utlis.showAnswers(imgRawDrawings, myIndex, grading, ans) # DRAW ON NEW IMAGE
104
+ invMatrix = cv2.getPerspectiveTransform(pts2, pts1) # INVERSE TRANSFORMATION MATRIX
105
+ imgInvWarp = cv2.warpPerspective(imgRawDrawings, invMatrix, (widthImg, heightImg)) # INV IMAGE WARP
106
+
107
+ # DISPLAY GRADE
108
+ imgRawGrade = np.zeros_like(imgGradeDisplay,np.uint8) # NEW BLANK IMAGE WITH GRADE AREA SIZE
109
+ cv2.putText(imgRawGrade,str(int(score))+"%",(70,100)
110
+ ,cv2.FONT_HERSHEY_COMPLEX,3,(0,255,255),3) # ADD THE GRADE TO NEW IMAGE
111
+ invMatrixG = cv2.getPerspectiveTransform(ptsG2, ptsG1) # INVERSE TRANSFORMATION MATRIX
112
+ imgInvGradeDisplay = cv2.warpPerspective(imgRawGrade, invMatrixG, (widthImg, heightImg)) # INV IMAGE WARP
113
+
114
+ # SHOW ANSWERS AND GRADE ON FINAL IMAGE
115
+ imgFinal = cv2.addWeighted(imgFinal, 1, imgInvWarp, 1,0)
116
+ imgFinal = cv2.addWeighted(imgFinal, 1, imgInvGradeDisplay, 1,0)
117
+
118
+ # IMAGE ARRAY FOR DISPLAY
119
+ imageArray = ([img,imgGray,imgCanny,imgContours],
120
+ [imgBigContour,imgThresh,imgWarpColored,imgFinal])
121
+ cv2.imshow("Final Result", imgFinal)
122
+ except:
123
  imageArray = ([img,imgGray,imgCanny,imgContours],
124
+ [imgBlank, imgBlank, imgBlank, imgBlank])
125
+
126
+ # LABELS FOR DISPLAY
127
+ lables = [["Original","Gray","Edges","Contours"],
128
+ ["Biggest Contour","Threshold","Warpped","Final"]]
129
+
130
+ stackedImage = utlis.stackImages(imageArray,0.5,lables)
131
+ cv2.imshow("Result",stackedImage)
132
+
133
+ # SAVE IMAGE WHEN 's' key is pressed
134
+ if cv2.waitKey(1) & 0xFF == ord('s'):
135
+ cv2.imwrite("Scanned/myImage"+str(count)+".jpg",imgFinal)
136
+ cv2.rectangle(stackedImage, ((int(stackedImage.shape[1] / 2) - 230), int(stackedImage.shape[0] / 2) + 50),
137
+ (1100, 350), (0, 255, 0), cv2.FILLED)
138
+ cv2.putText(stackedImage, "Scan Saved", (int(stackedImage.shape[1] / 2) - 200, int(stackedImage.shape[0] / 2)),
139
+ cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
140
+ cv2.imshow('Result', stackedImage)
141
+ cv2.waitKey(300)
142
+ count += 1
 
 
 
 
143
 
144
 
145
  app=gr.Interface(fn=process_video,