Spaces:
Runtime error
Runtime error
Upload OMR_Main.py
Browse files- OMR_Main.py +139 -0
OMR_Main.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
import utlis
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
########################################################################
# PIPELINE CONFIGURATION
########################################################################
webCamFeed = True        # True: grab frames from the camera; False: load pathImage
pathImage = "5.jpg"      # still image used when webCamFeed is False
cameraIndex = 1          # NOTE(review): index 1 assumes a *second* camera is attached;
                         # on single-camera machines capture fails — confirm, 0 is the usual default
cap = cv2.VideoCapture(cameraIndex)
cap.set(10, 160)         # property id 10 is CAP_PROP_BRIGHTNESS in OpenCV — TODO confirm intent
heightImg = 700          # working image height in pixels (sheet is warped to this size)
widthImg = 700           # working image width in pixels
questions = 5            # number of question rows on the answer sheet
choices = 5              # number of answer bubbles per question
ans = [1, 2, 0, 2, 4]    # 0-based index of the correct bubble for each question
########################################################################


count = 0                # number of scans saved so far (used in the output file name)
|
| 20 |
+
|
| 21 |
+
while True:

    # ------------------------------------------------------------------
    # 1. ACQUIRE AND PRE-PROCESS THE INPUT IMAGE
    # ------------------------------------------------------------------
    if webCamFeed:
        success, img = cap.read()
        # BUGFIX: the success flag was previously ignored; a failed grab
        # returned img=None and crashed cv2.resize. Fall back to the file.
        if not success or img is None:
            img = cv2.imread(pathImage)
    else:
        img = cv2.imread(pathImage)
    img = cv2.resize(img, (widthImg, heightImg))             # RESIZE IMAGE
    imgFinal = img.copy()                                    # canvas for the final overlay
    imgBlank = np.zeros((heightImg, widthImg, 3), np.uint8)  # blank placeholder for the debug grid
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)          # CONVERT IMAGE TO GRAY SCALE
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)           # ADD GAUSSIAN BLUR
    imgCanny = cv2.Canny(imgBlur, 10, 70)                    # APPLY CANNY edge map

    # Hoisted out of the try block so the except path (and the default
    # imageArray below) can always reference them.
    imgContours = img.copy()      # copy for drawing all contours
    imgBigContour = img.copy()    # copy for drawing the two biggest rectangles

    # BUGFIX: imageArray was undefined when detection succeeded but the
    # corner-point check below was false (NameError at stackImages, which
    # sits outside the try). Provide a blank default up front.
    imageArray = ([img, imgGray, imgCanny, imgContours],
                  [imgBlank, imgBlank, imgBlank, imgBlank])

    try:
        # --------------------------------------------------------------
        # 2. FIND THE ANSWER SHEET AND THE GRADE BOX
        # --------------------------------------------------------------
        contours, hierarchy = cv2.findContours(imgCanny, cv2.RETR_EXTERNAL,
                                               cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(imgContours, contours, -1, (0, 255, 0), 10)
        rectCon = utlis.rectContour(contours)              # keep rectangular contours only
        biggestPoints = utlis.getCornerPoints(rectCon[0])  # answer area = biggest rectangle
        gradePoints = utlis.getCornerPoints(rectCon[1])    # grade box = second biggest

        if biggestPoints.size != 0 and gradePoints.size != 0:

            # ----------------------------------------------------------
            # 3. WARP THE ANSWER AREA TO A TOP-DOWN VIEW
            # ----------------------------------------------------------
            biggestPoints = utlis.reorder(biggestPoints)   # consistent corner order for warping
            cv2.drawContours(imgBigContour, biggestPoints, -1, (0, 255, 0), 20)
            pts1 = np.float32(biggestPoints)
            pts2 = np.float32([[0, 0], [widthImg, 0],
                               [0, heightImg], [widthImg, heightImg]])
            matrix = cv2.getPerspectiveTransform(pts1, pts2)
            imgWarpColored = cv2.warpPerspective(img, matrix, (widthImg, heightImg))

            # ----------------------------------------------------------
            # 4. WARP THE GRADE BOX
            # ----------------------------------------------------------
            cv2.drawContours(imgBigContour, gradePoints, -1, (255, 0, 0), 20)
            gradePoints = utlis.reorder(gradePoints)
            ptsG1 = np.float32(gradePoints)
            ptsG2 = np.float32([[0, 0], [325, 0], [0, 150], [325, 150]])
            matrixG = cv2.getPerspectiveTransform(ptsG1, ptsG2)
            imgGradeDisplay = cv2.warpPerspective(img, matrixG, (325, 150))

            # ----------------------------------------------------------
            # 5. THRESHOLD AND SPLIT INTO ANSWER BUBBLES
            # ----------------------------------------------------------
            imgWarpGray = cv2.cvtColor(imgWarpColored, cv2.COLOR_BGR2GRAY)
            imgThresh = cv2.threshold(imgWarpGray, 170, 255,
                                      cv2.THRESH_BINARY_INV)[1]  # marks become white

            boxes = utlis.splitBoxes(imgThresh)  # one sub-image per bubble
            cv2.imshow("Split Test ", boxes[3])

            # Count the marked (non-zero) pixels in every bubble; the
            # fullest bubble in a row is taken as the user's answer.
            myPixelVal = np.zeros((questions, choices))
            countR = 0
            countC = 0
            for image in boxes:
                myPixelVal[countR][countC] = cv2.countNonZero(image)
                countC += 1
                if countC == choices:
                    countC = 0
                    countR += 1

            # ----------------------------------------------------------
            # 6. PICK THE USER'S ANSWER PER QUESTION AND GRADE
            # ----------------------------------------------------------
            myIndex = []
            for x in range(0, questions):
                arr = myPixelVal[x]
                myIndexVal = np.where(arr == np.amax(arr))
                myIndex.append(myIndexVal[0][0])

            grading = [1 if ans[x] == myIndex[x] else 0
                       for x in range(0, questions)]
            score = (sum(grading) / questions) * 100  # FINAL GRADE in percent

            # ----------------------------------------------------------
            # 7. DRAW RESULTS BACK ONTO THE ORIGINAL PERSPECTIVE
            # ----------------------------------------------------------
            utlis.showAnswers(imgWarpColored, myIndex, grading, ans)
            utlis.drawGrid(imgWarpColored)
            imgRawDrawings = np.zeros_like(imgWarpColored)          # blank canvas, warp size
            utlis.showAnswers(imgRawDrawings, myIndex, grading, ans)
            invMatrix = cv2.getPerspectiveTransform(pts2, pts1)     # inverse transformation
            imgInvWarp = cv2.warpPerspective(imgRawDrawings, invMatrix,
                                             (widthImg, heightImg))

            # Render the score into the grade box and un-warp it too.
            imgRawGrade = np.zeros_like(imgGradeDisplay, np.uint8)
            cv2.putText(imgRawGrade, str(int(score)) + "%", (70, 100),
                        cv2.FONT_HERSHEY_COMPLEX, 3, (0, 255, 255), 3)
            invMatrixG = cv2.getPerspectiveTransform(ptsG2, ptsG1)
            imgInvGradeDisplay = cv2.warpPerspective(imgRawGrade, invMatrixG,
                                                     (widthImg, heightImg))

            # Overlay answers and grade onto the final image.
            imgFinal = cv2.addWeighted(imgFinal, 1, imgInvWarp, 1, 0)
            imgFinal = cv2.addWeighted(imgFinal, 1, imgInvGradeDisplay, 1, 0)

            imageArray = ([img, imgGray, imgCanny, imgContours],
                          [imgBigContour, imgThresh, imgWarpColored, imgFinal])
            cv2.imshow("Final Result", imgFinal)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any detection failure simply shows blanks.
        imageArray = ([img, imgGray, imgCanny, imgContours],
                      [imgBlank, imgBlank, imgBlank, imgBlank])

    # LABELS FOR DISPLAY
    lables = [["Original", "Gray", "Edges", "Contours"],
              ["Biggest Contour", "Threshold", "Warpped", "Final"]]

    stackedImage = utlis.stackImages(imageArray, 0.5, lables)
    cv2.imshow("Result", stackedImage)

    # SAVE IMAGE WHEN 's' key is pressed
    if cv2.waitKey(1) & 0xFF == ord('s'):
        cv2.imwrite("Scanned/myImage" + str(count) + ".jpg", imgFinal)
        # Flash a "Scan Saved" banner over the stacked preview.
        cv2.rectangle(stackedImage,
                      ((int(stackedImage.shape[1] / 2) - 230),
                       int(stackedImage.shape[0] / 2) + 50),
                      (1100, 350), (0, 255, 0), cv2.FILLED)
        cv2.putText(stackedImage, "Scan Saved",
                    (int(stackedImage.shape[1] / 2) - 200,
                     int(stackedImage.shape[0] / 2)),
                    cv2.FONT_HERSHEY_DUPLEX, 3, (0, 0, 255), 5, cv2.LINE_AA)
        cv2.imshow('Result', stackedImage)
        cv2.waitKey(300)
        count += 1