Abs6187 commited on
Commit
56ab51d
·
verified ·
1 Parent(s): 91ced9f

Upload 28 files

Browse files
.gitattributes CHANGED
@@ -1,2 +1,7 @@
1
  Helmet-Detect-model/best.pt filter=lfs diff=lfs merge=lfs -text
2
  Helmet-Detect-model/yolov11nbest.pt filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
1
  Helmet-Detect-model/best.pt filter=lfs diff=lfs merge=lfs -text
2
  Helmet-Detect-model/yolov11nbest.pt filter=lfs diff=lfs merge=lfs -text
3
+ ANPR_IND/licence_character.pt filter=lfs diff=lfs merge=lfs -text
4
+ ANPR_IND/licence_plat.pt filter=lfs diff=lfs merge=lfs -text
5
+ ANPR_IND/output/sample.jpg filter=lfs diff=lfs merge=lfs -text
6
+ ANPR_IND/sample_image3.jpg filter=lfs diff=lfs merge=lfs -text
7
+ ANPR_IND/sample/video/sample_video.mp4 filter=lfs diff=lfs merge=lfs -text
ANPR_IND/.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
ANPR_IND/README.md ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: ANPR IND
3
+ emoji: 🔥
4
+ colorFrom: gray
5
+ colorTo: indigo
6
+ sdk: gradio
7
+ sdk_version: 4.13.0
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ ---
12
+
13
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
ANPR_IND/app.py ADDED
@@ -0,0 +1,46 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import cv2
import gradio as gr
import numpy as np
from scripts.charExtraction import CharExtraction
from scripts.bboxAnnotator import BBOXAnnotator

# Weight files for the two-stage detector: plate localisation, then characters.
wPathPlat = "licence_plat.pt"
wPathChar = "licence_character.pt"
# Class index -> character lookup used by the character-recognition model.
classList = np.array(['A','B','C','D','E','F','G','H','I','J','K','L','M',
                      'N','O','P','Q','R','S','T','U','V','W','X','Y','Z',
                      '0','1','2','3','4','5','6','7','8','9'])
# Every cropped plate is resized to this (width, height) before character detection.
sizePlat = (416,200)

extractor = CharExtraction(wPlatePath=wPathPlat, wCharacterPath=wPathChar, classList=classList,
                           sizePlate=sizePlat, conf=0.5)
annotator = BBOXAnnotator()


def getAnnotatedImage(frame, conf):
    """Run plate + character detection on *frame* and draw the results.

    Returns the annotated image and a {plate_number: confidence} mapping.
    """
    bbox, plateNum, confidence = extractor.predict(image=frame, conf=conf)
    annotateImg, plateNum = annotator.draw_bbox(frame, bbox, plateNum)
    prob_dict = dict(zip(plateNum, confidence))
    return annotateImg, prob_dict


def video_identity(frame, conf=0.45):
    """Gradio entry point: annotate one frame at the requested confidence."""
    return getAnnotatedImage(frame, conf)


demo = gr.Interface(fn=video_identity,
                    inputs=[gr.Image(label='Input image'), gr.Slider(minimum=0.1, maximum=1.0, label='Conf value')],
                    outputs=[gr.Image(label='Result'), gr.Label(label='Plate Number Detected')],
                    examples=[["sample_image2.jpg"], ["sample_image3.jpg"], ["sample_image5.jpg"], ["sample_image6.jpg"]],
                    cache_examples=True)

if __name__ == "__main__":
    demo.queue()
    demo.launch()
ANPR_IND/configs/__pycache__/config.cpython-39.pyc ADDED
Binary file (616 Bytes). View file
 
ANPR_IND/configs/config.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import numpy as np

# Directory the app is launched from; all weight paths are resolved against it.
current_dir = os.getcwd()

# Central configuration for the ANPR pipeline.
#   sizePlat           : (width, height) every cropped plate is resized to
#                        before character detection.
#   weightPlatDir      : YOLO weights for plate localisation.
#   weightCharDir      : YOLO weights for character recognition.
#   classListCharacter : class index -> character lookup for the character model.
Configuration = {
    'sizePlat': (416, 200),
    # Build paths from separate components so they resolve correctly on POSIX
    # as well as Windows: the original literals "weights\licence_plat.pt"
    # embedded a backslash that on Linux becomes part of the file name.
    'weightPlatDir': os.path.join(current_dir, "weights", "licence_plat.pt"),
    'weightCharDir': os.path.join(current_dir, "weights", "licence_character.pt"),
    'classListCharacter': np.array(['A','B','C','D','E','F','G','H','I','J','K','L','M',
                                    'N','O','P','Q','R','S','T','U','V','W','X','Y','Z',
                                    '0','1','2','3','4','5','6','7','8','9'])
}
ANPR_IND/licence_character.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:84025e3a1af9d88f9dee0320a139b7ee05a9adfbdeae02532094cdba6781fe32
3
+ size 87648392
ANPR_IND/licence_plat.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3d6ae8c01ce44cbed9cb40cda72da699c45b08520156fe558c936cfdb989b209
3
+ size 87605512
ANPR_IND/output/sample.jpg ADDED

Git LFS Details

  • SHA256: 1e6488650be66438201d99d5e26e321006b0ee6995ca85a9b455eb3403097b04
  • Pointer size: 131 Bytes
  • Size of remote file: 214 kB
ANPR_IND/output/sample.mp4 ADDED
Binary file (44 Bytes). View file
 
ANPR_IND/packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ python3-opencv
ANPR_IND/requirements.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ gradio
2
+ opencv-python
3
+ ultralytics
ANPR_IND/sample/video/sample_video.mp4 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73dfcf18c6bbe34e552dcf72f099ae993a5fb3097974f8eed99280141c4f93ad
3
+ size 852036
ANPR_IND/sample_image2.jpg ADDED
ANPR_IND/sample_image3.jpg ADDED

Git LFS Details

  • SHA256: 9ea406a52dd25e5a2c1fa8b153cc9d02e1b3a851f2d86f8af48773b816d7f501
  • Pointer size: 131 Bytes
  • Size of remote file: 160 kB
ANPR_IND/sample_image5.jpg ADDED
ANPR_IND/sample_image6.jpg ADDED
ANPR_IND/scripts/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+
ANPR_IND/scripts/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (165 Bytes). View file
 
ANPR_IND/scripts/__pycache__/bboxAnnotator.cpython-312.pyc ADDED
Binary file (1.31 kB). View file
 
ANPR_IND/scripts/__pycache__/bboxAnnotator.cpython-39.pyc ADDED
Binary file (878 Bytes). View file
 
ANPR_IND/scripts/__pycache__/charExtraction.cpython-312.pyc ADDED
Binary file (3.95 kB). View file
 
ANPR_IND/scripts/__pycache__/charExtraction.cpython-39.pyc ADDED
Binary file (2.42 kB). View file
 
ANPR_IND/scripts/__pycache__/extractor.cpython-312.pyc ADDED
Binary file (3.63 kB). View file
 
ANPR_IND/scripts/__pycache__/extractor.cpython-39.pyc ADDED
Binary file (2.35 kB). View file
 
ANPR_IND/scripts/bboxAnnotator.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+
4
class BBOXAnnotator:
    """Draws plate bounding boxes (and their decoded numbers) onto an image."""

    def draw_bbox(self, ImgPath, bboxPlate, plateNumber):
        """Annotate *ImgPath* (an image array, modified in place) with every
        box in *bboxPlate*, labelling each box with the matching entry of
        *plateNumber* when the two sequences have equal length.

        Returns the annotated image and the plate-number list (replaced by
        ['No Plate Detected'] when it is empty).
        """
        img = ImgPath
        color = (0, 255, 0)   # green boxes and labels
        thickness = 2         # line/text thickness in pixels

        for idx, box in enumerate(bboxPlate):
            # Coordinates arrive as floats (model output); cv2 wants ints.
            x1, y1, x2, y2 = map(int, box)
            cv2.rectangle(img, (x1, y1), (x2, y2), color, thickness)

            # Only label when we have exactly one decoded number per box.
            if len(plateNumber) == len(bboxPlate):
                print('plateNumber : ', plateNumber)
                cv2.putText(img, plateNumber[idx], (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color, thickness)

        if len(plateNumber) == 0:
            plateNumber = ['No Plate Detected']

        return img, plateNumber
ANPR_IND/scripts/charExtraction.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .extractor import CharExtractor
2
+ from ultralytics import YOLO
3
+ import numpy as np
4
+ import cv2
5
+
6
+
7
class CharExtraction:
    """Two-stage licence-plate reader: one YOLO model localises plates, a
    second detects the characters on each cropped plate, and CharExtractor
    orders those characters into plate-number strings.
    """

    def __init__(self, wPlatePath, wCharacterPath, classList, sizePlate, conf):
        # wPlatePath / wCharacterPath: paths to the two YOLO weight files.
        # classList: class index -> character lookup for the character model.
        # sizePlate: (width, height) each cropped plate is resized to.
        # conf: default detection confidence threshold.
        self.wPlatePath = wPlatePath
        self.wCharacterPath = wCharacterPath
        self.classList = classList
        self.width, self.height = sizePlate
        self.conf = conf

        self.model_plat = YOLO(self.wPlatePath)
        self.model_char = YOLO(self.wCharacterPath)

        self.extractor = CharExtractor()

    def predict(self, image, conf):
        """Detect plates in *image* and read their characters.

        Returns (plate boxes in xyxy form, list of plate-number strings,
        per-plate detection confidences).
        """
        # Fix: test None by identity (`is None`), not equality — `== None`
        # can misbehave with objects overriding __eq__ and is unidiomatic.
        if conf is None:
            self.conf = 0.3
        else:
            self.conf = conf

        charpredict = []
        # Stage 1: plate localisation on the full frame.
        platePrediction = self.model_plat(image, conf=self.conf, verbose=False)
        platBoxes, croppedPlateImg, confident = self._get_cropped_plate(platePrediction)

        # Stage 2: character detection on each cropped plate.
        for croppImg in croppedPlateImg:
            characterPrediction = self.model_char(croppImg, conf=self.conf, verbose=False)
            charpredict.append(characterPrediction)

        characterBBOX, characterClass = self._get_bboxClass_char(charpredict, self.classList)
        # Order the detected characters spatially into plate-number strings.
        plateNumber = self.extractor._extract_bbox(characterBBOX, characterClass)

        return platBoxes, plateNumber, confident

    def _get_cropped_plate(self, imgPlate):
        """Crop every detected plate out of the original frame.

        Falls back to the whole frame when no plate was found, so stage 2
        still receives an image to scan. Returns (xyxy boxes, list of cropped
        images, detection confidences).
        """
        croppedImage = []  # may hold several crops, one per detected plate
        boxes = imgPlate[0].boxes
        originalImage = imgPlate[0].orig_img
        confid = boxes.conf.numpy()
        if len(boxes) != 0:
            for box in boxes.xyxy:
                x1, y1, x2, y2 = int(box[0]), int(box[1]), int(box[2]), int(box[3])
                croppedPlateImage = originalImage[y1:y2, x1:x2, :]
                # Normalise the crop so the character model sees a fixed size.
                croppedPlateImage = cv2.resize(croppedPlateImage, (self.width, self.height))
                croppedImage.append(croppedPlateImage)
        else:
            croppedImage.append(originalImage)

        return boxes.xyxy, croppedImage, confid

    def _get_bboxClass_char(self, imgCroppedPlate, classList):
        """Collect character boxes (xywh) and their decoded characters for
        each cropped plate; plates with no detected characters are skipped.
        """
        boxChar, classC = [], []
        for imgCropped in imgCroppedPlate:
            boxes = imgCropped[0].boxes
            if len(boxes.cls) != 0:
                classId = [int(x) for x in boxes.cls.numpy()]
                classCharacter = np.array(classList[classId])
                characterBBOX = boxes.xywh
                boxChar.append(characterBBOX)
                classC.append(classCharacter)

        return boxChar, classC
ANPR_IND/scripts/extractor.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import matplotlib.pyplot as plt
4
+
5
class CharExtractor:
    """Orders detected character boxes into plate-number strings.

    When four or more character boxes are kept, the four bottom-most (by y)
    are dropped before reading left-to-right. NOTE(review): presumably this
    discards a plate's bottom row (e.g. the expiry date on Indonesian
    plates) — confirm against the training data.
    """

    def __init__(self):
        # Scratch state shared between _extract_bbox and _get_xy.
        self.classes = None      # per-plate label arrays, parallel to bbox
        self.class_char = []     # characters accumulated for the current plate
        self.y_class = None      # labels sorted top-to-bottom (and trimmed)
        self.x_class = None      # labels sorted left-to-right
        self.bbox = None
        self.x_bbox = None
        self.y_bbox = None

    def _extract_bbox(self, bboxes, classes):
        """Build one plate-number string per entry of *bboxes*.

        bboxes  : list of (N, 4) xywh arrays, one per detected plate.
        classes : list of length-N label arrays, parallel to *bboxes*.
        Returns a list of strings ('' when a plate had no characters).
        """
        plate_number = []
        self.bbox = bboxes
        self.classes = classes

        for ix, bbox in enumerate(bboxes):
            if len(bbox) != 0:
                lowest_x, highest_y = self._get_xy(bbox, ix)
                for idx, point in enumerate(self.y_bbox):
                    x, y = point[0], point[1]
                    # lowest_x/highest_y were computed over the kept boxes,
                    # so the bounds test admits every kept box; the size
                    # guard skips degenerate single-character results.
                    if y <= highest_y and x >= lowest_x and self.x_class.size > 1:
                        self.class_char.append(self.x_class[idx])

                plate_num = ''.join(str(v) for v in self.class_char)
                self.class_char.clear()
            else:
                plate_num = ''

            plate_number.append(plate_num)

        return plate_number

    def _get_xy(self, bbox, ix):
        """Sort plate *ix*'s boxes top-to-bottom (dropping the last four when
        there are at least four) then left-to-right; cache the sorted boxes
        and labels and return (min x, max y) of the kept boxes.
        """
        y_id = bbox[:, 1].argsort()
        if len(bbox[y_id]) >= 4:
            sorted_y = bbox[y_id][:-4]      # drop the four bottom-most boxes
            self.y_bbox = sorted_y
            self.y_class = np.array(self.classes[ix][y_id])[:-4]
        else:
            sorted_y = bbox[y_id]
            self.y_bbox = sorted_y
            self.y_class = np.array(self.classes[ix][y_id])

        highest_y = max([x[1] for x in sorted_y])

        x_id = sorted_y[:, 0].argsort()
        sorted_x = sorted_y[x_id]
        self.x_bbox = sorted_x
        # Bug fix: the original applied x_id to the labels only in the >= 4
        # branch, so plates with fewer kept boxes were read in detection
        # order instead of left-to-right order.
        self.x_class = self.y_class[x_id]
        lowest_x = min([x[0] for x in sorted_x])

        return lowest_x, highest_y
ANPR_IND/test.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ 'test application'