Muhammad7777 committed on
Commit
e31183b
·
verified ·
1 Parent(s): 03f4eff

Upload folder using huggingface_hub

Browse files
app.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import cv2
from inference import InferencePipeline

# Load the ONNX inference pipeline once at module import so every
# Gradio request reuses the same session instead of re-reading the model.
pipeline = InferencePipeline("artifacts/model.onnx")
7
def predict(image):
    """Classify a banknote image uploaded through the Gradio UI.

    Args:
        image: H x W x C numpy array from the Gradio image widget, or
            ``None`` when the user clicks Predict without uploading.

    Returns:
        The prediction string produced by the pipeline, or a prompt to
        upload an image when none was provided.
    """
    # Guard against an empty submission: Gradio passes None when no image
    # is uploaded, and the original code crashed on ``image.shape``.
    if image is None:
        return "Please upload an image first."
    # Rotate portrait images to landscape so the fixed 640x480 resize
    # keeps the banknote orientation consistent with the model's input.
    if image.shape[0] > image.shape[1]:
        image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
    # INTER_AREA is OpenCV's recommended interpolation for downscaling.
    image = cv2.resize(image, (640, 480), interpolation=cv2.INTER_AREA)
    return pipeline(image)
12
+
13
+
14
# Build the Gradio UI: image input and predict button in the left column,
# a read-only text box for the model's answer in the right column.
with gr.Blocks() as demo:
    gr.Markdown("# Banknotes Classifier",)
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="numpy", label="Input Image")
            predict_button = gr.Button("Predict")
        with gr.Column():
            output = gr.Textbox(label="Prediction")
    # Run predict on the uploaded image when the button is clicked.
    predict_button.click(predict, inputs=image_input, outputs=output)

# Launch the web server only when run as a script (not when imported).
if __name__ == "__main__":
    demo.launch()
artifacts/.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ /model.onnx
artifacts/checkpoints/.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ /best-checkpoint.ckpt
artifacts/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:88b71d7447a1c0be15eb34c144d2cecac9b107207d15ceaadbafcd2c633d489c
3
+ size 16103539
inference.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ import onnxruntime as ort
4
+
5
+
6
class InferencePipeline:
    """ONNX-runtime classifier for Egyptian banknote images.

    Wraps an ``onnxruntime.InferenceSession`` and maps the model's 14
    output classes to human-readable denomination/side labels.
    """

    def __init__(self, model_path):
        """Create an inference session for the ONNX model at ``model_path``."""
        self.model = ort.InferenceSession(model_path)
        # Cache tensor names once so each call avoids repeated lookups.
        self.input_name = self.model.get_inputs()[0].name
        self.output_name = self.model.get_outputs()[0].name
        # Class index -> denomination and side of the banknote.
        self.mapping = {
            0: "1 pound face",
            1: "1 pound back",
            2: "5 pounds face",
            3: "5 pounds back",
            4: "10 pounds face",
            5: "10 pounds back",
            6: "20 pounds face",
            7: "20 pounds back",
            8: "50 pounds face",
            9: "50 pounds back",
            10: "100 pounds face",
            11: "100 pounds back",
            12: "200 pounds face",
            13: "200 pounds back",
        }

    def __call__(self, image):
        """Classify one H x W x C image and return a human-readable string.

        Assumes the caller has already resized the image to the spatial
        size the model expects (the app resizes to 640x480 upstream).
        """
        image = self._prepare_input(image)
        output = self.predict(image)
        # Compute the winning class index once and reuse it (the original
        # evaluated argmax twice).
        class_idx = output.argmax().item()
        predicted_class = self.mapping[class_idx]
        # NOTE(review): this treats the raw model output as probabilities;
        # if the ONNX graph emits logits, a softmax is needed first — confirm.
        predicted_confidence = output[0][class_idx] * 100
        return f"The model is {predicted_confidence:.2f}% confident that the image is {predicted_class}"

    def _prepare_input(self, image):
        """Convert an H x W x C image to a float32 NCHW batch of size 1."""
        image = image.astype(np.float32)
        image = image.transpose(2, 0, 1)  # HWC -> CHW
        image = np.expand_dims(image, axis=0)  # add batch dimension
        return image

    def predict(self, image):
        """Run the ONNX session on a prepared batch; return the raw output array."""
        output = self.model.run([self.output_name], {self.input_name: image})[0]
        return output
requirements.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ dvc==3.53.1
2
+ dvc-gdrive
3
+ numpy==1.26.0
4
+ opencv_python_headless==4.10.0.84
5
+ onnxruntime==1.18.1