Isaacgv commited on
Commit
c66fb0b
·
1 Parent(s): 56efab4

add model

Browse files
Dockerfile ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.8.13-bullseye

LABEL author="Bastien GUILLAUME"
LABEL version="0.0.2"

# ffmpeg / libsm6 / libxext6 are runtime dependencies of image handling.
# --no-install-recommends plus the apt-list cleanup keeps the layer small.
RUN apt-get update \
    && apt-get upgrade -y \
    && apt-get install -y --no-install-recommends ffmpeg libsm6 libxext6 \
    && rm -rf /var/lib/apt/lists/*

# Install Python dependencies first so this layer stays cached while the
# application code changes.
COPY requirements.txt /opt/app/
WORKDIR /opt/app
RUN pip install --no-cache-dir -r requirements.txt

COPY inference.py /opt/app/

# EXPOSE is documentation-only metadata; conventionally placed before CMD.
EXPOSE 80
CMD ["python", "inference.py"]
app.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # pylint: disable=no-name-in-module
2
+ # pylint: disable=no-member
3
+
4
+ """
5
+ Author : Bastien GUILLAUME
6
+ Version : 0.0.1
7
+ Date : 2023-03-16
8
+
9
+ Title : Inference With Gradio running an onnxruntime backend
10
+ """
11
+
12
+ import os
13
+
14
+ import gradio as gr
15
+ from config_parser import *
16
+ from inferencer import *
17
+
18
+ gr.close_all()
19
+
20
+
21
def infertace_builder():
    """Build one gr.Interface per configured task and return them as a list.

    Each interface wires three inputs (hidden task id, product dropdown,
    image upload) to the shared ``analysis`` function and renders the
    classification result in a gr.Label.

    NOTE(review): the name looks like a typo for "interface_builder"; kept
    as-is because the module-level caller uses this spelling.
    """
    interface_list = []
    logging.log(level=logging.INFO, msg=f"Building Interfaces")
    for task_number in range(0, len(tasks)):
        logging.log(level=logging.INFO, msg=f"Building Interface n° {task_number}")
        interface_list.append(
            gr.Interface(
                title=tasks[task_number],
                description=tasks_description[task_number],
                fn=analysis,
                # if tasks_name[task_number] == "Étiquetage"
                # else corck_screwing_analysis,
                allow_flagging="never",
                inputs=[
                    # Hidden textbox smuggles the config key ("task1", "task2", ...)
                    # through to analysis() as its first argument.
                    gr.Textbox(
                        value=f"task{task_number+1}", visible=False, interactive=False
                    ),
                    # Product picker; hidden when the task has a single product.
                    gr.Dropdown(
                        tasks_products[task_number],
                        type="value",
                        value=tasks_products[task_number][0],
                        label="Choix",
                        visible=True if len(tasks_products[task_number]) > 1 else False,
                        info="Sur quel type de produit, voulez vous lancer l'analyse ?",
                    ),
                    # Uploaded image is delivered to analysis() as an RGB numpy array.
                    gr.Image(
                        label="Image à analyser",
                        shape=None,
                        image_mode="RGB",
                        invert_colors=False,
                        source="upload",
                        tool="editor",
                        type="numpy",
                    ),
                ],
                outputs=gr.Label(
                    label="Résultats",
                ),
            )
        )
    logging.log(level=logging.INFO, msg=f"Interfaces ready\n")
    return interface_list
63
+
64
+
65
# Build all task interfaces once at import time.
generated_interface = infertace_builder()

# One tab per task; tab labels come from the tasks' short names.
iface_generated = gr.TabbedInterface(
    interface_list=generated_interface,
    tab_names=tasks_name,
    title=title,
)

if __name__ == "__main__":
    logging.log(level=logging.INFO, msg="Starting the Gradio server...")
    # Bind on all interfaces; the PORT env var overrides the default 8150.
    iface_generated.launch(
        server_name="0.0.0.0", server_port=int(os.getenv("PORT", "8150"))
    )
    logging.log(level=logging.INFO, msg="Stopping the Gradio server...")
build_image.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
#!/bin/bash -e
# Build the inference image and push it to Google Container Registry.
image_name="gcr.io/tough-variety-310920/openvino_inference"
image_tag="1.0.1"
full_image_name="${image_name}:${image_tag}"

# Run from the script's own directory so the Dockerfile is found.
cd "$(dirname "$0")"

docker build -t "${full_image_name}" .
docker push "${full_image_name}"
config_parser.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # pylint: disable=no-name-in-module
2
+ # pylint: disable=no-member
3
+
4
+ """
5
+ Author : Bastien GUILLAUME
6
+ Version : 0.0.1
7
+ Date : 2023-03-16
8
+
9
+ Title : Inference With Gradio running an onnxruntime backend
10
+ """
11
+
12
+ import json
13
+ import logging
14
+ import os
15
+ from functools import reduce
16
+
17
# Timestamped log format shared by every module that imports this one.
logging.basicConfig(
    format="%(asctime)s.%(msecs)03d %(levelname)-8s %(message)s",
    level=logging.INFO,
    datefmt="%Y%m%d-%H%M%S",
)

# Load the demo configuration once at import time; `config` is read by
# app.py and inferencer.py. NOTE(review): the path is hard-coded, so the
# JSON file must live in the process working directory.
with open("demo_yvesrocher.json") as config_file:
    config = json.load(config_file)

logging.log(level=logging.DEBUG, msg=f"Loaded config file : {json.dumps(config)}")
27
+
28
+
29
def deep_get(dictionary, keys, default=None):
    """Fetch a nested value from *dictionary* via a dotted key path.

    ``deep_get(cfg, "a.b.c")`` returns ``cfg["a"]["b"]["c"]``, or *default*
    as soon as a segment is missing or a non-dict value is reached.
    """
    current = dictionary
    for segment in keys.split("."):
        if isinstance(current, dict):
            current = current.get(segment, default)
        else:
            current = default
    return current
35
+
36
+
37
def get_tasks():
    """Return the French display name of every configured task, in config order."""
    return [spec["name"]["fr"] for spec in config["tasks"].values()]
42
+
43
+
44
def get_tasks_name():
    """Return the short name (used as tab label) of every configured task."""
    return [spec["shortname"] for spec in config["tasks"].values()]
49
+
50
+
51
def get_tasks_description():
    """Return the French description of every configured task."""
    return [spec["description"]["fr"] for spec in config["tasks"].values()]
56
+
57
+
58
def get_tasks_products():
    """Return the list of product names for every configured task."""
    return [spec["products"] for spec in config["tasks"].values()]
63
+
64
+
65
# Title/description: an env var (GRADIO_TITLE / GRADIO_DESCRIPTION) overrides
# the config file; a placeholder message is used when neither is set.
title = (
    os.getenv("GRADIO_TITLE")
    if "GRADIO_TITLE" in os.environ
    else config.get("title", "TITLE neither set in config file nor in ENV")
)
description = (
    os.getenv("GRADIO_DESCRIPTION")
    if "GRADIO_DESCRIPTION" in os.environ
    else config.get("description", "DESCRIPTION neither set in config file nor in ENV")
)
# Parallel, index-aligned lists describing each task (consumed by app.py).
tasks = get_tasks()
tasks_name = get_tasks_name()
tasks_description = get_tasks_description()
tasks_products = get_tasks_products()
logging.log(level=logging.INFO, msg=f"Parsed Data :")
logging.log(level=logging.INFO, msg=f"Title : {title}")
logging.log(level=logging.INFO, msg=f"Description : {description}")
logging.log(level=logging.INFO, msg=f"Tasks : {tasks}")
logging.log(level=logging.INFO, msg=f"Tasks name : {tasks_name}")
logging.log(level=logging.INFO, msg=f"Tasks descrption : {tasks_description}")
logging.log(level=logging.INFO, msg=f"Tasks products : {tasks_products}")
logging.log(level=logging.INFO, msg=f"End of Parsed Data\n")
configtest.json ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "title": "YVES ROCHER",
3
+ "description": "Démonstration des algos de reconnaissance d'étiquetage/bouchage correct",
4
+ "tasks": {
5
+ "task1": {
6
+ "shortname": "Étiquetage",
7
+ "name": {
8
+ "en": "Quality Control of Labels",
9
+ "fr": "Contrôle de l'Étiquetage"
10
+ },
11
+ "description": {
12
+ "en": "",
13
+ "fr": "Est-ce que l'étiquette est bien positionnée"
14
+ },
15
+ "products":[
16
+ "497 Pure Algue 200ml",
17
+ "505 Pure Calmille 200ml",
18
+ "614 Eco Douche 100ml",
19
+ "648 Hair Care 300ml"
20
+ ],
21
+ "models": {
22
+ "497 Pure Algue 200ml": {
23
+ "type": "classification",
24
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/po2xfZzK0KtWZUpXrFjZ/model.onnx",
25
+ "input_shape": 224,
26
+ "mean": [0.485, 0.456, 0.406],
27
+ "std": [0.229, 0.224, 0.225],
28
+ "class_names": ["Étiquetage incorrect", "Étiquetage correct"]
29
+ },
30
+ "505 Pure Calmille 200ml": {
31
+ "type": "classification",
32
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/po2xfZzK0KtWZUpXrFjZ/model.onnx",
33
+ "input_shape": 224,
34
+ "mean": [0.485, 0.456, 0.406],
35
+ "std": [0.229, 0.224, 0.225],
36
+ "class_names": ["Étiquetage incorrect", "Étiquetage correct"]
37
+ },
38
+ "614 Eco Douche 100ml": {
39
+ "type": "classification",
40
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/po2xfZzK0KtWZUpXrFjZ/model.onnx",
41
+ "input_shape": 224,
42
+ "mean": [0.485, 0.456, 0.406],
43
+ "std": [0.229, 0.224, 0.225],
44
+ "class_names": ["Étiquetage incorrect", "Étiquetage correct"]
45
+ },
46
+ "648 Hair Care 300ml": {
47
+ "type": "classification",
48
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/po2xfZzK0KtWZUpXrFjZ/model.onnx",
49
+ "input_shape": 224,
50
+ "mean": [0.485, 0.456, 0.406],
51
+ "std": [0.229, 0.224, 0.225],
52
+ "class_names": ["Étiquetage incorrect", "Étiquetage correct"]
53
+ }
54
+ }
55
+ },
56
+ "task2": {
57
+ "shortname": "Bouchage",
58
+ "name": {
59
+ "en": "Quality Control of Cork Screwing",
60
+ "fr": "Contrôle du Bouchage"
61
+ },
62
+ "description": {
63
+ "en": "",
64
+ "fr": "Est-ce que le bouchon est bien positionné et entièrement vissé"
65
+ },
66
+ "products":[
67
+ "187 Hamamelis 300ml",
68
+ "550 SVC 300ml",
69
+ "600 PN 500 ml"
70
+ ],
71
+ "models": {
72
+ "187 Hamamelis 300ml":{
73
+ "type": "classification",
74
+ "path": "/Users/bastien/Downloads/model_corck_screwing.onnx",
75
+ "input_shape": 256,
76
+ "mean": [0.485, 0.456, 0.406],
77
+ "std": [0.229, 0.224, 0.225],
78
+ "class_names": ["Bouchage incorrect", "Bouchage correct"]
79
+ },
80
+ "550 SVC 300ml": {
81
+ "type": "classification",
82
+ "path": "/Users/bastien/Downloads/model_corck_screwing.onnx",
83
+ "input_shape": 256,
84
+ "mean": [0.485, 0.456, 0.406],
85
+ "std": [0.229, 0.224, 0.225],
86
+ "class_names": ["Bouchage incorrect", "Bouchage correct"]
87
+ },
88
+ "600 PN 500 ml": {
89
+ "type": "classification",
90
+ "path": "/Users/bastien/Downloads/model_corck_screwing.onnx",
91
+ "input_shape": 256,
92
+ "mean": [0.485, 0.456, 0.406],
93
+ "std": [0.229, 0.224, 0.225],
94
+ "class_names": ["Bouchage incorrect", "Bouchage correct"]
95
+ }
96
+ }
97
+ }
98
+ }
99
+ }
100
+
demo_full_yvesrocher.json ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "title": "YVES ROCHER",
3
+ "description": "Démonstration des algos de reconnaissance d'étiquetage/bouchage correct",
4
+ "tasks": {
5
+ "task1": {
6
+ "shortname": "Étiquetage",
7
+ "name": {
8
+ "en": "Quality Control of Labels",
9
+ "fr": "Contrôle de l'Étiquetage"
10
+ },
11
+ "description": {
12
+ "en": "Is the label in the right position ?",
13
+ "fr": "Est-ce que l'étiquette est bien positionnée ?"
14
+ },
15
+ "products":[
16
+ "497 Pure Algue 200ml",
17
+ "505 Pure Calmille 200ml",
18
+ "614 Eco Douche 100ml",
19
+ "648 Hair Care 300ml"
20
+ ],
21
+ "models": {
22
+ "497 Pure Algue 200ml": {
23
+ "type": "classification",
24
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/po2xfZzK0KtWZUpXrFjZ/model.onnx",
25
+ "input_shape": 224,
26
+ "mean": [0.485, 0.456, 0.406],
27
+ "std": [0.229, 0.224, 0.225],
28
+ "class_names": ["Étiquetage correct", "Étiquetage incorrect"]
29
+ },
30
+ "505 Pure Calmille 200ml": {
31
+ "type": "classification",
32
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/po2xfZzK0KtWZUpXrFjZ/model.onnx",
33
+ "input_shape": 224,
34
+ "mean": [0.485, 0.456, 0.406],
35
+ "std": [0.229, 0.224, 0.225],
36
+ "class_names": ["Étiquetage correct", "Étiquetage incorrect"]
37
+ },
38
+ "614 Eco Douche 100ml": {
39
+ "type": "classification",
40
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/po2xfZzK0KtWZUpXrFjZ/model.onnx",
41
+ "input_shape": 224,
42
+ "mean": [0.485, 0.456, 0.406],
43
+ "std": [0.229, 0.224, 0.225],
44
+ "class_names": ["Étiquetage correct", "Étiquetage incorrect"]
45
+ },
46
+ "648 Hair Care 300ml": {
47
+ "type": "classification",
48
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/po2xfZzK0KtWZUpXrFjZ/model.onnx",
49
+ "input_shape": 224,
50
+ "mean": [0.485, 0.456, 0.406],
51
+ "std": [0.229, 0.224, 0.225],
52
+ "class_names": ["Étiquetage correct", "Étiquetage incorrect"]
53
+ }
54
+ }
55
+ },
56
+ "task2": {
57
+ "shortname": "Bouchage",
58
+ "name": {
59
+ "en": "Quality Control of Cork Screwing",
60
+ "fr": "Contrôle du Bouchage"
61
+ },
62
+ "description": {
63
+ "en": "Is the cork in the right position ?",
64
+ "fr": "Est-ce que le bouchon est bien positionné et entièrement vissé ?"
65
+ },
66
+ "products":[
67
+ "187 Hamamelis 300ml",
68
+ "550 SVC 300ml",
69
+ "600 PN 500 ml"
70
+ ],
71
+ "models": {
72
+ "187 Hamamelis 300ml":{
73
+ "type": "classification",
74
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/Qp6BRHBcLq7KKxqCWqmV/model.onnx",
75
+ "input_shape": 224,
76
+ "mean": [0.485, 0.456, 0.406],
77
+ "std": [0.229, 0.224, 0.225],
78
+ "class_names": ["Bouchage correct", "Bouchage incorrect"]
79
+ },
80
+ "550 SVC 300ml": {
81
+ "type": "classification",
82
+ "path": "/Users/bastien/Downloads/model_corck_screwing.onnx",
83
+ "input_shape": 256,
84
+ "mean": [0.485, 0.456, 0.406],
85
+ "std": [0.229, 0.224, 0.225],
86
+ "class_names": ["Bouchage correct", "Bouchage incorrect"]
87
+ },
88
+ "600 PN 500 ml": {
89
+ "type": "classification",
90
+ "path": "/Users/bastien/Downloads/model_corck_screwing.onnx",
91
+ "input_shape": 256,
92
+ "mean": [0.485, 0.456, 0.406],
93
+ "std": [0.229, 0.224, 0.225],
94
+ "class_names": ["Bouchage correct", "Bouchage incorrect"]
95
+ }
96
+ }
97
+ }
98
+ }
99
+ }
100
+
demo_yvesrocher.json ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "title": "YVES ROCHER",
3
+ "description": "Démonstration des algos de reconnaissance d'étiquetage/bouchage correct",
4
+ "tasks": {
5
+ "task1": {
6
+ "shortname": "Étiquetage",
7
+ "name": {
8
+ "en": "Quality Control of Labels",
9
+ "fr": "Contrôle de l'Étiquetage"
10
+ },
11
+ "description": {
12
+ "en": "Is the label in the right position ?",
13
+ "fr": "Est-ce que l'étiquette est bien positionnée ?"
14
+ },
15
+ "products":[
16
+ "505 Pure Calmille 200ml"
17
+ ],
18
+ "models": {
19
+ "505 Pure Calmille 200ml": {
20
+ "type": "classification",
21
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/dmeQ6Mae0HKDwkHBTdh2/model.onnx",
22
+ "input_shape": 224,
23
+ "mean": [0.485, 0.456, 0.406],
24
+ "std": [0.229, 0.224, 0.225],
25
+ "class_names": ["Étiquetage correct", "Étiquetage incorrect"]
26
+ }
27
+ }
28
+ },
29
+ "task2": {
30
+ "shortname": "Bouchage",
31
+ "name": {
32
+ "en": "Quality Control of Corck Screwing",
33
+ "fr": "Contrôle du Bouchage"
34
+ },
35
+ "description": {
36
+ "en": "Is the cork in the right position ?",
37
+ "fr": "Est-ce que le bouchon est bien positionné et entièrement vissé ?"
38
+ },
39
+ "products":[
40
+ "187 Hamamelis 300ml"
41
+ ],
42
+ "models": {
43
+ "187 Hamamelis 300ml":{
44
+ "type": "classification",
45
+ "path": "https://automi-test-models.s3.eu-west-3.amazonaws.com/inference-pipeline/3nkoTiBxDNBzqyGtE5w8/model.onnx",
46
+ "input_shape": 224,
47
+ "mean": [0.485, 0.456, 0.406],
48
+ "std": [0.229, 0.224, 0.225],
49
+ "class_names": ["Bouchage correct", "Bouchage incorrect"]
50
+ }
51
+ }
52
+ }
53
+ }
54
+ }
55
+
docker-compose.yaml ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
version: '3.8'

services:
  magnetoscopie:
    image: openvino_inference:1.0.1
    container_name: gradio_showcase_onnx_model
    volumes:
      # Mount local model files into the container.
      - ./models:/opt/app/models
    ports:
      # Host 8150 -> container 80 (the port EXPOSEd by the Dockerfile).
      - "8150:80"
    env_file:
      - .env
    restart: unless-stopped
inferencer.py ADDED
@@ -0,0 +1,136 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # pylint: disable=no-name-in-module
2
+ # pylint: disable=no-member
3
+
4
+ """
5
+ Author : Bastien GUILLAUME
6
+ Version : 0.0.1
7
+ Date : 2023-03-16
8
+
9
+ Title : Inference With Gradio running an onnxruntime backend
10
+ """
11
+
12
+ import numpy as np
13
+ import onnxruntime as ort
14
+ import requests
15
+ from config_parser import *
16
+ from torchvision import transforms
17
+
18
+ corck_screwing_metadata = {
19
+ "image_threshold": 0.9247307181358337,
20
+ "pixel_threshold": 0.9247307181358337,
21
+ "min": 4.034907666260021e-26,
22
+ "max": 0.998478353023529,
23
+ }
24
+
25
+ inferencer_arr = {}
26
+ logging.log(level=logging.INFO, msg="Loading models...")
27
+ for task in config["tasks"].keys():
28
+ inferencer_arr[task] = {}
29
+ r = None
30
+ for model in config["tasks"][task]["models"]:
31
+ product_name = model
32
+ model_path = config["tasks"][task]["models"][product_name]["path"]
33
+ logging.log(
34
+ level=logging.INFO,
35
+ msg=f"Loading labeling model for product {product_name}",
36
+ )
37
+ inferencer_arr[task][product_name] = {}
38
+ if model_path.startswith("http"):
39
+ r = requests.get(model_path, stream=True).content
40
+ inferencer_arr[task][product_name][product_name] = ort.InferenceSession(
41
+ r if model_path.startswith("http") else model_path
42
+ )
43
+ inferencer_arr[task][product_name]["input_name"] = (
44
+ inferencer_arr[task][product_name][product_name].get_inputs()[0].name
45
+ )
46
+ inferencer_arr[task][product_name]["output_name"] = (
47
+ inferencer_arr[task][product_name][product_name].get_outputs()[0].name
48
+ )
49
+ logging.log(level=logging.INFO, msg=f"Model for {product_name} loaded\n")
50
+ logging.log(level=logging.INFO, msg="All models loaded...\n\n")
51
+ logging.log(level=logging.DEBUG, msg=f"Inferencer Array : {inferencer_arr}")
52
+
53
+
54
+ def softmax(x):
55
+ e_x = np.exp(x - np.max(x))
56
+ return e_x / e_x.sum(axis=0)
57
+
58
+
59
+ def is_anomalous_classification(prediction, meta_data):
60
+ pred_label = None
61
+ pred_score = prediction.reshape(-1).max()
62
+ if "image_threshold" in meta_data:
63
+ pred_label = (
64
+ config["tasks"][task]["models"][model]["class_names"][0]
65
+ if (pred_score >= meta_data["image_threshold"])
66
+ else config["tasks"][task]["models"][model]["class_names"][1]
67
+ )
68
+ return pred_label, pred_score
69
+
70
+
71
+ def analysis(task, use_case, image):
72
+ """
73
+ Main function that process inference and return results strings
74
+ Args:
75
+ - task
76
+ - use case
77
+ - image
78
+ Returns:
79
+ - String including label and confidence of the model
80
+ """
81
+
82
+ input_image = pre_process_all(task, use_case, image)
83
+ result = inference(task, use_case, input_image)
84
+ logging.log(level=logging.DEBUG, msg=result)
85
+ return result
86
+
87
+
88
+ def pre_process_all(task, use_case, image):
89
+ preprocessed_image = []
90
+ input_shape = config["tasks"][task]["models"][use_case]["input_shape"]
91
+ mean = config["tasks"][task]["models"][use_case]["mean"]
92
+ std = config["tasks"][task]["models"][use_case]["std"]
93
+ logging.log(level=logging.DEBUG, msg=f"Shape {input_shape}")
94
+ logging.log(level=logging.DEBUG, msg=f"Mean {mean}")
95
+ logging.log(level=logging.DEBUG, msg=f"Std {std}")
96
+ data_transforms = transforms.Compose(
97
+ [
98
+ transforms.ToPILImage(),
99
+ transforms.Resize(input_shape),
100
+ transforms.CenterCrop(input_shape),
101
+ transforms.ToTensor(),
102
+ transforms.Normalize(mean, std),
103
+ ]
104
+ )
105
+ preprocessed_image = data_transforms(image).detach().numpy()
106
+ preprocessed_image = np.expand_dims(preprocessed_image, axis=0)
107
+ logging.log(level=logging.DEBUG, msg=preprocessed_image)
108
+ return preprocessed_image
109
+
110
+
111
+ def inference(task, model, input_image):
112
+ """
113
+ Process inference for bottle labels
114
+ Args:
115
+ - task
116
+ - use_case (product)
117
+ - image
118
+ Returns:
119
+ - String including label and confidence of the model
120
+ """
121
+
122
+ prediction = inferencer_arr[task][model][model].run(
123
+ [inferencer_arr[task][model]["output_name"]],
124
+ {inferencer_arr[task][model]["input_name"]: input_image},
125
+ )
126
+ prediction = prediction[0].squeeze()
127
+ logging.log(level=logging.INFO, msg=f"Softmaxed prediction {softmax(prediction)}")
128
+ if config["tasks"][task]["models"][model]["type"] == "classification":
129
+ result = f"{config['tasks'][task]['models'][model]['class_names'][np.argmax(prediction)]} avec une confiance de {str(round(softmax(prediction)[np.argmax(prediction)]*100))} %"
130
+ elif task == "task2":
131
+ label, score = is_anomalous_classification(prediction, corck_screwing_metadata)
132
+ result = f"{label} avec une confiance de {str(round(score*100))} %"
133
+ else:
134
+ result = "Houston, we've got a problem"
135
+ logging.log(level=logging.DEBUG, msg=result)
136
+ return result
liccheck.ini ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authorized and unauthorized licenses in LOWER CASE
2
+ [Licenses]
3
+ authorized_licenses:
4
+ bsd
5
+ new bsd
6
+ bsd license
7
+ new bsd license
8
+ simplified bsd
9
+ apache
10
+ apache 2.0
11
+ apache software license
12
+ gnu lgpl
13
+ lgpl with exceptions or zpl
14
+ isc license
15
+ isc license (iscl)
16
+ mit
17
+ mit license
18
+ python software foundation
19
+ python software foundation license
20
+ zpl 2.1
21
+ historical permission notice and disclaimer
22
+
23
+ unauthorized_licenses:
24
+ gpl v3
25
+
26
+ [Authorized Packages]
27
+ aiohttp: 3.8.1
28
+ aiosignal: 1.2.0
29
+ async-timeout: 4.0.2
30
+ bcrypt: 3.2.0
31
+ certifi: 2021.10.8
32
+ frozenlist: 1.3.0
33
+ gradio: 2.9.1
34
+ monotonic: 1.6
35
+ multidict: 6.0.2
36
+ openvino: 2022.1.0
37
+ paramiko: 2.10.3
38
+ Pillow: 9.1.0
39
+ PyNaCl: 1.5.0
40
+ python-multipart: 0.0.5
41
+ requests: 2.27.1
42
+ yarl: 1.7.2
43
+
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
[tool.poetry]
name = "Model Demo with Gradio"
version = "0.0.1"
description = ""
authors = ["bastien@automi.ai"]

[tool.poetry.dependencies]
python = "^3.8, <3.11"
gradio = "^3.23.0"
# NOTE(review): `logging` is part of the Python standard library; the PyPI
# "logging" package is an obsolete Python 2 backport — this pin should be removed.
logging = "^0.4.9.6"
numpy = "^1.24.2"
onnxruntime = "^1.14.1"
requests = "^2.28.2"
torchvision = "^0.15.1"

[tool.poetry.group.dev.dependencies]
black = "^23.1.0"
liccheck = "^0.8.3"
pylint = "^2.17.0"
pytest = "^7.2.2"

[build-system]
requires = ["poetry-core>=1.0.0"]
build-backend = "poetry.core.masonry.api"
25
+
26
+ #[tool.liccheck]
27
+ #authorized_licenses = [
28
+ # "bsd",
29
+ # "new bsd",
30
+ # "bsd license",
31
+ # "new bsd license",
32
+ # "simplified bsd",
33
+ # "apache",
34
+ # "apache 2.0",
35
+ # "apache software license",
36
+ # "gnu lgpl",
37
+ # "lgpl with exceptions or zpl",
38
+ # "isc license",
39
+ # "isc license (iscl)",
40
+ # "mit",
41
+ # "mit license",
42
+ # "python software foundation",
43
+ # "python software foundation license",
44
+ # "zpl 2.1",
45
+ # "historical permission notice and disclaimer",
46
+ # "gpl v3"
47
+ #]
48
+ #unauthorized_licenses = [""]
49
+ #authorized_packages = [
50
+ # "aiohttp: *",
51
+ # "aiosignal: *",
52
+ # "async-timeout: *",
53
+ # "bcrypt: *",
54
+ # "certifi: *",
55
+ # "frozenlist: *",
56
+ # "gradio: *",
57
+ # "monotonic: *",
58
+ # "multidict: *",
59
+ # "openvino: *",
60
+ # "paramiko: *",
61
+ # "Pillow: *",
62
+ # "PyNaCl: *",
63
+ # "python-multipart: *",
64
+ # "requests: *",
65
+ # "yarl: *"
66
+ #]
requirements.txt ADDED
The diff for this file is too large to render. See raw diff