Isaacgv committed on
Commit 17979b3
1 Parent(s): 5b24840
Files changed (4)
  1. app.py +3 -4
  2. benchmarker.py +85 -0
  3. build_image.sh +8 -0
  4. inferencer.py +41 -12
app.py CHANGED
@@ -23,8 +23,6 @@ sys.path.append(".")
 from config_parser import *
 from inferencer import *
 
-
-
 gr.close_all()
 
 
@@ -91,6 +89,7 @@ def create_interface(task_number, product, model_number):
     model_name = (
         f'Résultats of {model_info["name"]}' if "name" in model_info else "Résultats"
     )
+    model_uuid = model_info["path"].split("/")[-2:-1][0]
     task_info = config["tasks"][f"task{task_number+1}"]
     product_examples = (
         [
@@ -111,7 +110,7 @@ def create_interface(task_number, product, model_number):
     created_interface = gr.Interface(
         title=title,
         description=description,
-        fn=inferencer_arr[f"task{task_number+1}"][product][str(model_number)][
+        fn=inferencer_arr[f"task{task_number+1}"][product][str(model_number)][model_uuid][
            "function"
        ],
        # fn=lambda x: x,
@@ -211,5 +210,5 @@ if __name__ == "__main__":
     iface_generated.launch(
         server_name="0.0.0.0"
     )
-    shutil.rmtree("examples")
+    # shutil.rmtree("examples")
     logging.log(level=logging.INFO, msg="Stopping the Gradio server...")
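A note on the model_uuid expression introduced above: path.split("/")[-2:-1][0] selects the second-to-last path segment, i.e. the directory holding the model file. A minimal sketch with a hypothetical path (real paths come from the config):

# Hypothetical model path for illustration only.
model_path = "https://storage.example.com/models/5f3a9c1e/model.onnx"
# split("/") yields [..., "5f3a9c1e", "model.onnx"]; [-2:-1] keeps the parent
# directory as a one-element list and [0] unwraps it.
model_uuid = model_path.split("/")[-2:-1][0]
print(model_uuid)  # 5f3a9c1e

For any path with at least two segments this is equivalent to the plainer model_path.split("/")[-2].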
benchmarker.py ADDED
@@ -0,0 +1,85 @@
+"""
+Author : Bastien GUILLAUME
+Version : 0.0.1
+Date : 2023-03-16
+
+Title : Benchmark ONNX models from a config file made for gradio_interfacer
+"""
+
+import os
+
+from config_parser import *
+from inferencer import *
+from pathlib import Path
+
+
+def format_examples(task_number, product, product_example):
+    examples_folder = Path(f"examples/{product}")
+    os.makedirs(examples_folder, exist_ok=True)
+    filepath = examples_folder / product_example.split("/")[-1]
+    if not filepath.exists():
+        # Only download when the example is not already cached locally.
+        response = requests.get(product_example)
+        with open(filepath, "wb") as f:
+            f.write(response.content)
+    return [f"task{task_number+1}", product, filepath]
+
+
+def benchmark_models(task_number, product):
+    logging.log(level=logging.INFO, msg="Entering benchmark_models")
+    models_to_benchmark = config["tasks"][f"task{task_number+1}"]["models"][product]
+    number_of_models = len(models_to_benchmark)
+    task_info = config["tasks"][f"task{task_number+1}"]
+    result = []
+    product_examples = (
+        [
+            format_examples(task_number, product, product_example)
+            for product_example in task_info["examples"][product]
+        ]
+        if "examples" in task_info and product in task_info["examples"]
+        else []
+    )
+    for model in models_to_benchmark:
+        for product_example in product_examples:
+            result.append(
+                inference(task_number, product, product_example, number_of_models)
+            )
+    return result
+
+
+benchmark_builder_list = []
+benchmark_builder_dict = {}
+logging.log(level=logging.INFO, msg="Building Interfaces")
+logging.log(level=logging.INFO, msg=f"Number of task(s) : {len(tasks)}")
+for task_number in range(0, len(tasks)):
+    logging.log(level=logging.INFO, msg=f"Treating task n°{task_number+1}")
+    benchmark_builder_dict[tasks[task_number]] = {}
+    product_list = list(config["tasks"][f"task{task_number+1}"]["models"].keys())
+    logging.log(level=logging.DEBUG, msg=f"Products : {product_list}")
+    benchmark_builder_product_level_list = []
+    for product in product_list:
+        logging.log(level=logging.INFO, msg=f"Product : {product}")
+        benchmark_builder_dict[tasks[task_number]][product] = []
+        if len(config["tasks"][f"task{task_number+1}"]["models"][product]) > 1:
+            generated_parallel_interface = benchmark_models(
+                task_number, product
+            )
+            benchmark_builder_dict[tasks[task_number]][product].append(
+                generated_parallel_interface
+            )
+            benchmark_builder_product_level_list.append(
+                generated_parallel_interface
+            )
+        else:
+            generated_interface = create_interface(
+                task_number=task_number, product=product, model_number=0
+            )
+            benchmark_builder_dict[tasks[task_number]][product].append(
+                generated_interface
+            )
+            benchmark_builder_product_level_list.append(generated_interface)
+    benchmark_builder_list.append(
+        gr.TabbedInterface(
+            interface_list=benchmark_builder_product_level_list,
+            tab_names=product_list,
+        )
+    )
+logging.log(level=logging.INFO, msg="Interfaces ready\n")
+logging.log(level=logging.DEBUG, msg=f"Interfaces List {benchmark_builder_list}")
+# logging.log(level=logging.INFO, msg=f"Interfaces Dict {interface_dict}")
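A quick sketch of how the cached examples are meant to be used, with a hypothetical URL and product key (real values come from config["tasks"][...]["examples"]; the entry layout matches what format_examples returns above):

# Hypothetical values for illustration only.
entry = format_examples(0, "cork_screwing", "https://example.com/images/sample_001.png")
print(entry)  # ['task1', 'cork_screwing', PosixPath('examples/cork_screwing/sample_001.png')]
# A second call finds examples/cork_screwing/sample_001.png on disk and skips the download.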
build_image.sh ADDED
@@ -0,0 +1,8 @@
+#!/bin/bash -e
+image_name=gcr.io/tough-variety-310920/openvino_inference
+image_tag=1.0.1
+full_image_name=${image_name}:${image_tag}
+
+cd "$(dirname "$0")"
+docker build -t "${full_image_name}" .
+docker push "$full_image_name"
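Two details in the script are worth noting: the -e flag on the shebang line makes bash abort on the first failing command, so a failed build is never pushed, and cd "$(dirname "$0")" sets the Docker build context to the script's own directory, so the script can be invoked from anywhere.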
inferencer.py CHANGED
@@ -9,6 +9,8 @@ Date : 2023-03-16
 Title : Inference With Gradio running an onnxruntime backend
 """
 
+from pathlib import Path
+
 import numpy as np
 import onnxruntime as ort
 import requests
@@ -27,7 +29,9 @@ def make_func(task, product, model_number):
         Returns:
         - String including label and confidence of the model
         """
-        input_image = pre_process_all(task=task, product=product, model_number=model_number, image=image)
+        input_image = pre_process_all(
+            task=task, product=product, model_number=model_number, image=image
+        )
         result = inference(task, product, input_image, model_number=model_number)
         logging.log(level=logging.DEBUG, msg=result)
         return result
@@ -35,6 +39,22 @@ def make_func(task, product, model_number):
     return _analysis
 
 
+def download_models(product, model, model_uuid):
+    logging.log(level=logging.DEBUG, msg=model)
+    models_folder = Path(f"models/{product}/{model_uuid}")
+    os.makedirs(models_folder, exist_ok=True)
+    filepath = models_folder / model.split("/")[-1]
+    logging.log(level=logging.DEBUG, msg=filepath)
+    if not filepath.exists():
+        # Only download when the model is not already cached locally.
+        response = requests.get(model, stream=True).content
+        with open(filepath, "xb") as f:
+            f.write(response)
+    return filepath
+
+
 corck_screwing_metadata = {
     "image_threshold": 0.9247307181358337,
     "pixel_threshold": 0.9247307181358337,
@@ -52,26 +72,34 @@ for task in config["tasks"].keys():
         for model_number in range(len(config["tasks"][task]["models"][product])):
             model = config["tasks"][task]["models"][product][model_number]
             model_path = model["path"]
+            model_uuid = model_path.split("/")[-2:-1][0]
             logging.log(
                 level=logging.INFO,
                 msg=f"Loading model for product {product}, version {model_number}",
             )
+            logging.log(
+                level=logging.INFO,
+                msg=f"Model UUID {model_uuid}",
+            )
             inferencer_arr[task][product][str(model_number)] = {}
             if model_path.startswith("http"):
-                r = requests.get(model_path, stream=True).content
-            inferencer_arr[task][product][str(model_number)][
+                # r = requests.get(model_path, stream=True).content
+                model_path = download_models(product, model_path, model_uuid)
+            inferencer_arr[task][product][str(model_number)][model_uuid] = {}
+            inferencer_arr[task][product][str(model_number)][model_uuid][
                 "model"
-            ] = ort.InferenceSession(r if model_path.startswith("http") else model_path)
-            inferencer_arr[task][product][str(model_number)]["function"] = make_func(
+                # ] = ort.InferenceSession(r if model_path.startswith("http") else model_path)
+            ] = ort.InferenceSession(model_path.as_posix())
+            inferencer_arr[task][product][str(model_number)][model_uuid]["function"] = make_func(
                 task, product, model_number
             )
-            inferencer_arr[task][product][str(model_number)]["input_name"] = (
-                inferencer_arr[task][product][str(model_number)]["model"]
+            inferencer_arr[task][product][str(model_number)][model_uuid]["input_name"] = (
+                inferencer_arr[task][product][str(model_number)][model_uuid]["model"]
                 .get_inputs()[0]
                 .name
             )
-            inferencer_arr[task][product][str(model_number)]["output_name"] = (
-                inferencer_arr[task][product][str(model_number)]["model"]
+            inferencer_arr[task][product][str(model_number)][model_uuid]["output_name"] = (
+                inferencer_arr[task][product][str(model_number)][model_uuid]["model"]
                 .get_outputs()[0]
                 .name
             )
@@ -155,10 +183,11 @@ def inference(task, product, input_image, model_number):
     logging.log(level=logging.INFO, msg=f"Task {task}")
     logging.log(level=logging.INFO, msg=f"Product {product}")
    logging.log(level=logging.INFO, msg=f"Model {model_number}")
+    model_uuid = config["tasks"][task]["models"][product][int(model_number)]["path"].split("/")[-2:-1][0]
     result = "Algorithm not yet supported"
-    prediction = inferencer_arr[task][product][str(model_number)]["model"].run(
-        [inferencer_arr[task][product][str(model_number)]["output_name"]],
-        {inferencer_arr[task][product][str(model_number)]["input_name"]: input_image},
+    prediction = inferencer_arr[task][product][str(model_number)][model_uuid]["model"].run(
+        [inferencer_arr[task][product][str(model_number)][model_uuid]["output_name"]],
+        {inferencer_arr[task][product][str(model_number)][model_uuid]["input_name"]: input_image},
     )
     prediction = prediction[0].squeeze()
     model_type = config["tasks"][task]["models"][product][int(model_number)]["type"]
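The per-model entries built above follow the standard onnxruntime pattern: open an InferenceSession, read the graph's input and output names, then call run. A minimal standalone sketch with a hypothetical model file and a dummy input (the real input shape depends on the model and on pre_process_all):

import numpy as np
import onnxruntime as ort

# Hypothetical path mirroring the models/{product}/{model_uuid}/ layout
# that download_models creates.
session = ort.InferenceSession("models/cork_screwing/5f3a9c1e/model.onnx")
input_name = session.get_inputs()[0].name
output_name = session.get_outputs()[0].name

# Dummy NCHW float input; the shape here is an assumption for illustration.
dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)
prediction = session.run([output_name], {input_name: dummy})
print(prediction[0].squeeze().shape)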