DanielFD committed on
Commit
ebc51c9
·
1 Parent(s): 36ccca1

Upload 4 files

Browse files
Files changed (4) hide show
  1. app.py +48 -0
  2. f_measurents.py +490 -0
  3. f_segment_img.py +181 -0
  4. ipd_gradio.ipynb +183 -0
app.py ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from f_segment_img import *
2
+ from f_measurents import *
3
+ import gradio as gr
4
+ import dotenv
5
+ import ast
6
+ dotenv.load_dotenv()
7
+ #
8
def create_sam():
    """Load the Segment Anything (SAM) ViT-H model from a local checkpoint.

    Returns the instantiated SAM model. The checkpoint file
    ``sam_vit_h_4b8939.pth`` must exist in the working directory.
    """
    sam_checkpoint = "sam_vit_h_4b8939.pth"
    model_type = "vit_h"
    # NOTE(review): the original also set device = "cuda" but never used it;
    # the model is returned on its default device. Removed the dead local.
    sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
    return sam
13
+
14
def plt2arr(fig, draw=True):
    """Render a Matplotlib figure into an (H, W, 4) uint8 RGBA numpy array.

    When *draw* is true the canvas is (re)drawn first so the buffer is current.
    """
    if draw:
        fig.canvas.draw()
    width, height = fig.canvas.get_width_height()
    rgba_buffer = fig.canvas.buffer_rgba()
    return np.frombuffer(rgba_buffer, dtype=np.uint8).reshape((height, width, 4))
20
+
21
def frame_size_width_mm(dropdown_label):
    """Map the Gradio frame-size dropdown label to the frame width in mm.

    Raises ValueError for an unknown label (the original if/elif chain raised
    an opaque UnboundLocalError instead). The misleading ``frame_width_px``
    local was also renamed: the value is a physical width.
    """
    widths_mm = {
        'Small (xx mm)': 145,
        'Medium (xx mm)': 150,
        'Large (xx mm)': 155,
    }
    try:
        return widths_mm[dropdown_label]
    except KeyError:
        raise ValueError(f"Unknown frame size label: {dropdown_label!r}") from None
26
+
27
+ #
28
def ipd_app(image,dropdown_label):
    """Gradio handler: measure inter-pupillary distance (IPD) from a photo.

    image: input photo from the Gradio image widget.
    dropdown_label: selected frame size, used for px->mm calibration.
    Returns (ipd text, annotated image, measurements string, SAM check image).
    """
    # Measure image -- landmark indices come from the 'landmarks' environment
    # variable, parsed as a Python literal.
    landmarks = ast.literal_eval(os.environ['landmarks'])
    frame_processed, measurements = measure_landmarks_img(image, landmarks, plot_landmarks_on_img = True, plot_data_on_img = True)
    # Segment Frame -- NOTE(review): create_sam() reloads the checkpoint on
    # every request; consider caching the model.
    image, img_cropped, masks_selection, objects_segmented = segment_frame_from_img(image, landmarks, create_sam())
    # Calibrate measurements: convert the pixel IPD to mm using the frame's
    # known physical width.
    frame_width_px = get_frame_width(masks_selection)
    frame_width_mm = frame_size_width_mm(dropdown_label)
    ipd_mm = ipd_calibration(measurements['ipd_px'], frame_width_px, frame_width_mm)
    text_ipd = 'IPD: ' + str(round(ipd_mm,2)) + ' mm'
    # Check: render the segmentation sanity-check figure to a numpy array so
    # Gradio can display it.
    sam_check = plot_sam_check_segmentation_frame(image, img_cropped, objects_segmented)
    sam_check_numpy = plt2arr(sam_check, draw = True)
    return text_ipd, frame_processed, str(measurements), sam_check_numpy
44
+
45
+
46
# Gradio UI wiring: the dropdown's labels must match the keys understood by
# frame_size_width_mm(); outputs mirror ipd_app's 4-tuple return.
dropdown = gr.Dropdown(["Small (xx mm)", "Medium (xx mm)", "Large (xx mm)"], label="Refractives Frame Size", info="For calibration")
demo = gr.Interface(fn=ipd_app, inputs=["image",dropdown], outputs=["text", "image", "text", "image"])
demo.launch(debug=True)
f_measurents.py ADDED
@@ -0,0 +1,490 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+ import mediapipe as mp
4
+ import matplotlib.pyplot as plt
5
+ import math
6
+ import imutils
7
+ from IPython import display
8
+ import time
9
+ import pandas as pd
10
+ import plotly.express as px
11
+ import plotly.graph_objects as go
12
+ from PIL import Image, ExifTags
13
+
14
+ # ---------------------------------
15
+ # GEOMETRY TOOLS
16
+ # ---------------------------------
17
+
18
def distanceCalculate(p):
    """Euclidean distance between the two points in *p* (a pair of (x, y))."""
    (x1, y1), (x2, y2) = p[0], p[1]
    return math.hypot(x2 - x1, y2 - y1)
23
+
24
def angleLinePoints(p):
    """Angle in degrees of the line p[0]->p[1] relative to the x-axis.

    Result lies in [-90, 90]. Fix: the original computed np.arctan(d_y/d_x),
    which raises ZeroDivisionError for a vertical line when the coordinates
    are plain Python ints (with numpy ints it only warned and returned
    +/-inf, i.e. +/-90 degrees). The vertical case is now handled explicitly
    with the same +/-90 result.
    """
    (p1_x, p1_y), (p2_x, p2_y) = p[0], p[1]
    d_x = p2_x - p1_x
    d_y = p2_y - p1_y
    if d_x == 0:
        # Vertical line: sign of d_y picks +90 or -90, matching arctan(+/-inf).
        return math.copysign(90.0, d_y)
    return math.degrees(np.arctan(d_y / d_x))
39
+
40
def area_px_within_polyline(stacked_array):
    """Polygon area in px^2 via the shoelace formula.

    *stacked_array* is a sequence of (x, y) vertices in order.
    """
    xs = [vertex[0] for vertex in stacked_array]
    ys = [vertex[1] for vertex in stacked_array]
    cross = np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1))
    return 0.5 * np.abs(cross)
44
+
45
def focal_length_calculator(measured_distance, real_width, width_in_rf_image):
    """Estimate camera focal length from a reference photo.

    Uses the pinhole relation focal = (apparent_width * distance) / real_width.
    See https://www.geeksforgeeks.org/realtime-distance-estimation-using-opencv-python/
    """
    return width_in_rf_image * measured_distance / real_width
49
+
50
def distance_camera_to_face_calculator(focal_length, real_face_width, face_width_in_frame):
    """Estimate camera-to-face distance from apparent face width (pinhole model)."""
    return real_face_width * focal_length / face_width_in_frame
53
+
54
def put_text_args(height, width, n_lines, scale):
    """Derive drawing/text-layout parameters from the image size.

    Returns (font_scale, font_thickness, line_width, point_width,
    thickness_oval, x_position_0, y_position_v) where y_position_v holds one
    vertical text position per line (first entry is a float, the rest ints,
    matching the historical behaviour).
    """
    base = min(width, height)
    font_scale = int(base / (350 / scale))
    font_thickness = int(base / 500)
    line_width = font_thickness
    point_width = font_thickness
    thickness_oval = int(1.5 * font_thickness)
    # Text column starts 5% in from the left; lines are spread over 60% of the
    # image height below a 5% top padding.
    x_position_0 = int(width / 20)
    step = (height * 0.6) / n_lines
    y_position_v = [height / 20]
    for _ in range(1, n_lines):
        y_position_v.append(int(y_position_v[-1] + step))
    return font_scale, font_thickness, line_width, point_width, thickness_oval, x_position_0, y_position_v
72
+
73
+ # ---------------------------------
74
+ # IMAGE PROCESSING TOOLS
75
+ # ---------------------------------
76
+
77
def bgr_image(image):
    """Split a BGR image into its blue, green and red channel planes."""
    blue, green, red = cv2.split(image)
    return blue, green, red
82
+
83
def height_width_image(image):
    """Return (height, width) of an image array.

    Fix: the original unpacked ``height, width, _ = image.shape`` and
    therefore crashed on 2-D (grayscale) arrays; slicing the first two
    dimensions handles both grayscale and multi-channel images.
    """
    height, width = image.shape[:2]
    return height, width
87
+
88
def image_bgr_to_rgb(image):
    """Convert an OpenCV BGR image to RGB channel order."""
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
91
+
92
def image_rgb_to_bgr(image):
    """Convert an RGB image back to OpenCV's BGR channel order."""
    return cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
95
+
96
def extract_exif_metadata(image_path):
    """Return a dict of EXIF tag-name -> value for the image at *image_path*.

    Returns None (and prints a diagnostic) when the file cannot be opened or
    carries no EXIF data. Fixes: the original crashed into its except clause
    when ``_getexif()`` returned None, used an odd tuple-expression
    ``print(e), print(...)`` handler, and returned None only implicitly; the
    message typo ("availible") is also corrected.
    """
    try:
        img = Image.open(image_path)
        raw_exif = img._getexif()  # None when the image has no EXIF block
        if raw_exif is None:
            print("Check if EXIF data is available for this image.")
            return None
        return {ExifTags.TAGS[k]: v for k, v in raw_exif.items() if k in ExifTags.TAGS}
    except Exception as e:
        print(e)
        print("Check if EXIF data is available for this image.")
        return None
102
+
103
def focal_length_metadata(image_path):
    """Extract (FocalLength, FocalLengthIn35mmFilm) from the image's EXIF.

    Either value defaults to 0 when the EXIF data is missing or unreadable;
    failures are printed, never raised.
    """
    focal_length = 0
    focal_length_in_35mm_film = 0
    try:
        exif = extract_exif_metadata(image_path)
        focal_length = exif['FocalLength']
        focal_length_in_35mm_film = exif['FocalLengthIn35mmFilm']
    except Exception as e:
        print(e)
        print("Check if EXIF data is availible for this image.")
    return focal_length, focal_length_in_35mm_film
114
+
115
+
116
+ # ---------------------------------
117
+ # FACE LANDMARKS
118
+ # ---------------------------------
119
+
120
+
121
# Module-level MediaPipe FaceMesh instance shared by face_mesh_points().
# static_image_mode=True treats every input as an independent photo (no
# tracking); refine_landmarks=True adds the iris landmarks needed for IPD.
mp_face_mesh = mp.solutions.face_mesh
face_mesh = mp_face_mesh.FaceMesh(
    static_image_mode=True,
    max_num_faces=1,
    refine_landmarks=True,
    min_detection_confidence=0.5)
127
+
128
+
129
def face_mesh_points(image):
    """Run the shared FaceMesh model on an RGB image.

    Returns (result, mesh_points) where mesh_points is an (N, 2) int array of
    pixel coordinates for the first detected face. Returns (None, image) when
    no face is found -- NOTE(review): callers must check the first element;
    the second is the unmodified image, not landmarks.

    Fix: replaced the ``isinstance(face, type(None))`` anti-idiom with a
    direct ``is None`` test; behaviour is unchanged.
    """
    result = face_mesh.process(image)
    height, width = height_width_image(image)
    if result.multi_face_landmarks is None:
        return None, image
    mesh_points = np.array(
        [np.multiply([p.x, p.y], [width, height]).astype(int)
         for p in result.multi_face_landmarks[0].landmark])
    return result, mesh_points
138
+
139
def find_iris_location(mesh_points,landmarks):
    """Locate both iris centers and radii from the FaceMesh landmarks.

    Returns (iris_position, iris_radius) where iris_position is
    [center_left, center_right] as int32 (x, y) arrays and iris_radius the
    matching enclosing-circle radii.

    NOTE(review): the "left" center is computed from 'rightEyeIris' and vice
    versa -- possibly intentional (subject left vs. image left), but confirm
    against the landmark table in the 'landmarks' env var.
    """
    (l_cx, l_cy), l_radius = cv2.minEnclosingCircle(mesh_points[landmarks['rightEyeIris']])
    (r_cx, r_cy), r_radius = cv2.minEnclosingCircle(mesh_points[landmarks['leftEyeIris']])
    center_left_iris = np.array([l_cx, l_cy], dtype=np.int32)
    center_right_iris = np.array([r_cx, r_cy], dtype=np.int32)
    iris_position = [center_left_iris,center_right_iris]
    iris_radius = [l_radius, r_radius]
    return iris_position, iris_radius
148
+
149
+ # ---------------------------------
150
+ # INTERACTIVE PLOTTTIN - FACE LANDMARKS
151
+ # ---------------------------------
152
+
153
def mesh_points_to_df(mesh_points):
    """Convert an iterable of (x, y) landmark points to a DataFrame.

    Columns: X_pos, Y_pos and idx (the point's index). Improvement: the
    original grew the frame one cell at a time with ``DataFrame.at`` inside a
    loop, which is quadratic; a single constructor call builds it in one pass.
    """
    mesh_points = list(mesh_points)
    return pd.DataFrame({
        'X_pos': [point[0] for point in mesh_points],
        'Y_pos': [point[1] for point in mesh_points],
        'idx': range(len(mesh_points)),
    })
164
+
165
def mesh_points_interactive_plot(mesh_points):
    """Build an interactive Plotly scatter of the landmark points.

    The y-axis is reversed so the plot matches image coordinates (origin at
    the top-left). Side effect: writes output/landmarks_mesh.html --
    NOTE(review): the 'output' directory must already exist.
    """
    df_mesh_points = mesh_points_to_df(mesh_points)
    fig = px.scatter(df_mesh_points, y="Y_pos", x="X_pos",hover_data=['idx'])
    fig.update_traces(marker_size=5)
    fig['layout']['yaxis']['autorange'] = "reversed"
    fig.write_html("output/landmarks_mesh.html")
    return fig
172
+
173
def mesh_points_interactive_plot_with_image(mesh_points, image_path):
    """Overlay the landmark scatter on the source photo in a Plotly figure.

    The image is placed as a background layer scaled to its pixel size, and
    the y-axis runs top-down to match image coordinates. Side effects: writes
    HTML/PNG/SVG/PDF copies under output/ -- NOTE(review): the directory must
    exist, and the static-image exports require the kaleido package.
    """
    # Image is only read to obtain its dimensions; Plotly loads it from path.
    image_temp = cv2.imread(image_path)
    df_mesh_points = mesh_points_to_df(mesh_points)
    height, width = height_width_image(image_temp)
    fig = go.Figure()
    fig.add_layout_image(
        x=0,
        sizex=width,
        y=0,
        sizey=height,
        xref="x",
        yref="y",
        opacity=1.0,
        layer="below",
        source=image_path
    )
    fig.add_scatter(x=df_mesh_points['X_pos'],y=df_mesh_points['Y_pos'],mode="markers",marker=dict(size=1, color="Red"))
    fig.update_xaxes(showgrid=False, range=(0, width))
    # scaleanchor keeps the aspect ratio; reversed y range matches image rows.
    fig.update_yaxes(showgrid=False, scaleanchor='x', range=(height, 0))
    fig.update_layout(xaxis_range=[0,width])
    fig.write_html("output/landmarks_mesh_with_photo.html")
    fig.write_image("output/landmarks_mesh_with_photo.png")
    fig.write_image("output/landmarks_mesh_with_photo.svg")
    fig.write_image("output/landmarks_mesh_with_photo.pdf")
    return fig
199
+
200
+ # ---------------------------------
201
+ # MEASUREMENTS FACE LANDMARKS
202
+ # ---------------------------------
203
+
204
def get_face_dimensions_px(mesh_points, landmarks):
    """Face width and height in pixels from the reference landmark pairs."""
    return (
        distanceCalculate(mesh_points[landmarks['leftToRight']]),
        distanceCalculate(mesh_points[landmarks['topToBottom']]),
    )
208
+
209
def get_ipd_px(iris_position):
    """Inter-pupillary distance in pixels from the [left, right] iris centers."""
    left_center, right_center = iris_position[0], iris_position[1]
    return distanceCalculate([left_center, right_center])
214
+
215
def area_px_right_silhoutte_calc(mesh_points, landmarks):
    """Area in px^2 enclosed by the right-silhouette landmark polygon."""
    return area_px_within_polyline(mesh_points[landmarks['rightSilhouette']])
219
+
220
def area_px_left_silhoutte_calc(mesh_points, landmarks):
    """Area in px^2 enclosed by the left-silhouette landmark polygon."""
    return area_px_within_polyline(mesh_points[landmarks['leftSilhouette']])
224
+
225
def get_top_to_bottom_angle(mesh_points, landmarks):
    """Angle (degrees) of the top-to-bottom face axis vs. the x-axis."""
    return angleLinePoints(mesh_points[landmarks['topToBottom']])
228
+
229
def get_left_to_right_angle(mesh_points, landmarks):
    """Angle (degrees) of the left-to-right face axis vs. the x-axis."""
    return angleLinePoints(mesh_points[landmarks['leftToRight']])
232
+
233
def get_left_cheek_to_nose_angle(mesh_points, landmarks):
    """Angle (degrees) of the left-cheek-to-nose line vs. the x-axis."""
    # Local renamed: the original reused 'left_to_right_angle' here, which was misleading.
    left_cheek_to_nose_angle = angleLinePoints(mesh_points[landmarks['leftCheekToNose']])
    return left_cheek_to_nose_angle
236
+
237
def get_nose_to_right_cheek_angle(mesh_points, landmarks):
    """Angle (degrees) of the nose-to-right-cheek line vs. the x-axis."""
    # Local renamed: the original reused 'left_to_right_angle' here, which was misleading.
    nose_to_right_cheek_angle = angleLinePoints(mesh_points[landmarks['noseToRightCheek']])
    return nose_to_right_cheek_angle
240
+
241
+
242
+ # ---------------------------------
243
+ # DRAW LANDMARKS
244
+ # ---------------------------------
245
# Drawing colours used by the print_* helpers. Tuples are (R, G, B) order,
# since measure_landmarks_img converts the image to RGB before drawing --
# NOTE(review): confirm; OpenCV's native order is BGR.
colour_line = (255, 0, 0)
colour_point = (255, 0, 0)
colour_rectangle = (255, 255, 255)
colour_oval = (255, 255, 255)
colour_point_iris = (0, 255, 0)
250
+
251
def print_face_mesh_image(image, result):
    """Draw every base FaceMesh landmark (indices 0..467) as a dot on the image.

    Relies on the module global point_width set by define_plotting_properties().
    Note: the iris landmarks (468+) added by refine_landmarks are skipped here.
    """
    height, width = height_width_image(image)
    for facial_landmarks in result.multi_face_landmarks:
        for i in range(0, 468):
            pt1 = facial_landmarks.landmark[i]
            # Landmarks are normalised [0,1]; scale to pixel coordinates.
            x = int(pt1.x * width)
            y = int(pt1.y * height)
            image = cv2.circle(image, (x, y), 1, colour_point, point_width)
    return image
260
+
261
+
262
def print_iris_location(image, mesh_points, landmarks):
    """Outline both iris landmark polygons on the image.

    Uses module globals colour_line/line_width set by define_plotting_properties().
    """
    image = cv2.polylines(image, [mesh_points[landmarks['rightEyeIris']]], 1, colour_line, line_width)
    image = cv2.polylines(image, [mesh_points[landmarks['leftEyeIris']]], 1, colour_line, line_width)
    return image
266
+
267
def print_center_iris(image, iris_position):
    """Mark both iris centers with enlarged dots.

    Uses the module global point_width set by define_plotting_properties().
    """
    center_left_iris = iris_position[0]
    center_right_iris = iris_position[1]
    image = cv2.circle(image, center_left_iris, 1, colour_point_iris, 10*point_width)
    image = cv2.circle(image, center_right_iris, 1, colour_point_iris, 10*point_width)
    return image
273
+
274
def print_line_left_to_right_iris(image, iris_position):
    """Draw the IPD line connecting the two iris centers.

    Uses module globals colour_line/line_width set by define_plotting_properties().
    """
    center_left_iris = iris_position[0]
    center_right_iris = iris_position[1]
    image = cv2.line(image, center_left_iris, center_right_iris, colour_line, line_width)
    return image
280
+
281
def print_line_top_to_bottom(image, mesh_points, landmarks):
    """Draw the vertical face-axis polyline (uses module drawing globals)."""
    image = cv2.polylines(image, [mesh_points[landmarks['topToBottom']]], 1, colour_line, line_width)
    return image
284
+
285
def print_line_left_to_right(image, mesh_points, landmarks):
    """Draw the horizontal face-axis polyline (uses module drawing globals)."""
    image = cv2.polylines(image, [mesh_points[landmarks['leftToRight']]], 1, colour_line, line_width)
    return image
288
+
289
def print_line_left_cheek_to_nose(image, mesh_points, landmarks):
    """Draw the left-cheek-to-nose polyline (uses module drawing globals)."""
    image = cv2.polylines(image, [mesh_points[landmarks['leftCheekToNose']]], 1, colour_line, line_width)
    return image
292
+
293
def print_line_nose_to_right_cheek(image, mesh_points, landmarks):
    """Draw the nose-to-right-cheek polyline (uses module drawing globals)."""
    image = cv2.polylines(image, [mesh_points[landmarks['noseToRightCheek']]], 1, colour_line, line_width)
    return image
296
+
297
def print_silhouette(image, mesh_points, landmarks):
    """Outline the full face silhouette polygon (uses module drawing globals)."""
    image = cv2.polylines(image, [mesh_points[landmarks['silhouette']]], 1, colour_line, line_width)
    return image
300
+
301
def print_right_silhouette(image, mesh_points, landmarks):
    """Outline the right half-silhouette polygon (uses module drawing globals)."""
    image = cv2.polylines(image, [mesh_points[landmarks['rightSilhouette']]], 1, colour_line, line_width)
    return image
304
+
305
def print_left_silhouette(image, mesh_points, landmarks):
    """Outline the left half-silhouette polygon (uses module drawing globals)."""
    image = cv2.polylines(image, [mesh_points[landmarks['leftSilhouette']]], 1, colour_line, line_width)
    return image
308
+
309
def print_rectangle_card_area(image, mesh_points, landmarks):
    """Draw a rectangle above the eyebrows where a reference card would sit.

    The top edge is 60% of the face height above the outer right eyebrow
    landmark; the bottom-right corner is the outer left eyebrow landmark.
    Uses module drawing globals set by define_plotting_properties().
    """
    _, height_face_px = get_face_dimensions_px(mesh_points, landmarks)
    x_start = mesh_points[landmarks['outerRightEyebrowUpper']][0]
    y_start = mesh_points[landmarks['outerRightEyebrowUpper']][1] - 0.6*height_face_px
    x_end, y_end = mesh_points[landmarks['outerLeftEyebrowUpper']]
    start_point = (int(x_start), int(y_start))
    end_point = (int(x_end), int(y_end))
    image = cv2.rectangle(image, start_point, end_point, colour_rectangle, line_width)
    return image
318
+
319
def print_face_oval(image):
    """Draw a fixed guide ellipse centered in the image (face-position guide).

    Axes scale with the shorter image side; uses the module global
    thickness_oval set by define_plotting_properties().
    """
    height, width = height_width_image(image)
    image = cv2.ellipse(image, center=(int(width/2), int(height/2)), axes=(int(min(width,height)/4),int(min(width,height)/3)), angle=0, startAngle=0, endAngle=360, color=colour_oval, thickness=thickness_oval)
    return image
323
+
324
+ # ---------------------------------
325
+ # SCREEN-PRINTING
326
+ # ---------------------------------
327
+
328
+ # Printing text information onscreen
329
+ colour_text = (255, 255, 0)
330
+ colour_text_valid = (0,255,0)
331
+ colour_text_invalid = (255, 0, 0)
332
+ #
333
+
334
def screenprint_top_to_bottom_angle(image, top_to_bottom_angle, top_to_bottom_angle_ref, top_to_bottom_angle_max_deviation):
    """Overlay the vertical-axis angle; green if within tolerance, red otherwise.

    Uses module text-layout globals set by define_plotting_properties().
    """
    if abs(top_to_bottom_angle - top_to_bottom_angle_ref) < top_to_bottom_angle_max_deviation:
        colour_text_top_to_bottom_angle = colour_text_valid
    else:
        colour_text_top_to_bottom_angle = colour_text_invalid
    image = cv2.putText(image, f'top_to_bottom_angle: {str(round(top_to_bottom_angle,3))} [degrees]', (int(x_position_0),int(y_position_v[0])), cv2.FONT_HERSHEY_PLAIN, font_scale, colour_text_top_to_bottom_angle, font_thickness)
    return image
341
+
342
def screenprint_top_to_bottom_angle_simple(image, top_to_bottom_angle):
    """Overlay the vertical-axis angle in the neutral text colour (no validation)."""
    image = cv2.putText(image, f'top_to_bottom_angle: {str(round(top_to_bottom_angle,3))} [degrees]', (int(x_position_0),int(y_position_v[0])), cv2.FONT_HERSHEY_PLAIN, font_scale, colour_text, font_thickness)
    return image
345
+
346
def screenprint_left_to_right_angle(image, left_to_right_angle, left_to_right_angle_ref, left_to_right_angle_max_deviation_perc):
    """Overlay the horizontal-axis angle; green if within tolerance, red otherwise.

    Uses module text-layout globals set by define_plotting_properties().
    """
    if abs(left_to_right_angle - left_to_right_angle_ref) < left_to_right_angle_max_deviation_perc:
        colour_text_left_to_right_angle = colour_text_valid
    else:
        colour_text_left_to_right_angle = colour_text_invalid
    image = cv2.putText(image, f'left_to_right_angle: {str(round(left_to_right_angle,3))} [degrees]', (x_position_0,int(y_position_v[1])), cv2.FONT_HERSHEY_PLAIN, font_scale, colour_text_left_to_right_angle, font_thickness)
    return image
353
+
354
def screenprint_left_to_right_angle_simple(image, left_to_right_angle):
    """Overlay the horizontal-axis angle in the neutral text colour (no validation)."""
    image = cv2.putText(image, f'left_to_right_angle: {str(round(left_to_right_angle,3))} [degrees]', (x_position_0,int(y_position_v[1])), cv2.FONT_HERSHEY_PLAIN, font_scale, colour_text, font_thickness)
    return image
357
+
358
def screenprint_ipd_px(image, ipd_px):
    """Overlay the measured inter-pupillary distance in pixels (third text row)."""
    image = cv2.putText(image, f'ipd_px: {str(round(ipd_px,3))} [px]', (x_position_0,int(y_position_v[2])), cv2.FONT_HERSHEY_PLAIN, font_scale, colour_text, font_thickness)
    return image
361
+
362
def screenprint_area_right_to_left_silhoutte(image, area_right_to_left_silhoutte, area_ratio_right_to_left_ref, area_ratio_right_to_left_max_deviation_perc):
    """Overlay the right/left silhouette area imbalance (%), colour-coded by tolerance.

    Uses module text-layout globals set by define_plotting_properties().
    """
    if abs(area_right_to_left_silhoutte - area_ratio_right_to_left_ref) < area_ratio_right_to_left_max_deviation_perc:
        colour_text_area_right_to_left_silhoutte = colour_text_valid
    else:
        colour_text_area_right_to_left_silhoutte = colour_text_invalid
    image = cv2.putText(image, f'area_right_to_left_silhoutte: {str(round(area_right_to_left_silhoutte,3))} [%]', (x_position_0,int(y_position_v[3])), cv2.FONT_HERSHEY_PLAIN, font_scale,colour_text_area_right_to_left_silhoutte, font_thickness)
    return image
369
+
370
def screenprint_area_right_to_left_silhoutte_simple(image, area_right_to_left_silhoutte):
    """Overlay the silhouette area imbalance (%) in the neutral text colour."""
    image = cv2.putText(image, f'area_right_to_left_silhoutte: {str(round(area_right_to_left_silhoutte,3))} [%]', (x_position_0,int(y_position_v[3])), cv2.FONT_HERSHEY_PLAIN, font_scale,colour_text, font_thickness)
    return image
373
+
374
def screenprint_nose_to_cheek(image,left_cheek_to_nose_angle,nose_to_right_cheek_angle):
    """Overlay both nose-to-cheek angles on the fifth text row."""
    image = cv2.putText(image, f'Nose-Cheek Angles: {str(round(left_cheek_to_nose_angle,3)), str(round(nose_to_right_cheek_angle,3))} [degrees]', (x_position_0,int(y_position_v[4])), cv2.FONT_HERSHEY_PLAIN, font_scale,colour_text, font_thickness)
    return image
377
+
378
+
379
+ # ---------------------------------
380
+ # GET ALL MEASUREMENT DATA FUNCTION
381
+ # ---------------------------------
382
+
383
def get_measurements_from_landmarks(mesh_points,landmarks):
    """Compute all facial measurements from the FaceMesh pixel landmarks.

    Returns a dict of iris positions/radii, pixel IPD, face dimensions, axis
    and nose-cheek angles, and the left/right silhouette areas plus their
    imbalance in percent.
    """
    iris_position, iris_radius = find_iris_location(mesh_points,landmarks)
    ipd_px = get_ipd_px(iris_position)
    width_face_px, height_face_px = get_face_dimensions_px(mesh_points, landmarks)
    top_to_bottom_angle = get_top_to_bottom_angle(mesh_points, landmarks)
    left_to_right_angle = get_left_to_right_angle(mesh_points, landmarks)
    left_cheek_to_nose_angle = get_left_cheek_to_nose_angle(mesh_points, landmarks)
    nose_to_right_cheek_angle = get_nose_to_right_cheek_angle(mesh_points, landmarks)
    area_px_left_silhoutte = area_px_left_silhoutte_calc(mesh_points, landmarks)
    area_px_right_silhoutte = area_px_right_silhoutte_calc(mesh_points, landmarks)
    # Percentage imbalance between face halves; 0 means perfectly symmetric.
    # NOTE(review): divides by the left area -- a zero-area left silhouette
    # would raise ZeroDivisionError.
    area_right_to_left_silhoutte = (1 - (area_px_right_silhoutte/area_px_left_silhoutte))*100
    # Create dictionary to return measurements
    measurements = {'iris_position': iris_position,
                    'iris_radius': iris_radius,
                    'ipd_px': ipd_px,
                    'width_face_px': width_face_px,
                    'height_face_px': height_face_px,
                    'top_to_bottom_angle': top_to_bottom_angle,
                    'left_to_right_angle': left_to_right_angle,
                    'left_cheek_to_nose_angle': left_cheek_to_nose_angle,
                    'nose_to_right_cheek_angle': nose_to_right_cheek_angle,
                    'area_px_left_silhoutte': area_px_left_silhoutte,
                    'area_px_right_silhoutte': area_px_right_silhoutte,
                    'area_right_to_left_silhoutte': area_right_to_left_silhoutte
                    }
    return measurements
412
+
413
+
414
+ # ---------------------------------
415
+ # LINE PROPERTIES
416
+ # ---------------------------------
417
+
418
def define_plotting_properties(image):
    """Initialise the module-level drawing/text-layout globals for this image size.

    Must be called before any print_*/screenprint_* helper, which read these
    globals directly. Layout assumes 5 text rows at the default scale.
    """
    n_lines = 5
    scale = 1
    # Properties of lines
    global font_scale, font_thickness, line_width, point_width, thickness_oval, x_position_0, y_position_v
    height, width = height_width_image(image)
    font_scale, font_thickness, line_width, point_width, thickness_oval, x_position_0, y_position_v = put_text_args(height, width, n_lines, scale)
    # Guard: very small images would otherwise get an invisible 0-px line width.
    if line_width == 0: line_width = 1
426
+
427
+
428
+ # ---------------------------------
429
+ # PRINT LANDMARKS, MEASUREMENTS AND CHECKS
430
+ # ---------------------------------
431
+
432
def print_landmarks_on_img(image, result, mesh_points, landmarks, iris_position):
    """Draw every landmark overlay onto the image (mesh, irises, axes, silhouettes).

    Requires define_plotting_properties() to have been called first, since all
    print_* helpers read the module drawing globals.
    """
    image = print_face_mesh_image(image, result)
    image = print_iris_location(image, mesh_points, landmarks)
    image = print_center_iris(image, iris_position)
    image = print_line_left_to_right_iris(image, iris_position)
    image = print_line_top_to_bottom(image, mesh_points, landmarks)
    image = print_line_left_to_right(image, mesh_points, landmarks)
    image = print_line_left_cheek_to_nose(image, mesh_points, landmarks)
    image = print_line_nose_to_right_cheek(image, mesh_points, landmarks)
    image = print_silhouette(image, mesh_points, landmarks)
    image = print_rectangle_card_area(image, mesh_points, landmarks)
    image = print_right_silhouette(image, mesh_points, landmarks)
    image = print_left_silhouette(image, mesh_points, landmarks)
    image = print_face_oval(image)
    return image
447
+
448
def screenprint_data_and_criteria_on_img(image, measurements, criteria):
    """Overlay all measurement text rows, colour-coded against the given criteria."""
    image = screenprint_top_to_bottom_angle(image, measurements['top_to_bottom_angle'], criteria['top_to_bottom_angle_ref'], criteria['top_to_bottom_angle_max_deviation'])
    image = screenprint_left_to_right_angle(image, measurements['left_to_right_angle'], criteria['left_to_right_angle_ref'], criteria['left_to_right_angle_max_deviation_perc'])
    image = screenprint_area_right_to_left_silhoutte(image, measurements['area_right_to_left_silhoutte'], criteria['area_ratio_right_to_left_ref'], criteria['area_ratio_right_to_left_max_deviation_perc'])
    image = screenprint_ipd_px(image, measurements['ipd_px'])
    image = screenprint_nose_to_cheek(image,measurements['left_cheek_to_nose_angle'],measurements['nose_to_right_cheek_angle'])
    return image
455
+
456
def screenprint_data_on_img(image, measurements):
    """Overlay all measurement text rows in the neutral colour (no pass/fail check)."""
    image = screenprint_top_to_bottom_angle_simple(image, measurements['top_to_bottom_angle'])
    image = screenprint_left_to_right_angle_simple(image, measurements['left_to_right_angle'])
    image = screenprint_area_right_to_left_silhoutte_simple(image, measurements['area_right_to_left_silhoutte'])
    image = screenprint_ipd_px(image, measurements['ipd_px'])
    image = screenprint_nose_to_cheek(image,measurements['left_cheek_to_nose_angle'],measurements['nose_to_right_cheek_angle'])
    return image
463
+
464
+ # ---------------------------------
465
+ # MAIN FUNCTION
466
+ # ---------------------------------
467
+
468
def measure_landmarks_img(image, landmarks, plot_landmarks_on_img = True, plot_data_on_img = True):
    """Detect facial landmarks on an image and return (annotated_image, measurements).

    image: photo array; landmarks: dict of named landmark index groups.
    plot_landmarks_on_img / plot_data_on_img toggle drawing overlays.
    """
    # Best-effort channel conversion for MediaPipe; failure is printed, not raised.
    try: image = image_bgr_to_rgb(image)
    except Exception as e: print(e)
    # Create global parameters for plotting properties (lines, points / width, colour, etc.)
    define_plotting_properties(image)
    # Getting face landmarks.
    # NOTE(review): when no face is detected, face_mesh_points returns
    # (None, image) and the measurement call below will fail -- consider an
    # explicit early return.
    result, mesh_points = face_mesh_points(image)
    # Measurements
    measurements = get_measurements_from_landmarks(mesh_points,landmarks)
    # Printing objects
    if plot_landmarks_on_img == True: image = print_landmarks_on_img(image, result, mesh_points, landmarks, measurements['iris_position'])
    # Screenprinting data and checks
    if plot_data_on_img == True: image = screenprint_data_on_img(image, measurements)
    # NOTE(review): the image was already converted BGR->RGB above; applying
    # BGR2RGB a second time swaps the channels back to BGR -- confirm which
    # colour order the caller (Gradio) expects.
    image = image_bgr_to_rgb(image)
    # Return image
    return image, measurements
486
+
487
+
488
+ # ---------------------------------
489
+ # OTHER FUNCTIONS
490
+ # ---------------------------------
f_segment_img.py ADDED
@@ -0,0 +1,181 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from f_measurents import *
2
+ import cv2
3
+ import numpy as np
4
+ import matplotlib.pyplot as plt
5
+ import mediapipe as mp
6
+ from mediapipe.tasks.python import vision
7
+ import sys
8
+ import os
9
+ from segment_anything import sam_model_registry, SamAutomaticMaskGenerator, SamPredictor
10
+ #
11
+ sys.path.insert(
12
+ 1, '/Users/danielfiuzadosil/Documents/GitHub_Repo/Bryant_Medical/eCommerce/App_IPD [Master]/ipd_app/src/modules')
13
+ #
14
+
15
+
16
def remove_background_img(img):
    """Whiten the background of a portrait using MediaPipe selfie segmentation.

    Returns a copy of *img* with every non-person pixel set to 255 (white).
    Assumes img is RGB as MediaPipe expects -- TODO confirm against callers.
    """
    # Confidence threshold for the person mask.
    THRESHOLD = 0.12
    # initialize mediapipe (model_selection=1: landscape/general model)
    mp_selfie_segmentation = mp.solutions.selfie_segmentation
    selfie_segmentation = mp_selfie_segmentation.SelfieSegmentation(
        model_selection=1)
    # get the result
    results = selfie_segmentation.process(img)
    # extract segmented mask: broadcast the single-channel confidence map to
    # 3 channels, then binarise to 0/255.
    mask = np.stack((results.segmentation_mask,) * 3, axis=-1) > THRESHOLD
    mask_binary = mask.astype(int)*255
    img_masked = img.copy()
    # Paint background (mask == 0) white, per channel.
    img_masked[mask_binary == 0] = 255
    return img_masked
31
+
32
+
33
def remove_background_img_v2(image_path, MODEL_PATH='/Users/danielfiuzadosil/Documents/GitHub_Repo/Bryant_Medical/eCommerce/App_IPD [Master]/ipd_app/data/external/mediapipe_models/deeplabv3.tflite'):
    """Whiten the image background using the MediaPipe Tasks DeepLabV3 segmenter.

    image_path: path to the image file (loaded by MediaPipe itself).
    MODEL_PATH: .tflite segmentation model -- NOTE(review): defaults to a
    hardcoded developer-machine path; must be overridden elsewhere.
    Returns a copy of the image with background pixels set to 255.
    """
    # Confidence threshold applied to the category mask.
    THRESHOLD = 0.12
    BG_COLOR = (0, 0, 0) # black
    MASK_COLOR = (255, 255, 255) # white
    BaseOptions = mp.tasks.BaseOptions
    OutputType = vision.ImageSegmenterOptions.OutputType
    # Create the options that will be used for ImageSegmenter
    base_options = BaseOptions(model_asset_path=MODEL_PATH)
    options = vision.ImageSegmenterOptions(
        base_options=base_options, output_type=OutputType.CATEGORY_MASK)
    # Create the MediaPipe image file that will be segmented
    image = mp.Image.create_from_file(image_path)
    with vision.ImageSegmenter.create_from_options(options) as segmenter:
        # Retrieve the masks for the segmented image
        category_masks = segmenter.segment(image)
        # Generate solid color images for showing the output segmentation mask.
        image_data = image.numpy_view()
        fg_image = np.zeros(image_data.shape, dtype=np.uint8)
        fg_image[:] = MASK_COLOR
        bg_image = np.zeros(image_data.shape, dtype=np.uint8)
        bg_image[:] = BG_COLOR
        # Broadcast the first category mask to 3 channels and binarise it.
        condition = np.stack(
            (category_masks[0].numpy_view(),) * 3, axis=-1) > THRESHOLD
        mask_binary = np.where(condition, fg_image, bg_image)
        # Paint background (mask == 0) white on a copy of the source pixels.
        img_masked = image_data.copy()
        img_masked[mask_binary == 0] = 255
    return img_masked
66
+
67
+
68
def segment_frame_from_img(image, landmarks, sam):
    """Locate and segment the glasses frame in a face photo using SAM.

    image: face photo; landmarks: dict of landmark index groups (must include
    'squareBoxEyes'); sam: a loaded Segment Anything model.
    Returns (image, img_cropped, masks_selection, objects_segmented) where
    masks_selection holds the SAM masks whose area is plausible for a frame
    and objects_segmented the matching whitened-background crops.
    """
    # Keep an unannotated copy for cropping.
    image_0 = image.copy()
    # NOTE(review): this local FaceMesh instance is never used --
    # face_mesh_points() below uses the module-level model from f_measurents.
    mp_face_mesh = mp.solutions.face_mesh
    face_mesh = mp_face_mesh.FaceMesh(
        static_image_mode=True,
        max_num_faces=1,
        refine_landmarks=True,
        min_detection_confidence=0.5)
    # Calculate facial landmarks and other data.
    # NOTE(review): df_mesh_points and the face dimensions are computed but
    # never used below.
    result, mesh_points = face_mesh_points(image)
    df_mesh_points = mesh_points_to_df(mesh_points)
    width_face_px, height_face_px = get_face_dimensions_px(
        mesh_points, landmarks)
    # Calculate rectangle where the frame will likely be (based on reference landmarks)
    squareBoxEyes = mesh_points[landmarks['squareBoxEyes']]
    # Expand the eye box by 20% in each direction to be sure the frame fits.
    width_rectangle = squareBoxEyes[2][0] - squareBoxEyes[0][0]
    tolerance_x = 0.2*width_rectangle
    height_rectangle = squareBoxEyes[2][1] - squareBoxEyes[0][1]
    tolerance_y = height_rectangle*0.2
    x_start, y_start = [squareBoxEyes[0][0] -
                        tolerance_x, squareBoxEyes[0][1] - tolerance_y]
    # NOTE(review): no tolerance is added to y_end -- confirm this asymmetry
    # is intentional.
    x_end, y_end = [squareBoxEyes[2][0] + tolerance_x, squareBoxEyes[2][1]]
    # Cropped image to region where the frames will be located
    img_cropped = image_0[int(y_start):int(y_end), int(x_start):int(x_end)]
    # Use Meta's Segment Anything Model (SAM) to segment the frame
    mask_generator = SamAutomaticMaskGenerator(sam)
    masks = mask_generator.generate(img_cropped)
    # Select the right object by defining the expected range of area occupied
    # by the frame: between 20% and 60% of the cropped region.
    height, width, _ = img_cropped.shape
    area_photo = height*width
    area_frame_min = 0.2
    area_frame_max = 0.6
    # Iterate through the different masks and store the ones that fulfill the criteria
    masks_selection = []
    objects_segmented = []
    for i in range(len(masks)):
        mask = masks[i]
        area_object = mask['area']
        if area_photo*area_frame_max > area_object > area_photo*area_frame_min:
            masks_selection.append(mask)
            # Whiten everything outside the mask to isolate the object.
            mask_binary_temp = mask['segmentation'].astype(int)*255
            object_segmented = img_cropped.copy()
            object_segmented[mask_binary_temp == 0] = 255
            objects_segmented.append(object_segmented)
    return image, img_cropped, masks_selection, objects_segmented
119
+
120
+
121
def plot_sam_check_segmentation_frame(image, img_cropped, objects_segmented):
    """Build a 3-row sanity-check figure: full image, crop, first segmented object.

    Uses pyplot's implicit current figure; returns that figure.
    NOTE(review): raises IndexError when objects_segmented is empty (no mask
    passed the area filter).
    """
    ax1 = plt.subplot(311)
    ax1.imshow(image)
    ax1.axis("Off")
    ax2 = plt.subplot(312)
    ax2.imshow(img_cropped)
    ax2.axis("Off")
    ax3 = plt.subplot(313)
    ax3.imshow(objects_segmented[0])
    fig = plt.gcf()
    return fig
133
+
134
+
135
def plot_sam_check_segmentation_frame_and_save(image, img_cropped, objects_segmented, output_folder, filepath):
    """Render the 3-row segmentation check and save it under *output_folder*.

    The output filename reuses the basename of *filepath*. Returns None.
    NOTE(review): output_folder is concatenated directly -- it must end with a
    path separator; raises IndexError when objects_segmented is empty.
    """
    ax1 = plt.subplot(311)
    ax1.imshow(image)
    ax1.axis("Off")
    ax2 = plt.subplot(312)
    ax2.imshow(img_cropped)
    ax2.axis("Off")
    ax3 = plt.subplot(313)
    ax3.imshow(objects_segmented[0])
    plt.savefig(output_folder + os.path.basename(filepath),
                transparent=True, bbox_inches='tight')
    # plt.show()
150
def plot_segmented_object_with_bb(objects_segmented, masks_selection, image):
    """Plot the first segmented object with its SAM bounding box and a px grid.

    NOTE(review): cv2.rectangle draws in place, so objects_segmented[0] is
    mutated; the *image* parameter is immediately shadowed and never used.
    Returns None (display-only helper).
    """
    image = cv2.rectangle(objects_segmented[0], masks_selection[0]['bbox'], [255,0,0], 4)
    plt.imshow(image)
    ax = plt.gca()
    img_height = image.shape[0]
    img_width = image.shape[1]
    # Grid: major lines every 100 px, minor every 10 px, for reading off sizes.
    major_ticks_height = np.arange(0, img_height, 100)
    minor_ticks_height = np.arange(0, img_height, 10)
    major_ticks_width = np.arange(0, img_width, 100)
    minor_ticks_width = np.arange(0, img_width, 10)
    ax.set_yticks(major_ticks_height)
    ax.set_yticks(minor_ticks_height, minor=True)
    ax.set_xticks(major_ticks_width)
    ax.set_xticks(minor_ticks_width, minor=True)
    ax.grid(which='both')
    ax.grid(which='minor', alpha=0.2)
    ax.grid(which='major', alpha=0.5)
    # plt.show()
173
+
174
def get_frame_width(masks_selection):
    """Width in px of the first selected mask's bounding box (bbox = x, y, w, h)."""
    first_bbox = masks_selection[0]['bbox']
    return first_bbox[2]
177
+
178
def ipd_calibration(ipd_px, frame_width_px, frame_width_mm):
    """Convert a pixel IPD to millimetres using the frame's known physical width."""
    px_per_mm = frame_width_px / frame_width_mm
    return ipd_px / px_per_mm
ipd_gradio.ipynb ADDED
@@ -0,0 +1,183 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "metadata": {},
7
+ "outputs": [
8
+ {
9
+ "name": "stderr",
10
+ "output_type": "stream",
11
+ "text": [
12
+ "INFO: Created TensorFlow Lite XNNPACK delegate for CPU.\n"
13
+ ]
14
+ },
15
+ {
16
+ "data": {
17
+ "text/plain": [
18
+ "True"
19
+ ]
20
+ },
21
+ "execution_count": 1,
22
+ "metadata": {},
23
+ "output_type": "execute_result"
24
+ }
25
+ ],
26
+ "source": [
27
+ "from f_segment_img import *\n",
28
+ "from f_measurents import *\n",
29
+ "import gradio as gr\n",
30
+ "import dotenv\n",
31
+ "import ast\n",
32
+ "dotenv.load_dotenv()"
33
+ ]
34
+ },
35
+ {
36
+ "cell_type": "code",
37
+ "execution_count": 2,
38
+ "metadata": {},
39
+ "outputs": [],
40
+ "source": [
41
+ "def create_sam():\n",
42
+ " sam_checkpoint = \"sam_vit_h_4b8939.pth\"\n",
43
+ " model_type = \"vit_h\"; device = \"cuda\"\n",
44
+ " sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)\n",
45
+ " return sam\n"
46
+ ]
47
+ },
48
+ {
49
+ "cell_type": "code",
50
+ "execution_count": 3,
51
+ "metadata": {},
52
+ "outputs": [],
53
+ "source": [
54
+ "def plt2arr(fig, draw=True):\n",
55
+ " if draw: fig.canvas.draw()\n",
56
+ " rgba_buf = fig.canvas.buffer_rgba()\n",
57
+ " (w,h) = fig.canvas.get_width_height()\n",
58
+ " rgba_arr = np.frombuffer(rgba_buf, dtype=np.uint8).reshape((h,w,4))\n",
59
+ " return rgba_arr"
60
+ ]
61
+ },
62
+ {
63
+ "cell_type": "code",
64
+ "execution_count": 4,
65
+ "metadata": {},
66
+ "outputs": [],
67
+ "source": [
68
+ "def frame_size_width_mm(dropdown_label):\n",
69
+ " if dropdown_label == 'Small (142 mm)': frame_width_px = 142\n",
70
+ " elif dropdown_label == 'Medium (xx mm)': frame_width_px = 150\n",
71
+ " elif dropdown_label == 'Large (xx mm)': frame_width_px = 155\n",
72
+ " return frame_width_px"
73
+ ]
74
+ },
75
+ {
76
+ "cell_type": "code",
77
+ "execution_count": 5,
78
+ "metadata": {},
79
+ "outputs": [],
80
+ "source": [
81
+ "def ipd_app(image,dropdown_label):\n",
82
+ " # \n",
83
+ " landmarks = ast.literal_eval(os.environ['landmarks'])\n",
84
+ " frame_processed, measurements = measure_landmarks_img(image, landmarks, plot_landmarks_on_img = True, plot_data_on_img = True)\n",
85
+ " # \n",
86
+ " image, img_cropped, masks_selection, objects_segmented = segment_frame_from_img(image, landmarks, create_sam())\n",
87
+ " # \n",
88
+ " frame_width_px = get_frame_width(masks_selection)\n",
89
+ " frame_width_mm = frame_size_width_mm(dropdown_label)\n",
90
+ " ipd_mm = ipd_calibration(measurements['ipd_px'], frame_width_px, frame_width_mm)\n",
91
+ " text_ipd = 'IPD: ' + str(round(ipd_mm,2)) + ' mm'\n",
92
+ " # \n",
93
+ " sam_check = plot_sam_check_segmentation_frame(image, img_cropped, objects_segmented)\n",
94
+ " sam_check_numpy = plt2arr(sam_check, draw = True)\n",
95
+ " # \n",
96
+ " return text_ipd, frame_processed, str(measurements), sam_check_numpy"
97
+ ]
98
+ },
99
+ {
100
+ "cell_type": "code",
101
+ "execution_count": 6,
102
+ "metadata": {},
103
+ "outputs": [],
104
+ "source": [
105
+ "image_test = '/Users/danielfiuzadosil/Documents/GitHub_Repo/Bryant_Medical/eCommerce/App_IPD [Master]/ipd_app/data/raw/segmentation/sample_w_frames.jpeg'\n",
106
+ "image = cv2.cvtColor(cv2.imread(image_test),cv2.COLOR_BGR2RGB)\n",
107
+ "dropdown_label = \"Small (142 mm)\""
108
+ ]
109
+ },
110
+ {
111
+ "cell_type": "code",
112
+ "execution_count": 7,
113
+ "metadata": {},
114
+ "outputs": [
115
+ {
116
+ "name": "stderr",
117
+ "output_type": "stream",
118
+ "text": [
119
+ "/opt/homebrew/lib/python3.9/site-packages/gradio/deprecation.py:43: UserWarning: You have unused kwarg parameters in Dropdown, please remove them: {'info': 'For calibration'}\n",
120
+ " warnings.warn(\n"
121
+ ]
122
+ },
123
+ {
124
+ "name": "stdout",
125
+ "output_type": "stream",
126
+ "text": [
127
+ "IMPORTANT: You are using gradio version 3.7, however version 3.14.0 is available, please upgrade.\n",
128
+ "--------\n",
129
+ "Running on local URL: http://127.0.0.1:7860\n",
130
+ "\n",
131
+ "To create a public link, set `share=True` in `launch()`.\n"
132
+ ]
133
+ },
134
+ {
135
+ "data": {
136
+ "text/html": [
137
+ "<div><iframe src=\"http://127.0.0.1:7860/\" width=\"900\" height=\"500\" allow=\"autoplay; camera; microphone; clipboard-read; clipboard-write;\" frameborder=\"0\" allowfullscreen></iframe></div>"
138
+ ],
139
+ "text/plain": [
140
+ "<IPython.core.display.HTML object>"
141
+ ]
142
+ },
143
+ "metadata": {},
144
+ "output_type": "display_data"
145
+ }
146
+ ],
147
+ "source": [
148
+ "dropdown = gr.Dropdown([\"Small (142 mm)\", \"Medium (xx mm)\", \"Large (xx mm)\"], label=\"Refractives Frame Size\", info=\"For calibration\")\n",
149
+ "demo = gr.Interface(fn=ipd_app, inputs=[\"image\",dropdown], outputs=[\"text\", \"image\", \"text\", \"image\"])\n",
150
+ "demo.launch(debug=True)"
151
+ ]
152
+ },
153
+ {
154
+ "cell_type": "code",
155
+ "execution_count": null,
156
+ "metadata": {},
157
+ "outputs": [],
158
+ "source": []
159
+ }
160
+ ],
161
+ "metadata": {
162
+ "kernelspec": {
163
+ "display_name": "Python 3",
164
+ "language": "python",
165
+ "name": "python3"
166
+ },
167
+ "language_info": {
168
+ "codemirror_mode": {
169
+ "name": "ipython",
170
+ "version": 3
171
+ },
172
+ "file_extension": ".py",
173
+ "mimetype": "text/x-python",
174
+ "name": "python",
175
+ "nbconvert_exporter": "python",
176
+ "pygments_lexer": "ipython3",
177
+ "version": "3.9.15"
178
+ },
179
+ "orig_nbformat": 4
180
+ },
181
+ "nbformat": 4,
182
+ "nbformat_minor": 2
183
+ }