kerzel commited on
Commit
af3fe62
·
1 Parent(s): 8f9ff8f

initial version of the damage classification app

Browse files
app.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import numpy as np
import pandas as pd

# our own helper tools
import clustering
import utils

import logging
logging.getLogger().setLevel(logging.INFO)

from tensorflow import keras

# collected damage sites: (x, y) centroid -> damage-type label
damage_sites = {}

# Model 1 separates inclusions from all other damage types;
# it expects image patches of this size (pixels).
model1_windowsize = [250, 250]

model1 = keras.models.load_model('rwthmaterials_dp800_network1_inclusion.h5')
model1.compile()

# class index -> label for model 2 (the multi-class damage classifier)
damage_classes = {3: "Martensite", 2: "Interface", 0: "Notch", 1: "Shadowing"}

# Model 2 classifies the remaining damage types; it expects smaller patches.
model2_windowsize = [100, 100]

# BUG FIX: the weight file shipped with this repo is named
# 'rwthmaterials_dp800_network2_damage.h5'; the previous spelling
# 'newtwork2' pointed at a non-existent file and failed at import time.
model2 = keras.models.load_model('rwthmaterials_dp800_network2_damage.h5')
model2.compile()
30
+
31
+
32
+
33
##
## Function to do the actual damage classification
##
def damage_classification(SEM_image, image_threshold, model1_threshold, model2_threshold):
    """Run the two-stage damage classification pipeline on one SEM image.

    Stage 0 clusters dark pixels into candidate damage sites. Stage 1
    (model 1) separates inclusions from everything else; stage 2 (model 2)
    assigns one of the remaining damage classes to the sites model 1 did
    not claim. The annotated image and a CSV of all sites are written to
    the working directory.

    Args:
        SEM_image: input SEM image as a numpy array.
        image_threshold: grayscale value below which a pixel is attributed
            to a potential damage site (forwarded to the clustering step).
        model1_threshold: probability threshold for model 1 (inclusions).
        model2_threshold: probability threshold for model 2 (other classes).

    Returns:
        tuple: (annotated image as np.ndarray, path of saved PNG, path of saved CSV)
    """
    # BUG FIX: use a local dict instead of mutating the module-level
    # damage_sites — the global accumulated stale sites across successive
    # classifications (and across concurrent users of the app).
    damage_sites = {}

    ##
    ## clustering
    ##
    logging.debug('---------------: clustering :=====================')
    all_centroids = clustering.get_centroids(SEM_image, image_threshold=image_threshold,
                                             fill_holes=True, filter_close_centroids=True)

    # every candidate starts out unclassified
    for centroid in all_centroids:
        damage_sites[(centroid[0], centroid[1])] = 'Not Classified'

    ##
    ## Inclusions vs the rest
    ##
    logging.debug('---------------: prepare model 1 :=====================')
    images_model1 = utils.prepare_classifier_input(SEM_image, all_centroids, window_size=model1_windowsize)

    logging.debug('---------------: run model 1 :=====================')
    y1_pred = model1.predict(np.asarray(images_model1, float))

    logging.debug('---------------: model1 threshold :=====================')
    # column 0 holds the inclusion score; keep indices above threshold
    inclusions = y1_pred[:, 0].reshape(len(y1_pred), 1)
    inclusions = np.where(inclusions > model1_threshold)

    logging.debug('---------------: model 1 update dict :=====================')
    for centroid_id in inclusions[0]:
        coordinates = all_centroids[centroid_id]
        damage_sites[(coordinates[0], coordinates[1])] = 'Inclusion'
    logging.debug('Damage sites after model 1')
    logging.debug(damage_sites)

    ##
    ## Martensite cracking, etc
    ##
    logging.debug('---------------: prepare model 2 :=====================')
    # only the sites model 1 did not claim go into the second classifier
    centroids_model2 = [list([key[0], key[1]])
                        for key, value in damage_sites.items()
                        if value == 'Not Classified']
    logging.debug('Centroids model 2')
    logging.debug(centroids_model2)

    logging.debug('---------------: prepare model 2 :=====================')
    images_model2 = utils.prepare_classifier_input(SEM_image, centroids_model2, window_size=model2_windowsize)
    logging.debug('Images model 2')
    logging.debug(images_model2)

    logging.debug('---------------: run model 2 :=====================')
    y2_pred = model2.predict(np.asarray(images_model2, float))

    # (site index, class index) pairs whose score exceeds the threshold
    damage_index = np.asarray(y2_pred > model2_threshold).nonzero()

    for index, identified_class in zip(damage_index[0], damage_index[1]):
        label = damage_classes[identified_class]
        coordinates = centroids_model2[index]
        damage_sites[(coordinates[0], coordinates[1])] = label

    ##
    ## show the damage sites on the image
    ##
    logging.debug("-----------------: final damage sites :=================")
    logging.debug(damage_sites)

    image_path = 'classified_damage_sites.png'
    image = utils.show_boxes(SEM_image, damage_sites,
                             save_image=True,
                             image_path=image_path)

    ##
    ## export data
    ##
    csv_path = 'classified_damage_sites.csv'
    data = [[key[0], key[1], value] for key, value in damage_sites.items()]
    df = pd.DataFrame(columns=['x', 'y', 'damage_type'], data=data)
    df.to_csv(csv_path)

    return image, image_path, csv_path
131
+
132
## ---------------------------------------------------------------------------------------------------------------
## main app interface
## -----------------------------------------------------------------------------------------------------------------
with gr.Blocks() as app:
    gr.Markdown('# Damage Classification in Dual Phase Steels')
    gr.Markdown('This app classifies damage types in dual phase steels. Two models are used. The first model is used to identify inclusions in the steel. The second model is used to identify the remaining damage types: Martensite cracking, Interface Decohesion, Notch effect and Shadows.')

    gr.Markdown('If you use this app, kindly cite the following papers:')
    gr.Markdown('Kusche, C., Reclik, T., Freund, M., Al-Samman, T., Kerzel, U., & Korte-Kerzel, S. (2019). Large-area, high-resolution characterisation and classification of damage mechanisms in dual-phase steel using deep learning. PloS one, 14(5), e0216493. [Link](https://doi.org/10.1371/journal.pone.0216493)')
    gr.Markdown('Medghalchi, S., Kusche, C. F., Karimi, E., Kerzel, U., & Korte-Kerzel, S. (2020). Damage analysis in dual-phase steel using deep learning: transfer from uniaxial to biaxial straining conditions by image data augmentation. Jom, 72, 4420-4430. [Link](https://link.springer.com/article/10.1007/s11837-020-04404-0)')

    # input image plus the three tunable thresholds of the pipeline
    image_input = gr.Image()
    with gr.Row():
        cluster_threshold_input = gr.Number(label='Cluster Threshold', value = 20,
                                            info='Grayscale value at which a pixel is attributed to a potential damage site')
        model1_threshold_input = gr.Number(label='Model 1 Threshold', value = 0.7, info='Threshold for the model identifying inclusions')
        # BUG FIX: user-facing typo 'Thrshold' -> 'Threshold'
        model2_threshold_input = gr.Number(label='Model 2 Threshold', value = 0.5, info='Threshold for the model identifying the remaining damage types')

    button = gr.Button("Classify")

    # annotated result plus download buttons for the PNG and the CSV export
    output_image = gr.Image()
    with gr.Row():
        download_image = gr.DownloadButton(label='Download Image')
        download_csv = gr.DownloadButton(label='Download Damage List')

    button.click(damage_classification,
                 inputs=[image_input, cluster_threshold_input, model1_threshold_input, model2_threshold_input],
                 outputs=[output_image, download_image, download_csv])

if __name__ == "__main__":
    app.launch()
clustering.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Before we can identify damage sites, we need to look for suitable regions in the image.
3
+ Typically, damage sites appear as dark regions in the image. Instead of simple thresholding, we use
4
+ a clustering approach to identify regions that belong together and form damage site candidates.
5
+ """
6
+
7
+ import numpy as np
8
+
9
+
10
+ import scipy.ndimage as ndi
11
+ from scipy.spatial import KDTree
12
+
13
+ from sklearn.cluster import DBSCAN
14
+
15
+ import logging
16
+
17
+
18
def get_centroids(image : np.ndarray, image_threshold = 20,
                  eps=1, min_samples=5, metric='euclidean',
                  min_size = 20, fill_holes = False,
                  filter_close_centroids = False, filter_radius = 50) -> list:
    """ Determine centroids of clusters corresponding to potential damage sites.
    In a first step, a threshold is applied to the input image to identify areas of potential damage sites.
    Using DBSCAN, these agglomerations of pixels are fitted into clusters. Then, the mean x/y values are determined
    from pixels belonging to one cluster. If the number of pixels in a given cluster exceeds the threshold given by min_size, this cluster is added
    to the list of (x,y) coordinates that is returned as the final list of potential damage sites.

    Sometimes, clusters may be found in very close proximity to each other, we can reject those to avoid
    classifying the same event multiple times (which may distort our statistics).

    DBScan documentation: https://scikit-learn.org/stable/modules/generated/sklearn.cluster.DBSCAN.html

    Args:
        image (np.ndarray): Input SEM image
        image_threshold (int, optional): Threshold to be applied to the image to identify candidates for damage sites. Defaults to 20.
        eps (int, optional): parameter eps of DBSCAN: The maximum distance between two samples for one to be considered as in the neighborhood of the other. Defaults to 1.
        min_samples (int, optional): parameter min_samples of DBSCAN: The number of samples (or total weight) in a neighborhood for a point to be considered as a core point. Defaults to 5.
        metric (str, optional): parameter metric of DBSCAN. Defaults to 'euclidean'.
        min_size (int, optional): Minimum number of pixels in a cluster for the damage site candidate to be considered in the final list. Defaults to 20.
        fill_holes (bool, optional): Fill small holes in damage sites clusters using binary_fill_holes. Defaults to False.
        filter_close_centroids (bool, optional): Filter cluster centroids within a given radius. Defaults to False
        filter_radius (float, optional): Radius within which centroids are considered to be the same. Defaults to 50

    Returns:
        list: list of (x,y) coordinates of the centroids of the clusters of accepted damage site candidates.
    """

    centroids = []

    # apply the threshold to identify regions of "dark" pixels;
    # the result is a binary mask (true/false) per pixel
    cluster_candidates = image < image_threshold

    # sometimes the clusters have small holes in them (e.g. individual pixels
    # inside a region below the threshold) which may confuse the clustering
    # algorithm later on; binary_fill_holes closes them.
    # https://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.binary_fill_holes.html
    # N.B. the algorithm only works on binary data
    if fill_holes:
        cluster_candidates = ndi.binary_fill_holes(cluster_candidates)

    # transform the binary mask into an (n_pixels, 2) coordinate array for DBSCAN
    cluster_candidates = np.transpose(np.asarray(cluster_candidates).nonzero())

    # ROBUSTNESS: nothing below the threshold -> no candidates; DBSCAN.fit
    # would raise on an empty sample array.
    if len(cluster_candidates) == 0:
        return centroids

    # run the DBSCAN clustering algorithm; candidate pixels not attributed to a
    # cluster are labelled "-1", i.e. "noise" (e.g. they are too small, etc).
    # BUG FIX: the 'metric' parameter was previously ignored (the call
    # hard-coded metric='euclidean').
    dbscan = DBSCAN(eps=eps, min_samples=min_samples, metric=metric)
    dbscan.fit(cluster_candidates)

    labels = dbscan.labels_
    # Number of clusters in labels, ignoring noise if present.
    n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    n_noise = list(labels).count(-1)
    logging.debug('# clusters {}, #noise {}'.format(n_clusters, n_noise))

    # loop over all cluster labels; set() deduplicates the per-pixel labels
    for i in set(labels):
        if i > -1:
            # all points belonging to a given cluster
            cluster_points = cluster_candidates[labels == i, :]
            if len(cluster_points) > min_size:
                centre = np.mean(cluster_points, axis=0)
                centroids.append([centre[0], centre[1]])

    if filter_close_centroids and centroids:
        # BUG FIX: the previous implementation popped list elements while the
        # pair indices still referred to the original list, corrupting the
        # result whenever more than one close pair was found (and KDTree on an
        # empty list would raise). Collect the indices to drop first — the
        # lower index of each close pair, mirroring the original pop(p[0]) —
        # then rebuild the list once.
        proximity_tree = KDTree(centroids)
        to_drop = {min(pair) for pair in proximity_tree.query_pairs(filter_radius)}
        centroids = [c for idx, c in enumerate(centroids) if idx not in to_drop]

    return centroids
requirements.txt ADDED
@@ -0,0 +1,102 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==2.1.0 ; python_version >= "3.10" and python_version < "3.12"
2
+ aiofiles==23.2.1 ; python_version >= "3.10" and python_version < "3.12"
3
+ altair==5.2.0 ; python_version >= "3.10" and python_version < "3.12"
4
+ annotated-types==0.6.0 ; python_version >= "3.10" and python_version < "3.12"
5
+ anyio==4.3.0 ; python_version >= "3.10" and python_version < "3.12"
6
+ astunparse==1.6.3 ; python_version >= "3.10" and python_version < "3.12"
7
+ attrs==23.2.0 ; python_version >= "3.10" and python_version < "3.12"
8
+ cachetools==5.3.3 ; python_version >= "3.10" and python_version < "3.12"
9
+ certifi==2024.2.2 ; python_version >= "3.10" and python_version < "3.12"
10
+ charset-normalizer==3.3.2 ; python_version >= "3.10" and python_version < "3.12"
11
+ click==8.1.7 ; python_version >= "3.10" and python_version < "3.12"
12
+ colorama==0.4.6 ; python_version >= "3.10" and python_version < "3.12"
13
+ contourpy==1.2.0 ; python_version >= "3.10" and python_version < "3.12"
14
+ cycler==0.12.1 ; python_version >= "3.10" and python_version < "3.12"
15
+ exceptiongroup==1.2.0 ; python_version >= "3.10" and python_version < "3.11"
16
+ fastapi==0.110.0 ; python_version >= "3.10" and python_version < "3.12"
17
+ ffmpy==0.3.2 ; python_version >= "3.10" and python_version < "3.12"
18
+ filelock==3.13.1 ; python_version >= "3.10" and python_version < "3.12"
19
+ flatbuffers==24.3.7 ; python_version >= "3.10" and python_version < "3.12"
20
+ fonttools==4.50.0 ; python_version >= "3.10" and python_version < "3.12"
21
+ fsspec==2024.3.1 ; python_version >= "3.10" and python_version < "3.12"
22
+ gast==0.5.4 ; python_version >= "3.10" and python_version < "3.12"
23
+ google-auth-oauthlib==1.2.0 ; python_version >= "3.10" and python_version < "3.12"
24
+ google-auth==2.29.0 ; python_version >= "3.10" and python_version < "3.12"
25
+ google-pasta==0.2.0 ; python_version >= "3.10" and python_version < "3.12"
26
+ gradio-client==0.13.0 ; python_version >= "3.10" and python_version < "3.12"
27
+ gradio==4.22.0 ; python_version >= "3.10" and python_version < "3.12"
28
+ grpcio==1.62.1 ; python_version >= "3.10" and python_version < "3.12"
29
+ h11==0.14.0 ; python_version >= "3.10" and python_version < "3.12"
30
+ h5py==3.10.0 ; python_version >= "3.10" and python_version < "3.12"
31
+ httpcore==1.0.4 ; python_version >= "3.10" and python_version < "3.12"
32
+ httpx==0.27.0 ; python_version >= "3.10" and python_version < "3.12"
33
+ huggingface-hub==0.21.4 ; python_version >= "3.10" and python_version < "3.12"
34
+ idna==3.6 ; python_version >= "3.10" and python_version < "3.12"
35
+ imageio==2.34.0 ; python_version >= "3.10" and python_version < "3.12"
36
+ importlib-resources==6.4.0 ; python_version >= "3.10" and python_version < "3.12"
37
+ jinja2==3.1.3 ; python_version >= "3.10" and python_version < "3.12"
38
+ joblib==1.3.2 ; python_version >= "3.10" and python_version < "3.12"
39
+ jsonschema-specifications==2023.12.1 ; python_version >= "3.10" and python_version < "3.12"
40
+ jsonschema==4.21.1 ; python_version >= "3.10" and python_version < "3.12"
41
+ keras==2.15.0 ; python_version >= "3.10" and python_version < "3.12"
42
+ kiwisolver==1.4.5 ; python_version >= "3.10" and python_version < "3.12"
43
+ libclang==18.1.1 ; python_version >= "3.10" and python_version < "3.12"
44
+ markdown-it-py==3.0.0 ; python_version >= "3.10" and python_version < "3.12"
45
+ markdown==3.6 ; python_version >= "3.10" and python_version < "3.12"
46
+ markupsafe==2.1.5 ; python_version >= "3.10" and python_version < "3.12"
47
+ matplotlib==3.8.3 ; python_version >= "3.10" and python_version < "3.12"
48
+ mdurl==0.1.2 ; python_version >= "3.10" and python_version < "3.12"
49
+ ml-dtypes==0.2.0 ; python_version >= "3.10" and python_version < "3.12"
50
+ numpy==1.26.4 ; python_version < "3.12" and python_version >= "3.10"
51
+ oauthlib==3.2.2 ; python_version >= "3.10" and python_version < "3.12"
52
+ opt-einsum==3.3.0 ; python_version >= "3.10" and python_version < "3.12"
53
+ orjson==3.9.15 ; python_version >= "3.10" and python_version < "3.12"
54
+ packaging==24.0 ; python_version >= "3.10" and python_version < "3.12"
55
+ pandas==2.2.1 ; python_version >= "3.10" and python_version < "3.12"
56
+ pillow==10.2.0 ; python_version >= "3.10" and python_version < "3.12"
57
+ protobuf==4.25.3 ; python_version >= "3.10" and python_version < "3.12"
58
+ pyasn1-modules==0.3.0 ; python_version >= "3.10" and python_version < "3.12"
59
+ pyasn1==0.5.1 ; python_version >= "3.10" and python_version < "3.12"
60
+ pydantic-core==2.16.3 ; python_version >= "3.10" and python_version < "3.12"
61
+ pydantic==2.6.4 ; python_version >= "3.10" and python_version < "3.12"
62
+ pydub==0.25.1 ; python_version >= "3.10" and python_version < "3.12"
63
+ pygments==2.17.2 ; python_version >= "3.10" and python_version < "3.12"
64
+ pyparsing==3.1.2 ; python_version >= "3.10" and python_version < "3.12"
65
+ python-dateutil==2.9.0.post0 ; python_version >= "3.10" and python_version < "3.12"
66
+ python-multipart==0.0.9 ; python_version >= "3.10" and python_version < "3.12"
67
+ pytz==2024.1 ; python_version >= "3.10" and python_version < "3.12"
68
+ pyyaml==6.0.1 ; python_version >= "3.10" and python_version < "3.12"
69
+ referencing==0.34.0 ; python_version >= "3.10" and python_version < "3.12"
70
+ requests-oauthlib==1.4.0 ; python_version >= "3.10" and python_version < "3.12"
71
+ requests==2.31.0 ; python_version >= "3.10" and python_version < "3.12"
72
+ rich==13.7.1 ; python_version >= "3.10" and python_version < "3.12"
73
+ rpds-py==0.18.0 ; python_version >= "3.10" and python_version < "3.12"
74
+ rsa==4.9 ; python_version >= "3.10" and python_version < "3.12"
75
+ ruff==0.3.4 ; python_version >= "3.10" and python_version < "3.12"
76
+ scikit-learn==1.4.1.post1 ; python_version >= "3.10" and python_version < "3.12"
77
+ scipy==1.12.0 ; python_version >= "3.10" and python_version < "3.12"
78
+ semantic-version==2.10.0 ; python_version >= "3.10" and python_version < "3.12"
79
+ setuptools==69.2.0 ; python_version >= "3.10" and python_version < "3.12"
80
+ shellingham==1.5.4 ; python_version >= "3.10" and python_version < "3.12"
81
+ six==1.16.0 ; python_version >= "3.10" and python_version < "3.12"
82
+ sniffio==1.3.1 ; python_version >= "3.10" and python_version < "3.12"
83
+ starlette==0.36.3 ; python_version >= "3.10" and python_version < "3.12"
84
+ tensorboard-data-server==0.7.2 ; python_version >= "3.10" and python_version < "3.12"
85
+ tensorboard==2.15.2 ; python_version >= "3.10" and python_version < "3.12"
86
+ tensorflow-estimator==2.15.0 ; python_version >= "3.10" and python_version < "3.12"
87
+ tensorflow-io-gcs-filesystem==0.36.0 ; python_version >= "3.10" and python_version < "3.12"
88
+ tensorflow==2.15.0 ; python_version >= "3.10" and python_version < "3.12"
89
+ termcolor==2.4.0 ; python_version >= "3.10" and python_version < "3.12"
90
+ threadpoolctl==3.4.0 ; python_version >= "3.10" and python_version < "3.12"
91
+ tomlkit==0.12.0 ; python_version >= "3.10" and python_version < "3.12"
92
+ toolz==0.12.1 ; python_version >= "3.10" and python_version < "3.12"
93
+ tqdm==4.66.2 ; python_version >= "3.10" and python_version < "3.12"
94
+ typer[all]==0.9.0 ; python_version >= "3.10" and python_version < "3.12"
95
+ typing-extensions==4.10.0 ; python_version >= "3.10" and python_version < "3.12"
96
+ tzdata==2024.1 ; python_version >= "3.10" and python_version < "3.12"
97
+ urllib3==2.2.1 ; python_version >= "3.10" and python_version < "3.12"
98
+ uvicorn==0.29.0 ; python_version >= "3.10" and python_version < "3.12"
99
+ websockets==11.0.3 ; python_version >= "3.10" and python_version < "3.12"
100
+ werkzeug==3.0.1 ; python_version >= "3.10" and python_version < "3.12"
101
+ wheel==0.43.0 ; python_version >= "3.10" and python_version < "3.12"
102
+ wrapt==1.14.1 ; python_version >= "3.10" and python_version < "3.12"
rwthmaterials_dp800_network1_inclusion.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7005f07ed2108e18c38b3f0b547b7255215d61fe12cf69daed2337b6cc6c24f5
3
+ size 87724360
rwthmaterials_dp800_network2_damage.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a4c363089c7c2b04d4f36488720373a6b4b5eb136bac8a4e35c94358ff093bed
3
+ size 4048504
utils.py ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Collection of various utils
3
+ """
4
+
5
+ import numpy as np
6
+
7
+ import imageio.v3 as iio
8
+ from PIL import Image
9
+ # we may have very large images (e.g. panoramic SEM images), allow to read them w/o warnings
10
+ Image.MAX_IMAGE_PIXELS = 933120000
11
+
12
+ import matplotlib.pyplot as plt
13
+ import matplotlib.patches as patches
14
+ from matplotlib.lines import Line2D
15
+
16
+
17
+ import math
18
+
19
+
20
###
### load SEM images
###
def load_image(filename : str) -> np.ndarray :
    """Read an SEM image from disk as a float ('F' mode) grayscale array.

    Args:
        filename (str): full path and name of the image file to be loaded

    Returns:
        np.ndarray: file as numpy ndarray
    """
    return iio.imread(filename, mode='F')
35
+
36
+
37
+
38
###
### show SEM image with boxes in various colours around each damage site
###
def show_boxes(image : np.ndarray, damage_sites : dict, box_size = [250,250],
               save_image = False, image_path : str = None) :
    """Render the SEM image with a coloured rectangle around each damage site.

    Each damage-type label gets a fixed edge colour, a legend is placed
    outside the axes, and the annotated figure is optionally saved to
    *image_path*. The rendered figure is also returned as an RGB array.

    Args:
        image (np.ndarray): SEM image to be shown
        damage_sites (dict): python dictionary using the coordinates as key (x,y), and the label as value
        box_size (list, optional): size of the rectangle drawn around each centroid. Defaults to [250,250].
        save_image (bool, optional): save the image with the boxes or not. Defaults to False.
        image_path (str, optional) : Full path and name of the output file to be saved

    Returns:
        np.ndarray: RGB rendering of the annotated figure, shape (height, width, 3).
    """

    _, ax = plt.subplots(1)
    fig = plt.imshow(image,cmap='gray')
    # do not show axis ticks (indicating pixels)
    plt.xticks([])
    plt.yticks([])

    for key, label in damage_sites.items():

        position = list([key[0],key[1]])

        # define colours of the rectangles overlaid on the image per damage type
        match label:
            case 'Inclusion':
                edgecolor = 'b'
            case 'Interface' :
                edgecolor = 'g'
            case 'Martensite' :
                edgecolor = 'r'
            case 'Notch':
                edgecolor = 'y'
            case 'Shadowing' :
                edgecolor = 'm'
            case _:
                edgecolor = 'k'

        # NOTE(review): position[1]/position[0] are swapped here, which
        # suggests keys store (row, col) while Rectangle expects (x, y) in
        # plot coordinates — confirm against the clustering output.
        rectangle = patches.Rectangle((position[1]-box_size[1]/2., position[0]-box_size[0]/2),
                                      box_size[0],box_size[1],
                                      linewidth=1,edgecolor=edgecolor,facecolor='none')
        ax.add_patch(rectangle)

    # one legend entry per damage class, matching the rectangle colours above
    legend_elements = [Line2D([0], [0], color='b', lw=4, label='Inclusion'),
                       Line2D([0], [0], color='g', lw=4, label='Interface'),
                       Line2D([0], [0], color='r', lw=4, label='Martensite'),
                       Line2D([0], [0], color='y', lw=4, label='Notch'),
                       Line2D([0], [0], color='m', lw=4, label='Shadow'),
                       Line2D([0], [0], color='k', lw=4, label='Not Classified')
                       ]

    # place the legend outside the axes so it does not cover the image
    ax.legend(handles=legend_elements,bbox_to_anchor=(1.04, 1), loc="upper left")

    if save_image:
        plt.savefig(image_path,dpi=1200,bbox_inches='tight' )

    # render the current canvas and copy its pixel buffer into a numpy array
    # so the caller can display the annotated figure without re-reading the
    # saved file from disk
    canvas = plt.gca().figure.canvas
    canvas.draw()
    data = np.frombuffer(canvas.tostring_rgb(), dtype=np.uint8)
    image = data.reshape(canvas.get_width_height()[::-1] + (3,))

    plt.show()

    return image
108
+
109
+
110
###
### cut out small images from panorama, append colour information
###
def prepare_classifier_input(panorama : np.ndarray, centroids : list, window_size = [250,250]) -> list :
    """Create a list of smaller images from the SEM panoramic image.
    The neural networks expect images of a given size that are centered around a single damage site candidate.
    For each centroid (from the clustering step before), we cut out a smaller image from the panorama of the size
    expected by the classifier network.
    Since the networks expect colour images, we repeat the gray-scale image 3 times for a given candidate site.

    Args:
        panorama (np.ndarray): SEM input image (at least 3-dimensional; only channel 0 is read)
        centroids (list): list of centroids for the damage site candidates
        window_size (list, optional): Size of the image expected by the neural network later. Defaults to [250,250].

    Returns:
        list: List of "colour" images cut out from the SEM panorama, one per damage site candidate
    """

    panorama_shape = panorama.shape

    # list of the small images cut out from the panorama,
    # each of these is then fed into the classifier model
    images = []

    for centroid in centroids:
        # window corners, centered on the centroid
        x1 = int(math.floor(centroid[0] - window_size[0]/2))
        y1 = int(math.floor(centroid[1] - window_size[1]/2))
        x2 = int(math.floor(centroid[0] + window_size[0]/2))
        y2 = int(math.floor(centroid[1] + window_size[1]/2))

        ##
        ## Catch the cases in which the extract would go
        ## over the boundaries of the original image
        ## (the window is shifted inwards, keeping its size)
        ##
        if x1 < 0:
            x1 = 0
            x2 = window_size[0]
        if x2 >= panorama_shape[0]:
            x1 = panorama_shape[0] - window_size[0]
            x2 = panorama_shape[0]
        if y1 < 0:
            y1 = 0
            y2 = window_size[1]
        if y2 >= panorama_shape[1]:
            y1 = panorama_shape[1] - window_size[1]
            y2 = panorama_shape[1]

        # Create an empty container for the patch.
        # BUG FIX: rows span x (window_size[0]) and columns span y
        # (window_size[1]); the previous (window_size[1], window_size[0], 1)
        # order only worked for square windows and raised a broadcast error
        # otherwise.
        tmp_img = np.zeros((window_size[0], window_size[1], 1), dtype=float)

        # Copy over the patch of the panoramic image; we only read the first
        # channel, as the SEM data is gray-scale.
        tmp_img[:, :, 0] = panorama[x1:x2, y1:y2, 0]

        # rescale grayscale values from [0, 255] to [-1, 1]
        tmp_img = tmp_img*2./255. - 1.

        # The classifier expects colour images, i.e. 3 colour channels.
        # We "fake" this by repeating the same gray-scale plane 3 times.
        tmp_img_colour = np.repeat(tmp_img, 3, axis=2)

        images.append(tmp_img_colour)

    return images
180
+
181
+
182
+
183
+
184
+
185
+
186
+
187
+
188
+
189
+