OMGJ committed on
Commit
ea1f850
·
verified ·
1 Parent(s): 391ddda

Upload 17 files

Browse files
__pycache__/util.cpython-310.pyc ADDED
Binary file (1.34 kB). View file
 
app.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ from streamlit_option_menu import option_menu
3
+ import tensorflow as tf
4
+ #importation des librairies
5
+ #from tensorflow import keras
6
+ import base64
7
+ #import torch
8
+ from keras.models import load_model
9
+ from PIL import ImageOps, Image
10
+ import numpy as np
11
+ import pandas as pd
12
+ import matplotlib.pyplot as plt
13
+ #from util import classify
14
+
15
def classify(image, model, class_names):
    """
    Run the fraud-detection model on a single image.

    Parameters:
        image (PIL.Image.Image): The image to classify.
        model (tensorflow.keras.Model): Trained image-classification model.
        class_names (list): Class labels indexed like the model's output.

    Returns:
        A (class_name, confidence_score, index) tuple, where index is 0 when
        the score of the first class exceeds 0.95 and 1 otherwise.
    """
    # Crop/resize to the fixed 224x224 input size expected by the model.
    fitted = ImageOps.fit(image, (224, 224), Image.Resampling.LANCZOS)

    # Scale pixel values from [0, 255] into [-1, 1].
    scaled = np.asarray(fitted).astype(np.float32) / 127.5 - 1

    # Single-item batch; assumes a 3-channel (RGB) image — callers convert
    # with .convert('RGB') before calling.
    batch = np.empty((1, 224, 224, 3), dtype=np.float32)
    batch[0] = scaled

    prediction = model.predict(batch)
    # Hard threshold on the first class' score instead of argmax: only a
    # very confident "class 0" prediction is accepted as class 0.
    index = 0 if prediction[0][0] > 0.95 else 1

    return class_names[index], prediction[0][index], index
49
# Load the trained Keras model once at startup.
model = load_model('./models/model.h5')

# models/names.txt lines look like "<index> <label>" (e.g. "0 Non Fraud"),
# so split on the FIRST space only: the previous split(' ')[1] truncated
# multi-word labels ("Non Fraud" -> "Non") and the a[:-1] newline-strip
# corrupted the last label when the file had no trailing newline.
# The redundant f.close() inside the `with` block was removed.
with open('./models/names.txt', 'r') as f:
    class_names = [line.strip().split(' ', 1)[1] for line in f if line.strip()]
53
+ st.set_page_config(layout='wide')
54
+ st.markdown("""
55
+ <style>
56
+ .block-container {
57
+ padding-top: 2rem;
58
+ padding-bottom: 0rem;
59
+ padding-left: 1rem;
60
+ padding-right: 1rem;
61
+ }
62
+ </style>
63
+ """, unsafe_allow_html=True)
64
+
65
+
66
+
67
+ #st.header(f"LA QUALITE DE L'AIR A")
68
+ #st.title(f"NAIROBI,KENYA")
69
+ #st.header("Pollution de l'Air en NAIROBI")
70
+
71
# Top bar: logo on the left, navigation menu on the right.
header , menu = st.columns(2)
with header:
    # The committed asset is 'cif2.PNG' (uppercase extension, see repo file
    # list); the lowercase '.png' path failed on case-sensitive filesystems.
    st.image('static/image/cif2.PNG')

with menu:
    # Horizontal two-entry navigation; `selecte` drives the page branches below.
    selecte = option_menu(None, ["Home", "About"],
                          icons=['house', 'cloud-upload'],
                          menu_icon="cast", default_index=0, orientation="horizontal",
                          styles={
                              "container": {"padding": "0!important", "background-color": "#ffffff", "font-family": "Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif"},
                              "icon": {"color": "red", "font-size": "25px"},
                              "nav-link": {"font-size": "20px", "text-align": "left", "margin": "0px", "--hover-color": "#eee"},
                              "nav-link-selected": {"background-color": "#F9C949", "color": "white"},
                              "menu-title": {"color": "#424143"}
                          }
                          )
93
+ if selecte == "Home":
94
+ st.title(f"A propos de la Fraude à l'Assurance Automobile")
95
+ sect1_col1,sect1_col2, sect1_col3 = st.columns(3)
96
+ for col in (sect1_col1,sect1_col2, sect1_col3):
97
+ col.container()
98
+
99
+ with open('static/css/style.css') as f:
100
+ st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
101
+ with sect1_col2.container(height=360):
102
+
103
+ # st.markdown(
104
+ # """
105
+ # <style>
106
+ # [data-testid="stMetricValue"]{
107
+ # font-siz: 50px;
108
+ # color: #2FB56B;
109
+ # font-weight:bold;
110
+ # text-align:center;
111
+ # }
112
+ # [data-testid="metric-container"] {
113
+ # background-color: #EEEEEE;
114
+ # border: 2px solid #CCCCCC;
115
+ # padding: 5% 5% 5% 10%;
116
+ # border-radius: 5px;
117
+ # }
118
+ # </style>
119
+ # """,
120
+ # unsafe_allow_html=True,
121
+ # )
122
+ st.markdown("""
123
+ <style>
124
+ # div[data-testid="stMetric"] {
125
+ # background-color: rgba(187, 216, 158, 0.59);
126
+ # border: 1px solid rgba(28, 131, 225, 0.1);
127
+ padding:-10px;
128
+ # border-radius: 5px;
129
+ # color: rgb(30, 103, 119);
130
+ # overflow-wrap: break-word;
131
+ # font-weight:bold;
132
+
133
+
134
+ # }
135
+
136
+ [data-testid="stMetricValue"]{
137
+ font-size: 45px;
138
+ color: #ff3131;
139
+ font-weight:bold;
140
+ text-align:center;
141
+ margin-top:-33px;
142
+
143
+
144
+
145
+ }
146
+
147
+ /* breakline for metric text */
148
+ [data-testid="stMetricLabel"] {
149
+ word-wrap: break-word;
150
+ color: #ef8451;
151
+ font-size:40px;
152
+ font-weight:bold;
153
+
154
+ }
155
+
156
+
157
+ [data-testid ="stVerticalBlock"]{
158
+ #background-color: rgba(187, 216, 158, 0.59);
159
+ #border: 1px solid rgba(28, 131, 225, 0.1);
160
+ text-align:center;
161
+ }
162
+ [data-v-5af006b8]{
163
+ background-color:black;
164
+ }
165
+ </style>
166
+ """
167
+ , unsafe_allow_html=True)
168
+ st.write("Le secteur de l'assurance est confronté à un dilème:")
169
+ #st.write(" au KENYA par an ")
170
+ st.caption("Distinguer les demandes d'indmnisations authentiques des des demandes trompeuses")
171
+
172
+ with sect1_col1.container(height=360):
173
+ st.write("L'émergence de l'IA générative a contribué à: ",)
174
+ st.caption("l'augmentation des demandes d'indemnisations frauduleuses")
175
+
176
+ with sect1_col3.container(height=360):
177
+ script = """<div id= 'conte'></div>"""
178
+ st.subheader("Cout de la fraude à l'assurance ")
179
+ st.write("Le cout de la Fraud à l'assurance automobile est estimé à")
180
+ st.metric("", "Plus de 10% ")
181
+ st.write("de la somme totale des sinistres")
182
+
183
+
184
+ st.title(f"Vérifiez l'Authenticité des images de vos Clients")
185
+ st.markdown("Distinguez les images frauduleus des images non frauduleuses")
186
+ with st.container(height=400):
187
+ st.markdown(
188
+ """
189
+ <style>
190
+ .st-emotion-cache-g7r313 {
191
+ width: 700px;
192
+ margin-left:25%;
193
+ margin-rigth:25%;
194
+
195
+ }
196
+ .st-emotion-cache-1kyxreq{
197
+ flex-direction:column;
198
+ }
199
+ .st-emotion-cache-1v0mbdj{
200
+ align-items:center !important;
201
+ }
202
+ </style>
203
+ """, unsafe_allow_html=True
204
+ )
205
+ file = st.file_uploader("Choisissez une image",type=["png","jpg"])
206
+
207
+
208
+ if file is not None:
209
+ image = Image.open(file).convert('RGB')
210
+ st.image(image, use_column_width=True)
211
+
212
+
213
+ class_name, conf_score ,index = classify(image, model, class_names)
214
+
215
+
216
+ if index == 0:
217
+ st.image('static/image/not_fraud.JPG')
218
+ else:
219
+ st.image('static/image/Fraud-alert-2023.JPG')
220
+ st.write("### score: {}%".format(int(conf_score * 1000) / 10))
221
+
222
+ footer = st.container()
223
+ with footer:
224
+ st.markdown("---")
225
+ st.markdown(
226
+ """
227
+ <style>
228
+ p {
229
+ font-size: 16px;
230
+ text-align: center;
231
+ }
232
+ a {
233
+ text-decoration: none;
234
+ color: #00a;
235
+ font-weight: 600;
236
+ }
237
+ </style>
238
+ <p>
239
+ &copy; Designed by <a href="https://linkedin.com/in/mohamedyosef101">ONDOA Michelle & NGNINTEDEM Marlyne</a>.
240
+ </p>
241
+ """, unsafe_allow_html=True
242
+ )
243
+ if selecte == "About":
244
+
245
+ st.title("A propos du modèle")
246
+ st.markdown(
247
+ """
248
+ <style>
249
+ /*Les Titres*/
250
+ .st-emotion-cache-10trblm {
251
+ font-size: 1.5rem;
252
+ color: #424143;
253
+ font-weight: 300;
254
+ text-transform: uppercase;
255
+ line-height: 1.235;
256
+ font-family: Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif;
257
+ margin-left:0% !important;
258
+ margin-right: 5% !important;
259
+ }
260
+
261
+
262
+ [data-testid="stMetricValue"]{
263
+ font-size: 45px;
264
+ color: #ff3131;
265
+ font-weight:bold;
266
+ text-align:center;
267
+ margin-top:-33px;
268
+
269
+
270
+
271
+ }
272
+
273
+ /* breakline for metric text */
274
+ [data-testid="stMetricLabel"] {
275
+ word-wrap: break-word;
276
+ color: #ef8451;
277
+ font-size:40px;
278
+ font-weight:bold;
279
+
280
+ }
281
+ .st-emotion-cache-16idsys >p{
282
+ font-size:30px;
283
+ }
284
+
285
+ [data-testid ="stVerticalBlock"]{
286
+ #background-color: rgba(187, 216, 158, 0.59);
287
+ #border: 1px solid rgba(28, 131, 225, 0.1);
288
+ text-align:center;
289
+ }
290
+ [data-v-5af006b8]{
291
+ background-color:black;
292
+ }
293
+ .st-emotion-cache-1q7spjk{
294
+ font-family: Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif;
295
+ color: #FF3131;
296
+ font-size: 1.8rem;
297
+ font-weight: 300;
298
+ text-transform: uppercase;
299
+ line-height: 1.235;
300
+ margin-bottom:10px;
301
+
302
+ }
303
+ </style>
304
+ """, unsafe_allow_html=True
305
+ )
306
+
307
+ with st.container(height=1500):
308
+ st.title('Définition du problème')
309
+ st.write('Prédire si une image de voiture donnée est une demande d\'indemnisation frauduleuse ?')
310
+
311
+ st.title('Type de problème')
312
+
313
+ st.write('Problème de Classification ')
314
+
315
+ st.title('Problème Domaine')
316
+
317
+ st.write('Vision par ordinateur')
318
+
319
+ st.title('Analyse des données')
320
+ st.write('L\'examination des données a montré que les données d\'entraînement sont déséquilibrées. La différence entre la distribution des classes positives et négatives est TRÈS ÉNORME !' )
321
+ c1 , c2 = st.columns(2)
322
+ with c1:
323
+ st.metric("Classe Négative", "94%")
324
+ with c2:
325
+ st.metric("Classe Positive", "6%")
326
+
327
+ st.title("Modélisation")
328
+
329
+ st.caption('ResNet 18 (Pré-entraîné)')
330
+ st.write('Comme il s\'agit d\'un problème de vision par ordinateur, il était très clair et logique d\'essayer un réseau neuronal convolutif. Nous avons utilisé ResNet 18 avec les poids pré-entraînés sur l\'ensemble de données ImageNet .Nous avons remplacé la couche de sortie et la couche d\'entrée.Le model a donné un résultat suivants:')
331
+ col1, col2 =st.columns(2)
332
+ with col1:
333
+ st.image('./static/image/loss.png')
334
+ with col2:
335
+ st.image('./static/image/acc.png')
336
+ footer = st.container()
337
+ with footer:
338
+ st.markdown("---")
339
+ st.markdown(
340
+ """
341
+ <style>
342
+ p {
343
+ font-size: 16px;
344
+ text-align: center;
345
+ }
346
+ a {
347
+ text-decoration: none;
348
+ color: #00a;
349
+ font-weight: 600;
350
+ }
351
+ </style>
352
+ <p>
353
+ &copy; Designed by <a href="https://linkedin.com/in/mohamedyosef101">ONDOA Michelle & NGNINTEDEM Marlyne </a>.
354
+ </p>
355
+ """, unsafe_allow_html=True
356
+ )
models/model.h5 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:98d708e0813f0821e12c1efd3c4522c2263d7da8cf610b1ddd08456b66a3956d
3
+ size 2453432
models/names.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ 0 Non Fraud
2
+ 1 Fraud
models/notebook.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
static/css/style.css ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/* Containers */
.st-emotion-cache-6srzk2 {
    background-color: #f9ca49c0;
    border: 1px solid rgba(28, 131, 225, 0.1);
    text-align: center;
    font-family: Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif;
}

.menu .container-xxl[data-v-5af006b8] {
    background-color: transparent !important;
}

@font-face {
    font-family: "Anton";
    src: url(Anton-Regular.ttf);
}

/* Headings */
.st-emotion-cache-10trblm {
    font-size: 1.8rem;
    color: #424143;
    font-weight: 300;
    text-transform: uppercase;
    line-height: 1.235;
    font-family: Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif;
}

/* Section 2, element 2 */
.st-emotion-cache-1q7spjk {
    font-family: Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif;
    margin-left: 5% !important;
    margin-right: 5% !important;
    color: #FF3131;
    font-size: 1.8rem;
    font-weight: 300;
    text-transform: uppercase;
    line-height: 1.235;
}

/* Paragraphs */
p {
    /* Fix: the value was written as "424143" without the leading '#',
       making the declaration invalid, so browsers silently dropped it. */
    color: #424143;
    padding-left: 20%;
    padding-right: 20%;
    font-size: 20px;
}
static/image/Fraud-alert-2023.jpg ADDED
static/image/Sans titre.jpg ADDED
static/image/acc.png ADDED
static/image/alert.PNG ADDED
static/image/cif.PNG ADDED
static/image/cif1.PNG ADDED
static/image/cif2.PNG ADDED
static/image/insurancefraud.png ADDED
static/image/loss.png ADDED
static/image/not_fraud.jpg ADDED
util.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+
3
+ import streamlit as st
4
+ from PIL import ImageOps, Image
5
+ import numpy as np
6
+
7
+
8
def classify(image, model, class_names):
    """
    Classify a single image with the given model.

    Parameters:
        image (PIL.Image.Image): The image to classify.
        model (tensorflow.keras.Model): Trained image-classification model.
        class_names (list): Class labels indexed like the model's output.

    Returns:
        A (class_name, confidence_score) tuple for the chosen class.
    """
    # Crop/resize to the fixed 224x224 input the model was trained on.
    resized = ImageOps.fit(image, (224, 224), Image.Resampling.LANCZOS)

    # Map pixel values from [0, 255] to [-1, 1] as float32.
    normalized = np.asarray(resized).astype(np.float32) / 127.5 - 1

    # Wrap in a one-element batch; assumes a 3-channel (RGB) image.
    batch = np.empty(shape=(1, 224, 224, 3), dtype=np.float32)
    batch[0] = normalized

    scores = model.predict(batch)
    # Hard 0.95 threshold on the first class' score rather than argmax.
    idx = 0 if scores[0][0] > 0.95 else 1

    return class_names[idx], scores[0][idx]