# Streamlit app: car-insurance fraud detection from claim images.
import streamlit as st
from streamlit_option_menu import option_menu
import tensorflow as tf
#importation des librairies
#from tensorflow import keras
import base64
#import torch
from keras.models import load_model
from PIL import ImageOps, Image
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#from util import classify
def classify(image, model, class_names, threshold=0.95):
    """Classify a PIL image with a two-class image-classification model.

    Parameters:
        image (PIL.Image.Image): The image to classify.
        model (tensorflow.keras.Model): Trained classifier whose output row
            is indexed like ``class_names`` (at least 2 entries expected).
        class_names (list[str]): Class names aligned with the model output.
        threshold (float): Minimum score on class 0 required to predict
            class 0; anything lower falls back to class 1. Defaults to 0.95
            (the previously hard-coded value).

    Returns:
        tuple: ``(class_name, confidence_score, index)`` where ``index`` is
        the position of the predicted class in ``class_names``.
    """
    # Resize/crop to the fixed 224x224 input the model was trained on.
    image = ImageOps.fit(image, (224, 224), Image.Resampling.LANCZOS)
    image_array = np.asarray(image)
    # Scale pixel values from [0, 255] to [-1, 1].
    normalized_image_array = (image_array.astype(np.float32) / 127.5) - 1
    # Build a single-image batch of shape (1, 224, 224, 3).
    data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32)
    data[0] = normalized_image_array
    prediction = model.predict(data)
    # Predict class 0 only when the model is highly confident in it;
    # otherwise default to class 1 (asymmetric on purpose — false
    # "not fraud" calls are costlier than false "fraud" alerts).
    index = 0 if prediction[0][0] > threshold else 1
    class_name = class_names[index]
    confidence_score = prediction[0][index]
    return class_name, confidence_score, index
# Load the trained classifier and the class-name lookup file.
model = load_model('./models/model.h5')
# names.txt lines look like "<id> <name>"; keep only the name token.
# strip() is safer than the previous a[:-1]: it also handles a missing
# trailing newline on the last line and Windows '\r\n' endings.
# (The redundant f.close() after the with-block was removed.)
with open('./models/names.txt', 'r') as f:
    class_names = [line.strip().split(' ')[1] for line in f]
# Use the full browser width and tighten Streamlit's default page padding.
st.set_page_config(layout='wide')
st.markdown("""
<style>
.block-container {
padding-top: 2rem;
padding-bottom: 0rem;
padding-left: 1rem;
padding-right: 1rem;
}
</style>
""", unsafe_allow_html=True)
# Two-column header row: left column reserved for a logo (currently
# disabled), right column holds the horizontal navigation menu.
header , menu = st.columns(2)
#with header:
#st.image('static/image/cif1.png')
with menu:
    # option_menu(menu_title=None,
    # options=['Visualisation','Prédiction'],
    # icons=["house","book",'envelope'],
    # default_index=0,
    # orientation="horizontal"
    # )
    # `selecte` holds the chosen page name ("Home" or "About") and drives
    # which section of the script renders below.
    selecte=option_menu(None, ["Home", "About"],
        icons=['house', 'cloud-upload'],
        menu_icon="cast", default_index=0, orientation="horizontal",
        styles={
            "container": {"padding": "0!important", "background-color": "#ffffff","font-family": "Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif"},
            "icon": {"color": "red", "font-size": "25px" },
            "nav-link": {"font-size": "20px", "text-align": "left", "margin":"0px", "--hover-color": "#eee"},
            "nav-link-selected": {"background-color": "#F9C949","color":"white"},
            "menu-title":{"color":"#424143"}
        }
    )
if selecte == "Home":
    # ---------- "Home" page: context cards + image fraud checker ----------
    st.title("A propos de la Fraude à l'Assurance Automobile")
    # Three equal-width info cards.
    sect1_col1, sect1_col2, sect1_col3 = st.columns(3)
    for col in (sect1_col1, sect1_col2, sect1_col3):
        col.container()
    # Inject the app's external stylesheet.
    with open('./static/css/style.css') as f:
        st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)
    with sect1_col2.container(height=360):
        # Metric styling: large red values, orange labels, centered text.
        # (Previous version contained broken pseudo-commented CSS lines
        # starting with '#' and a stray invalid 'padding:-10px;' — removed.)
        st.markdown("""
        <style>
        [data-testid="stMetricValue"]{
            font-size: 45px;
            color: #ff3131;
            font-weight:bold;
            text-align:center;
            margin-top:-33px;
        }
        /* breakline for metric text */
        [data-testid="stMetricLabel"] {
            word-wrap: break-word;
            color: #ef8451;
            font-size:40px;
            font-weight:bold;
        }
        [data-testid="stVerticalBlock"]{
            text-align:center;
        }
        [data-v-5af006b8]{
            background-color:black;
        }
        </style>
        """, unsafe_allow_html=True)
        st.write("Le secteur de l'assurance est confronté à un dilemme:")
        st.caption("Distinguer les demandes d'indemnisations authentiques des demandes trompeuses")
    with sect1_col1.container(height=360):
        st.write("L'émergence de l'IA générative a contribué à: ")
        st.caption("l'augmentation des demandes d'indemnisations frauduleuses")
    with sect1_col3.container(height=360):
        st.subheader("Coût de la fraude à l'assurance")
        st.write("Le coût de la fraude à l'assurance automobile est estimé à")
        st.metric("", "Plus de 10% ")
        st.write("de la somme totale des sinistres")
    st.title("Vérifiez l'Authenticité des images de vos Clients")
    st.markdown("Distinguez les images frauduleuses des images non frauduleuses")
    with st.container(height=400):
        # Center the uploader/preview column. NOTE(review): these selectors
        # target generated emotion-cache class names and may break on
        # Streamlit upgrades. ('margin-rigth' typo fixed to 'margin-right'.)
        st.markdown(
            """
            <style>
            .st-emotion-cache-g7r313 {
                width: 700px;
                margin-left:25%;
                margin-right:25%;
            }
            .st-emotion-cache-1kyxreq{
                flex-direction:column;
            }
            .st-emotion-cache-1v0mbdj{
                align-items:center !important;
            }
            </style>
            """, unsafe_allow_html=True
        )
        file = st.file_uploader("Choisissez une image", type=["png", "jpg"])
        if file is not None:
            # Force RGB so the model always receives 3 channels.
            image = Image.open(file).convert('RGB')
            st.image(image, use_column_width=True)
            class_name, conf_score, index = classify(image, model, class_names)
            # index 0 -> authentic claim image, index 1 -> suspected fraud.
            if index == 0:
                st.image('static/image/not_fraud.JPG')
            else:
                st.image('static/image/Fraud-alert-2023.JPG')
            # Confidence shown as a percentage with one decimal place.
            st.write("### score: {}%".format(int(conf_score * 1000) / 10))
    # Page footer with author credits.
    footer = st.container()
    with footer:
        st.markdown("---")
        st.markdown(
            """
            <style>
            p {
                font-size: 16px;
                text-align: center;
            }
            a {
                text-decoration: none;
                color: #00a;
                font-weight: 600;
            }
            </style>
            <p>
            © Designed by <a href="#">ONDOA Michelle & NGNINTEDEM Marlyne</a>.
            </p>
            """, unsafe_allow_html=True
        )
if selecte == "About":
    # ---------- "About" page: model description and training results ----------
    st.title("A propos du modèle")
    # Page-wide styling for titles and metrics. NOTE(review): the
    # .st-emotion-cache-* selectors target generated class names and may
    # break on Streamlit upgrades. Broken '#'-prefixed pseudo-comment CSS
    # lines from the previous version were removed.
    st.markdown(
        """
        <style>
        /* Titles */
        .st-emotion-cache-10trblm {
            font-size: 1.5rem;
            color: #424143;
            font-weight: 300;
            text-transform: uppercase;
            line-height: 1.235;
            font-family: Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif;
            margin-left:0% !important;
            margin-right: 5% !important;
        }
        [data-testid="stMetricValue"]{
            font-size: 45px;
            color: #ff3131;
            font-weight:bold;
            text-align:center;
            margin-top:-33px;
        }
        /* breakline for metric text */
        [data-testid="stMetricLabel"] {
            word-wrap: break-word;
            color: #ef8451;
            font-size:40px;
            font-weight:bold;
        }
        .st-emotion-cache-16idsys >p{
            font-size:30px;
        }
        [data-testid="stVerticalBlock"]{
            text-align:center;
        }
        [data-v-5af006b8]{
            background-color:black;
        }
        .st-emotion-cache-1q7spjk{
            font-family: Impact, Haettenschweiler, 'Arial Narrow Bold', sans-serif;
            color: #FF3131;
            font-size: 1.8rem;
            font-weight: 300;
            text-transform: uppercase;
            line-height: 1.235;
            margin-bottom:10px;
        }
        </style>
        """, unsafe_allow_html=True
    )
    with st.container(height=1500):
        st.title('Définition du problème')
        st.write("Prédire si une image de voiture donnée est une demande d'indemnisation frauduleuse ?")
        st.title('Type de problème')
        st.write('Problème de Classification ')
        st.title('Problème Domaine')
        st.write('Vision par ordinateur')
        st.title('Analyse des données')
        st.write("L'examen des données a montré que les données d'entraînement sont déséquilibrées. La différence entre la distribution des classes positives et négatives est TRÈS ÉNORME !")
        # Class-distribution metrics shown side by side.
        c1, c2 = st.columns(2)
        with c1:
            st.metric("Classe Négative", "94%")
        with c2:
            st.metric("Classe Positive", "6%")
        st.title("Modélisation")
        st.caption('ResNet 18 (Pré-entraîné)')
        st.write("Comme il s'agit d'un problème de vision par ordinateur, il était très clair et logique d'essayer un réseau neuronal convolutif. Nous avons utilisé ResNet 18 avec les poids pré-entraînés sur l'ensemble de données ImageNet. Nous avons remplacé la couche de sortie et la couche d'entrée. Le modèle a donné les résultats suivants :")
        # Training curves: loss on the left, accuracy on the right.
        col1, col2 = st.columns(2)
        with col1:
            st.image('static/image/loss.png')
        with col2:
            st.image('static/image/acc.png')
    # Page footer with author credits.
    footer = st.container()
    with footer:
        st.markdown("---")
        st.markdown(
            """
            <style>
            p {
                font-size: 16px;
                text-align: center;
            }
            a {
                text-decoration: none;
                color: #00a;
                font-weight: 600;
            }
            </style>
            <p>
            © Designed by <a href="#">ONDOA Michelle & NGNINTEDEM Marlyne </a>.
            </p>
            """, unsafe_allow_html=True
        )